diff --git a/libnetwork/Godeps/Godeps.json b/libnetwork/Godeps/Godeps.json
index 4e8654b474..7d2323ecda 100644
--- a/libnetwork/Godeps/Godeps.json
+++ b/libnetwork/Godeps/Godeps.json
@@ -1,6 +1,7 @@
 {
 	"ImportPath": "github.com/docker/libnetwork",
-	"GoVersion": "go1.4.2",
+	"GoVersion": "go1.5",
+	"GodepVersion": "v62",
 	"Packages": [
 		"./..."
 	],
@@ -20,6 +21,7 @@
 	},
 	{
 		"ImportPath": "github.com/Microsoft/go-winio",
+		"Comment": "v0.1.0",
 		"Rev": "8f9387ea7efabb228a981b9c381142be7667967f"
 	},
 	{
@@ -41,8 +43,8 @@
 	},
 	{
 		"ImportPath": "github.com/boltdb/bolt",
-		"Comment": "v1.0-117-g0f053fa",
-		"Rev": "0f053fabc06119583d61937a0a06ef0ba0f1b301"
+		"Comment": "v1.2.0",
+		"Rev": "c6ba97b89e0454fec9aa92e1d33a4e2c5fc1f631"
 	},
 	{
 		"ImportPath": "github.com/codegangsta/cli",
@@ -81,108 +83,108 @@
 	},
 	{
 		"ImportPath": "github.com/docker/docker/opts",
-		"Comment": "v1.4.1-11287-geaf138a",
-		"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
+		"Comment": "v1.4.1-11716-g24076ed",
+		"Rev": "24076ed4d9c82d387029b8a65e21873db5676f6a"
 	},
 	{
 		"ImportPath": "github.com/docker/docker/pkg/discovery",
-		"Comment": "v1.4.1-11287-geaf138a",
-		"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
+		"Comment": "v1.4.1-11716-g24076ed",
+		"Rev": "24076ed4d9c82d387029b8a65e21873db5676f6a"
 	},
 	{
 		"ImportPath": "github.com/docker/docker/pkg/discovery/kv",
-		"Comment": "v1.4.1-11287-geaf138a",
-		"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
+		"Comment": "v1.4.1-11716-g24076ed",
+		"Rev": "24076ed4d9c82d387029b8a65e21873db5676f6a"
 	},
 	{
 		"ImportPath": "github.com/docker/docker/pkg/homedir",
-		"Comment": "v1.4.1-11287-geaf138a",
-		"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
+		"Comment": "v1.4.1-11716-g24076ed",
+		"Rev": "24076ed4d9c82d387029b8a65e21873db5676f6a"
 	},
 	{
 		"ImportPath": "github.com/docker/docker/pkg/ioutils",
-		"Comment": "v1.4.1-11287-geaf138a",
-		"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
+		"Comment": "v1.4.1-11716-g24076ed",
+		"Rev": "24076ed4d9c82d387029b8a65e21873db5676f6a"
 	},
 	{
 		"ImportPath": "github.com/docker/docker/pkg/longpath",
-		"Comment": "v1.4.1-11287-geaf138a",
-		"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
+		"Comment": "v1.4.1-11716-g24076ed",
+		"Rev": "24076ed4d9c82d387029b8a65e21873db5676f6a"
 	},
 	{
 		"ImportPath": "github.com/docker/docker/pkg/mflag",
-		"Comment": "v1.4.1-11287-geaf138a",
-		"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
+		"Comment": "v1.4.1-11716-g24076ed",
+		"Rev": "24076ed4d9c82d387029b8a65e21873db5676f6a"
 	},
 	{
 		"ImportPath": "github.com/docker/docker/pkg/mount",
-		"Comment": "v1.4.1-11287-geaf138a",
-		"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
+		"Comment": "v1.4.1-11716-g24076ed",
+		"Rev": "24076ed4d9c82d387029b8a65e21873db5676f6a"
 	},
 	{
 		"ImportPath": "github.com/docker/docker/pkg/parsers/kernel",
-		"Comment": "v1.4.1-11287-geaf138a",
-		"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
+		"Comment": "v1.4.1-11716-g24076ed",
+		"Rev": "24076ed4d9c82d387029b8a65e21873db5676f6a"
 	},
 	{
 		"ImportPath": "github.com/docker/docker/pkg/plugins",
-		"Comment": "v1.4.1-11287-geaf138a",
-		"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
+		"Comment": "v1.4.1-11716-g24076ed",
+		"Rev": "24076ed4d9c82d387029b8a65e21873db5676f6a"
 	},
 	{
 		"ImportPath": "github.com/docker/docker/pkg/plugins/transport",
-		"Comment": "v1.4.1-11287-geaf138a",
-		"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
+		"Comment": "v1.4.1-11716-g24076ed",
+		"Rev": "24076ed4d9c82d387029b8a65e21873db5676f6a"
 	},
 	{
 		"ImportPath": "github.com/docker/docker/pkg/proxy",
-		"Comment": "v1.4.1-11287-geaf138a",
-		"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
+		"Comment": "v1.4.1-11716-g24076ed",
+		"Rev": "24076ed4d9c82d387029b8a65e21873db5676f6a"
 	},
 	{
 		"ImportPath": "github.com/docker/docker/pkg/random",
-		"Comment": "v1.4.1-11287-geaf138a",
-		"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
+		"Comment": "v1.4.1-11716-g24076ed",
+		"Rev": "24076ed4d9c82d387029b8a65e21873db5676f6a"
 	},
 	{
 		"ImportPath": "github.com/docker/docker/pkg/reexec",
-		"Comment": "v1.4.1-11287-geaf138a",
-		"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
+		"Comment": "v1.4.1-11716-g24076ed",
+		"Rev": "24076ed4d9c82d387029b8a65e21873db5676f6a"
 	},
 	{
 		"ImportPath": "github.com/docker/docker/pkg/signal",
-		"Comment": "v1.4.1-11287-geaf138a",
-		"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
+		"Comment": "v1.4.1-11716-g24076ed",
+		"Rev": "24076ed4d9c82d387029b8a65e21873db5676f6a"
 	},
 	{
 		"ImportPath": "github.com/docker/docker/pkg/stringid",
-		"Comment": "v1.4.1-11287-geaf138a",
-		"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
+		"Comment": "v1.4.1-11716-g24076ed",
+		"Rev": "24076ed4d9c82d387029b8a65e21873db5676f6a"
 	},
 	{
 		"ImportPath": "github.com/docker/docker/pkg/symlink",
-		"Comment": "v1.4.1-11287-geaf138a",
-		"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
+		"Comment": "v1.4.1-11716-g24076ed",
+		"Rev": "24076ed4d9c82d387029b8a65e21873db5676f6a"
 	},
 	{
 		"ImportPath": "github.com/docker/docker/pkg/system",
-		"Comment": "v1.4.1-11287-geaf138a",
-		"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
+		"Comment": "v1.4.1-11716-g24076ed",
+		"Rev": "24076ed4d9c82d387029b8a65e21873db5676f6a"
 	},
 	{
 		"ImportPath": "github.com/docker/docker/pkg/term",
-		"Comment": "v1.4.1-11287-geaf138a",
-		"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
+		"Comment": "v1.4.1-11716-g24076ed",
+		"Rev": "24076ed4d9c82d387029b8a65e21873db5676f6a"
 	},
 	{
 		"ImportPath": "github.com/docker/docker/pkg/term/windows",
-		"Comment": "v1.4.1-11287-geaf138a",
-		"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
+		"Comment": "v1.4.1-11716-g24076ed",
+		"Rev": "24076ed4d9c82d387029b8a65e21873db5676f6a"
 	},
 	{
 		"ImportPath": "github.com/docker/docker/pkg/tlsconfig",
-		"Comment": "v1.4.1-11287-geaf138a",
-		"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
+		"Comment": "v1.4.1-11716-g24076ed",
+		"Rev": "24076ed4d9c82d387029b8a65e21873db5676f6a"
 	},
 	{
 		"ImportPath": "github.com/docker/go-connections/sockets",
@@ -235,8 +237,8 @@
 	},
 	{
 		"ImportPath": "github.com/godbus/dbus",
-		"Comment": "v3",
-		"Rev": "c7fdd8b5cd55e87b4e1f4e372cdb1db61dd6c66f"
+		"Comment": "v4.0.0",
+		"Rev": "5f6efc7ef2759c81b7ba876593971bfce311eab3"
 	},
 	{
 		"ImportPath": "github.com/golang/protobuf/proto",
diff --git a/libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING b/libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING
deleted file mode 100644
index 5a8e332545..0000000000
--- a/libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING
+++ /dev/null
@@ -1,14 +0,0 @@
-            DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
-                    Version 2, December 2004
-
- Copyright (C) 2004 Sam Hocevar
-
- Everyone is permitted to copy and distribute verbatim or modified
- copies of this license document, and changing it is allowed as long
- as the name is changed.
-
-            DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
-   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
- 0. You just DO WHAT THE FUCK YOU WANT TO.
- diff --git a/libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/README.md b/libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/README.md deleted file mode 100644 index 24421eb703..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# Implements the TOML test suite interface - -This is an implementation of the interface expected by -[toml-test](https://github.com/BurntSushi/toml-test) for my -[toml parser written in Go](https://github.com/BurntSushi/toml). -In particular, it maps TOML data on `stdin` to a JSON format on `stdout`. - - -Compatible with TOML version -[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md) - -Compatible with `toml-test` version -[v0.2.0](https://github.com/BurntSushi/toml-test/tree/v0.2.0) - diff --git a/libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/main.go b/libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/main.go deleted file mode 100644 index 14e7557005..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/main.go +++ /dev/null @@ -1,90 +0,0 @@ -// Command toml-test-decoder satisfies the toml-test interface for testing -// TOML decoders. Namely, it accepts TOML on stdin and outputs JSON on stdout. -package main - -import ( - "encoding/json" - "flag" - "fmt" - "log" - "os" - "path" - "time" - - "github.com/BurntSushi/toml" -) - -func init() { - log.SetFlags(0) - - flag.Usage = usage - flag.Parse() -} - -func usage() { - log.Printf("Usage: %s < toml-file\n", path.Base(os.Args[0])) - flag.PrintDefaults() - - os.Exit(1) -} - -func main() { - if flag.NArg() != 0 { - flag.Usage() - } - - var tmp interface{} - if _, err := toml.DecodeReader(os.Stdin, &tmp); err != nil { - log.Fatalf("Error decoding TOML: %s", err) - } - - typedTmp := translate(tmp) - if err := json.NewEncoder(os.Stdout).Encode(typedTmp); err != nil { - log.Fatalf("Error encoding JSON: %s", err) - } -} - -func translate(tomlData interface{}) interface{} { - switch orig := tomlData.(type) { - case map[string]interface{}: - typed := make(map[string]interface{}, len(orig)) - for k, v := range orig { - typed[k] = translate(v) - } - return typed - case []map[string]interface{}: - typed := make([]map[string]interface{}, len(orig)) - for i, v := range orig { - typed[i] = translate(v).(map[string]interface{}) - } - return typed - case []interface{}: - typed := make([]interface{}, len(orig)) - for i, v := range orig { - typed[i] = translate(v) - } - - // We don't really need to tag arrays, but let's be future proof. - // (If TOML ever supports tuples, we'll need this.) 
- return tag("array", typed) - case time.Time: - return tag("datetime", orig.Format("2006-01-02T15:04:05Z")) - case bool: - return tag("bool", fmt.Sprintf("%v", orig)) - case int64: - return tag("integer", fmt.Sprintf("%d", orig)) - case float64: - return tag("float", fmt.Sprintf("%v", orig)) - case string: - return tag("string", orig) - } - - panic(fmt.Sprintf("Unknown type: %T", tomlData)) -} - -func tag(typeName string, data interface{}) map[string]interface{} { - return map[string]interface{}{ - "type": typeName, - "value": data, - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING b/libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING deleted file mode 100644 index 5a8e332545..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING +++ /dev/null @@ -1,14 +0,0 @@ - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - Version 2, December 2004 - - Copyright (C) 2004 Sam Hocevar - - Everyone is permitted to copy and distribute verbatim or modified - copies of this license document, and changing it is allowed as long - as the name is changed. - - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. You just DO WHAT THE FUCK YOU WANT TO. - diff --git a/libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/README.md b/libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/README.md deleted file mode 100644 index 45a603f298..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# Implements the TOML test suite interface for TOML encoders - -This is an implementation of the interface expected by -[toml-test](https://github.com/BurntSushi/toml-test) for the -[TOML encoder](https://github.com/BurntSushi/toml). -In particular, it maps JSON data on `stdin` to a TOML format on `stdout`. - - -Compatible with TOML version -[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md) - -Compatible with `toml-test` version -[v0.2.0](https://github.com/BurntSushi/toml-test/tree/v0.2.0) - diff --git a/libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/main.go b/libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/main.go deleted file mode 100644 index 092cc68449..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/main.go +++ /dev/null @@ -1,131 +0,0 @@ -// Command toml-test-encoder satisfies the toml-test interface for testing -// TOML encoders. Namely, it accepts JSON on stdin and outputs TOML on stdout. 
-package main - -import ( - "encoding/json" - "flag" - "log" - "os" - "path" - "strconv" - "time" - - "github.com/BurntSushi/toml" -) - -func init() { - log.SetFlags(0) - - flag.Usage = usage - flag.Parse() -} - -func usage() { - log.Printf("Usage: %s < json-file\n", path.Base(os.Args[0])) - flag.PrintDefaults() - - os.Exit(1) -} - -func main() { - if flag.NArg() != 0 { - flag.Usage() - } - - var tmp interface{} - if err := json.NewDecoder(os.Stdin).Decode(&tmp); err != nil { - log.Fatalf("Error decoding JSON: %s", err) - } - - tomlData := translate(tmp) - if err := toml.NewEncoder(os.Stdout).Encode(tomlData); err != nil { - log.Fatalf("Error encoding TOML: %s", err) - } -} - -func translate(typedJson interface{}) interface{} { - switch v := typedJson.(type) { - case map[string]interface{}: - if len(v) == 2 && in("type", v) && in("value", v) { - return untag(v) - } - m := make(map[string]interface{}, len(v)) - for k, v2 := range v { - m[k] = translate(v2) - } - return m - case []interface{}: - tabArray := make([]map[string]interface{}, len(v)) - for i := range v { - if m, ok := translate(v[i]).(map[string]interface{}); ok { - tabArray[i] = m - } else { - log.Fatalf("JSON arrays may only contain objects. This " + - "corresponds to only tables being allowed in " + - "TOML table arrays.") - } - } - return tabArray - } - log.Fatalf("Unrecognized JSON format '%T'.", typedJson) - panic("unreachable") -} - -func untag(typed map[string]interface{}) interface{} { - t := typed["type"].(string) - v := typed["value"] - switch t { - case "string": - return v.(string) - case "integer": - v := v.(string) - n, err := strconv.Atoi(v) - if err != nil { - log.Fatalf("Could not parse '%s' as integer: %s", v, err) - } - return n - case "float": - v := v.(string) - f, err := strconv.ParseFloat(v, 64) - if err != nil { - log.Fatalf("Could not parse '%s' as float64: %s", v, err) - } - return f - case "datetime": - v := v.(string) - t, err := time.Parse("2006-01-02T15:04:05Z", v) - if err != nil { - log.Fatalf("Could not parse '%s' as a datetime: %s", v, err) - } - return t - case "bool": - v := v.(string) - switch v { - case "true": - return true - case "false": - return false - } - log.Fatalf("Could not parse '%s' as a boolean.", v) - case "array": - v := v.([]interface{}) - array := make([]interface{}, len(v)) - for i := range v { - if m, ok := v[i].(map[string]interface{}); ok { - array[i] = untag(m) - } else { - log.Fatalf("Arrays may only contain other arrays or "+ - "primitive values, but found a '%T'.", m) - } - } - return array - } - log.Fatalf("Unrecognized tag type '%s'.", t) - panic("unreachable") -} - -func in(key string, m map[string]interface{}) bool { - _, ok := m[key] - return ok -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/COPYING b/libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/COPYING deleted file mode 100644 index 5a8e332545..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/COPYING +++ /dev/null @@ -1,14 +0,0 @@ - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - Version 2, December 2004 - - Copyright (C) 2004 Sam Hocevar - - Everyone is permitted to copy and distribute verbatim or modified - copies of this license document, and changing it is allowed as long - as the name is changed. - - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. You just DO WHAT THE FUCK YOU WANT TO. 
- diff --git a/libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/README.md b/libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/README.md deleted file mode 100644 index 5df0dc32bb..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/README.md +++ /dev/null @@ -1,22 +0,0 @@ -# TOML Validator - -If Go is installed, it's simple to try it out: - -```bash -go get github.com/BurntSushi/toml/cmd/tomlv -tomlv some-toml-file.toml -``` - -You can see the types of every key in a TOML file with: - -```bash -tomlv -types some-toml-file.toml -``` - -At the moment, only one error message is reported at a time. Error messages -include line numbers. No output means that the files given are valid TOML, or -there is a bug in `tomlv`. - -Compatible with TOML version -[v0.1.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.1.0.md) - diff --git a/libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/main.go b/libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/main.go deleted file mode 100644 index c7d689a7e9..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/main.go +++ /dev/null @@ -1,61 +0,0 @@ -// Command tomlv validates TOML documents and prints each key's type. -package main - -import ( - "flag" - "fmt" - "log" - "os" - "path" - "strings" - "text/tabwriter" - - "github.com/BurntSushi/toml" -) - -var ( - flagTypes = false -) - -func init() { - log.SetFlags(0) - - flag.BoolVar(&flagTypes, "types", flagTypes, - "When set, the types of every defined key will be shown.") - - flag.Usage = usage - flag.Parse() -} - -func usage() { - log.Printf("Usage: %s toml-file [ toml-file ... ]\n", - path.Base(os.Args[0])) - flag.PrintDefaults() - - os.Exit(1) -} - -func main() { - if flag.NArg() < 1 { - flag.Usage() - } - for _, f := range flag.Args() { - var tmp interface{} - md, err := toml.DecodeFile(f, &tmp) - if err != nil { - log.Fatalf("Error in '%s': %s", f, err) - } - if flagTypes { - printTypes(md) - } - } -} - -func printTypes(md toml.MetaData) { - tabw := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) - for _, key := range md.Keys() { - fmt.Fprintf(tabw, "%s%s\t%s\n", - strings.Repeat(" ", len(key)-1), key, md.Type(key...)) - } - tabw.Flush() -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/decode_test.go b/libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/decode_test.go deleted file mode 100644 index 3805931f2d..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/decode_test.go +++ /dev/null @@ -1,950 +0,0 @@ -package toml - -import ( - "fmt" - "log" - "reflect" - "testing" - "time" -) - -func init() { - log.SetFlags(0) -} - -func TestDecodeSimple(t *testing.T) { - var testSimple = ` -age = 250 -andrew = "gallant" -kait = "brady" -now = 1987-07-05T05:45:00Z -yesOrNo = true -pi = 3.14 -colors = [ - ["red", "green", "blue"], - ["cyan", "magenta", "yellow", "black"], -] - -[My.Cats] -plato = "cat 1" -cauchy = "cat 2" -` - - type cats struct { - Plato string - Cauchy string - } - type simple struct { - Age int - Colors [][]string - Pi float64 - YesOrNo bool - Now time.Time - Andrew string - Kait string - My map[string]cats - } - - var val simple - _, err := Decode(testSimple, &val) - if err != nil { - t.Fatal(err) - } - - now, err := time.Parse("2006-01-02T15:04:05", "1987-07-05T05:45:00") - if err != nil { - panic(err) - } - var answer = simple{ - Age: 250, - Andrew: "gallant", - Kait: "brady", - 
Now: now, - YesOrNo: true, - Pi: 3.14, - Colors: [][]string{ - {"red", "green", "blue"}, - {"cyan", "magenta", "yellow", "black"}, - }, - My: map[string]cats{ - "Cats": cats{Plato: "cat 1", Cauchy: "cat 2"}, - }, - } - if !reflect.DeepEqual(val, answer) { - t.Fatalf("Expected\n-----\n%#v\n-----\nbut got\n-----\n%#v\n", - answer, val) - } -} - -func TestDecodeEmbedded(t *testing.T) { - type Dog struct{ Name string } - type Age int - - tests := map[string]struct { - input string - decodeInto interface{} - wantDecoded interface{} - }{ - "embedded struct": { - input: `Name = "milton"`, - decodeInto: &struct{ Dog }{}, - wantDecoded: &struct{ Dog }{Dog{"milton"}}, - }, - "embedded non-nil pointer to struct": { - input: `Name = "milton"`, - decodeInto: &struct{ *Dog }{}, - wantDecoded: &struct{ *Dog }{&Dog{"milton"}}, - }, - "embedded nil pointer to struct": { - input: ``, - decodeInto: &struct{ *Dog }{}, - wantDecoded: &struct{ *Dog }{nil}, - }, - "embedded int": { - input: `Age = -5`, - decodeInto: &struct{ Age }{}, - wantDecoded: &struct{ Age }{-5}, - }, - } - - for label, test := range tests { - _, err := Decode(test.input, test.decodeInto) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(test.wantDecoded, test.decodeInto) { - t.Errorf("%s: want decoded == %+v, got %+v", - label, test.wantDecoded, test.decodeInto) - } - } -} - -func TestTableArrays(t *testing.T) { - var tomlTableArrays = ` -[[albums]] -name = "Born to Run" - - [[albums.songs]] - name = "Jungleland" - - [[albums.songs]] - name = "Meeting Across the River" - -[[albums]] -name = "Born in the USA" - - [[albums.songs]] - name = "Glory Days" - - [[albums.songs]] - name = "Dancing in the Dark" -` - - type Song struct { - Name string - } - - type Album struct { - Name string - Songs []Song - } - - type Music struct { - Albums []Album - } - - expected := Music{[]Album{ - {"Born to Run", []Song{{"Jungleland"}, {"Meeting Across the River"}}}, - {"Born in the USA", []Song{{"Glory Days"}, {"Dancing in the Dark"}}}, - }} - var got Music - if _, err := Decode(tomlTableArrays, &got); err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(expected, got) { - t.Fatalf("\n%#v\n!=\n%#v\n", expected, got) - } -} - -// Case insensitive matching tests. -// A bit more comprehensive than needed given the current implementation, -// but implementations change. -// Probably still missing demonstrations of some ugly corner cases regarding -// case insensitive matching and multiple fields. 
-func TestCase(t *testing.T) { - var caseToml = ` -tOpString = "string" -tOpInt = 1 -tOpFloat = 1.1 -tOpBool = true -tOpdate = 2006-01-02T15:04:05Z -tOparray = [ "array" ] -Match = "i should be in Match only" -MatcH = "i should be in MatcH only" -once = "just once" -[nEst.eD] -nEstedString = "another string" -` - - type InsensitiveEd struct { - NestedString string - } - - type InsensitiveNest struct { - Ed InsensitiveEd - } - - type Insensitive struct { - TopString string - TopInt int - TopFloat float64 - TopBool bool - TopDate time.Time - TopArray []string - Match string - MatcH string - Once string - OncE string - Nest InsensitiveNest - } - - tme, err := time.Parse(time.RFC3339, time.RFC3339[:len(time.RFC3339)-5]) - if err != nil { - panic(err) - } - expected := Insensitive{ - TopString: "string", - TopInt: 1, - TopFloat: 1.1, - TopBool: true, - TopDate: tme, - TopArray: []string{"array"}, - MatcH: "i should be in MatcH only", - Match: "i should be in Match only", - Once: "just once", - OncE: "", - Nest: InsensitiveNest{ - Ed: InsensitiveEd{NestedString: "another string"}, - }, - } - var got Insensitive - if _, err := Decode(caseToml, &got); err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(expected, got) { - t.Fatalf("\n%#v\n!=\n%#v\n", expected, got) - } -} - -func TestPointers(t *testing.T) { - type Object struct { - Type string - Description string - } - - type Dict struct { - NamedObject map[string]*Object - BaseObject *Object - Strptr *string - Strptrs []*string - } - s1, s2, s3 := "blah", "abc", "def" - expected := &Dict{ - Strptr: &s1, - Strptrs: []*string{&s2, &s3}, - NamedObject: map[string]*Object{ - "foo": {"FOO", "fooooo!!!"}, - "bar": {"BAR", "ba-ba-ba-ba-barrrr!!!"}, - }, - BaseObject: &Object{"BASE", "da base"}, - } - - ex1 := ` -Strptr = "blah" -Strptrs = ["abc", "def"] - -[NamedObject.foo] -Type = "FOO" -Description = "fooooo!!!" - -[NamedObject.bar] -Type = "BAR" -Description = "ba-ba-ba-ba-barrrr!!!" 
- -[BaseObject] -Type = "BASE" -Description = "da base" -` - dict := new(Dict) - _, err := Decode(ex1, dict) - if err != nil { - t.Errorf("Decode error: %v", err) - } - if !reflect.DeepEqual(expected, dict) { - t.Fatalf("\n%#v\n!=\n%#v\n", expected, dict) - } -} - -type sphere struct { - Center [3]float64 - Radius float64 -} - -func TestDecodeSimpleArray(t *testing.T) { - var s1 sphere - if _, err := Decode(`center = [0.0, 1.5, 0.0]`, &s1); err != nil { - t.Fatal(err) - } -} - -func TestDecodeArrayWrongSize(t *testing.T) { - var s1 sphere - if _, err := Decode(`center = [0.1, 2.3]`, &s1); err == nil { - t.Fatal("Expected array type mismatch error") - } -} - -func TestDecodeLargeIntoSmallInt(t *testing.T) { - type table struct { - Value int8 - } - var tab table - if _, err := Decode(`value = 500`, &tab); err == nil { - t.Fatal("Expected integer out-of-bounds error.") - } -} - -func TestDecodeSizedInts(t *testing.T) { - type table struct { - U8 uint8 - U16 uint16 - U32 uint32 - U64 uint64 - U uint - I8 int8 - I16 int16 - I32 int32 - I64 int64 - I int - } - answer := table{1, 1, 1, 1, 1, -1, -1, -1, -1, -1} - toml := ` - u8 = 1 - u16 = 1 - u32 = 1 - u64 = 1 - u = 1 - i8 = -1 - i16 = -1 - i32 = -1 - i64 = -1 - i = -1 - ` - var tab table - if _, err := Decode(toml, &tab); err != nil { - t.Fatal(err.Error()) - } - if answer != tab { - t.Fatalf("Expected %#v but got %#v", answer, tab) - } -} - -func TestUnmarshaler(t *testing.T) { - - var tomlBlob = ` -[dishes.hamboogie] -name = "Hamboogie with fries" -price = 10.99 - -[[dishes.hamboogie.ingredients]] -name = "Bread Bun" - -[[dishes.hamboogie.ingredients]] -name = "Lettuce" - -[[dishes.hamboogie.ingredients]] -name = "Real Beef Patty" - -[[dishes.hamboogie.ingredients]] -name = "Tomato" - -[dishes.eggsalad] -name = "Egg Salad with rice" -price = 3.99 - -[[dishes.eggsalad.ingredients]] -name = "Egg" - -[[dishes.eggsalad.ingredients]] -name = "Mayo" - -[[dishes.eggsalad.ingredients]] -name = "Rice" -` - m := &menu{} - if _, err := Decode(tomlBlob, m); err != nil { - log.Fatal(err) - } - - if len(m.Dishes) != 2 { - t.Log("two dishes should be loaded with UnmarshalTOML()") - t.Errorf("expected %d but got %d", 2, len(m.Dishes)) - } - - eggSalad := m.Dishes["eggsalad"] - if _, ok := interface{}(eggSalad).(dish); !ok { - t.Errorf("expected a dish") - } - - if eggSalad.Name != "Egg Salad with rice" { - t.Errorf("expected the dish to be named 'Egg Salad with rice'") - } - - if len(eggSalad.Ingredients) != 3 { - t.Log("dish should be loaded with UnmarshalTOML()") - t.Errorf("expected %d but got %d", 3, len(eggSalad.Ingredients)) - } - - found := false - for _, i := range eggSalad.Ingredients { - if i.Name == "Rice" { - found = true - break - } - } - if !found { - t.Error("Rice was not loaded in UnmarshalTOML()") - } - - // test on a value - must be passed as * - o := menu{} - if _, err := Decode(tomlBlob, &o); err != nil { - log.Fatal(err) - } - -} - -type menu struct { - Dishes map[string]dish -} - -func (m *menu) UnmarshalTOML(p interface{}) error { - m.Dishes = make(map[string]dish) - data, _ := p.(map[string]interface{}) - dishes := data["dishes"].(map[string]interface{}) - for n, v := range dishes { - if d, ok := v.(map[string]interface{}); ok { - nd := dish{} - nd.UnmarshalTOML(d) - m.Dishes[n] = nd - } else { - return fmt.Errorf("not a dish") - } - } - return nil -} - -type dish struct { - Name string - Price float32 - Ingredients []ingredient -} - -func (d *dish) UnmarshalTOML(p interface{}) error { - data, _ := p.(map[string]interface{}) - d.Name, 
_ = data["name"].(string) - d.Price, _ = data["price"].(float32) - ingredients, _ := data["ingredients"].([]map[string]interface{}) - for _, e := range ingredients { - n, _ := interface{}(e).(map[string]interface{}) - name, _ := n["name"].(string) - i := ingredient{name} - d.Ingredients = append(d.Ingredients, i) - } - return nil -} - -type ingredient struct { - Name string -} - -func ExampleMetaData_PrimitiveDecode() { - var md MetaData - var err error - - var tomlBlob = ` -ranking = ["Springsteen", "J Geils"] - -[bands.Springsteen] -started = 1973 -albums = ["Greetings", "WIESS", "Born to Run", "Darkness"] - -[bands."J Geils"] -started = 1970 -albums = ["The J. Geils Band", "Full House", "Blow Your Face Out"] -` - - type band struct { - Started int - Albums []string - } - type classics struct { - Ranking []string - Bands map[string]Primitive - } - - // Do the initial decode. Reflection is delayed on Primitive values. - var music classics - if md, err = Decode(tomlBlob, &music); err != nil { - log.Fatal(err) - } - - // MetaData still includes information on Primitive values. - fmt.Printf("Is `bands.Springsteen` defined? %v\n", - md.IsDefined("bands", "Springsteen")) - - // Decode primitive data into Go values. - for _, artist := range music.Ranking { - // A band is a primitive value, so we need to decode it to get a - // real `band` value. - primValue := music.Bands[artist] - - var aBand band - if err = md.PrimitiveDecode(primValue, &aBand); err != nil { - log.Fatal(err) - } - fmt.Printf("%s started in %d.\n", artist, aBand.Started) - } - // Check to see if there were any fields left undecoded. - // Note that this won't be empty before decoding the Primitive value! - fmt.Printf("Undecoded: %q\n", md.Undecoded()) - - // Output: - // Is `bands.Springsteen` defined? true - // Springsteen started in 1973. - // J Geils started in 1970. - // Undecoded: [] -} - -func ExampleDecode() { - var tomlBlob = ` -# Some comments. -[alpha] -ip = "10.0.0.1" - - [alpha.config] - Ports = [ 8001, 8002 ] - Location = "Toronto" - Created = 1987-07-05T05:45:00Z - -[beta] -ip = "10.0.0.2" - - [beta.config] - Ports = [ 9001, 9002 ] - Location = "New Jersey" - Created = 1887-01-05T05:55:00Z -` - - type serverConfig struct { - Ports []int - Location string - Created time.Time - } - - type server struct { - IP string `toml:"ip"` - Config serverConfig `toml:"config"` - } - - type servers map[string]server - - var config servers - if _, err := Decode(tomlBlob, &config); err != nil { - log.Fatal(err) - } - - for _, name := range []string{"alpha", "beta"} { - s := config[name] - fmt.Printf("Server: %s (ip: %s) in %s created on %s\n", - name, s.IP, s.Config.Location, - s.Config.Created.Format("2006-01-02")) - fmt.Printf("Ports: %v\n", s.Config.Ports) - } - - // Output: - // Server: alpha (ip: 10.0.0.1) in Toronto created on 1987-07-05 - // Ports: [8001 8002] - // Server: beta (ip: 10.0.0.2) in New Jersey created on 1887-01-05 - // Ports: [9001 9002] -} - -type duration struct { - time.Duration -} - -func (d *duration) UnmarshalText(text []byte) error { - var err error - d.Duration, err = time.ParseDuration(string(text)) - return err -} - -// Example Unmarshaler shows how to decode TOML strings into your own -// custom data type. 
-func Example_unmarshaler() { - blob := ` -[[song]] -name = "Thunder Road" -duration = "4m49s" - -[[song]] -name = "Stairway to Heaven" -duration = "8m03s" -` - type song struct { - Name string - Duration duration - } - type songs struct { - Song []song - } - var favorites songs - if _, err := Decode(blob, &favorites); err != nil { - log.Fatal(err) - } - - // Code to implement the TextUnmarshaler interface for `duration`: - // - // type duration struct { - // time.Duration - // } - // - // func (d *duration) UnmarshalText(text []byte) error { - // var err error - // d.Duration, err = time.ParseDuration(string(text)) - // return err - // } - - for _, s := range favorites.Song { - fmt.Printf("%s (%s)\n", s.Name, s.Duration) - } - // Output: - // Thunder Road (4m49s) - // Stairway to Heaven (8m3s) -} - -// Example StrictDecoding shows how to detect whether there are keys in the -// TOML document that weren't decoded into the value given. This is useful -// for returning an error to the user if they've included extraneous fields -// in their configuration. -func Example_strictDecoding() { - var blob = ` -key1 = "value1" -key2 = "value2" -key3 = "value3" -` - type config struct { - Key1 string - Key3 string - } - - var conf config - md, err := Decode(blob, &conf) - if err != nil { - log.Fatal(err) - } - fmt.Printf("Undecoded keys: %q\n", md.Undecoded()) - // Output: - // Undecoded keys: ["key2"] -} - -// Example UnmarshalTOML shows how to implement a struct type that knows how to -// unmarshal itself. The struct must take full responsibility for mapping the -// values passed into the struct. The method may be used with interfaces in a -// struct in cases where the actual type is not known until the data is -// examined. -func Example_unmarshalTOML() { - - var blob = ` -[[parts]] -type = "valve" -id = "valve-1" -size = 1.2 -rating = 4 - -[[parts]] -type = "valve" -id = "valve-2" -size = 2.1 -rating = 5 - -[[parts]] -type = "pipe" -id = "pipe-1" -length = 2.1 -diameter = 12 - -[[parts]] -type = "cable" -id = "cable-1" -length = 12 -rating = 3.1 -` - o := &order{} - err := Unmarshal([]byte(blob), o) - if err != nil { - log.Fatal(err) - } - - fmt.Println(len(o.parts)) - - for _, part := range o.parts { - fmt.Println(part.Name()) - } - - // Code to implement UmarshalJSON. - - // type order struct { - // // NOTE `order.parts` is a private slice of type `part` which is an - // // interface and may only be loaded from toml using the - // // UnmarshalTOML() method of the Umarshaler interface. - // parts parts - // } - - // func (o *order) UnmarshalTOML(data interface{}) error { - - // // NOTE the example below contains detailed type casting to show how - // // the 'data' is retrieved. In operational use, a type cast wrapper - // // may be prefered e.g. 
- // // - // // func AsMap(v interface{}) (map[string]interface{}, error) { - // // return v.(map[string]interface{}) - // // } - // // - // // resulting in: - // // d, _ := AsMap(data) - // // - - // d, _ := data.(map[string]interface{}) - // parts, _ := d["parts"].([]map[string]interface{}) - - // for _, p := range parts { - - // typ, _ := p["type"].(string) - // id, _ := p["id"].(string) - - // // detect the type of part and handle each case - // switch p["type"] { - // case "valve": - - // size := float32(p["size"].(float64)) - // rating := int(p["rating"].(int64)) - - // valve := &valve{ - // Type: typ, - // ID: id, - // Size: size, - // Rating: rating, - // } - - // o.parts = append(o.parts, valve) - - // case "pipe": - - // length := float32(p["length"].(float64)) - // diameter := int(p["diameter"].(int64)) - - // pipe := &pipe{ - // Type: typ, - // ID: id, - // Length: length, - // Diameter: diameter, - // } - - // o.parts = append(o.parts, pipe) - - // case "cable": - - // length := int(p["length"].(int64)) - // rating := float32(p["rating"].(float64)) - - // cable := &cable{ - // Type: typ, - // ID: id, - // Length: length, - // Rating: rating, - // } - - // o.parts = append(o.parts, cable) - - // } - // } - - // return nil - // } - - // type parts []part - - // type part interface { - // Name() string - // } - - // type valve struct { - // Type string - // ID string - // Size float32 - // Rating int - // } - - // func (v *valve) Name() string { - // return fmt.Sprintf("VALVE: %s", v.ID) - // } - - // type pipe struct { - // Type string - // ID string - // Length float32 - // Diameter int - // } - - // func (p *pipe) Name() string { - // return fmt.Sprintf("PIPE: %s", p.ID) - // } - - // type cable struct { - // Type string - // ID string - // Length int - // Rating float32 - // } - - // func (c *cable) Name() string { - // return fmt.Sprintf("CABLE: %s", c.ID) - // } - - // Output: - // 4 - // VALVE: valve-1 - // VALVE: valve-2 - // PIPE: pipe-1 - // CABLE: cable-1 - -} - -type order struct { - // NOTE `order.parts` is a private slice of type `part` which is an - // interface and may only be loaded from toml using the UnmarshalTOML() - // method of the Umarshaler interface. - parts parts -} - -func (o *order) UnmarshalTOML(data interface{}) error { - - // NOTE the example below contains detailed type casting to show how - // the 'data' is retrieved. In operational use, a type cast wrapper - // may be prefered e.g. 
- // - // func AsMap(v interface{}) (map[string]interface{}, error) { - // return v.(map[string]interface{}) - // } - // - // resulting in: - // d, _ := AsMap(data) - // - - d, _ := data.(map[string]interface{}) - parts, _ := d["parts"].([]map[string]interface{}) - - for _, p := range parts { - - typ, _ := p["type"].(string) - id, _ := p["id"].(string) - - // detect the type of part and handle each case - switch p["type"] { - case "valve": - - size := float32(p["size"].(float64)) - rating := int(p["rating"].(int64)) - - valve := &valve{ - Type: typ, - ID: id, - Size: size, - Rating: rating, - } - - o.parts = append(o.parts, valve) - - case "pipe": - - length := float32(p["length"].(float64)) - diameter := int(p["diameter"].(int64)) - - pipe := &pipe{ - Type: typ, - ID: id, - Length: length, - Diameter: diameter, - } - - o.parts = append(o.parts, pipe) - - case "cable": - - length := int(p["length"].(int64)) - rating := float32(p["rating"].(float64)) - - cable := &cable{ - Type: typ, - ID: id, - Length: length, - Rating: rating, - } - - o.parts = append(o.parts, cable) - - } - } - - return nil -} - -type parts []part - -type part interface { - Name() string -} - -type valve struct { - Type string - ID string - Size float32 - Rating int -} - -func (v *valve) Name() string { - return fmt.Sprintf("VALVE: %s", v.ID) -} - -type pipe struct { - Type string - ID string - Length float32 - Diameter int -} - -func (p *pipe) Name() string { - return fmt.Sprintf("PIPE: %s", p.ID) -} - -type cable struct { - Type string - ID string - Length int - Rating float32 -} - -func (c *cable) Name() string { - return fmt.Sprintf("CABLE: %s", c.ID) -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/encode_test.go b/libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/encode_test.go deleted file mode 100644 index 74a5ee5d23..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/encode_test.go +++ /dev/null @@ -1,506 +0,0 @@ -package toml - -import ( - "bytes" - "fmt" - "log" - "net" - "testing" - "time" -) - -func TestEncodeRoundTrip(t *testing.T) { - type Config struct { - Age int - Cats []string - Pi float64 - Perfection []int - DOB time.Time - Ipaddress net.IP - } - - var inputs = Config{ - 13, - []string{"one", "two", "three"}, - 3.145, - []int{11, 2, 3, 4}, - time.Now(), - net.ParseIP("192.168.59.254"), - } - - var firstBuffer bytes.Buffer - e := NewEncoder(&firstBuffer) - err := e.Encode(inputs) - if err != nil { - t.Fatal(err) - } - var outputs Config - if _, err := Decode(firstBuffer.String(), &outputs); err != nil { - log.Printf("Could not decode:\n-----\n%s\n-----\n", - firstBuffer.String()) - t.Fatal(err) - } - - // could test each value individually, but I'm lazy - var secondBuffer bytes.Buffer - e2 := NewEncoder(&secondBuffer) - err = e2.Encode(outputs) - if err != nil { - t.Fatal(err) - } - if firstBuffer.String() != secondBuffer.String() { - t.Error( - firstBuffer.String(), - "\n\n is not identical to\n\n", - secondBuffer.String()) - } -} - -// XXX(burntsushi) -// I think these tests probably should be removed. They are good, but they -// ought to be obsolete by toml-test. 
-func TestEncode(t *testing.T) { - type Embedded struct { - Int int `toml:"_int"` - } - type NonStruct int - - date := time.Date(2014, 5, 11, 20, 30, 40, 0, time.FixedZone("IST", 3600)) - dateStr := "2014-05-11T19:30:40Z" - - tests := map[string]struct { - input interface{} - wantOutput string - wantError error - }{ - "bool field": { - input: struct { - BoolTrue bool - BoolFalse bool - }{true, false}, - wantOutput: "BoolTrue = true\nBoolFalse = false\n", - }, - "int fields": { - input: struct { - Int int - Int8 int8 - Int16 int16 - Int32 int32 - Int64 int64 - }{1, 2, 3, 4, 5}, - wantOutput: "Int = 1\nInt8 = 2\nInt16 = 3\nInt32 = 4\nInt64 = 5\n", - }, - "uint fields": { - input: struct { - Uint uint - Uint8 uint8 - Uint16 uint16 - Uint32 uint32 - Uint64 uint64 - }{1, 2, 3, 4, 5}, - wantOutput: "Uint = 1\nUint8 = 2\nUint16 = 3\nUint32 = 4" + - "\nUint64 = 5\n", - }, - "float fields": { - input: struct { - Float32 float32 - Float64 float64 - }{1.5, 2.5}, - wantOutput: "Float32 = 1.5\nFloat64 = 2.5\n", - }, - "string field": { - input: struct{ String string }{"foo"}, - wantOutput: "String = \"foo\"\n", - }, - "string field and unexported field": { - input: struct { - String string - unexported int - }{"foo", 0}, - wantOutput: "String = \"foo\"\n", - }, - "datetime field in UTC": { - input: struct{ Date time.Time }{date}, - wantOutput: fmt.Sprintf("Date = %s\n", dateStr), - }, - "datetime field as primitive": { - // Using a map here to fail if isStructOrMap() returns true for - // time.Time. - input: map[string]interface{}{ - "Date": date, - "Int": 1, - }, - wantOutput: fmt.Sprintf("Date = %s\nInt = 1\n", dateStr), - }, - "array fields": { - input: struct { - IntArray0 [0]int - IntArray3 [3]int - }{[0]int{}, [3]int{1, 2, 3}}, - wantOutput: "IntArray0 = []\nIntArray3 = [1, 2, 3]\n", - }, - "slice fields": { - input: struct{ IntSliceNil, IntSlice0, IntSlice3 []int }{ - nil, []int{}, []int{1, 2, 3}, - }, - wantOutput: "IntSlice0 = []\nIntSlice3 = [1, 2, 3]\n", - }, - "datetime slices": { - input: struct{ DatetimeSlice []time.Time }{ - []time.Time{date, date}, - }, - wantOutput: fmt.Sprintf("DatetimeSlice = [%s, %s]\n", - dateStr, dateStr), - }, - "nested arrays and slices": { - input: struct { - SliceOfArrays [][2]int - ArrayOfSlices [2][]int - SliceOfArraysOfSlices [][2][]int - ArrayOfSlicesOfArrays [2][][2]int - SliceOfMixedArrays [][2]interface{} - ArrayOfMixedSlices [2][]interface{} - }{ - [][2]int{{1, 2}, {3, 4}}, - [2][]int{{1, 2}, {3, 4}}, - [][2][]int{ - { - {1, 2}, {3, 4}, - }, - { - {5, 6}, {7, 8}, - }, - }, - [2][][2]int{ - { - {1, 2}, {3, 4}, - }, - { - {5, 6}, {7, 8}, - }, - }, - [][2]interface{}{ - {1, 2}, {"a", "b"}, - }, - [2][]interface{}{ - {1, 2}, {"a", "b"}, - }, - }, - wantOutput: `SliceOfArrays = [[1, 2], [3, 4]] -ArrayOfSlices = [[1, 2], [3, 4]] -SliceOfArraysOfSlices = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] -ArrayOfSlicesOfArrays = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] -SliceOfMixedArrays = [[1, 2], ["a", "b"]] -ArrayOfMixedSlices = [[1, 2], ["a", "b"]] -`, - }, - "empty slice": { - input: struct{ Empty []interface{} }{[]interface{}{}}, - wantOutput: "Empty = []\n", - }, - "(error) slice with element type mismatch (string and integer)": { - input: struct{ Mixed []interface{} }{[]interface{}{1, "a"}}, - wantError: errArrayMixedElementTypes, - }, - "(error) slice with element type mismatch (integer and float)": { - input: struct{ Mixed []interface{} }{[]interface{}{1, 2.5}}, - wantError: errArrayMixedElementTypes, - }, - "slice with elems of differing Go types, same TOML types": 
{ - input: struct { - MixedInts []interface{} - MixedFloats []interface{} - }{ - []interface{}{ - int(1), int8(2), int16(3), int32(4), int64(5), - uint(1), uint8(2), uint16(3), uint32(4), uint64(5), - }, - []interface{}{float32(1.5), float64(2.5)}, - }, - wantOutput: "MixedInts = [1, 2, 3, 4, 5, 1, 2, 3, 4, 5]\n" + - "MixedFloats = [1.5, 2.5]\n", - }, - "(error) slice w/ element type mismatch (one is nested array)": { - input: struct{ Mixed []interface{} }{ - []interface{}{1, []interface{}{2}}, - }, - wantError: errArrayMixedElementTypes, - }, - "(error) slice with 1 nil element": { - input: struct{ NilElement1 []interface{} }{[]interface{}{nil}}, - wantError: errArrayNilElement, - }, - "(error) slice with 1 nil element (and other non-nil elements)": { - input: struct{ NilElement []interface{} }{ - []interface{}{1, nil}, - }, - wantError: errArrayNilElement, - }, - "simple map": { - input: map[string]int{"a": 1, "b": 2}, - wantOutput: "a = 1\nb = 2\n", - }, - "map with interface{} value type": { - input: map[string]interface{}{"a": 1, "b": "c"}, - wantOutput: "a = 1\nb = \"c\"\n", - }, - "map with interface{} value type, some of which are structs": { - input: map[string]interface{}{ - "a": struct{ Int int }{2}, - "b": 1, - }, - wantOutput: "b = 1\n\n[a]\n Int = 2\n", - }, - "nested map": { - input: map[string]map[string]int{ - "a": {"b": 1}, - "c": {"d": 2}, - }, - wantOutput: "[a]\n b = 1\n\n[c]\n d = 2\n", - }, - "nested struct": { - input: struct{ Struct struct{ Int int } }{ - struct{ Int int }{1}, - }, - wantOutput: "[Struct]\n Int = 1\n", - }, - "nested struct and non-struct field": { - input: struct { - Struct struct{ Int int } - Bool bool - }{struct{ Int int }{1}, true}, - wantOutput: "Bool = true\n\n[Struct]\n Int = 1\n", - }, - "2 nested structs": { - input: struct{ Struct1, Struct2 struct{ Int int } }{ - struct{ Int int }{1}, struct{ Int int }{2}, - }, - wantOutput: "[Struct1]\n Int = 1\n\n[Struct2]\n Int = 2\n", - }, - "deeply nested structs": { - input: struct { - Struct1, Struct2 struct{ Struct3 *struct{ Int int } } - }{ - struct{ Struct3 *struct{ Int int } }{&struct{ Int int }{1}}, - struct{ Struct3 *struct{ Int int } }{nil}, - }, - wantOutput: "[Struct1]\n [Struct1.Struct3]\n Int = 1" + - "\n\n[Struct2]\n", - }, - "nested struct with nil struct elem": { - input: struct { - Struct struct{ Inner *struct{ Int int } } - }{ - struct{ Inner *struct{ Int int } }{nil}, - }, - wantOutput: "[Struct]\n", - }, - "nested struct with no fields": { - input: struct { - Struct struct{ Inner struct{} } - }{ - struct{ Inner struct{} }{struct{}{}}, - }, - wantOutput: "[Struct]\n [Struct.Inner]\n", - }, - "struct with tags": { - input: struct { - Struct struct { - Int int `toml:"_int"` - } `toml:"_struct"` - Bool bool `toml:"_bool"` - }{ - struct { - Int int `toml:"_int"` - }{1}, true, - }, - wantOutput: "_bool = true\n\n[_struct]\n _int = 1\n", - }, - "embedded struct": { - input: struct{ Embedded }{Embedded{1}}, - wantOutput: "_int = 1\n", - }, - "embedded *struct": { - input: struct{ *Embedded }{&Embedded{1}}, - wantOutput: "_int = 1\n", - }, - "nested embedded struct": { - input: struct { - Struct struct{ Embedded } `toml:"_struct"` - }{struct{ Embedded }{Embedded{1}}}, - wantOutput: "[_struct]\n _int = 1\n", - }, - "nested embedded *struct": { - input: struct { - Struct struct{ *Embedded } `toml:"_struct"` - }{struct{ *Embedded }{&Embedded{1}}}, - wantOutput: "[_struct]\n _int = 1\n", - }, - "array of tables": { - input: struct { - Structs []*struct{ Int int } `toml:"struct"` - }{ - 
[]*struct{ Int int }{{1}, {3}}, - }, - wantOutput: "[[struct]]\n Int = 1\n\n[[struct]]\n Int = 3\n", - }, - "array of tables order": { - input: map[string]interface{}{ - "map": map[string]interface{}{ - "zero": 5, - "arr": []map[string]int{ - map[string]int{ - "friend": 5, - }, - }, - }, - }, - wantOutput: "[map]\n zero = 5\n\n [[map.arr]]\n friend = 5\n", - }, - "(error) top-level slice": { - input: []struct{ Int int }{{1}, {2}, {3}}, - wantError: errNoKey, - }, - "(error) slice of slice": { - input: struct { - Slices [][]struct{ Int int } - }{ - [][]struct{ Int int }{{{1}}, {{2}}, {{3}}}, - }, - wantError: errArrayNoTable, - }, - "(error) map no string key": { - input: map[int]string{1: ""}, - wantError: errNonString, - }, - "(error) anonymous non-struct": { - input: struct{ NonStruct }{5}, - wantError: errAnonNonStruct, - }, - "(error) empty key name": { - input: map[string]int{"": 1}, - wantError: errAnything, - }, - "(error) empty map name": { - input: map[string]interface{}{ - "": map[string]int{"v": 1}, - }, - wantError: errAnything, - }, - } - for label, test := range tests { - encodeExpected(t, label, test.input, test.wantOutput, test.wantError) - } -} - -func TestEncodeNestedTableArrays(t *testing.T) { - type song struct { - Name string `toml:"name"` - } - type album struct { - Name string `toml:"name"` - Songs []song `toml:"songs"` - } - type springsteen struct { - Albums []album `toml:"albums"` - } - value := springsteen{ - []album{ - {"Born to Run", - []song{{"Jungleland"}, {"Meeting Across the River"}}}, - {"Born in the USA", - []song{{"Glory Days"}, {"Dancing in the Dark"}}}, - }, - } - expected := `[[albums]] - name = "Born to Run" - - [[albums.songs]] - name = "Jungleland" - - [[albums.songs]] - name = "Meeting Across the River" - -[[albums]] - name = "Born in the USA" - - [[albums.songs]] - name = "Glory Days" - - [[albums.songs]] - name = "Dancing in the Dark" -` - encodeExpected(t, "nested table arrays", value, expected, nil) -} - -func TestEncodeArrayHashWithNormalHashOrder(t *testing.T) { - type Alpha struct { - V int - } - type Beta struct { - V int - } - type Conf struct { - V int - A Alpha - B []Beta - } - - val := Conf{ - V: 1, - A: Alpha{2}, - B: []Beta{{3}}, - } - expected := "V = 1\n\n[A]\n V = 2\n\n[[B]]\n V = 3\n" - encodeExpected(t, "array hash with normal hash order", val, expected, nil) -} - -func encodeExpected( - t *testing.T, label string, val interface{}, wantStr string, wantErr error, -) { - var buf bytes.Buffer - enc := NewEncoder(&buf) - err := enc.Encode(val) - if err != wantErr { - if wantErr != nil { - if wantErr == errAnything && err != nil { - return - } - t.Errorf("%s: want Encode error %v, got %v", label, wantErr, err) - } else { - t.Errorf("%s: Encode failed: %s", label, err) - } - } - if err != nil { - return - } - if got := buf.String(); wantStr != got { - t.Errorf("%s: want\n-----\n%q\n-----\nbut got\n-----\n%q\n-----\n", - label, wantStr, got) - } -} - -func ExampleEncoder_Encode() { - date, _ := time.Parse(time.RFC822, "14 Mar 10 18:00 UTC") - var config = map[string]interface{}{ - "date": date, - "counts": []int{1, 1, 2, 3, 5, 8}, - "hash": map[string]string{ - "key1": "val1", - "key2": "val2", - }, - } - buf := new(bytes.Buffer) - if err := NewEncoder(buf).Encode(config); err != nil { - log.Fatal(err) - } - fmt.Println(buf.String()) - - // Output: - // counts = [1, 1, 2, 3, 5, 8] - // date = 2010-03-14T18:00:00Z - // - // [hash] - // key1 = "val1" - // key2 = "val2" -} diff --git 
a/libnetwork/Godeps/_workspace/src/github.com/Microsoft/go-winio/backup_test.go b/libnetwork/Godeps/_workspace/src/github.com/Microsoft/go-winio/backup_test.go deleted file mode 100644 index cc5a0c5ff0..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/Microsoft/go-winio/backup_test.go +++ /dev/null @@ -1,255 +0,0 @@ -package winio - -import ( - "io" - "io/ioutil" - "os" - "syscall" - "testing" -) - -var testFileName string - -func TestMain(m *testing.M) { - f, err := ioutil.TempFile("", "tmp") - if err != nil { - panic(err) - } - testFileName = f.Name() - f.Close() - defer os.Remove(testFileName) - os.Exit(m.Run()) -} - -func makeTestFile(makeADS bool) error { - os.Remove(testFileName) - f, err := os.Create(testFileName) - if err != nil { - return err - } - defer f.Close() - _, err = f.Write([]byte("testing 1 2 3\n")) - if err != nil { - return err - } - if makeADS { - a, err := os.Create(testFileName + ":ads.txt") - if err != nil { - return err - } - defer a.Close() - _, err = a.Write([]byte("alternate data stream\n")) - if err != nil { - return err - } - } - return nil -} - -func TestBackupRead(t *testing.T) { - err := makeTestFile(true) - if err != nil { - t.Fatal(err) - } - - f, err := os.Open(testFileName) - if err != nil { - t.Fatal(err) - } - defer f.Close() - r := NewBackupFileReader(f, false) - defer r.Close() - b, err := ioutil.ReadAll(r) - if err != nil { - t.Fatal(err) - } - if len(b) == 0 { - t.Fatal("no data") - } -} - -func TestBackupStreamRead(t *testing.T) { - err := makeTestFile(true) - if err != nil { - t.Fatal(err) - } - - f, err := os.Open(testFileName) - if err != nil { - t.Fatal(err) - } - defer f.Close() - r := NewBackupFileReader(f, false) - defer r.Close() - - br := NewBackupStreamReader(r) - gotData := false - gotAltData := false - for { - hdr, err := br.Next() - if err == io.EOF { - break - } - if err != nil { - t.Fatal(err) - } - - switch hdr.Id { - case BackupData: - if gotData { - t.Fatal("duplicate data") - } - if hdr.Name != "" { - t.Fatalf("unexpected name %s", hdr.Name) - } - b, err := ioutil.ReadAll(br) - if err != nil { - t.Fatal(err) - } - if string(b) != "testing 1 2 3\n" { - t.Fatalf("incorrect data %v", b) - } - gotData = true - case BackupAlternateData: - if gotAltData { - t.Fatal("duplicate alt data") - } - if hdr.Name != ":ads.txt:$DATA" { - t.Fatalf("incorrect name %s", hdr.Name) - } - b, err := ioutil.ReadAll(br) - if err != nil { - t.Fatal(err) - } - if string(b) != "alternate data stream\n" { - t.Fatalf("incorrect data %v", b) - } - gotAltData = true - default: - t.Fatalf("unknown stream ID %d", hdr.Id) - } - } - if !gotData || !gotAltData { - t.Fatal("missing stream") - } -} - -func TestBackupStreamWrite(t *testing.T) { - f, err := os.Create(testFileName) - if err != nil { - t.Fatal(err) - } - defer f.Close() - w := NewBackupFileWriter(f, false) - defer w.Close() - - data := "testing 1 2 3\n" - altData := "alternate stream\n" - - br := NewBackupStreamWriter(w) - err = br.WriteHeader(&BackupHeader{Id: BackupData, Size: int64(len(data))}) - if err != nil { - t.Fatal(err) - } - n, err := br.Write([]byte(data)) - if err != nil { - t.Fatal(err) - } - if n != len(data) { - t.Fatal("short write") - } - - err = br.WriteHeader(&BackupHeader{Id: BackupAlternateData, Size: int64(len(altData)), Name: ":ads.txt:$DATA"}) - if err != nil { - t.Fatal(err) - } - n, err = br.Write([]byte(altData)) - if err != nil { - t.Fatal(err) - } - if n != len(altData) { - t.Fatal("short write") - } - - f.Close() - - b, err := ioutil.ReadFile(testFileName) - 
if err != nil { - t.Fatal(err) - } - if string(b) != data { - t.Fatalf("wrong data %v", b) - } - - b, err = ioutil.ReadFile(testFileName + ":ads.txt") - if err != nil { - t.Fatal(err) - } - if string(b) != altData { - t.Fatalf("wrong data %v", b) - } -} - -func makeSparseFile() error { - os.Remove(testFileName) - f, err := os.Create(testFileName) - if err != nil { - return err - } - defer f.Close() - - const ( - FSCTL_SET_SPARSE = 0x000900c4 - FSCTL_SET_ZERO_DATA = 0x000980c8 - ) - - err = syscall.DeviceIoControl(syscall.Handle(f.Fd()), FSCTL_SET_SPARSE, nil, 0, nil, 0, nil, nil) - if err != nil { - return err - } - - _, err = f.Write([]byte("testing 1 2 3\n")) - if err != nil { - return err - } - - _, err = f.Seek(1000000, 0) - if err != nil { - return err - } - - _, err = f.Write([]byte("more data later\n")) - if err != nil { - return err - } - - return nil -} - -func TestBackupSparseFile(t *testing.T) { - err := makeSparseFile() - if err != nil { - t.Fatal(err) - } - - f, err := os.Open(testFileName) - if err != nil { - t.Fatal(err) - } - defer f.Close() - r := NewBackupFileReader(f, false) - defer r.Close() - - br := NewBackupStreamReader(r) - for { - hdr, err := br.Next() - if err == io.EOF { - break - } - if err != nil { - t.Fatal(err) - } - - t.Log(hdr) - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/Microsoft/go-winio/pipe_test.go b/libnetwork/Godeps/_workspace/src/github.com/Microsoft/go-winio/pipe_test.go deleted file mode 100644 index 49dad98b32..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/Microsoft/go-winio/pipe_test.go +++ /dev/null @@ -1,262 +0,0 @@ -package winio - -import ( - "bufio" - "io" - "net" - "os" - "syscall" - "testing" - "time" -) - -var testPipeName = `\\.\pipe\winiotestpipe` - -func TestDialUnknownFailsImmediately(t *testing.T) { - _, err := DialPipe(testPipeName, nil) - if err.(*os.PathError).Err != syscall.ENOENT { - t.Fatalf("expected ENOENT got %v", err) - } -} - -func TestDialListenerTimesOut(t *testing.T) { - l, err := ListenPipe(testPipeName, nil) - if err != nil { - t.Fatal(err) - } - defer l.Close() - var d = time.Duration(10 * time.Millisecond) - _, err = DialPipe(testPipeName, &d) - if err != ErrTimeout { - t.Fatalf("expected ErrTimeout, got %v", err) - } -} - -func TestDialAccessDeniedWithRestrictedSD(t *testing.T) { - c := PipeConfig{ - SecurityDescriptor: "D:P(A;;0x1200FF;;;WD)", - } - l, err := ListenPipe(testPipeName, &c) - if err != nil { - t.Fatal(err) - } - defer l.Close() - _, err = DialPipe(testPipeName, nil) - if err.(*os.PathError).Err != syscall.ERROR_ACCESS_DENIED { - t.Fatalf("expected ERROR_ACCESS_DENIED, got %v", err) - } -} - -func getConnection(cfg *PipeConfig) (client net.Conn, server net.Conn, err error) { - l, err := ListenPipe(testPipeName, cfg) - if err != nil { - return - } - defer l.Close() - - type response struct { - c net.Conn - err error - } - ch := make(chan response) - go func() { - c, err := l.Accept() - ch <- response{c, err} - }() - - c, err := DialPipe(testPipeName, nil) - if err != nil { - return - } - - r := <-ch - if err = r.err; err != nil { - c.Close() - return - } - - client = c - server = r.c - return -} - -func TestReadTimeout(t *testing.T) { - c, s, err := getConnection(nil) - if err != nil { - t.Fatal(err) - } - defer c.Close() - defer s.Close() - - c.SetReadDeadline(time.Now().Add(10 * time.Millisecond)) - - buf := make([]byte, 10) - _, err = c.Read(buf) - if err != ErrTimeout { - t.Fatalf("expected ErrTimeout, got %v", err) - } -} - -func server(l net.Listener, ch chan 
int) { - c, err := l.Accept() - if err != nil { - panic(err) - } - rw := bufio.NewReadWriter(bufio.NewReader(c), bufio.NewWriter(c)) - s, err := rw.ReadString('\n') - if err != nil { - panic(err) - } - _, err = rw.WriteString("got " + s) - if err != nil { - panic(err) - } - err = rw.Flush() - if err != nil { - panic(err) - } - c.Close() - ch <- 1 -} - -func TestFullListenDialReadWrite(t *testing.T) { - l, err := ListenPipe(testPipeName, nil) - if err != nil { - t.Fatal(err) - } - defer l.Close() - - ch := make(chan int) - go server(l, ch) - - c, err := DialPipe(testPipeName, nil) - if err != nil { - t.Fatal(err) - } - defer c.Close() - - rw := bufio.NewReadWriter(bufio.NewReader(c), bufio.NewWriter(c)) - _, err = rw.WriteString("hello world\n") - if err != nil { - t.Fatal(err) - } - err = rw.Flush() - if err != nil { - t.Fatal(err) - } - - s, err := rw.ReadString('\n') - if err != nil { - t.Fatal(err) - } - ms := "got hello world\n" - if s != ms { - t.Errorf("expected '%s', got '%s'", ms, s) - } - - <-ch -} - -func TestCloseAbortsListen(t *testing.T) { - l, err := ListenPipe(testPipeName, nil) - if err != nil { - t.Fatal(err) - } - - ch := make(chan error) - go func() { - _, err := l.Accept() - ch <- err - }() - - time.Sleep(30 * time.Millisecond) - l.Close() - - err = <-ch - if err != ErrPipeListenerClosed { - t.Fatalf("expected ErrPipeListenerClosed, got %v", err) - } -} - -func ensureEOFOnClose(t *testing.T, r io.Reader, w io.Closer) { - b := make([]byte, 10) - w.Close() - n, err := r.Read(b) - if n > 0 { - t.Errorf("unexpected byte count %d", n) - } - if err != io.EOF { - t.Errorf("expected EOF: %v", err) - } -} - -func TestCloseClientEOFServer(t *testing.T) { - c, s, err := getConnection(nil) - if err != nil { - t.Fatal(err) - } - defer c.Close() - defer s.Close() - ensureEOFOnClose(t, c, s) -} - -func TestCloseServerEOFClient(t *testing.T) { - c, s, err := getConnection(nil) - if err != nil { - t.Fatal(err) - } - defer c.Close() - defer s.Close() - ensureEOFOnClose(t, s, c) -} - -func TestCloseWriteEOF(t *testing.T) { - cfg := &PipeConfig{ - MessageMode: true, - } - c, s, err := getConnection(cfg) - if err != nil { - t.Fatal(err) - } - defer c.Close() - defer s.Close() - - type closeWriter interface { - CloseWrite() error - } - - err = c.(closeWriter).CloseWrite() - if err != nil { - t.Fatal(err) - } - - b := make([]byte, 10) - _, err = s.Read(b) - if err != io.EOF { - t.Fatal(err) - } -} - -func TestAcceptAfterCloseFails(t *testing.T) { - l, err := ListenPipe(testPipeName, nil) - if err != nil { - t.Fatal(err) - } - l.Close() - _, err = l.Accept() - if err != ErrPipeListenerClosed { - t.Fatalf("expected ErrPipeListenerClosed, got %v", err) - } -} - -func TestDialTimesOutByDefault(t *testing.T) { - l, err := ListenPipe(testPipeName, nil) - if err != nil { - t.Fatal(err) - } - defer l.Close() - _, err = DialPipe(testPipeName, nil) - if err != ErrTimeout { - t.Fatalf("expected ErrTimeout, got %v", err) - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/Microsoft/go-winio/privileges_test.go b/libnetwork/Godeps/_workspace/src/github.com/Microsoft/go-winio/privileges_test.go deleted file mode 100644 index 5e94c48c23..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/Microsoft/go-winio/privileges_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package winio - -import "testing" - -func TestRunWithUnavailablePrivilege(t *testing.T) { - err := RunWithPrivilege("SeCreateTokenPrivilege", func() error { return nil }) - if _, ok := err.(*PrivilegeError); err == nil || !ok { - 
t.Fatal("expected PrivilegeError") - } -} - -func TestRunWithPrivileges(t *testing.T) { - err := RunWithPrivilege("SeShutdownPrivilege", func() error { return nil }) - if err != nil { - t.Fatal(err) - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/Microsoft/go-winio/sd_test.go b/libnetwork/Godeps/_workspace/src/github.com/Microsoft/go-winio/sd_test.go deleted file mode 100644 index 847db3c162..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/Microsoft/go-winio/sd_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package winio - -import "testing" - -func TestLookupInvalidSid(t *testing.T) { - _, err := LookupSidByName(".\\weoifjdsklfj") - aerr, ok := err.(*AccountLookupError) - if !ok || aerr.Err != cERROR_NONE_MAPPED { - t.Fatalf("expected AccountLookupError with ERROR_NONE_MAPPED, got %s", err) - } -} - -func TestLookupValidSid(t *testing.T) { - sid, err := LookupSidByName("Everyone") - if err != nil || sid != "S-1-1-0" { - t.Fatal("expected S-1-1-0, got %s, %s", sid, err) - } -} - -func TestLookupEmptyNameFails(t *testing.T) { - _, err := LookupSidByName(".\\weoifjdsklfj") - aerr, ok := err.(*AccountLookupError) - if !ok || aerr.Err != cERROR_NONE_MAPPED { - t.Fatalf("expected AccountLookupError with ERROR_NONE_MAPPED, got %s", err) - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/armon/go-metrics/inmem_signal_test.go b/libnetwork/Godeps/_workspace/src/github.com/armon/go-metrics/inmem_signal_test.go deleted file mode 100644 index 9bbca5f254..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/armon/go-metrics/inmem_signal_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package metrics - -import ( - "bytes" - "os" - "strings" - "syscall" - "testing" - "time" -) - -func TestInmemSignal(t *testing.T) { - buf := bytes.NewBuffer(nil) - inm := NewInmemSink(10*time.Millisecond, 50*time.Millisecond) - sig := NewInmemSignal(inm, syscall.SIGUSR1, buf) - defer sig.Stop() - - inm.SetGauge([]string{"foo"}, 42) - inm.EmitKey([]string{"bar"}, 42) - inm.IncrCounter([]string{"baz"}, 42) - inm.AddSample([]string{"wow"}, 42) - - // Wait for period to end - time.Sleep(15 * time.Millisecond) - - // Send signal! 
- syscall.Kill(os.Getpid(), syscall.SIGUSR1) - - // Wait for flush - time.Sleep(10 * time.Millisecond) - - // Check the output - out := string(buf.Bytes()) - if !strings.Contains(out, "[G] 'foo': 42") { - t.Fatalf("bad: %v", out) - } - if !strings.Contains(out, "[P] 'bar': 42") { - t.Fatalf("bad: %v", out) - } - if !strings.Contains(out, "[C] 'baz': Count: 1 Sum: 42") { - t.Fatalf("bad: %v", out) - } - if !strings.Contains(out, "[S] 'wow': Count: 1 Sum: 42") { - t.Fatalf("bad: %v", out) - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/armon/go-metrics/inmem_test.go b/libnetwork/Godeps/_workspace/src/github.com/armon/go-metrics/inmem_test.go deleted file mode 100644 index 14ba31b382..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/armon/go-metrics/inmem_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package metrics - -import ( - "math" - "testing" - "time" -) - -func TestInmemSink(t *testing.T) { - inm := NewInmemSink(10*time.Millisecond, 50*time.Millisecond) - - data := inm.Data() - if len(data) != 1 { - t.Fatalf("bad: %v", data) - } - - // Add data points - inm.SetGauge([]string{"foo", "bar"}, 42) - inm.EmitKey([]string{"foo", "bar"}, 42) - inm.IncrCounter([]string{"foo", "bar"}, 20) - inm.IncrCounter([]string{"foo", "bar"}, 22) - inm.AddSample([]string{"foo", "bar"}, 20) - inm.AddSample([]string{"foo", "bar"}, 22) - - data = inm.Data() - if len(data) != 1 { - t.Fatalf("bad: %v", data) - } - - intvM := data[0] - intvM.RLock() - - if time.Now().Sub(intvM.Interval) > 10*time.Millisecond { - t.Fatalf("interval too old") - } - if intvM.Gauges["foo.bar"] != 42 { - t.Fatalf("bad val: %v", intvM.Gauges) - } - if intvM.Points["foo.bar"][0] != 42 { - t.Fatalf("bad val: %v", intvM.Points) - } - - agg := intvM.Counters["foo.bar"] - if agg.Count != 2 { - t.Fatalf("bad val: %v", agg) - } - if agg.Sum != 42 { - t.Fatalf("bad val: %v", agg) - } - if agg.SumSq != 884 { - t.Fatalf("bad val: %v", agg) - } - if agg.Min != 20 { - t.Fatalf("bad val: %v", agg) - } - if agg.Max != 22 { - t.Fatalf("bad val: %v", agg) - } - if agg.Mean() != 21 { - t.Fatalf("bad val: %v", agg) - } - if agg.Stddev() != math.Sqrt(2) { - t.Fatalf("bad val: %v", agg) - } - - if agg = intvM.Samples["foo.bar"]; agg == nil { - t.Fatalf("missing sample") - } - - intvM.RUnlock() - - for i := 1; i < 10; i++ { - time.Sleep(10 * time.Millisecond) - inm.SetGauge([]string{"foo", "bar"}, 42) - data = inm.Data() - if len(data) != min(i+1, 5) { - t.Fatalf("bad: %v", data) - } - } - - // Should not exceed 5 intervals! 
- time.Sleep(10 * time.Millisecond) - inm.SetGauge([]string{"foo", "bar"}, 42) - data = inm.Data() - if len(data) != 5 { - t.Fatalf("bad: %v", data) - } -} - -func min(a, b int) int { - if a < b { - return a - } - return b -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/armon/go-metrics/metrics_test.go b/libnetwork/Godeps/_workspace/src/github.com/armon/go-metrics/metrics_test.go deleted file mode 100644 index 9d7558ea9b..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/armon/go-metrics/metrics_test.go +++ /dev/null @@ -1,262 +0,0 @@ -package metrics - -import ( - "reflect" - "runtime" - "testing" - "time" -) - -func mockMetric() (*MockSink, *Metrics) { - m := &MockSink{} - met := &Metrics{sink: m} - return m, met -} - -func TestMetrics_SetGauge(t *testing.T) { - m, met := mockMetric() - met.SetGauge([]string{"key"}, float32(1)) - if m.keys[0][0] != "key" { - t.Fatalf("") - } - if m.vals[0] != 1 { - t.Fatalf("") - } - - m, met = mockMetric() - met.HostName = "test" - met.EnableHostname = true - met.SetGauge([]string{"key"}, float32(1)) - if m.keys[0][0] != "test" || m.keys[0][1] != "key" { - t.Fatalf("") - } - if m.vals[0] != 1 { - t.Fatalf("") - } - - m, met = mockMetric() - met.EnableTypePrefix = true - met.SetGauge([]string{"key"}, float32(1)) - if m.keys[0][0] != "gauge" || m.keys[0][1] != "key" { - t.Fatalf("") - } - if m.vals[0] != 1 { - t.Fatalf("") - } - - m, met = mockMetric() - met.ServiceName = "service" - met.SetGauge([]string{"key"}, float32(1)) - if m.keys[0][0] != "service" || m.keys[0][1] != "key" { - t.Fatalf("") - } - if m.vals[0] != 1 { - t.Fatalf("") - } -} - -func TestMetrics_EmitKey(t *testing.T) { - m, met := mockMetric() - met.EmitKey([]string{"key"}, float32(1)) - if m.keys[0][0] != "key" { - t.Fatalf("") - } - if m.vals[0] != 1 { - t.Fatalf("") - } - - m, met = mockMetric() - met.EnableTypePrefix = true - met.EmitKey([]string{"key"}, float32(1)) - if m.keys[0][0] != "kv" || m.keys[0][1] != "key" { - t.Fatalf("") - } - if m.vals[0] != 1 { - t.Fatalf("") - } - - m, met = mockMetric() - met.ServiceName = "service" - met.EmitKey([]string{"key"}, float32(1)) - if m.keys[0][0] != "service" || m.keys[0][1] != "key" { - t.Fatalf("") - } - if m.vals[0] != 1 { - t.Fatalf("") - } -} - -func TestMetrics_IncrCounter(t *testing.T) { - m, met := mockMetric() - met.IncrCounter([]string{"key"}, float32(1)) - if m.keys[0][0] != "key" { - t.Fatalf("") - } - if m.vals[0] != 1 { - t.Fatalf("") - } - - m, met = mockMetric() - met.EnableTypePrefix = true - met.IncrCounter([]string{"key"}, float32(1)) - if m.keys[0][0] != "counter" || m.keys[0][1] != "key" { - t.Fatalf("") - } - if m.vals[0] != 1 { - t.Fatalf("") - } - - m, met = mockMetric() - met.ServiceName = "service" - met.IncrCounter([]string{"key"}, float32(1)) - if m.keys[0][0] != "service" || m.keys[0][1] != "key" { - t.Fatalf("") - } - if m.vals[0] != 1 { - t.Fatalf("") - } -} - -func TestMetrics_AddSample(t *testing.T) { - m, met := mockMetric() - met.AddSample([]string{"key"}, float32(1)) - if m.keys[0][0] != "key" { - t.Fatalf("") - } - if m.vals[0] != 1 { - t.Fatalf("") - } - - m, met = mockMetric() - met.EnableTypePrefix = true - met.AddSample([]string{"key"}, float32(1)) - if m.keys[0][0] != "sample" || m.keys[0][1] != "key" { - t.Fatalf("") - } - if m.vals[0] != 1 { - t.Fatalf("") - } - - m, met = mockMetric() - met.ServiceName = "service" - met.AddSample([]string{"key"}, float32(1)) - if m.keys[0][0] != "service" || m.keys[0][1] != "key" { - t.Fatalf("") - } - if m.vals[0] != 1 { - t.Fatalf("") - 
} -} - -func TestMetrics_MeasureSince(t *testing.T) { - m, met := mockMetric() - met.TimerGranularity = time.Millisecond - n := time.Now() - met.MeasureSince([]string{"key"}, n) - if m.keys[0][0] != "key" { - t.Fatalf("") - } - if m.vals[0] > 0.1 { - t.Fatalf("") - } - - m, met = mockMetric() - met.TimerGranularity = time.Millisecond - met.EnableTypePrefix = true - met.MeasureSince([]string{"key"}, n) - if m.keys[0][0] != "timer" || m.keys[0][1] != "key" { - t.Fatalf("") - } - if m.vals[0] > 0.1 { - t.Fatalf("") - } - - m, met = mockMetric() - met.TimerGranularity = time.Millisecond - met.ServiceName = "service" - met.MeasureSince([]string{"key"}, n) - if m.keys[0][0] != "service" || m.keys[0][1] != "key" { - t.Fatalf("") - } - if m.vals[0] > 0.1 { - t.Fatalf("") - } -} - -func TestMetrics_EmitRuntimeStats(t *testing.T) { - runtime.GC() - m, met := mockMetric() - met.emitRuntimeStats() - - if m.keys[0][0] != "runtime" || m.keys[0][1] != "num_goroutines" { - t.Fatalf("bad key %v", m.keys) - } - if m.vals[0] <= 1 { - t.Fatalf("bad val: %v", m.vals) - } - - if m.keys[1][0] != "runtime" || m.keys[1][1] != "alloc_bytes" { - t.Fatalf("bad key %v", m.keys) - } - if m.vals[1] <= 100000 { - t.Fatalf("bad val: %v", m.vals) - } - - if m.keys[2][0] != "runtime" || m.keys[2][1] != "sys_bytes" { - t.Fatalf("bad key %v", m.keys) - } - if m.vals[2] <= 100000 { - t.Fatalf("bad val: %v", m.vals) - } - - if m.keys[3][0] != "runtime" || m.keys[3][1] != "malloc_count" { - t.Fatalf("bad key %v", m.keys) - } - if m.vals[3] <= 100 { - t.Fatalf("bad val: %v", m.vals) - } - - if m.keys[4][0] != "runtime" || m.keys[4][1] != "free_count" { - t.Fatalf("bad key %v", m.keys) - } - if m.vals[4] <= 100 { - t.Fatalf("bad val: %v", m.vals) - } - - if m.keys[5][0] != "runtime" || m.keys[5][1] != "heap_objects" { - t.Fatalf("bad key %v", m.keys) - } - if m.vals[5] <= 200 { - t.Fatalf("bad val: %v", m.vals) - } - - if m.keys[6][0] != "runtime" || m.keys[6][1] != "total_gc_pause_ns" { - t.Fatalf("bad key %v", m.keys) - } - if m.vals[6] <= 100000 { - t.Fatalf("bad val: %v", m.vals) - } - - if m.keys[7][0] != "runtime" || m.keys[7][1] != "total_gc_runs" { - t.Fatalf("bad key %v", m.keys) - } - if m.vals[7] <= 1 { - t.Fatalf("bad val: %v", m.vals) - } - - if m.keys[8][0] != "runtime" || m.keys[8][1] != "gc_pause_ns" { - t.Fatalf("bad key %v", m.keys) - } - if m.vals[8] <= 1000 { - t.Fatalf("bad val: %v", m.vals) - } -} - -func TestInsert(t *testing.T) { - k := []string{"hi", "bob"} - exp := []string{"hi", "there", "bob"} - out := insert(1, "there", k) - if !reflect.DeepEqual(exp, out) { - t.Fatalf("bad insert %v %v", exp, out) - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/armon/go-metrics/sink_test.go b/libnetwork/Godeps/_workspace/src/github.com/armon/go-metrics/sink_test.go deleted file mode 100644 index 15c5d771aa..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/armon/go-metrics/sink_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package metrics - -import ( - "reflect" - "testing" -) - -type MockSink struct { - keys [][]string - vals []float32 -} - -func (m *MockSink) SetGauge(key []string, val float32) { - m.keys = append(m.keys, key) - m.vals = append(m.vals, val) -} -func (m *MockSink) EmitKey(key []string, val float32) { - m.keys = append(m.keys, key) - m.vals = append(m.vals, val) -} -func (m *MockSink) IncrCounter(key []string, val float32) { - m.keys = append(m.keys, key) - m.vals = append(m.vals, val) -} -func (m *MockSink) AddSample(key []string, val float32) { - m.keys = append(m.keys, key) - 
m.vals = append(m.vals, val) -} - -func TestFanoutSink_Gauge(t *testing.T) { - m1 := &MockSink{} - m2 := &MockSink{} - fh := &FanoutSink{m1, m2} - - k := []string{"test"} - v := float32(42.0) - fh.SetGauge(k, v) - - if !reflect.DeepEqual(m1.keys[0], k) { - t.Fatalf("key not equal") - } - if !reflect.DeepEqual(m2.keys[0], k) { - t.Fatalf("key not equal") - } - if !reflect.DeepEqual(m1.vals[0], v) { - t.Fatalf("val not equal") - } - if !reflect.DeepEqual(m2.vals[0], v) { - t.Fatalf("val not equal") - } -} - -func TestFanoutSink_Key(t *testing.T) { - m1 := &MockSink{} - m2 := &MockSink{} - fh := &FanoutSink{m1, m2} - - k := []string{"test"} - v := float32(42.0) - fh.EmitKey(k, v) - - if !reflect.DeepEqual(m1.keys[0], k) { - t.Fatalf("key not equal") - } - if !reflect.DeepEqual(m2.keys[0], k) { - t.Fatalf("key not equal") - } - if !reflect.DeepEqual(m1.vals[0], v) { - t.Fatalf("val not equal") - } - if !reflect.DeepEqual(m2.vals[0], v) { - t.Fatalf("val not equal") - } -} - -func TestFanoutSink_Counter(t *testing.T) { - m1 := &MockSink{} - m2 := &MockSink{} - fh := &FanoutSink{m1, m2} - - k := []string{"test"} - v := float32(42.0) - fh.IncrCounter(k, v) - - if !reflect.DeepEqual(m1.keys[0], k) { - t.Fatalf("key not equal") - } - if !reflect.DeepEqual(m2.keys[0], k) { - t.Fatalf("key not equal") - } - if !reflect.DeepEqual(m1.vals[0], v) { - t.Fatalf("val not equal") - } - if !reflect.DeepEqual(m2.vals[0], v) { - t.Fatalf("val not equal") - } -} - -func TestFanoutSink_Sample(t *testing.T) { - m1 := &MockSink{} - m2 := &MockSink{} - fh := &FanoutSink{m1, m2} - - k := []string{"test"} - v := float32(42.0) - fh.AddSample(k, v) - - if !reflect.DeepEqual(m1.keys[0], k) { - t.Fatalf("key not equal") - } - if !reflect.DeepEqual(m2.keys[0], k) { - t.Fatalf("key not equal") - } - if !reflect.DeepEqual(m1.vals[0], v) { - t.Fatalf("val not equal") - } - if !reflect.DeepEqual(m2.vals[0], v) { - t.Fatalf("val not equal") - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/armon/go-metrics/start_test.go b/libnetwork/Godeps/_workspace/src/github.com/armon/go-metrics/start_test.go deleted file mode 100644 index 8b3210c15f..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/armon/go-metrics/start_test.go +++ /dev/null @@ -1,110 +0,0 @@ -package metrics - -import ( - "reflect" - "testing" - "time" -) - -func TestDefaultConfig(t *testing.T) { - conf := DefaultConfig("service") - if conf.ServiceName != "service" { - t.Fatalf("Bad name") - } - if conf.HostName == "" { - t.Fatalf("missing hostname") - } - if !conf.EnableHostname || !conf.EnableRuntimeMetrics { - t.Fatalf("expect true") - } - if conf.EnableTypePrefix { - t.Fatalf("expect false") - } - if conf.TimerGranularity != time.Millisecond { - t.Fatalf("bad granularity") - } - if conf.ProfileInterval != time.Second { - t.Fatalf("bad interval") - } -} - -func Test_GlobalMetrics_SetGauge(t *testing.T) { - m := &MockSink{} - globalMetrics = &Metrics{sink: m} - - k := []string{"test"} - v := float32(42.0) - SetGauge(k, v) - - if !reflect.DeepEqual(m.keys[0], k) { - t.Fatalf("key not equal") - } - if !reflect.DeepEqual(m.vals[0], v) { - t.Fatalf("val not equal") - } -} - -func Test_GlobalMetrics_EmitKey(t *testing.T) { - m := &MockSink{} - globalMetrics = &Metrics{sink: m} - - k := []string{"test"} - v := float32(42.0) - EmitKey(k, v) - - if !reflect.DeepEqual(m.keys[0], k) { - t.Fatalf("key not equal") - } - if !reflect.DeepEqual(m.vals[0], v) { - t.Fatalf("val not equal") - } -} - -func Test_GlobalMetrics_IncrCounter(t *testing.T) { - m := 
&MockSink{} - globalMetrics = &Metrics{sink: m} - - k := []string{"test"} - v := float32(42.0) - IncrCounter(k, v) - - if !reflect.DeepEqual(m.keys[0], k) { - t.Fatalf("key not equal") - } - if !reflect.DeepEqual(m.vals[0], v) { - t.Fatalf("val not equal") - } -} - -func Test_GlobalMetrics_AddSample(t *testing.T) { - m := &MockSink{} - globalMetrics = &Metrics{sink: m} - - k := []string{"test"} - v := float32(42.0) - AddSample(k, v) - - if !reflect.DeepEqual(m.keys[0], k) { - t.Fatalf("key not equal") - } - if !reflect.DeepEqual(m.vals[0], v) { - t.Fatalf("val not equal") - } -} - -func Test_GlobalMetrics_MeasureSince(t *testing.T) { - m := &MockSink{} - globalMetrics = &Metrics{sink: m} - globalMetrics.TimerGranularity = time.Millisecond - - k := []string{"test"} - now := time.Now() - MeasureSince(k, now) - - if !reflect.DeepEqual(m.keys[0], k) { - t.Fatalf("key not equal") - } - if m.vals[0] > 0.1 { - t.Fatalf("val too large %v", m.vals[0]) - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/armon/go-metrics/statsd_test.go b/libnetwork/Godeps/_workspace/src/github.com/armon/go-metrics/statsd_test.go deleted file mode 100644 index 622eb5d3aa..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/armon/go-metrics/statsd_test.go +++ /dev/null @@ -1,105 +0,0 @@ -package metrics - -import ( - "bufio" - "bytes" - "net" - "testing" - "time" -) - -func TestStatsd_Flatten(t *testing.T) { - s := &StatsdSink{} - flat := s.flattenKey([]string{"a", "b", "c", "d"}) - if flat != "a.b.c.d" { - t.Fatalf("Bad flat") - } -} - -func TestStatsd_PushFullQueue(t *testing.T) { - q := make(chan string, 1) - q <- "full" - - s := &StatsdSink{metricQueue: q} - s.pushMetric("omit") - - out := <-q - if out != "full" { - t.Fatalf("bad val %v", out) - } - - select { - case v := <-q: - t.Fatalf("bad val %v", v) - default: - } -} - -func TestStatsd_Conn(t *testing.T) { - addr := "127.0.0.1:7524" - done := make(chan bool) - go func() { - list, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 7524}) - if err != nil { - panic(err) - } - defer list.Close() - buf := make([]byte, 1500) - n, err := list.Read(buf) - if err != nil { - panic(err) - } - buf = buf[:n] - reader := bufio.NewReader(bytes.NewReader(buf)) - - line, err := reader.ReadString('\n') - if err != nil { - t.Fatalf("unexpected err %s", err) - } - if line != "gauge.val:1.000000|g\n" { - t.Fatalf("bad line %s", line) - } - - line, err = reader.ReadString('\n') - if err != nil { - t.Fatalf("unexpected err %s", err) - } - if line != "key.other:2.000000|kv\n" { - t.Fatalf("bad line %s", line) - } - - line, err = reader.ReadString('\n') - if err != nil { - t.Fatalf("unexpected err %s", err) - } - if line != "counter.me:3.000000|c\n" { - t.Fatalf("bad line %s", line) - } - - line, err = reader.ReadString('\n') - if err != nil { - t.Fatalf("unexpected err %s", err) - } - if line != "sample.slow_thingy:4.000000|ms\n" { - t.Fatalf("bad line %s", line) - } - - done <- true - }() - s, err := NewStatsdSink(addr) - if err != nil { - t.Fatalf("bad error") - } - - s.SetGauge([]string{"gauge", "val"}, float32(1)) - s.EmitKey([]string{"key", "other"}, float32(2)) - s.IncrCounter([]string{"counter", "me"}, float32(3)) - s.AddSample([]string{"sample", "slow thingy"}, float32(4)) - - select { - case <-done: - s.Shutdown() - case <-time.After(3 * time.Second): - t.Fatalf("timeout") - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/armon/go-metrics/statsite_test.go 
b/libnetwork/Godeps/_workspace/src/github.com/armon/go-metrics/statsite_test.go deleted file mode 100644 index d9c744f416..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/armon/go-metrics/statsite_test.go +++ /dev/null @@ -1,101 +0,0 @@ -package metrics - -import ( - "bufio" - "net" - "testing" - "time" -) - -func acceptConn(addr string) net.Conn { - ln, _ := net.Listen("tcp", addr) - conn, _ := ln.Accept() - return conn -} - -func TestStatsite_Flatten(t *testing.T) { - s := &StatsiteSink{} - flat := s.flattenKey([]string{"a", "b", "c", "d"}) - if flat != "a.b.c.d" { - t.Fatalf("Bad flat") - } -} - -func TestStatsite_PushFullQueue(t *testing.T) { - q := make(chan string, 1) - q <- "full" - - s := &StatsiteSink{metricQueue: q} - s.pushMetric("omit") - - out := <-q - if out != "full" { - t.Fatalf("bad val %v", out) - } - - select { - case v := <-q: - t.Fatalf("bad val %v", v) - default: - } -} - -func TestStatsite_Conn(t *testing.T) { - addr := "localhost:7523" - done := make(chan bool) - go func() { - conn := acceptConn(addr) - reader := bufio.NewReader(conn) - - line, err := reader.ReadString('\n') - if err != nil { - t.Fatalf("unexpected err %s", err) - } - if line != "gauge.val:1.000000|g\n" { - t.Fatalf("bad line %s", line) - } - - line, err = reader.ReadString('\n') - if err != nil { - t.Fatalf("unexpected err %s", err) - } - if line != "key.other:2.000000|kv\n" { - t.Fatalf("bad line %s", line) - } - - line, err = reader.ReadString('\n') - if err != nil { - t.Fatalf("unexpected err %s", err) - } - if line != "counter.me:3.000000|c\n" { - t.Fatalf("bad line %s", line) - } - - line, err = reader.ReadString('\n') - if err != nil { - t.Fatalf("unexpected err %s", err) - } - if line != "sample.slow_thingy:4.000000|ms\n" { - t.Fatalf("bad line %s", line) - } - - conn.Close() - done <- true - }() - s, err := NewStatsiteSink(addr) - if err != nil { - t.Fatalf("bad error") - } - - s.SetGauge([]string{"gauge", "val"}, float32(1)) - s.EmitKey([]string{"key", "other"}, float32(2)) - s.IncrCounter([]string{"counter", "me"}, float32(3)) - s.AddSample([]string{"sample", "slow thingy"}, float32(4)) - - select { - case <-done: - s.Shutdown() - case <-time.After(3 * time.Second): - t.Fatalf("timeout") - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/Makefile b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/Makefile index cfbed514bb..e035e63adc 100644 --- a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/Makefile +++ b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/Makefile @@ -1,54 +1,18 @@ -TEST=. -BENCH=. -COVERPROFILE=/tmp/c.out BRANCH=`git rev-parse --abbrev-ref HEAD` COMMIT=`git rev-parse --short HEAD` GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" default: build -bench: - go test -v -test.run=NOTHINCONTAINSTHIS -test.bench=$(BENCH) - -# http://cloc.sourceforge.net/ -cloc: - @cloc --not-match-f='Makefile|_test.go' . - -cover: fmt - go test -coverprofile=$(COVERPROFILE) -test.run=$(TEST) $(COVERFLAG) . - go tool cover -html=$(COVERPROFILE) - rm $(COVERPROFILE) - -cpuprofile: fmt - @go test -c - @./bolt.test -test.v -test.run=$(TEST) -test.cpuprofile cpu.prof +race: + @go test -v -race -test.run="TestSimulate_(100op|1000op)" # go get github.com/kisielk/errcheck errcheck: - @echo "=== errcheck ===" - @errcheck github.com/boltdb/bolt + @errcheck -ignorepkg=bytes -ignore=os:Remove github.com/boltdb/bolt -fmt: - @go fmt ./... +test: + @go test -v -cover . + @go test -v ./cmd/bolt -get: - @go get -d ./... 
- -build: get - @mkdir -p bin - @go build -ldflags=$(GOLDFLAGS) -a -o bin/bolt ./cmd/bolt - -test: fmt - @go get github.com/stretchr/testify/assert - @echo "=== TESTS ===" - @go test -v -cover -test.run=$(TEST) - @echo "" - @echo "" - @echo "=== CLI ===" - @go test -v -test.run=$(TEST) ./cmd/bolt - @echo "" - @echo "" - @echo "=== RACE DETECTOR ===" - @go test -v -race -test.run="TestSimulate_(100op|1000op)" - -.PHONY: bench cloc cover cpuprofile fmt memprofile test +.PHONY: fmt test diff --git a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/README.md b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/README.md index 00fad6afb8..66b19ace8a 100644 --- a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/README.md +++ b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/README.md @@ -1,8 +1,8 @@ -Bolt [![Build Status](https://drone.io/github.com/boltdb/bolt/status.png)](https://drone.io/github.com/boltdb/bolt/latest) [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.png?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.png)](https://godoc.org/github.com/boltdb/bolt) ![Version](http://img.shields.io/badge/version-1.0-green.png) +Bolt [![Build Status](https://drone.io/github.com/boltdb/bolt/status.png)](https://drone.io/github.com/boltdb/bolt/latest) [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.0-green.svg) ==== -Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas] and -the [LMDB project][lmdb]. The goal of the project is to provide a simple, +Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas] +[LMDB project][lmdb]. The goal of the project is to provide a simple, fast, and reliable database for projects that don't require a full database server such as Postgres or MySQL. @@ -13,7 +13,6 @@ and setting values. That's it. [hyc_symas]: https://twitter.com/hyc_symas [lmdb]: http://symas.com/mdb/ - ## Project Status Bolt is stable and the API is fixed. Full unit test coverage and randomized @@ -22,6 +21,36 @@ Bolt is currently in high-load production environments serving databases as large as 1TB. Many companies such as Shopify and Heroku use Bolt-backed services every day. 
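For orientation while reading the README changes below, the core API they document is small: open a file-backed database, run read-write work through `Update()`, and run reads through `View()`. A minimal, self-contained sketch (editorial illustration only, not part of the vendored diff; the file name `my.db` and bucket name `MyBucket` are placeholders):

```go
package main

import (
	"fmt"
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	// Open (or create) the database file with 0600 permissions.
	db, err := bolt.Open("my.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Read-write transaction: create a bucket and store a key.
	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("MyBucket"))
		if err != nil {
			return err
		}
		return b.Put([]byte("answer"), []byte("42"))
	})
	if err != nil {
		log.Fatal(err)
	}

	// Read-only transaction: fetch the value back.
	err = db.View(func(tx *bolt.Tx) error {
		v := tx.Bucket([]byte("MyBucket")).Get([]byte("answer"))
		fmt.Printf("answer=%s\n", v)
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```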
+## Table of Contents + +- [Getting Started](#getting-started) + - [Installing](#installing) + - [Opening a database](#opening-a-database) + - [Transactions](#transactions) + - [Read-write transactions](#read-write-transactions) + - [Read-only transactions](#read-only-transactions) + - [Batch read-write transactions](#batch-read-write-transactions) + - [Managing transactions manually](#managing-transactions-manually) + - [Using buckets](#using-buckets) + - [Using key/value pairs](#using-keyvalue-pairs) + - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket) + - [Iterating over keys](#iterating-over-keys) + - [Prefix scans](#prefix-scans) + - [Range scans](#range-scans) + - [ForEach()](#foreach) + - [Nested buckets](#nested-buckets) + - [Database backups](#database-backups) + - [Statistics](#statistics) + - [Read-Only Mode](#read-only-mode) + - [Mobile Use (iOS/Android)](#mobile-use-iosandroid) +- [Resources](#resources) +- [Comparison with other databases](#comparison-with-other-databases) + - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases) + - [LevelDB, RocksDB](#leveldb-rocksdb) + - [LMDB](#lmdb) +- [Caveats & Limitations](#caveats--limitations) +- [Reading the Source](#reading-the-source) +- [Other Projects Using Bolt](#other-projects-using-bolt) ## Getting Started @@ -180,8 +209,8 @@ and then safely close your transaction if an error is returned. This is the recommended way to use Bolt transactions. However, sometimes you may want to manually start and end your transactions. -You can use the `Tx.Begin()` function directly but _please_ be sure to close the -transaction. +You can use the `Tx.Begin()` function directly but **please** be sure to close +the transaction. ```go // Start a writable transaction. @@ -256,7 +285,7 @@ db.View(func(tx *bolt.Tx) error { ``` The `Get()` function does not return an error because its operation is -guarenteed to work (unless there is some kind of system failure). If the key +guaranteed to work (unless there is some kind of system failure). If the key exists then it will return its byte slice value. If it doesn't exist then it will return `nil`. It's important to note that you can have a zero-length value set to a key which is different than the key not existing. @@ -268,6 +297,49 @@ transaction is open. If you need to use a value outside of the transaction then you must use `copy()` to copy it to another byte slice. +### Autoincrementing integer for the bucket +By using the `NextSequence()` function, you can let Bolt determine a sequence +which can be used as the unique identifier for your key/value pairs. See the +example below. + +```go +// CreateUser saves u to the store. The new user ID is set on u once the data is persisted. +func (s *Store) CreateUser(u *User) error { + return s.db.Update(func(tx *bolt.Tx) error { + // Retrieve the users bucket. + // This should be created when the DB is first opened. + b := tx.Bucket([]byte("users")) + + // Generate ID for the user. + // This returns an error only if the Tx is closed or not writeable. + // That can't happen in an Update() call so I ignore the error check. + id, _ = b.NextSequence() + u.ID = int(id) + + // Marshal user data into bytes. + buf, err := json.Marshal(u) + if err != nil { + return err + } + + // Persist bytes to users bucket. + return b.Put(itob(u.ID), buf) + }) +} + +// itob returns an 8-byte big endian representation of v. 
+func itob(v int) []byte { + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, uint64(v)) + return b +} + +type User struct { + ID int + ... +} +``` + ### Iterating over keys Bolt stores its keys in byte-sorted order within a bucket. This makes sequential @@ -276,7 +348,9 @@ iteration over these keys extremely fast. To iterate over keys we'll use a ```go db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys b := tx.Bucket([]byte("MyBucket")) + c := b.Cursor() for k, v := c.First(); k != nil; k, v = c.Next() { @@ -300,10 +374,15 @@ Next() Move to the next key. Prev() Move to the previous key. ``` -When you have iterated to the end of the cursor then `Next()` will return `nil`. -You must seek to a position using `First()`, `Last()`, or `Seek()` before -calling `Next()` or `Prev()`. If you do not seek to a position then these -functions will return `nil`. +Each of those functions has a return signature of `(key []byte, value []byte)`. +When you have iterated to the end of the cursor then `Next()` will return a +`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()` +before calling `Next()` or `Prev()`. If you do not seek to a position then +these functions will return a `nil` key. + +During iteration, if the key is non-`nil` but the value is `nil`, that means +the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to +access the sub-bucket. #### Prefix scans @@ -312,6 +391,7 @@ To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`: ```go db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys c := tx.Bucket([]byte("MyBucket")).Cursor() prefix := []byte("1234") @@ -331,7 +411,7 @@ date range like this: ```go db.View(func(tx *bolt.Tx) error { - // Assume our events bucket has RFC3339 encoded time keys. + // Assume our events bucket exists and has RFC3339 encoded time keys. c := tx.Bucket([]byte("Events")).Cursor() // Our time range spans the 90's decade. @@ -355,7 +435,9 @@ all the keys in a bucket: ```go db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys b := tx.Bucket([]byte("MyBucket")) + b.ForEach(func(k, v []byte) error { fmt.Printf("key=%s, value=%s\n", k, v) return nil @@ -382,8 +464,11 @@ func (*Bucket) DeleteBucket(key []byte) error Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()` function to write a consistent view of the database to a writer. If you call this from a read-only transaction, it will perform a hot backup and not block -your other database reads and writes. It will also use `O_DIRECT` when available -to prevent page cache trashing. +your other database reads and writes. + +By default, it will use a regular file handle which will utilize the operating +system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx) +documentation for information about optimizing for larger-than-RAM datasets. One common use case is to backup over HTTP so you can use tools like `cURL` to do database backups: @@ -465,6 +550,84 @@ if err != nil { } ``` +### Mobile Use (iOS/Android) + +Bolt is able to run on mobile devices by leveraging the binding feature of the +[gomobile](https://github.com/golang/mobile) tool. Create a struct that will +contain your database logic and a reference to a `*bolt.DB` with a initializing +contstructor that takes in a filepath where the database file will be stored. +Neither Android nor iOS require extra permissions or cleanup from using this method. 
+ +```go +func NewBoltDB(filepath string) *BoltDB { + db, err := bolt.Open(filepath+"/demo.db", 0600, nil) + if err != nil { + log.Fatal(err) + } + + return &BoltDB{db} +} + +type BoltDB struct { + db *bolt.DB + ... +} + +func (b *BoltDB) Path() string { + return b.db.Path() +} + +func (b *BoltDB) Close() { + b.db.Close() +} +``` + +Database logic should be defined as methods on this wrapper struct. + +To initialize this struct from the native language (both platforms now sync +their local storage to the cloud. These snippets disable that functionality for the +database file): + +#### Android + +```java +String path; +if (android.os.Build.VERSION.SDK_INT >=android.os.Build.VERSION_CODES.LOLLIPOP){ + path = getNoBackupFilesDir().getAbsolutePath(); +} else{ + path = getFilesDir().getAbsolutePath(); +} +Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path) +``` + +#### iOS + +```objc +- (void)demo { + NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory, + NSUserDomainMask, + YES) objectAtIndex:0]; + GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path); + [self addSkipBackupAttributeToItemAtPath:demo.path]; + //Some DB Logic would go here + [demo close]; +} + +- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString +{ + NSURL* URL= [NSURL fileURLWithPath: filePathString]; + assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]); + + NSError *error = nil; + BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES] + forKey: NSURLIsExcludedFromBackupKey error: &error]; + if(!success){ + NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error); + } + return success; +} + +``` ## Resources @@ -500,7 +663,7 @@ they are libraries bundled into the application, however, their underlying structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes random writes by using a write ahead log and multi-tiered, sorted files called SSTables. Bolt uses a B+tree internally and only a single file. Both approaches -have trade offs. +have trade-offs. If you require a high random write throughput (>10,000 w/sec) or you need to use spinning disks then LevelDB could be a good choice. If your application is @@ -536,9 +699,8 @@ It's important to pick the right tool for the job and Bolt is no exception. Here are a few things to note when evaluating and using Bolt: * Bolt is good for read intensive workloads. Sequential write performance is - also fast but random writes can be slow. You can add a write-ahead log or - [transaction coalescer](https://github.com/boltdb/coalescer) in front of Bolt - to mitigate this issue. + also fast but random writes can be slow. You can use `DB.Batch()` or add a + write-ahead log to help mitigate this issue. * Bolt uses a B+tree internally so there can be a lot of random page access. SSDs provide a significant performance boost over spinning disks. @@ -568,11 +730,13 @@ Here are a few things to note when evaluating and using Bolt: can in memory and will release memory as needed to other processes. This means that Bolt can show very high memory usage when working with large databases. However, this is expected and the OS will release memory as needed. Bolt can - handle databases much larger than the available physical RAM. + handle databases much larger than the available physical RAM, provided its + memory-map fits in the process virtual address space. It may be problematic + on 32-bits systems. 
* The data structures in the Bolt database are memory mapped so the data file will be endian specific. This means that you cannot copy a Bolt file from a - little endian machine to a big endian machine and have it work. For most + little endian machine to a big endian machine and have it work. For most users this is not a concern since most modern CPUs are little endian. * Because of the way pages are laid out on disk, Bolt cannot truncate data files @@ -587,6 +751,56 @@ Here are a few things to note when evaluating and using Bolt: [page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638 +## Reading the Source + +Bolt is a relatively small code base (<3KLOC) for an embedded, serializable, +transactional key/value database so it can be a good starting point for people +interested in how databases work. + +The best places to start are the main entry points into Bolt: + +- `Open()` - Initializes the reference to the database. It's responsible for + creating the database if it doesn't exist, obtaining an exclusive lock on the + file, reading the meta pages, & memory-mapping the file. + +- `DB.Begin()` - Starts a read-only or read-write transaction depending on the + value of the `writable` argument. This requires briefly obtaining the "meta" + lock to keep track of open transactions. Only one read-write transaction can + exist at a time so the "rwlock" is acquired during the life of a read-write + transaction. + +- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the + arguments, a cursor is used to traverse the B+tree to the page and position + where they key & value will be written. Once the position is found, the bucket + materializes the underlying page and the page's parent pages into memory as + "nodes". These nodes are where mutations occur during read-write transactions. + These changes get flushed to disk during commit. + +- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor + to move to the page & position of a key/value pair. During a read-only + transaction, the key and value data is returned as a direct reference to the + underlying mmap file so there's no allocation overhead. For read-write + transactions, this data may reference the mmap file or one of the in-memory + node values. + +- `Cursor` - This object is simply for traversing the B+tree of on-disk pages + or in-memory nodes. It can seek to a specific key, move to the first or last + value, or it can move forward or backward. The cursor handles the movement up + and down the B+tree transparently to the end user. + +- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages + into pages to be written to disk. Writing to disk then occurs in two phases. + First, the dirty pages are written to disk and an `fsync()` occurs. Second, a + new meta page with an incremented transaction ID is written and another + `fsync()` occurs. This two phase write ensures that partially written data + pages are ignored in the event of a crash since the meta page pointing to them + is never written. Partially written meta pages are invalidated because they + are written with a checksum. + +If you have additional notes that could be helpful for others, please submit +them via pull request. 
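The entry points listed in "Reading the Source" compose naturally in a manually managed transaction. A rough sketch, assuming a pre-existing `widgets` bucket and the same `bolt`/`fmt` imports as the sketch earlier in this diff (illustrative only, not vendored code):

```go
// walkWidgets exercises the entry points described above: Begin, Bucket,
// Put, a Cursor scan, and the final Commit (or Rollback on error).
func walkWidgets(db *bolt.DB) error {
	// Begin a read-write transaction; only one may be open at a time.
	tx, err := db.Begin(true)
	if err != nil {
		return err
	}
	// Safe to call even after a successful Commit; the error is ignored.
	defer tx.Rollback()

	b := tx.Bucket([]byte("widgets"))
	if b == nil {
		return fmt.Errorf("bucket %q not found", "widgets")
	}

	// Put materializes the touched pages as in-memory nodes.
	if err := b.Put([]byte("k1"), []byte("v1")); err != nil {
		return err
	}

	// The cursor traverses the B+tree in byte-sorted key order.
	c := b.Cursor()
	for k, v := c.First(); k != nil; k, v = c.Next() {
		fmt.Printf("%s=%s\n", k, v)
	}

	// Commit writes dirty pages, fsyncs, then writes a new meta page.
	return tx.Commit()
}
```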
+ + ## Other Projects Using Bolt Below is a list of public, open source projects that use Bolt: @@ -597,25 +811,34 @@ Below is a list of public, open source projects that use Bolt: * [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics. * [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects. * [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday. -* [ChainStore](https://github.com/nulayer/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations. +* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations. * [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite. * [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin". * [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka. * [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed. * [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt. -* [photosite/session](http://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site. +* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site. * [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage. * [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters. * [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend. * [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend. * [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server. * [SkyDB](https://github.com/skydb/sky) - Behavioral analytics database. -* [Seaweed File System](https://github.com/chrislusf/weed-fs) - Highly scalable distributed key~file system with O(1) disk read. -* [InfluxDB](http://influxdb.com) - Scalable datastore for metrics, events, and real-time analytics. +* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read. +* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics. * [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data. * [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system. * [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware. -* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistant, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs. +* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. 
It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs. * [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems. +* [stow](https://github.com/djherbis/stow) - a persistence manager for objects + backed by boltdb. +* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining + simple tx and key scans. +* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets. +* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service +* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service. +* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners. +* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores. If you are using Bolt in a project please send a pull request to add it to the list. diff --git a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/appveyor.yml b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/appveyor.yml new file mode 100644 index 0000000000..6e26e941d6 --- /dev/null +++ b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/appveyor.yml @@ -0,0 +1,18 @@ +version: "{build}" + +os: Windows Server 2012 R2 + +clone_folder: c:\gopath\src\github.com\boltdb\bolt + +environment: + GOPATH: c:\gopath + +install: + - echo %PATH% + - echo %GOPATH% + - go version + - go env + - go get -v -t ./... + +build_script: + - go test -v ./... diff --git a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/batch.go b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/batch.go deleted file mode 100644 index 84acae6bbf..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/batch.go +++ /dev/null @@ -1,138 +0,0 @@ -package bolt - -import ( - "errors" - "fmt" - "sync" - "time" -) - -// Batch calls fn as part of a batch. It behaves similar to Update, -// except: -// -// 1. concurrent Batch calls can be combined into a single Bolt -// transaction. -// -// 2. the function passed to Batch may be called multiple times, -// regardless of whether it returns error or not. -// -// This means that Batch function side effects must be idempotent and -// take permanent effect only after a successful return is seen in -// caller. -// -// The maximum batch size and delay can be adjusted with DB.MaxBatchSize -// and DB.MaxBatchDelay, respectively. -// -// Batch is only useful when there are multiple goroutines calling it. -func (db *DB) Batch(fn func(*Tx) error) error { - errCh := make(chan error, 1) - - db.batchMu.Lock() - if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) { - // There is no existing batch, or the existing batch is full; start a new one. 
- db.batch = &batch{ - db: db, - } - db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger) - } - db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh}) - if len(db.batch.calls) >= db.MaxBatchSize { - // wake up batch, it's ready to run - go db.batch.trigger() - } - db.batchMu.Unlock() - - err := <-errCh - if err == trySolo { - err = db.Update(fn) - } - return err -} - -type call struct { - fn func(*Tx) error - err chan<- error -} - -type batch struct { - db *DB - timer *time.Timer - start sync.Once - calls []call -} - -// trigger runs the batch if it hasn't already been run. -func (b *batch) trigger() { - b.start.Do(b.run) -} - -// run performs the transactions in the batch and communicates results -// back to DB.Batch. -func (b *batch) run() { - b.db.batchMu.Lock() - b.timer.Stop() - // Make sure no new work is added to this batch, but don't break - // other batches. - if b.db.batch == b { - b.db.batch = nil - } - b.db.batchMu.Unlock() - -retry: - for len(b.calls) > 0 { - var failIdx = -1 - err := b.db.Update(func(tx *Tx) error { - for i, c := range b.calls { - if err := safelyCall(c.fn, tx); err != nil { - failIdx = i - return err - } - } - return nil - }) - - if failIdx >= 0 { - // take the failing transaction out of the batch. it's - // safe to shorten b.calls here because db.batch no longer - // points to us, and we hold the mutex anyway. - c := b.calls[failIdx] - b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1] - // tell the submitter re-run it solo, continue with the rest of the batch - c.err <- trySolo - continue retry - } - - // pass success, or bolt internal errors, to all callers - for _, c := range b.calls { - if c.err != nil { - c.err <- err - } - } - break retry - } -} - -// trySolo is a special sentinel error value used for signaling that a -// transaction function should be re-run. It should never be seen by -// callers. 
-var trySolo = errors.New("batch function returned an error and should be re-run solo") - -type panicked struct { - reason interface{} -} - -func (p panicked) Error() string { - if err, ok := p.reason.(error); ok { - return err.Error() - } - return fmt.Sprintf("panic: %v", p.reason) -} - -func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { - defer func() { - if p := recover(); p != nil { - err = panicked{p} - } - }() - return fn(tx) -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/batch_benchmark_test.go b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/batch_benchmark_test.go deleted file mode 100644 index b745a371f5..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/batch_benchmark_test.go +++ /dev/null @@ -1,170 +0,0 @@ -package bolt_test - -import ( - "bytes" - "encoding/binary" - "errors" - "hash/fnv" - "sync" - "testing" - - "github.com/boltdb/bolt" -) - -func validateBatchBench(b *testing.B, db *TestDB) { - var rollback = errors.New("sentinel error to cause rollback") - validate := func(tx *bolt.Tx) error { - bucket := tx.Bucket([]byte("bench")) - h := fnv.New32a() - buf := make([]byte, 4) - for id := uint32(0); id < 1000; id++ { - binary.LittleEndian.PutUint32(buf, id) - h.Reset() - h.Write(buf[:]) - k := h.Sum(nil) - v := bucket.Get(k) - if v == nil { - b.Errorf("not found id=%d key=%x", id, k) - continue - } - if g, e := v, []byte("filler"); !bytes.Equal(g, e) { - b.Errorf("bad value for id=%d key=%x: %s != %q", id, k, g, e) - } - if err := bucket.Delete(k); err != nil { - return err - } - } - // should be empty now - c := bucket.Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - b.Errorf("unexpected key: %x = %q", k, v) - } - return rollback - } - if err := db.Update(validate); err != nil && err != rollback { - b.Error(err) - } -} - -func BenchmarkDBBatchAutomatic(b *testing.B) { - db := NewTestDB() - defer db.Close() - db.MustCreateBucket([]byte("bench")) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - start := make(chan struct{}) - var wg sync.WaitGroup - - for round := 0; round < 1000; round++ { - wg.Add(1) - - go func(id uint32) { - defer wg.Done() - <-start - - h := fnv.New32a() - buf := make([]byte, 4) - binary.LittleEndian.PutUint32(buf, id) - h.Write(buf[:]) - k := h.Sum(nil) - insert := func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("bench")) - return b.Put(k, []byte("filler")) - } - if err := db.Batch(insert); err != nil { - b.Error(err) - return - } - }(uint32(round)) - } - close(start) - wg.Wait() - } - - b.StopTimer() - validateBatchBench(b, db) -} - -func BenchmarkDBBatchSingle(b *testing.B) { - db := NewTestDB() - defer db.Close() - db.MustCreateBucket([]byte("bench")) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - start := make(chan struct{}) - var wg sync.WaitGroup - - for round := 0; round < 1000; round++ { - wg.Add(1) - go func(id uint32) { - defer wg.Done() - <-start - - h := fnv.New32a() - buf := make([]byte, 4) - binary.LittleEndian.PutUint32(buf, id) - h.Write(buf[:]) - k := h.Sum(nil) - insert := func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("bench")) - return b.Put(k, []byte("filler")) - } - if err := db.Update(insert); err != nil { - b.Error(err) - return - } - }(uint32(round)) - } - close(start) - wg.Wait() - } - - b.StopTimer() - validateBatchBench(b, db) -} - -func BenchmarkDBBatchManual10x100(b *testing.B) { - db := NewTestDB() - defer db.Close() - db.MustCreateBucket([]byte("bench")) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - start := make(chan struct{}) - 
var wg sync.WaitGroup - - for major := 0; major < 10; major++ { - wg.Add(1) - go func(id uint32) { - defer wg.Done() - <-start - - insert100 := func(tx *bolt.Tx) error { - h := fnv.New32a() - buf := make([]byte, 4) - for minor := uint32(0); minor < 100; minor++ { - binary.LittleEndian.PutUint32(buf, uint32(id*100+minor)) - h.Reset() - h.Write(buf[:]) - k := h.Sum(nil) - b := tx.Bucket([]byte("bench")) - if err := b.Put(k, []byte("filler")); err != nil { - return err - } - } - return nil - } - if err := db.Update(insert100); err != nil { - b.Fatal(err) - } - }(uint32(major)) - } - close(start) - wg.Wait() - } - - b.StopTimer() - validateBatchBench(b, db) -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/batch_example_test.go b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/batch_example_test.go deleted file mode 100644 index 74eff8af98..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/batch_example_test.go +++ /dev/null @@ -1,148 +0,0 @@ -package bolt_test - -import ( - "encoding/binary" - "fmt" - "io/ioutil" - "log" - "math/rand" - "net/http" - "net/http/httptest" - "os" - - "github.com/boltdb/bolt" -) - -// Set this to see how the counts are actually updated. -const verbose = false - -// Counter updates a counter in Bolt for every URL path requested. -type counter struct { - db *bolt.DB -} - -func (c counter) ServeHTTP(rw http.ResponseWriter, req *http.Request) { - // Communicates the new count from a successful database - // transaction. - var result uint64 - - increment := func(tx *bolt.Tx) error { - b, err := tx.CreateBucketIfNotExists([]byte("hits")) - if err != nil { - return err - } - key := []byte(req.URL.String()) - // Decode handles key not found for us. - count := decode(b.Get(key)) + 1 - b.Put(key, encode(count)) - // All good, communicate new count. - result = count - return nil - } - if err := c.db.Batch(increment); err != nil { - http.Error(rw, err.Error(), 500) - return - } - - if verbose { - log.Printf("server: %s: %d", req.URL.String(), result) - } - - rw.Header().Set("Content-Type", "application/octet-stream") - fmt.Fprintf(rw, "%d\n", result) -} - -func client(id int, base string, paths []string) error { - // Process paths in random order. - rng := rand.New(rand.NewSource(int64(id))) - permutation := rng.Perm(len(paths)) - - for i := range paths { - path := paths[permutation[i]] - resp, err := http.Get(base + path) - if err != nil { - return err - } - defer resp.Body.Close() - buf, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - if verbose { - log.Printf("client: %s: %s", path, buf) - } - } - return nil -} - -func ExampleDB_Batch() { - // Open the database. - db, _ := bolt.Open(tempfile(), 0666, nil) - defer os.Remove(db.Path()) - defer db.Close() - - // Start our web server - count := counter{db} - srv := httptest.NewServer(count) - defer srv.Close() - - // Decrease the batch size to make things more interesting. - db.MaxBatchSize = 3 - - // Get every path multiple times concurrently. - const clients = 10 - paths := []string{ - "/foo", - "/bar", - "/baz", - "/quux", - "/thud", - "/xyzzy", - } - errors := make(chan error, clients) - for i := 0; i < clients; i++ { - go func(id int) { - errors <- client(id, srv.URL, paths) - }(i) - } - // Check all responses to make sure there's no error. 
- for i := 0; i < clients; i++ { - if err := <-errors; err != nil { - fmt.Printf("client error: %v", err) - return - } - } - - // Check the final result - db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("hits")) - c := b.Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - fmt.Printf("hits to %s: %d\n", k, decode(v)) - } - return nil - }) - - // Output: - // hits to /bar: 10 - // hits to /baz: 10 - // hits to /foo: 10 - // hits to /quux: 10 - // hits to /thud: 10 - // hits to /xyzzy: 10 -} - -// encode marshals a counter. -func encode(n uint64) []byte { - buf := make([]byte, 8) - binary.BigEndian.PutUint64(buf, n) - return buf -} - -// decode unmarshals a counter. Nil buffers are decoded as 0. -func decode(buf []byte) uint64 { - if buf == nil { - return 0 - } - return binary.BigEndian.Uint64(buf) -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/batch_test.go b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/batch_test.go deleted file mode 100644 index 0b5075fddf..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/batch_test.go +++ /dev/null @@ -1,167 +0,0 @@ -package bolt_test - -import ( - "testing" - "time" - - "github.com/boltdb/bolt" -) - -// Ensure two functions can perform updates in a single batch. -func TestDB_Batch(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.MustCreateBucket([]byte("widgets")) - - // Iterate over multiple updates in separate goroutines. - n := 2 - ch := make(chan error) - for i := 0; i < n; i++ { - go func(i int) { - ch <- db.Batch(func(tx *bolt.Tx) error { - return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{}) - }) - }(i) - } - - // Check all responses to make sure there's no error. - for i := 0; i < n; i++ { - if err := <-ch; err != nil { - t.Fatal(err) - } - } - - // Ensure data is correct. - db.MustView(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - for i := 0; i < n; i++ { - if v := b.Get(u64tob(uint64(i))); v == nil { - t.Errorf("key not found: %d", i) - } - } - return nil - }) -} - -func TestDB_Batch_Panic(t *testing.T) { - db := NewTestDB() - defer db.Close() - - var sentinel int - var bork = &sentinel - var problem interface{} - var err error - - // Execute a function inside a batch that panics. - func() { - defer func() { - if p := recover(); p != nil { - problem = p - } - }() - err = db.Batch(func(tx *bolt.Tx) error { - panic(bork) - }) - }() - - // Verify there is no error. - if g, e := err, error(nil); g != e { - t.Fatalf("wrong error: %v != %v", g, e) - } - // Verify the panic was captured. - if g, e := problem, bork; g != e { - t.Fatalf("wrong error: %v != %v", g, e) - } -} - -func TestDB_BatchFull(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.MustCreateBucket([]byte("widgets")) - - const size = 3 - // buffered so we never leak goroutines - ch := make(chan error, size) - put := func(i int) { - ch <- db.Batch(func(tx *bolt.Tx) error { - return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{}) - }) - } - - db.MaxBatchSize = size - // high enough to never trigger here - db.MaxBatchDelay = 1 * time.Hour - - go put(1) - go put(2) - - // Give the batch a chance to exhibit bugs. - time.Sleep(10 * time.Millisecond) - - // not triggered yet - select { - case <-ch: - t.Fatalf("batch triggered too early") - default: - } - - go put(3) - - // Check all responses to make sure there's no error. - for i := 0; i < size; i++ { - if err := <-ch; err != nil { - t.Fatal(err) - } - } - - // Ensure data is correct. 
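The removed example and tests above exercise `DB.Batch`, which coalesces update functions submitted from concurrent goroutines into fewer write transactions, bounded by `DB.MaxBatchSize` and `DB.MaxBatchDelay`. A minimal usage sketch follows; the database path, bucket name, and tuning values are illustrative assumptions, not taken from this diff.

```go
package main

import (
	"log"
	"sync"
	"time"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("example.db", 0666, nil) // path is illustrative
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Batch coalesces concurrent callers into shared write transactions.
	// These bounds control how large a batch may grow and how long a
	// caller may wait for it to fill (example values).
	db.MaxBatchSize = 100
	db.MaxBatchDelay = 10 * time.Millisecond

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			err := db.Batch(func(tx *bolt.Tx) error {
				b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
				if err != nil {
					return err
				}
				return b.Put([]byte{byte(i)}, []byte("value"))
			})
			if err != nil {
				log.Println("batch:", err)
			}
		}(i)
	}
	wg.Wait()
}
```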
- db.MustView(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - for i := 1; i <= size; i++ { - if v := b.Get(u64tob(uint64(i))); v == nil { - t.Errorf("key not found: %d", i) - } - } - return nil - }) -} - -func TestDB_BatchTime(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.MustCreateBucket([]byte("widgets")) - - const size = 1 - // buffered so we never leak goroutines - ch := make(chan error, size) - put := func(i int) { - ch <- db.Batch(func(tx *bolt.Tx) error { - return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{}) - }) - } - - db.MaxBatchSize = 1000 - db.MaxBatchDelay = 0 - - go put(1) - - // Batch must trigger by time alone. - - // Check all responses to make sure there's no error. - for i := 0; i < size; i++ { - if err := <-ch; err != nil { - t.Fatal(err) - } - } - - // Ensure data is correct. - db.MustView(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - for i := 1; i <= size; i++ { - if v := b.Get(u64tob(uint64(i))); v == nil { - t.Errorf("key not found: %d", i) - } - } - return nil - }) -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_arm64.go b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_arm64.go new file mode 100644 index 0000000000..6d2309352e --- /dev/null +++ b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_arm64.go @@ -0,0 +1,9 @@ +// +build arm64 + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff --git a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_linux.go b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_linux.go index e9d1c907b6..2b67666140 100644 --- a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_linux.go +++ b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_linux.go @@ -4,8 +4,6 @@ import ( "syscall" ) -var odirect = syscall.O_DIRECT - // fdatasync flushes written data to a file descriptor. func fdatasync(db *DB) error { return syscall.Fdatasync(int(db.file.Fd())) diff --git a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_openbsd.go b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_openbsd.go index 7c1bef1a4f..7058c3d734 100644 --- a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_openbsd.go +++ b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_openbsd.go @@ -11,8 +11,6 @@ const ( msInvalidate // invalidate cached data ) -var odirect int - func msync(db *DB) error { _, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate) if errno != 0 { diff --git a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_ppc.go b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_ppc.go new file mode 100644 index 0000000000..645ddc3edc --- /dev/null +++ b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_ppc.go @@ -0,0 +1,9 @@ +// +build ppc + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. 
+const maxAllocSize = 0xFFFFFFF diff --git a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_ppc64.go b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_ppc64.go new file mode 100644 index 0000000000..2dc6be02e3 --- /dev/null +++ b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_ppc64.go @@ -0,0 +1,9 @@ +// +build ppc64 + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff --git a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_ppc64le.go b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_ppc64le.go new file mode 100644 index 0000000000..8351e129f6 --- /dev/null +++ b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_ppc64le.go @@ -0,0 +1,9 @@ +// +build ppc64le + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff --git a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_s390x.go b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_s390x.go new file mode 100644 index 0000000000..f4dd26bbba --- /dev/null +++ b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_s390x.go @@ -0,0 +1,9 @@ +// +build s390x + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff --git a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_test.go b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_test.go deleted file mode 100644 index b7bea1fc59..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package bolt_test - -import ( - "fmt" - "path/filepath" - "reflect" - "runtime" - "testing" -) - -// assert fails the test if the condition is false. -func assert(tb testing.TB, condition bool, msg string, v ...interface{}) { - if !condition { - _, file, line, _ := runtime.Caller(1) - fmt.Printf("\033[31m%s:%d: "+msg+"\033[39m\n\n", append([]interface{}{filepath.Base(file), line}, v...)...) - tb.FailNow() - } -} - -// ok fails the test if an err is not nil. -func ok(tb testing.TB, err error) { - if err != nil { - _, file, line, _ := runtime.Caller(1) - fmt.Printf("\033[31m%s:%d: unexpected error: %s\033[39m\n\n", filepath.Base(file), line, err.Error()) - tb.FailNow() - } -} - -// equals fails the test if exp is not equal to act. -func equals(tb testing.TB, exp, act interface{}) { - if !reflect.DeepEqual(exp, act) { - _, file, line, _ := runtime.Caller(1) - fmt.Printf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(file), line, exp, act) - tb.FailNow() - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix.go b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix.go index 17ca318bf7..cad62dda1e 100644 --- a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix.go +++ b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix.go @@ -1,4 +1,4 @@ -// +build !windows,!plan9 +// +build !windows,!plan9,!solaris package bolt @@ -11,7 +11,7 @@ import ( ) // flock acquires an advisory lock on a file descriptor. 
-func flock(f *os.File, exclusive bool, timeout time.Duration) error { +func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { var t time.Time for { // If we're beyond our timeout then return an error. @@ -27,7 +27,7 @@ func flock(f *os.File, exclusive bool, timeout time.Duration) error { } // Otherwise attempt to obtain an exclusive lock. - err := syscall.Flock(int(f.Fd()), flag|syscall.LOCK_NB) + err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB) if err == nil { return nil } else if err != syscall.EWOULDBLOCK { @@ -40,25 +40,14 @@ func flock(f *os.File, exclusive bool, timeout time.Duration) error { } // funlock releases an advisory lock on a file descriptor. -func funlock(f *os.File) error { - return syscall.Flock(int(f.Fd()), syscall.LOCK_UN) +func funlock(db *DB) error { + return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN) } // mmap memory maps a DB's data file. func mmap(db *DB, sz int) error { - // Truncate and fsync to ensure file size metadata is flushed. - // https://github.com/boltdb/bolt/issues/284 - if !db.NoGrowSync && !db.readOnly { - if err := db.file.Truncate(int64(sz)); err != nil { - return fmt.Errorf("file resize error: %s", err) - } - if err := db.file.Sync(); err != nil { - return fmt.Errorf("file sync error: %s", err) - } - } - // Map the data file to memory. - b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED) + b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) if err != nil { return err } diff --git a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix_solaris.go b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix_solaris.go new file mode 100644 index 0000000000..307bf2b3ee --- /dev/null +++ b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix_solaris.go @@ -0,0 +1,90 @@ +package bolt + +import ( + "fmt" + "os" + "syscall" + "time" + "unsafe" + + "golang.org/x/sys/unix" +) + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { + var t time.Time + for { + // If we're beyond our timeout then return an error. + // This can only occur after we've attempted a flock once. + if t.IsZero() { + t = time.Now() + } else if timeout > 0 && time.Since(t) > timeout { + return ErrTimeout + } + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Pid = 0 + lock.Whence = 0 + lock.Pid = 0 + if exclusive { + lock.Type = syscall.F_WRLCK + } else { + lock.Type = syscall.F_RDLCK + } + err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock) + if err == nil { + return nil + } else if err != syscall.EAGAIN { + return err + } + + // Wait for a bit and try again. + time.Sleep(50 * time.Millisecond) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Type = syscall.F_UNLCK + lock.Whence = 0 + return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock) +} + +// mmap memory maps a DB's data file. +func mmap(db *DB, sz int) error { + // Map the data file to memory. + b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) + if err != nil { + return err + } + + // Advise the kernel that the mmap is accessed randomly. 
+ if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil { + return fmt.Errorf("madvise: %s", err) + } + + // Save the original byte slice and convert to a byte array pointer. + db.dataref = b + db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.datasz = sz + return nil +} + +// munmap unmaps a DB's data file from memory. +func munmap(db *DB) error { + // Ignore the unmap if we have no mapped data. + if db.dataref == nil { + return nil + } + + // Unmap using the original byte slice. + err := unix.Munmap(db.dataref) + db.dataref = nil + db.data = nil + db.datasz = 0 + return err +} diff --git a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_windows.go b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_windows.go index 8b782be5f9..d538e6afd7 100644 --- a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_windows.go +++ b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_windows.go @@ -8,7 +8,39 @@ import ( "unsafe" ) -var odirect int +// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1 +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + procLockFileEx = modkernel32.NewProc("LockFileEx") + procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") +) + +const ( + lockExt = ".lock" + + // see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx + flagLockExclusive = 2 + flagLockFailImmediately = 1 + + // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx + errLockViolation syscall.Errno = 0x21 +) + +func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol))) + if r == 0 { + return err + } + return nil +} + +func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0) + if r == 0 { + return err + } + return nil +} // fdatasync flushes written data to a file descriptor. func fdatasync(db *DB) error { @@ -16,13 +48,49 @@ func fdatasync(db *DB) error { } // flock acquires an advisory lock on a file descriptor. -func flock(f *os.File, _ bool, _ time.Duration) error { - return nil +func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { + // Create a separate lock file on windows because a process + // cannot share an exclusive lock on the same file. This is + // needed during Tx.WriteTo(). + f, err := os.OpenFile(db.path+lockExt, os.O_CREATE, mode) + if err != nil { + return err + } + db.lockfile = f + + var t time.Time + for { + // If we're beyond our timeout then return an error. + // This can only occur after we've attempted a flock once. + if t.IsZero() { + t = time.Now() + } else if timeout > 0 && time.Since(t) > timeout { + return ErrTimeout + } + + var flag uint32 = flagLockFailImmediately + if exclusive { + flag |= flagLockExclusive + } + + err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}) + if err == nil { + return nil + } else if err != errLockViolation { + return err + } + + // Wait for a bit and try again. + time.Sleep(50 * time.Millisecond) + } } // funlock releases an advisory lock on a file descriptor. 
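Each `flock` implementation in this diff (the revised Unix version, the new Solaris `fcntl`-based one, and the Windows lock-file one) honors a caller-supplied timeout and returns `ErrTimeout` once it elapses. At the public API level that timeout comes from `bolt.Options` passed to `Open`. A minimal sketch, assuming an illustrative database path:

```go
package main

import (
	"log"
	"time"

	"github.com/boltdb/bolt"
)

func main() {
	// Give up after one second if another process already holds the
	// file lock, instead of blocking forever.
	db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second})
	if err == bolt.ErrTimeout {
		log.Fatal("database is locked by another process")
	} else if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```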
-func funlock(f *os.File) error { - return nil +func funlock(db *DB) error { + err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{}) + db.lockfile.Close() + os.Remove(db.path+lockExt) + return err } // mmap memory maps a DB's data file. diff --git a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/boltsync_unix.go b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/boltsync_unix.go index 8db89776fe..f50442523c 100644 --- a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/boltsync_unix.go +++ b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/boltsync_unix.go @@ -2,8 +2,6 @@ package bolt -var odirect int - // fdatasync flushes written data to a file descriptor. func fdatasync(db *DB) error { return db.file.Sync() diff --git a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bucket.go b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bucket.go index 6766992100..d2f8c524e4 100644 --- a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bucket.go +++ b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bucket.go @@ -11,7 +11,7 @@ const ( MaxKeySize = 32768 // MaxValueSize is the maximum length of a value, in bytes. - MaxValueSize = 4294967295 + MaxValueSize = (1 << 31) - 2 ) const ( @@ -99,6 +99,7 @@ func (b *Bucket) Cursor() *Cursor { // Bucket retrieves a nested bucket by name. // Returns nil if the bucket does not exist. +// The bucket instance is only valid for the lifetime of the transaction. func (b *Bucket) Bucket(name []byte) *Bucket { if b.buckets != nil { if child := b.buckets[string(name)]; child != nil { @@ -148,6 +149,7 @@ func (b *Bucket) openBucket(value []byte) *Bucket { // CreateBucket creates a new bucket at the given key and returns the new bucket. // Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { if b.tx.db == nil { return nil, ErrTxClosed @@ -192,6 +194,7 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { // CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it. // Returns an error if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) { child, err := b.CreateBucket(key) if err == ErrBucketExists { @@ -270,6 +273,7 @@ func (b *Bucket) Get(key []byte) []byte { // Put sets the value for a key in the bucket. // If the key exist then its previous value will be overwritten. +// Supplied value must remain valid for the life of the transaction. // Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large. func (b *Bucket) Put(key []byte, value []byte) error { if b.tx.db == nil { @@ -346,7 +350,8 @@ func (b *Bucket) NextSequence() (uint64, error) { // ForEach executes a function for each key/value pair in a bucket. // If the provided function returns an error then the iteration is stopped and -// the error is returned to the caller. +// the error is returned to the caller. The provided function must not modify +// the bucket; this will result in undefined behavior. 
func (b *Bucket) ForEach(fn func(k, v []byte) error) error { if b.tx.db == nil { return ErrTxClosed diff --git a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bucket_test.go b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bucket_test.go deleted file mode 100644 index 62b8c58783..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/bucket_test.go +++ /dev/null @@ -1,1169 +0,0 @@ -package bolt_test - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "math/rand" - "os" - "strconv" - "strings" - "testing" - "testing/quick" - - "github.com/boltdb/bolt" -) - -// Ensure that a bucket that gets a non-existent key returns nil. -func TestBucket_Get_NonExistent(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - assert(t, value == nil, "") - return nil - }) -} - -// Ensure that a bucket can read a value that is not flushed yet. -func TestBucket_Get_FromNode(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - b.Put([]byte("foo"), []byte("bar")) - value := b.Get([]byte("foo")) - equals(t, []byte("bar"), value) - return nil - }) -} - -// Ensure that a bucket retrieved via Get() returns a nil. -func TestBucket_Get_IncompatibleValue(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) - ok(t, err) - assert(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")) == nil, "") - return nil - }) -} - -// Ensure that a bucket can write a key/value. -func TestBucket_Put(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - err := tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - ok(t, err) - value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - equals(t, value, []byte("bar")) - return nil - }) -} - -// Ensure that a bucket can rewrite a key in the same transaction. -func TestBucket_Put_Repeat(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - ok(t, b.Put([]byte("foo"), []byte("bar"))) - ok(t, b.Put([]byte("foo"), []byte("baz"))) - value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - equals(t, value, []byte("baz")) - return nil - }) -} - -// Ensure that a bucket can write a bunch of large values. -func TestBucket_Put_Large(t *testing.T) { - db := NewTestDB() - defer db.Close() - - count, factor := 100, 200 - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - for i := 1; i < count; i++ { - ok(t, b.Put([]byte(strings.Repeat("0", i*factor)), []byte(strings.Repeat("X", (count-i)*factor)))) - } - return nil - }) - db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - for i := 1; i < count; i++ { - value := b.Get([]byte(strings.Repeat("0", i*factor))) - equals(t, []byte(strings.Repeat("X", (count-i)*factor)), value) - } - return nil - }) -} - -// Ensure that a database can perform multiple large appends safely. 
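The doc comments added to `Bucket`, `CreateBucket`, `CreateBucketIfNotExists`, `Put`, and `ForEach` above all stress transaction-scoped lifetimes: byte slices returned by `Get` point into the memory-mapped file and are only valid while the transaction is open, and a value handed to `Put` must stay valid until the transaction ends. A small sketch of the usual copy-out pattern; the bucket and key names and the path are illustrative.

```go
package main

import (
	"fmt"
	"log"

	"github.com/boltdb/bolt"
)

// readCopy returns a copy of the value for key so the caller can use it
// after the read transaction has closed.
func readCopy(db *bolt.DB, bucket, key []byte) ([]byte, error) {
	var out []byte
	err := db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket(bucket)
		if b == nil {
			return nil // bucket missing; out stays nil
		}
		if v := b.Get(key); v != nil {
			// Copy: v points into the mmap and is only valid inside
			// this transaction.
			out = append([]byte(nil), v...)
		}
		return nil
	})
	return out, err
}

func main() {
	db, err := bolt.Open("my.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	v, err := readCopy(db, []byte("widgets"), []byte("foo"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("foo = %q\n", v)
}
```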
-func TestDB_Put_VeryLarge(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - - n, batchN := 400000, 200000 - ksize, vsize := 8, 500 - - db := NewTestDB() - defer db.Close() - - for i := 0; i < n; i += batchN { - err := db.Update(func(tx *bolt.Tx) error { - b, _ := tx.CreateBucketIfNotExists([]byte("widgets")) - for j := 0; j < batchN; j++ { - k, v := make([]byte, ksize), make([]byte, vsize) - binary.BigEndian.PutUint32(k, uint32(i+j)) - ok(t, b.Put(k, v)) - } - return nil - }) - ok(t, err) - } -} - -// Ensure that a setting a value on a key with a bucket value returns an error. -func TestBucket_Put_IncompatibleValue(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) - ok(t, err) - equals(t, bolt.ErrIncompatibleValue, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) - return nil - }) -} - -// Ensure that a setting a value while the transaction is closed returns an error. -func TestBucket_Put_Closed(t *testing.T) { - db := NewTestDB() - defer db.Close() - tx, _ := db.Begin(true) - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - tx.Rollback() - equals(t, bolt.ErrTxClosed, b.Put([]byte("foo"), []byte("bar"))) -} - -// Ensure that setting a value on a read-only bucket returns an error. -func TestBucket_Put_ReadOnly(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - ok(t, err) - return nil - }) - db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - err := b.Put([]byte("foo"), []byte("bar")) - equals(t, err, bolt.ErrTxNotWritable) - return nil - }) -} - -// Ensure that a bucket can delete an existing key. -func TestBucket_Delete(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - err := tx.Bucket([]byte("widgets")).Delete([]byte("foo")) - ok(t, err) - value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - assert(t, value == nil, "") - return nil - }) -} - -// Ensure that deleting a large set of keys will work correctly. -func TestBucket_Delete_Large(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - var b, _ = tx.CreateBucket([]byte("widgets")) - for i := 0; i < 100; i++ { - ok(t, b.Put([]byte(strconv.Itoa(i)), []byte(strings.Repeat("*", 1024)))) - } - return nil - }) - db.Update(func(tx *bolt.Tx) error { - var b = tx.Bucket([]byte("widgets")) - for i := 0; i < 100; i++ { - ok(t, b.Delete([]byte(strconv.Itoa(i)))) - } - return nil - }) - db.View(func(tx *bolt.Tx) error { - var b = tx.Bucket([]byte("widgets")) - for i := 0; i < 100; i++ { - assert(t, b.Get([]byte(strconv.Itoa(i))) == nil, "") - } - return nil - }) -} - -// Deleting a very large list of keys will cause the freelist to use overflow. 
-func TestBucket_Delete_FreelistOverflow(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - - db := NewTestDB() - defer db.Close() - k := make([]byte, 16) - for i := uint64(0); i < 10000; i++ { - err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucketIfNotExists([]byte("0")) - if err != nil { - t.Fatalf("bucket error: %s", err) - } - - for j := uint64(0); j < 1000; j++ { - binary.BigEndian.PutUint64(k[:8], i) - binary.BigEndian.PutUint64(k[8:], j) - if err := b.Put(k, nil); err != nil { - t.Fatalf("put error: %s", err) - } - } - - return nil - }) - - if err != nil { - t.Fatalf("update error: %s", err) - } - } - - // Delete all of them in one large transaction - err := db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("0")) - c := b.Cursor() - for k, _ := c.First(); k != nil; k, _ = c.Next() { - b.Delete(k) - } - return nil - }) - - // Check that a freelist overflow occurred. - ok(t, err) -} - -// Ensure that accessing and updating nested buckets is ok across transactions. -func TestBucket_Nested(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - // Create a widgets bucket. - b, err := tx.CreateBucket([]byte("widgets")) - ok(t, err) - - // Create a widgets/foo bucket. - _, err = b.CreateBucket([]byte("foo")) - ok(t, err) - - // Create a widgets/bar key. - ok(t, b.Put([]byte("bar"), []byte("0000"))) - - return nil - }) - db.MustCheck() - - // Update widgets/bar. - db.Update(func(tx *bolt.Tx) error { - var b = tx.Bucket([]byte("widgets")) - ok(t, b.Put([]byte("bar"), []byte("xxxx"))) - return nil - }) - db.MustCheck() - - // Cause a split. - db.Update(func(tx *bolt.Tx) error { - var b = tx.Bucket([]byte("widgets")) - for i := 0; i < 10000; i++ { - ok(t, b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i)))) - } - return nil - }) - db.MustCheck() - - // Insert into widgets/foo/baz. - db.Update(func(tx *bolt.Tx) error { - var b = tx.Bucket([]byte("widgets")) - ok(t, b.Bucket([]byte("foo")).Put([]byte("baz"), []byte("yyyy"))) - return nil - }) - db.MustCheck() - - // Verify. - db.View(func(tx *bolt.Tx) error { - var b = tx.Bucket([]byte("widgets")) - equals(t, []byte("yyyy"), b.Bucket([]byte("foo")).Get([]byte("baz"))) - equals(t, []byte("xxxx"), b.Get([]byte("bar"))) - for i := 0; i < 10000; i++ { - equals(t, []byte(strconv.Itoa(i)), b.Get([]byte(strconv.Itoa(i)))) - } - return nil - }) -} - -// Ensure that deleting a bucket using Delete() returns an error. -func TestBucket_Delete_Bucket(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - _, err := b.CreateBucket([]byte("foo")) - ok(t, err) - equals(t, bolt.ErrIncompatibleValue, b.Delete([]byte("foo"))) - return nil - }) -} - -// Ensure that deleting a key on a read-only bucket returns an error. -func TestBucket_Delete_ReadOnly(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - return nil - }) - db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - err := b.Delete([]byte("foo")) - equals(t, err, bolt.ErrTxNotWritable) - return nil - }) -} - -// Ensure that a deleting value while the transaction is closed returns an error. 
-func TestBucket_Delete_Closed(t *testing.T) { - db := NewTestDB() - defer db.Close() - tx, _ := db.Begin(true) - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - tx.Rollback() - equals(t, bolt.ErrTxClosed, b.Delete([]byte("foo"))) -} - -// Ensure that deleting a bucket causes nested buckets to be deleted. -func TestBucket_DeleteBucket_Nested(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) - ok(t, err) - _, err = tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).CreateBucket([]byte("bar")) - ok(t, err) - ok(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Put([]byte("baz"), []byte("bat"))) - ok(t, tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo"))) - return nil - }) -} - -// Ensure that deleting a bucket causes nested buckets to be deleted after they have been committed. -func TestBucket_DeleteBucket_Nested2(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) - ok(t, err) - _, err = tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).CreateBucket([]byte("bar")) - ok(t, err) - ok(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Put([]byte("baz"), []byte("bat"))) - return nil - }) - db.Update(func(tx *bolt.Tx) error { - assert(t, tx.Bucket([]byte("widgets")) != nil, "") - assert(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")) != nil, "") - assert(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")) != nil, "") - equals(t, []byte("bat"), tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Get([]byte("baz"))) - ok(t, tx.DeleteBucket([]byte("widgets"))) - return nil - }) - db.View(func(tx *bolt.Tx) error { - assert(t, tx.Bucket([]byte("widgets")) == nil, "") - return nil - }) -} - -// Ensure that deleting a child bucket with multiple pages causes all pages to get collected. -func TestBucket_DeleteBucket_Large(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - ok(t, err) - _, err = tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) - ok(t, err) - b := tx.Bucket([]byte("widgets")).Bucket([]byte("foo")) - for i := 0; i < 1000; i++ { - ok(t, b.Put([]byte(fmt.Sprintf("%d", i)), []byte(fmt.Sprintf("%0100d", i)))) - } - return nil - }) - db.Update(func(tx *bolt.Tx) error { - ok(t, tx.DeleteBucket([]byte("widgets"))) - return nil - }) - - // NOTE: Consistency check in TestDB.Close() will panic if pages not freed properly. -} - -// Ensure that a simple value retrieved via Bucket() returns a nil. -func TestBucket_Bucket_IncompatibleValue(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - ok(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) - assert(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")) == nil, "") - return nil - }) -} - -// Ensure that creating a bucket on an existing non-bucket key returns an error. 
-func TestBucket_CreateBucket_IncompatibleValue(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - ok(t, err) - ok(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) - _, err = tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) - equals(t, bolt.ErrIncompatibleValue, err) - return nil - }) -} - -// Ensure that deleting a bucket on an existing non-bucket key returns an error. -func TestBucket_DeleteBucket_IncompatibleValue(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - ok(t, err) - ok(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) - equals(t, bolt.ErrIncompatibleValue, tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo"))) - return nil - }) -} - -// Ensure that a bucket can return an autoincrementing sequence. -func TestBucket_NextSequence(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.CreateBucket([]byte("woojits")) - - // Make sure sequence increments. - seq, err := tx.Bucket([]byte("widgets")).NextSequence() - ok(t, err) - equals(t, seq, uint64(1)) - seq, err = tx.Bucket([]byte("widgets")).NextSequence() - ok(t, err) - equals(t, seq, uint64(2)) - - // Buckets should be separate. - seq, err = tx.Bucket([]byte("woojits")).NextSequence() - ok(t, err) - equals(t, seq, uint64(1)) - return nil - }) -} - -// Ensure that a bucket will persist an autoincrementing sequence even if its -// the only thing updated on the bucket. -// https://github.com/boltdb/bolt/issues/296 -func TestBucket_NextSequence_Persist(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - _, _ = tx.CreateBucket([]byte("widgets")) - return nil - }) - - db.Update(func(tx *bolt.Tx) error { - _, _ = tx.Bucket([]byte("widgets")).NextSequence() - return nil - }) - - db.Update(func(tx *bolt.Tx) error { - seq, err := tx.Bucket([]byte("widgets")).NextSequence() - if err != nil { - t.Fatalf("unexpected error: %s", err) - } else if seq != 2 { - t.Fatalf("unexpected sequence: %d", seq) - } - return nil - }) -} - -// Ensure that retrieving the next sequence on a read-only bucket returns an error. -func TestBucket_NextSequence_ReadOnly(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - return nil - }) - db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - i, err := b.NextSequence() - equals(t, i, uint64(0)) - equals(t, err, bolt.ErrTxNotWritable) - return nil - }) -} - -// Ensure that retrieving the next sequence for a bucket on a closed database return an error. -func TestBucket_NextSequence_Closed(t *testing.T) { - db := NewTestDB() - defer db.Close() - tx, _ := db.Begin(true) - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - tx.Rollback() - _, err := b.NextSequence() - equals(t, bolt.ErrTxClosed, err) -} - -// Ensure a user can loop over all key/value pairs in a bucket. 
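The removed `NextSequence` tests above document the sequence semantics: each bucket keeps its own monotonically increasing counter, it persists across transactions, and it is only usable from a writable transaction. A hedged sketch of using it to assign record IDs; the bucket name and path are illustrative assumptions.

```go
package main

import (
	"encoding/binary"
	"log"

	"github.com/boltdb/bolt"
)

// insertWithID stores value under the next per-bucket sequence number
// and returns that number.
func insertWithID(db *bolt.DB, value []byte) (uint64, error) {
	var id uint64
	err := db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("records"))
		if err != nil {
			return err
		}
		id, err = b.NextSequence() // writable tx required
		if err != nil {
			return err
		}
		key := make([]byte, 8)
		binary.BigEndian.PutUint64(key, id)
		return b.Put(key, value)
	})
	return id, err
}

func main() {
	db, err := bolt.Open("my.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	id, err := insertWithID(db, []byte("hello"))
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("stored record %d", id)
}
```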
-func TestBucket_ForEach(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("0000")) - tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("0001")) - tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte("0002")) - - var index int - err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error { - switch index { - case 0: - equals(t, k, []byte("bar")) - equals(t, v, []byte("0002")) - case 1: - equals(t, k, []byte("baz")) - equals(t, v, []byte("0001")) - case 2: - equals(t, k, []byte("foo")) - equals(t, v, []byte("0000")) - } - index++ - return nil - }) - ok(t, err) - equals(t, index, 3) - return nil - }) -} - -// Ensure a database can stop iteration early. -func TestBucket_ForEach_ShortCircuit(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte("0000")) - tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("0000")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("0000")) - - var index int - err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error { - index++ - if bytes.Equal(k, []byte("baz")) { - return errors.New("marker") - } - return nil - }) - equals(t, errors.New("marker"), err) - equals(t, 2, index) - return nil - }) -} - -// Ensure that looping over a bucket on a closed database returns an error. -func TestBucket_ForEach_Closed(t *testing.T) { - db := NewTestDB() - defer db.Close() - tx, _ := db.Begin(true) - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - tx.Rollback() - err := b.ForEach(func(k, v []byte) error { return nil }) - equals(t, bolt.ErrTxClosed, err) -} - -// Ensure that an error is returned when inserting with an empty key. -func TestBucket_Put_EmptyKey(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - err := tx.Bucket([]byte("widgets")).Put([]byte(""), []byte("bar")) - equals(t, err, bolt.ErrKeyRequired) - err = tx.Bucket([]byte("widgets")).Put(nil, []byte("bar")) - equals(t, err, bolt.ErrKeyRequired) - return nil - }) -} - -// Ensure that an error is returned when inserting with a key that's too large. -func TestBucket_Put_KeyTooLarge(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - err := tx.Bucket([]byte("widgets")).Put(make([]byte, 32769), []byte("bar")) - equals(t, err, bolt.ErrKeyTooLarge) - return nil - }) -} - -// Ensure that an error is returned when inserting a value that's too large. -func TestBucket_Put_ValueTooLarge(t *testing.T) { - if os.Getenv("DRONE") == "true" { - t.Skip("not enough RAM for test") - } - - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - err := tx.Bucket([]byte("widgets")).Put([]byte("foo"), make([]byte, bolt.MaxValueSize+1)) - equals(t, err, bolt.ErrValueTooLarge) - return nil - }) -} - -// Ensure a bucket can calculate stats. -func TestBucket_Stats(t *testing.T) { - db := NewTestDB() - defer db.Close() - - // Add bucket with fewer keys but one big value. 
- big_key := []byte("really-big-value") - for i := 0; i < 500; i++ { - db.Update(func(tx *bolt.Tx) error { - b, _ := tx.CreateBucketIfNotExists([]byte("woojits")) - return b.Put([]byte(fmt.Sprintf("%03d", i)), []byte(strconv.Itoa(i))) - }) - } - db.Update(func(tx *bolt.Tx) error { - b, _ := tx.CreateBucketIfNotExists([]byte("woojits")) - return b.Put(big_key, []byte(strings.Repeat("*", 10000))) - }) - - db.MustCheck() - db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("woojits")) - stats := b.Stats() - equals(t, 1, stats.BranchPageN) - equals(t, 0, stats.BranchOverflowN) - equals(t, 7, stats.LeafPageN) - equals(t, 2, stats.LeafOverflowN) - equals(t, 501, stats.KeyN) - equals(t, 2, stats.Depth) - - branchInuse := 16 // branch page header - branchInuse += 7 * 16 // branch elements - branchInuse += 7 * 3 // branch keys (6 3-byte keys) - equals(t, branchInuse, stats.BranchInuse) - - leafInuse := 7 * 16 // leaf page header - leafInuse += 501 * 16 // leaf elements - leafInuse += 500*3 + len(big_key) // leaf keys - leafInuse += 1*10 + 2*90 + 3*400 + 10000 // leaf values - equals(t, leafInuse, stats.LeafInuse) - - if os.Getpagesize() == 4096 { - // Incompatible page size - equals(t, 4096, stats.BranchAlloc) - equals(t, 36864, stats.LeafAlloc) - } - - equals(t, 1, stats.BucketN) - equals(t, 0, stats.InlineBucketN) - equals(t, 0, stats.InlineBucketInuse) - return nil - }) -} - -// Ensure a bucket with random insertion utilizes fill percentage correctly. -func TestBucket_Stats_RandomFill(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } else if os.Getpagesize() != 4096 { - t.Skip("invalid page size for test") - } - - db := NewTestDB() - defer db.Close() - - // Add a set of values in random order. It will be the same random - // order so we can maintain consistency between test runs. - var count int - r := rand.New(rand.NewSource(42)) - for _, i := range r.Perm(1000) { - db.Update(func(tx *bolt.Tx) error { - b, _ := tx.CreateBucketIfNotExists([]byte("woojits")) - b.FillPercent = 0.9 - for _, j := range r.Perm(100) { - index := (j * 10000) + i - b.Put([]byte(fmt.Sprintf("%d000000000000000", index)), []byte("0000000000")) - count++ - } - return nil - }) - } - db.MustCheck() - - db.View(func(tx *bolt.Tx) error { - s := tx.Bucket([]byte("woojits")).Stats() - equals(t, 100000, s.KeyN) - - equals(t, 98, s.BranchPageN) - equals(t, 0, s.BranchOverflowN) - equals(t, 130984, s.BranchInuse) - equals(t, 401408, s.BranchAlloc) - - equals(t, 3412, s.LeafPageN) - equals(t, 0, s.LeafOverflowN) - equals(t, 4742482, s.LeafInuse) - equals(t, 13975552, s.LeafAlloc) - return nil - }) -} - -// Ensure a bucket can calculate stats. -func TestBucket_Stats_Small(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - // Add a bucket that fits on a single root leaf. 
- b, err := tx.CreateBucket([]byte("whozawhats")) - ok(t, err) - b.Put([]byte("foo"), []byte("bar")) - - return nil - }) - db.MustCheck() - db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("whozawhats")) - stats := b.Stats() - equals(t, 0, stats.BranchPageN) - equals(t, 0, stats.BranchOverflowN) - equals(t, 0, stats.LeafPageN) - equals(t, 0, stats.LeafOverflowN) - equals(t, 1, stats.KeyN) - equals(t, 1, stats.Depth) - equals(t, 0, stats.BranchInuse) - equals(t, 0, stats.LeafInuse) - if os.Getpagesize() == 4096 { - // Incompatible page size - equals(t, 0, stats.BranchAlloc) - equals(t, 0, stats.LeafAlloc) - } - equals(t, 1, stats.BucketN) - equals(t, 1, stats.InlineBucketN) - equals(t, 16+16+6, stats.InlineBucketInuse) - return nil - }) -} - -func TestBucket_Stats_EmptyBucket(t *testing.T) { - db := NewTestDB() - defer db.Close() - - db.Update(func(tx *bolt.Tx) error { - // Add a bucket that fits on a single root leaf. - _, err := tx.CreateBucket([]byte("whozawhats")) - ok(t, err) - return nil - }) - db.MustCheck() - db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("whozawhats")) - stats := b.Stats() - equals(t, 0, stats.BranchPageN) - equals(t, 0, stats.BranchOverflowN) - equals(t, 0, stats.LeafPageN) - equals(t, 0, stats.LeafOverflowN) - equals(t, 0, stats.KeyN) - equals(t, 1, stats.Depth) - equals(t, 0, stats.BranchInuse) - equals(t, 0, stats.LeafInuse) - if os.Getpagesize() == 4096 { - // Incompatible page size - equals(t, 0, stats.BranchAlloc) - equals(t, 0, stats.LeafAlloc) - } - equals(t, 1, stats.BucketN) - equals(t, 1, stats.InlineBucketN) - equals(t, 16, stats.InlineBucketInuse) - return nil - }) -} - -// Ensure a bucket can calculate stats. -func TestBucket_Stats_Nested(t *testing.T) { - db := NewTestDB() - defer db.Close() - - db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("foo")) - ok(t, err) - for i := 0; i < 100; i++ { - b.Put([]byte(fmt.Sprintf("%02d", i)), []byte(fmt.Sprintf("%02d", i))) - } - bar, err := b.CreateBucket([]byte("bar")) - ok(t, err) - for i := 0; i < 10; i++ { - bar.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))) - } - baz, err := bar.CreateBucket([]byte("baz")) - ok(t, err) - for i := 0; i < 10; i++ { - baz.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))) - } - return nil - }) - - db.MustCheck() - - db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("foo")) - stats := b.Stats() - equals(t, 0, stats.BranchPageN) - equals(t, 0, stats.BranchOverflowN) - equals(t, 2, stats.LeafPageN) - equals(t, 0, stats.LeafOverflowN) - equals(t, 122, stats.KeyN) - equals(t, 3, stats.Depth) - equals(t, 0, stats.BranchInuse) - - foo := 16 // foo (pghdr) - foo += 101 * 16 // foo leaf elements - foo += 100*2 + 100*2 // foo leaf key/values - foo += 3 + 16 // foo -> bar key/value - - bar := 16 // bar (pghdr) - bar += 11 * 16 // bar leaf elements - bar += 10 + 10 // bar leaf key/values - bar += 3 + 16 // bar -> baz key/value - - baz := 16 // baz (inline) (pghdr) - baz += 10 * 16 // baz leaf elements - baz += 10 + 10 // baz leaf key/values - - equals(t, foo+bar+baz, stats.LeafInuse) - if os.Getpagesize() == 4096 { - // Incompatible page size - equals(t, 0, stats.BranchAlloc) - equals(t, 8192, stats.LeafAlloc) - } - equals(t, 3, stats.BucketN) - equals(t, 1, stats.InlineBucketN) - equals(t, baz, stats.InlineBucketInuse) - return nil - }) -} - -// Ensure a large bucket can calculate stats. 
-func TestBucket_Stats_Large(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - - db := NewTestDB() - defer db.Close() - - var index int - for i := 0; i < 100; i++ { - db.Update(func(tx *bolt.Tx) error { - // Add bucket with lots of keys. - b, _ := tx.CreateBucketIfNotExists([]byte("widgets")) - for i := 0; i < 1000; i++ { - b.Put([]byte(strconv.Itoa(index)), []byte(strconv.Itoa(index))) - index++ - } - return nil - }) - } - db.MustCheck() - - db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - stats := b.Stats() - equals(t, 13, stats.BranchPageN) - equals(t, 0, stats.BranchOverflowN) - equals(t, 1196, stats.LeafPageN) - equals(t, 0, stats.LeafOverflowN) - equals(t, 100000, stats.KeyN) - equals(t, 3, stats.Depth) - equals(t, 25257, stats.BranchInuse) - equals(t, 2596916, stats.LeafInuse) - if os.Getpagesize() == 4096 { - // Incompatible page size - equals(t, 53248, stats.BranchAlloc) - equals(t, 4898816, stats.LeafAlloc) - } - equals(t, 1, stats.BucketN) - equals(t, 0, stats.InlineBucketN) - equals(t, 0, stats.InlineBucketInuse) - return nil - }) -} - -// Ensure that a bucket can write random keys and values across multiple transactions. -func TestBucket_Put_Single(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - - index := 0 - f := func(items testdata) bool { - db := NewTestDB() - defer db.Close() - - m := make(map[string][]byte) - - db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) - for _, item := range items { - db.Update(func(tx *bolt.Tx) error { - if err := tx.Bucket([]byte("widgets")).Put(item.Key, item.Value); err != nil { - panic("put error: " + err.Error()) - } - m[string(item.Key)] = item.Value - return nil - }) - - // Verify all key/values so far. - db.View(func(tx *bolt.Tx) error { - i := 0 - for k, v := range m { - value := tx.Bucket([]byte("widgets")).Get([]byte(k)) - if !bytes.Equal(value, v) { - t.Logf("value mismatch [run %d] (%d of %d):\nkey: %x\ngot: %x\nexp: %x", index, i, len(m), []byte(k), value, v) - db.CopyTempFile() - t.FailNow() - } - i++ - } - return nil - }) - } - - index++ - return true - } - if err := quick.Check(f, qconfig()); err != nil { - t.Error(err) - } -} - -// Ensure that a transaction can insert multiple key/value pairs at once. -func TestBucket_Put_Multiple(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - - f := func(items testdata) bool { - db := NewTestDB() - defer db.Close() - // Bulk insert all values. - db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) - err := db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - for _, item := range items { - ok(t, b.Put(item.Key, item.Value)) - } - return nil - }) - ok(t, err) - - // Verify all items exist. - db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - for _, item := range items { - value := b.Get(item.Key) - if !bytes.Equal(item.Value, value) { - db.CopyTempFile() - t.Fatalf("exp=%x; got=%x", item.Value, value) - } - } - return nil - }) - return true - } - if err := quick.Check(f, qconfig()); err != nil { - t.Error(err) - } -} - -// Ensure that a transaction can delete all key/value pairs and return to a single leaf page. 
-func TestBucket_Delete_Quick(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - - f := func(items testdata) bool { - db := NewTestDB() - defer db.Close() - // Bulk insert all values. - db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) - err := db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - for _, item := range items { - ok(t, b.Put(item.Key, item.Value)) - } - return nil - }) - ok(t, err) - - // Remove items one at a time and check consistency. - for _, item := range items { - err := db.Update(func(tx *bolt.Tx) error { - return tx.Bucket([]byte("widgets")).Delete(item.Key) - }) - ok(t, err) - } - - // Anything before our deletion index should be nil. - db.View(func(tx *bolt.Tx) error { - tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error { - t.Fatalf("bucket should be empty; found: %06x", trunc(k, 3)) - return nil - }) - return nil - }) - return true - } - if err := quick.Check(f, qconfig()); err != nil { - t.Error(err) - } -} - -func ExampleBucket_Put() { - // Open the database. - db, _ := bolt.Open(tempfile(), 0666, nil) - defer os.Remove(db.Path()) - defer db.Close() - - // Start a write transaction. - db.Update(func(tx *bolt.Tx) error { - // Create a bucket. - tx.CreateBucket([]byte("widgets")) - - // Set the value "bar" for the key "foo". - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - return nil - }) - - // Read value back in a different read-only transaction. - db.View(func(tx *bolt.Tx) error { - value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - fmt.Printf("The value of 'foo' is: %s\n", value) - return nil - }) - - // Output: - // The value of 'foo' is: bar -} - -func ExampleBucket_Delete() { - // Open the database. - db, _ := bolt.Open(tempfile(), 0666, nil) - defer os.Remove(db.Path()) - defer db.Close() - - // Start a write transaction. - db.Update(func(tx *bolt.Tx) error { - // Create a bucket. - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - - // Set the value "bar" for the key "foo". - b.Put([]byte("foo"), []byte("bar")) - - // Retrieve the key back from the database and verify it. - value := b.Get([]byte("foo")) - fmt.Printf("The value of 'foo' was: %s\n", value) - return nil - }) - - // Delete the key in a different write transaction. - db.Update(func(tx *bolt.Tx) error { - return tx.Bucket([]byte("widgets")).Delete([]byte("foo")) - }) - - // Retrieve the key again. - db.View(func(tx *bolt.Tx) error { - value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - if value == nil { - fmt.Printf("The value of 'foo' is now: nil\n") - } - return nil - }) - - // Output: - // The value of 'foo' was: bar - // The value of 'foo' is now: nil -} - -func ExampleBucket_ForEach() { - // Open the database. - db, _ := bolt.Open(tempfile(), 0666, nil) - defer os.Remove(db.Path()) - defer db.Close() - - // Insert data into a bucket. - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("animals")) - b := tx.Bucket([]byte("animals")) - b.Put([]byte("dog"), []byte("fun")) - b.Put([]byte("cat"), []byte("lame")) - b.Put([]byte("liger"), []byte("awesome")) - - // Iterate over items in sorted key order. - b.ForEach(func(k, v []byte) error { - fmt.Printf("A %s is %s.\n", k, v) - return nil - }) - return nil - }) - - // Output: - // A cat is lame. - // A dog is fun. - // A liger is awesome. 
-} diff --git a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main.go b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main.go deleted file mode 100644 index c41ebe404d..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main.go +++ /dev/null @@ -1,1529 +0,0 @@ -package main - -import ( - "bytes" - "encoding/binary" - "errors" - "flag" - "fmt" - "io" - "io/ioutil" - "math/rand" - "os" - "runtime" - "runtime/pprof" - "strconv" - "strings" - "time" - "unicode" - "unicode/utf8" - "unsafe" - - "github.com/boltdb/bolt" -) - -var ( - // ErrUsage is returned when a usage message was printed and the process - // should simply exit with an error. - ErrUsage = errors.New("usage") - - // ErrUnknownCommand is returned when a CLI command is not specified. - ErrUnknownCommand = errors.New("unknown command") - - // ErrPathRequired is returned when the path to a Bolt database is not specified. - ErrPathRequired = errors.New("path required") - - // ErrFileNotFound is returned when a Bolt database does not exist. - ErrFileNotFound = errors.New("file not found") - - // ErrInvalidValue is returned when a benchmark reads an unexpected value. - ErrInvalidValue = errors.New("invalid value") - - // ErrCorrupt is returned when a checking a data file finds errors. - ErrCorrupt = errors.New("invalid value") - - // ErrNonDivisibleBatchSize is returned when the batch size can't be evenly - // divided by the iteration count. - ErrNonDivisibleBatchSize = errors.New("number of iterations must be divisible by the batch size") - - // ErrPageIDRequired is returned when a required page id is not specified. - ErrPageIDRequired = errors.New("page id required") - - // ErrPageNotFound is returned when specifying a page above the high water mark. - ErrPageNotFound = errors.New("page not found") - - // ErrPageFreed is returned when reading a page that has already been freed. - ErrPageFreed = errors.New("page freed") -) - -// PageHeaderSize represents the size of the bolt.page header. -const PageHeaderSize = 16 - -func main() { - m := NewMain() - if err := m.Run(os.Args[1:]...); err == ErrUsage { - os.Exit(2) - } else if err != nil { - fmt.Println(err.Error()) - os.Exit(1) - } -} - -// Main represents the main program execution. -type Main struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer -} - -// NewMain returns a new instance of Main connect to the standard input/output. -func NewMain() *Main { - return &Main{ - Stdin: os.Stdin, - Stdout: os.Stdout, - Stderr: os.Stderr, - } -} - -// Run executes the program. -func (m *Main) Run(args ...string) error { - // Require a command at the beginning. - if len(args) == 0 || strings.HasPrefix(args[0], "-") { - fmt.Fprintln(m.Stderr, m.Usage()) - return ErrUsage - } - - // Execute command. - switch args[0] { - case "help": - fmt.Fprintln(m.Stderr, m.Usage()) - return ErrUsage - case "bench": - return newBenchCommand(m).Run(args[1:]...) - case "check": - return newCheckCommand(m).Run(args[1:]...) - case "dump": - return newDumpCommand(m).Run(args[1:]...) - case "info": - return newInfoCommand(m).Run(args[1:]...) - case "page": - return newPageCommand(m).Run(args[1:]...) - case "pages": - return newPagesCommand(m).Run(args[1:]...) - case "stats": - return newStatsCommand(m).Run(args[1:]...) - default: - return ErrUnknownCommand - } -} - -// Usage returns the help message. -func (m *Main) Usage() string { - return strings.TrimLeft(` -Bolt is a tool for inspecting bolt databases. 
- -Usage: - - bolt command [arguments] - -The commands are: - - bench run synthetic benchmark against bolt - check verifies integrity of bolt database - info print basic info - help print this screen - pages print list of pages with their types - stats iterate over all pages and generate usage stats - -Use "bolt [command] -h" for more information about a command. -`, "\n") -} - -// CheckCommand represents the "check" command execution. -type CheckCommand struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer -} - -// NewCheckCommand returns a CheckCommand. -func newCheckCommand(m *Main) *CheckCommand { - return &CheckCommand{ - Stdin: m.Stdin, - Stdout: m.Stdout, - Stderr: m.Stderr, - } -} - -// Run executes the command. -func (cmd *CheckCommand) Run(args ...string) error { - // Parse flags. - fs := flag.NewFlagSet("", flag.ContinueOnError) - help := fs.Bool("h", false, "") - if err := fs.Parse(args); err != nil { - return err - } else if *help { - fmt.Fprintln(cmd.Stderr, cmd.Usage()) - return ErrUsage - } - - // Require database path. - path := fs.Arg(0) - if path == "" { - return ErrPathRequired - } else if _, err := os.Stat(path); os.IsNotExist(err) { - return ErrFileNotFound - } - - // Open database. - db, err := bolt.Open(path, 0666, nil) - if err != nil { - return err - } - defer db.Close() - - // Perform consistency check. - return db.View(func(tx *bolt.Tx) error { - var count int - ch := tx.Check() - loop: - for { - select { - case err, ok := <-ch: - if !ok { - break loop - } - fmt.Fprintln(cmd.Stdout, err) - count++ - } - } - - // Print summary of errors. - if count > 0 { - fmt.Fprintf(cmd.Stdout, "%d errors found\n", count) - return ErrCorrupt - } - - // Notify user that database is valid. - fmt.Fprintln(cmd.Stdout, "OK") - return nil - }) -} - -// Usage returns the help message. -func (cmd *CheckCommand) Usage() string { - return strings.TrimLeft(` -usage: bolt check PATH - -Check opens a database at PATH and runs an exhaustive check to verify that -all pages are accessible or are marked as freed. It also verifies that no -pages are double referenced. - -Verification errors will stream out as they are found and the process will -return after all pages have been checked. -`, "\n") -} - -// InfoCommand represents the "info" command execution. -type InfoCommand struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer -} - -// NewInfoCommand returns a InfoCommand. -func newInfoCommand(m *Main) *InfoCommand { - return &InfoCommand{ - Stdin: m.Stdin, - Stdout: m.Stdout, - Stderr: m.Stderr, - } -} - -// Run executes the command. -func (cmd *InfoCommand) Run(args ...string) error { - // Parse flags. - fs := flag.NewFlagSet("", flag.ContinueOnError) - help := fs.Bool("h", false, "") - if err := fs.Parse(args); err != nil { - return err - } else if *help { - fmt.Fprintln(cmd.Stderr, cmd.Usage()) - return ErrUsage - } - - // Require database path. - path := fs.Arg(0) - if path == "" { - return ErrPathRequired - } else if _, err := os.Stat(path); os.IsNotExist(err) { - return ErrFileNotFound - } - - // Open the database. - db, err := bolt.Open(path, 0666, nil) - if err != nil { - return err - } - defer db.Close() - - // Print basic database info. - info := db.Info() - fmt.Fprintf(cmd.Stdout, "Page Size: %d\n", info.PageSize) - - return nil -} - -// Usage returns the help message. -func (cmd *InfoCommand) Usage() string { - return strings.TrimLeft(` -usage: bolt info PATH - -Info prints basic information about the Bolt database at PATH. 
-`, "\n") -} - -// DumpCommand represents the "dump" command execution. -type DumpCommand struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer -} - -// newDumpCommand returns a DumpCommand. -func newDumpCommand(m *Main) *DumpCommand { - return &DumpCommand{ - Stdin: m.Stdin, - Stdout: m.Stdout, - Stderr: m.Stderr, - } -} - -// Run executes the command. -func (cmd *DumpCommand) Run(args ...string) error { - // Parse flags. - fs := flag.NewFlagSet("", flag.ContinueOnError) - help := fs.Bool("h", false, "") - if err := fs.Parse(args); err != nil { - return err - } else if *help { - fmt.Fprintln(cmd.Stderr, cmd.Usage()) - return ErrUsage - } - - // Require database path and page id. - path := fs.Arg(0) - if path == "" { - return ErrPathRequired - } else if _, err := os.Stat(path); os.IsNotExist(err) { - return ErrFileNotFound - } - - // Read page ids. - pageIDs, err := atois(fs.Args()[1:]) - if err != nil { - return err - } else if len(pageIDs) == 0 { - return ErrPageIDRequired - } - - // Open database to retrieve page size. - pageSize, err := ReadPageSize(path) - if err != nil { - return err - } - - // Open database file handler. - f, err := os.Open(path) - if err != nil { - return err - } - defer func() { _ = f.Close() }() - - // Print each page listed. - for i, pageID := range pageIDs { - // Print a separator. - if i > 0 { - fmt.Fprintln(cmd.Stdout, "===============================================") - } - - // Print page to stdout. - if err := cmd.PrintPage(cmd.Stdout, f, pageID, pageSize); err != nil { - return err - } - } - - return nil -} - -// PrintPage prints a given page as hexidecimal. -func (cmd *DumpCommand) PrintPage(w io.Writer, r io.ReaderAt, pageID int, pageSize int) error { - const bytesPerLineN = 16 - - // Read page into buffer. - buf := make([]byte, pageSize) - addr := pageID * pageSize - if n, err := r.ReadAt(buf, int64(addr)); err != nil { - return err - } else if n != pageSize { - return io.ErrUnexpectedEOF - } - - // Write out to writer in 16-byte lines. - var prev []byte - var skipped bool - for offset := 0; offset < pageSize; offset += bytesPerLineN { - // Retrieve current 16-byte line. - line := buf[offset : offset+bytesPerLineN] - isLastLine := (offset == (pageSize - bytesPerLineN)) - - // If it's the same as the previous line then print a skip. - if bytes.Equal(line, prev) && !isLastLine { - if !skipped { - fmt.Fprintf(w, "%07x *\n", addr+offset) - skipped = true - } - } else { - // Print line as hexadecimal in 2-byte groups. - fmt.Fprintf(w, "%07x %04x %04x %04x %04x %04x %04x %04x %04x\n", addr+offset, - line[0:2], line[2:4], line[4:6], line[6:8], - line[8:10], line[10:12], line[12:14], line[14:16], - ) - - skipped = false - } - - // Save the previous line. - prev = line - } - fmt.Fprint(w, "\n") - - return nil -} - -// Usage returns the help message. -func (cmd *DumpCommand) Usage() string { - return strings.TrimLeft(` -usage: bolt dump -page PAGEID PATH - -Dump prints a hexidecimal dump of a single page. -`, "\n") -} - -// PageCommand represents the "page" command execution. -type PageCommand struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer -} - -// newPageCommand returns a PageCommand. -func newPageCommand(m *Main) *PageCommand { - return &PageCommand{ - Stdin: m.Stdin, - Stdout: m.Stdout, - Stderr: m.Stderr, - } -} - -// Run executes the command. -func (cmd *PageCommand) Run(args ...string) error { - // Parse flags. 
- fs := flag.NewFlagSet("", flag.ContinueOnError) - help := fs.Bool("h", false, "") - if err := fs.Parse(args); err != nil { - return err - } else if *help { - fmt.Fprintln(cmd.Stderr, cmd.Usage()) - return ErrUsage - } - - // Require database path and page id. - path := fs.Arg(0) - if path == "" { - return ErrPathRequired - } else if _, err := os.Stat(path); os.IsNotExist(err) { - return ErrFileNotFound - } - - // Read page ids. - pageIDs, err := atois(fs.Args()[1:]) - if err != nil { - return err - } else if len(pageIDs) == 0 { - return ErrPageIDRequired - } - - // Open database file handler. - f, err := os.Open(path) - if err != nil { - return err - } - defer func() { _ = f.Close() }() - - // Print each page listed. - for i, pageID := range pageIDs { - // Print a separator. - if i > 0 { - fmt.Fprintln(cmd.Stdout, "===============================================") - } - - // Retrieve page info and page size. - p, buf, err := ReadPage(path, pageID) - if err != nil { - return err - } - - // Print basic page info. - fmt.Fprintf(cmd.Stdout, "Page ID: %d\n", p.id) - fmt.Fprintf(cmd.Stdout, "Page Type: %s\n", p.Type()) - fmt.Fprintf(cmd.Stdout, "Total Size: %d bytes\n", len(buf)) - - // Print type-specific data. - switch p.Type() { - case "meta": - err = cmd.PrintMeta(cmd.Stdout, buf) - case "leaf": - err = cmd.PrintLeaf(cmd.Stdout, buf) - case "branch": - err = cmd.PrintBranch(cmd.Stdout, buf) - case "freelist": - err = cmd.PrintFreelist(cmd.Stdout, buf) - } - if err != nil { - return err - } - } - - return nil -} - -// PrintMeta prints the data from the meta page. -func (cmd *PageCommand) PrintMeta(w io.Writer, buf []byte) error { - m := (*meta)(unsafe.Pointer(&buf[PageHeaderSize])) - fmt.Fprintf(w, "Version: %d\n", m.version) - fmt.Fprintf(w, "Page Size: %d bytes\n", m.pageSize) - fmt.Fprintf(w, "Flags: %08x\n", m.flags) - fmt.Fprintf(w, "Root: \n", m.root.root) - fmt.Fprintf(w, "Freelist: \n", m.freelist) - fmt.Fprintf(w, "HWM: \n", m.pgid) - fmt.Fprintf(w, "Txn ID: %d\n", m.txid) - fmt.Fprintf(w, "Checksum: %016x\n", m.checksum) - fmt.Fprintf(w, "\n") - return nil -} - -// PrintLeaf prints the data for a leaf page. -func (cmd *PageCommand) PrintLeaf(w io.Writer, buf []byte) error { - p := (*page)(unsafe.Pointer(&buf[0])) - - // Print number of items. - fmt.Fprintf(w, "Item Count: %d\n", p.count) - fmt.Fprintf(w, "\n") - - // Print each key/value. - for i := uint16(0); i < p.count; i++ { - e := p.leafPageElement(i) - - // Format key as string. - var k string - if isPrintable(string(e.key())) { - k = fmt.Sprintf("%q", string(e.key())) - } else { - k = fmt.Sprintf("%x", string(e.key())) - } - - // Format value as string. - var v string - if (e.flags & uint32(bucketLeafFlag)) != 0 { - b := (*bucket)(unsafe.Pointer(&e.value()[0])) - v = fmt.Sprintf("", b.root, b.sequence) - } else if isPrintable(string(e.value())) { - k = fmt.Sprintf("%q", string(e.value())) - } else { - k = fmt.Sprintf("%x", string(e.value())) - } - - fmt.Fprintf(w, "%s: %s\n", k, v) - } - fmt.Fprintf(w, "\n") - return nil -} - -// PrintBranch prints the data for a leaf page. -func (cmd *PageCommand) PrintBranch(w io.Writer, buf []byte) error { - p := (*page)(unsafe.Pointer(&buf[0])) - - // Print number of items. - fmt.Fprintf(w, "Item Count: %d\n", p.count) - fmt.Fprintf(w, "\n") - - // Print each key/value. - for i := uint16(0); i < p.count; i++ { - e := p.branchPageElement(i) - - // Format key as string. 
- var k string - if isPrintable(string(e.key())) { - k = fmt.Sprintf("%q", string(e.key())) - } else { - k = fmt.Sprintf("%x", string(e.key())) - } - - fmt.Fprintf(w, "%s: \n", k, e.pgid) - } - fmt.Fprintf(w, "\n") - return nil -} - -// PrintFreelist prints the data for a freelist page. -func (cmd *PageCommand) PrintFreelist(w io.Writer, buf []byte) error { - p := (*page)(unsafe.Pointer(&buf[0])) - - // Print number of items. - fmt.Fprintf(w, "Item Count: %d\n", p.count) - fmt.Fprintf(w, "\n") - - // Print each page in the freelist. - ids := (*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)) - for i := uint16(0); i < p.count; i++ { - fmt.Fprintf(w, "%d\n", ids[i]) - } - fmt.Fprintf(w, "\n") - return nil -} - -// PrintPage prints a given page as hexidecimal. -func (cmd *PageCommand) PrintPage(w io.Writer, r io.ReaderAt, pageID int, pageSize int) error { - const bytesPerLineN = 16 - - // Read page into buffer. - buf := make([]byte, pageSize) - addr := pageID * pageSize - if n, err := r.ReadAt(buf, int64(addr)); err != nil { - return err - } else if n != pageSize { - return io.ErrUnexpectedEOF - } - - // Write out to writer in 16-byte lines. - var prev []byte - var skipped bool - for offset := 0; offset < pageSize; offset += bytesPerLineN { - // Retrieve current 16-byte line. - line := buf[offset : offset+bytesPerLineN] - isLastLine := (offset == (pageSize - bytesPerLineN)) - - // If it's the same as the previous line then print a skip. - if bytes.Equal(line, prev) && !isLastLine { - if !skipped { - fmt.Fprintf(w, "%07x *\n", addr+offset) - skipped = true - } - } else { - // Print line as hexadecimal in 2-byte groups. - fmt.Fprintf(w, "%07x %04x %04x %04x %04x %04x %04x %04x %04x\n", addr+offset, - line[0:2], line[2:4], line[4:6], line[6:8], - line[8:10], line[10:12], line[12:14], line[14:16], - ) - - skipped = false - } - - // Save the previous line. - prev = line - } - fmt.Fprint(w, "\n") - - return nil -} - -// Usage returns the help message. -func (cmd *PageCommand) Usage() string { - return strings.TrimLeft(` -usage: bolt page -page PATH pageid [pageid...] - -Page prints one or more pages in human readable format. -`, "\n") -} - -// PagesCommand represents the "pages" command execution. -type PagesCommand struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer -} - -// NewPagesCommand returns a PagesCommand. -func newPagesCommand(m *Main) *PagesCommand { - return &PagesCommand{ - Stdin: m.Stdin, - Stdout: m.Stdout, - Stderr: m.Stderr, - } -} - -// Run executes the command. -func (cmd *PagesCommand) Run(args ...string) error { - // Parse flags. - fs := flag.NewFlagSet("", flag.ContinueOnError) - help := fs.Bool("h", false, "") - if err := fs.Parse(args); err != nil { - return err - } else if *help { - fmt.Fprintln(cmd.Stderr, cmd.Usage()) - return ErrUsage - } - - // Require database path. - path := fs.Arg(0) - if path == "" { - return ErrPathRequired - } else if _, err := os.Stat(path); os.IsNotExist(err) { - return ErrFileNotFound - } - - // Open database. - db, err := bolt.Open(path, 0666, nil) - if err != nil { - return err - } - defer func() { _ = db.Close() }() - - // Write header. - fmt.Fprintln(cmd.Stdout, "ID TYPE ITEMS OVRFLW") - fmt.Fprintln(cmd.Stdout, "======== ========== ====== ======") - - return db.Update(func(tx *bolt.Tx) error { - var id int - for { - p, err := tx.Page(id) - if err != nil { - return &PageError{ID: id, Err: err} - } else if p == nil { - break - } - - // Only display count and overflow if this is a non-free page. 
- var count, overflow string - if p.Type != "free" { - count = strconv.Itoa(p.Count) - if p.OverflowCount > 0 { - overflow = strconv.Itoa(p.OverflowCount) - } - } - - // Print table row. - fmt.Fprintf(cmd.Stdout, "%-8d %-10s %-6s %-6s\n", p.ID, p.Type, count, overflow) - - // Move to the next non-overflow page. - id += 1 - if p.Type != "free" { - id += p.OverflowCount - } - } - return nil - }) -} - -// Usage returns the help message. -func (cmd *PagesCommand) Usage() string { - return strings.TrimLeft(` -usage: bolt pages PATH - -Pages prints a table of pages with their type (meta, leaf, branch, freelist). -Leaf and branch pages will show a key count in the "items" column while the -freelist will show the number of free pages in the "items" column. - -The "overflow" column shows the number of blocks that the page spills over -into. Normally there is no overflow but large keys and values can cause -a single page to take up multiple blocks. -`, "\n") -} - -// StatsCommand represents the "stats" command execution. -type StatsCommand struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer -} - -// NewStatsCommand returns a StatsCommand. -func newStatsCommand(m *Main) *StatsCommand { - return &StatsCommand{ - Stdin: m.Stdin, - Stdout: m.Stdout, - Stderr: m.Stderr, - } -} - -// Run executes the command. -func (cmd *StatsCommand) Run(args ...string) error { - // Parse flags. - fs := flag.NewFlagSet("", flag.ContinueOnError) - help := fs.Bool("h", false, "") - if err := fs.Parse(args); err != nil { - return err - } else if *help { - fmt.Fprintln(cmd.Stderr, cmd.Usage()) - return ErrUsage - } - - // Require database path. - path, prefix := fs.Arg(0), fs.Arg(1) - if path == "" { - return ErrPathRequired - } else if _, err := os.Stat(path); os.IsNotExist(err) { - return ErrFileNotFound - } - - // Open database. 
- db, err := bolt.Open(path, 0666, nil) - if err != nil { - return err - } - defer db.Close() - - return db.View(func(tx *bolt.Tx) error { - var s bolt.BucketStats - var count int - if err := tx.ForEach(func(name []byte, b *bolt.Bucket) error { - if bytes.HasPrefix(name, []byte(prefix)) { - s.Add(b.Stats()) - count += 1 - } - return nil - }); err != nil { - return err - } - - fmt.Fprintf(cmd.Stdout, "Aggregate statistics for %d buckets\n\n", count) - - fmt.Fprintln(cmd.Stdout, "Page count statistics") - fmt.Fprintf(cmd.Stdout, "\tNumber of logical branch pages: %d\n", s.BranchPageN) - fmt.Fprintf(cmd.Stdout, "\tNumber of physical branch overflow pages: %d\n", s.BranchOverflowN) - fmt.Fprintf(cmd.Stdout, "\tNumber of logical leaf pages: %d\n", s.LeafPageN) - fmt.Fprintf(cmd.Stdout, "\tNumber of physical leaf overflow pages: %d\n", s.LeafOverflowN) - - fmt.Fprintln(cmd.Stdout, "Tree statistics") - fmt.Fprintf(cmd.Stdout, "\tNumber of keys/value pairs: %d\n", s.KeyN) - fmt.Fprintf(cmd.Stdout, "\tNumber of levels in B+tree: %d\n", s.Depth) - - fmt.Fprintln(cmd.Stdout, "Page size utilization") - fmt.Fprintf(cmd.Stdout, "\tBytes allocated for physical branch pages: %d\n", s.BranchAlloc) - var percentage int - if s.BranchAlloc != 0 { - percentage = int(float32(s.BranchInuse) * 100.0 / float32(s.BranchAlloc)) - } - fmt.Fprintf(cmd.Stdout, "\tBytes actually used for branch data: %d (%d%%)\n", s.BranchInuse, percentage) - fmt.Fprintf(cmd.Stdout, "\tBytes allocated for physical leaf pages: %d\n", s.LeafAlloc) - percentage = 0 - if s.LeafAlloc != 0 { - percentage = int(float32(s.LeafInuse) * 100.0 / float32(s.LeafAlloc)) - } - fmt.Fprintf(cmd.Stdout, "\tBytes actually used for leaf data: %d (%d%%)\n", s.LeafInuse, percentage) - - fmt.Fprintln(cmd.Stdout, "Bucket statistics") - fmt.Fprintf(cmd.Stdout, "\tTotal number of buckets: %d\n", s.BucketN) - percentage = int(float32(s.InlineBucketN) * 100.0 / float32(s.BucketN)) - fmt.Fprintf(cmd.Stdout, "\tTotal number on inlined buckets: %d (%d%%)\n", s.InlineBucketN, percentage) - percentage = 0 - if s.LeafInuse != 0 { - percentage = int(float32(s.InlineBucketInuse) * 100.0 / float32(s.LeafInuse)) - } - fmt.Fprintf(cmd.Stdout, "\tBytes used for inlined buckets: %d (%d%%)\n", s.InlineBucketInuse, percentage) - - return nil - }) -} - -// Usage returns the help message. -func (cmd *StatsCommand) Usage() string { - return strings.TrimLeft(` -usage: bolt stats PATH - -Stats performs an extensive search of the database to track every page -reference. It starts at the current meta page and recursively iterates -through every accessible bucket. - -The following errors can be reported: - - already freed - The page is referenced more than once in the freelist. - - unreachable unfreed - The page is not referenced by a bucket or in the freelist. - - reachable freed - The page is referenced by a bucket but is also in the freelist. - - out of bounds - A page is referenced that is above the high water mark. - - multiple references - A page is referenced by more than one other page. - - invalid type - The page type is not "meta", "leaf", "branch", or "freelist". - -No errors should occur in your database. However, if for some reason you -experience corruption, please submit a ticket to the Bolt project page: - - https://github.com/boltdb/bolt/issues -`, "\n") -} - -var benchBucketName = []byte("bench") - -// BenchCommand represents the "bench" command execution. 
-type BenchCommand struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer -} - -// NewBenchCommand returns a BenchCommand using the -func newBenchCommand(m *Main) *BenchCommand { - return &BenchCommand{ - Stdin: m.Stdin, - Stdout: m.Stdout, - Stderr: m.Stderr, - } -} - -// Run executes the "bench" command. -func (cmd *BenchCommand) Run(args ...string) error { - // Parse CLI arguments. - options, err := cmd.ParseFlags(args) - if err != nil { - return err - } - - // Remove path if "-work" is not set. Otherwise keep path. - if options.Work { - fmt.Fprintf(cmd.Stdout, "work: %s\n", options.Path) - } else { - defer os.Remove(options.Path) - } - - // Create database. - db, err := bolt.Open(options.Path, 0666, nil) - if err != nil { - return err - } - db.NoSync = options.NoSync - defer db.Close() - - // Write to the database. - var results BenchResults - if err := cmd.runWrites(db, options, &results); err != nil { - return fmt.Errorf("write: %v", err) - } - - // Read from the database. - if err := cmd.runReads(db, options, &results); err != nil { - return fmt.Errorf("bench: read: %s", err) - } - - // Print results. - fmt.Fprintf(os.Stderr, "# Write\t%v\t(%v/op)\t(%v op/sec)\n", results.WriteDuration, results.WriteOpDuration(), results.WriteOpsPerSecond()) - fmt.Fprintf(os.Stderr, "# Read\t%v\t(%v/op)\t(%v op/sec)\n", results.ReadDuration, results.ReadOpDuration(), results.ReadOpsPerSecond()) - fmt.Fprintln(os.Stderr, "") - return nil -} - -// ParseFlags parses the command line flags. -func (cmd *BenchCommand) ParseFlags(args []string) (*BenchOptions, error) { - var options BenchOptions - - // Parse flagset. - fs := flag.NewFlagSet("", flag.ContinueOnError) - fs.StringVar(&options.ProfileMode, "profile-mode", "rw", "") - fs.StringVar(&options.WriteMode, "write-mode", "seq", "") - fs.StringVar(&options.ReadMode, "read-mode", "seq", "") - fs.IntVar(&options.Iterations, "count", 1000, "") - fs.IntVar(&options.BatchSize, "batch-size", 0, "") - fs.IntVar(&options.KeySize, "key-size", 8, "") - fs.IntVar(&options.ValueSize, "value-size", 32, "") - fs.StringVar(&options.CPUProfile, "cpuprofile", "", "") - fs.StringVar(&options.MemProfile, "memprofile", "", "") - fs.StringVar(&options.BlockProfile, "blockprofile", "", "") - fs.Float64Var(&options.FillPercent, "fill-percent", bolt.DefaultFillPercent, "") - fs.BoolVar(&options.NoSync, "no-sync", false, "") - fs.BoolVar(&options.Work, "work", false, "") - fs.StringVar(&options.Path, "path", "", "") - fs.SetOutput(cmd.Stderr) - if err := fs.Parse(args); err != nil { - return nil, err - } - - // Set batch size to iteration size if not set. - // Require that batch size can be evenly divided by the iteration count. - if options.BatchSize == 0 { - options.BatchSize = options.Iterations - } else if options.Iterations%options.BatchSize != 0 { - return nil, ErrNonDivisibleBatchSize - } - - // Generate temp path if one is not passed in. - if options.Path == "" { - f, err := ioutil.TempFile("", "bolt-bench-") - if err != nil { - return nil, fmt.Errorf("temp file: %s", err) - } - f.Close() - os.Remove(f.Name()) - options.Path = f.Name() - } - - return &options, nil -} - -// Writes to the database. -func (cmd *BenchCommand) runWrites(db *bolt.DB, options *BenchOptions, results *BenchResults) error { - // Start profiling for writes. 
- if options.ProfileMode == "rw" || options.ProfileMode == "w" { - cmd.startProfiling(options) - } - - t := time.Now() - - var err error - switch options.WriteMode { - case "seq": - err = cmd.runWritesSequential(db, options, results) - case "rnd": - err = cmd.runWritesRandom(db, options, results) - case "seq-nest": - err = cmd.runWritesSequentialNested(db, options, results) - case "rnd-nest": - err = cmd.runWritesRandomNested(db, options, results) - default: - return fmt.Errorf("invalid write mode: %s", options.WriteMode) - } - - // Save time to write. - results.WriteDuration = time.Since(t) - - // Stop profiling for writes only. - if options.ProfileMode == "w" { - cmd.stopProfiling() - } - - return err -} - -func (cmd *BenchCommand) runWritesSequential(db *bolt.DB, options *BenchOptions, results *BenchResults) error { - var i = uint32(0) - return cmd.runWritesWithSource(db, options, results, func() uint32 { i++; return i }) -} - -func (cmd *BenchCommand) runWritesRandom(db *bolt.DB, options *BenchOptions, results *BenchResults) error { - r := rand.New(rand.NewSource(time.Now().UnixNano())) - return cmd.runWritesWithSource(db, options, results, func() uint32 { return r.Uint32() }) -} - -func (cmd *BenchCommand) runWritesSequentialNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error { - var i = uint32(0) - return cmd.runWritesWithSource(db, options, results, func() uint32 { i++; return i }) -} - -func (cmd *BenchCommand) runWritesRandomNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error { - r := rand.New(rand.NewSource(time.Now().UnixNano())) - return cmd.runWritesWithSource(db, options, results, func() uint32 { return r.Uint32() }) -} - -func (cmd *BenchCommand) runWritesWithSource(db *bolt.DB, options *BenchOptions, results *BenchResults, keySource func() uint32) error { - results.WriteOps = options.Iterations - - for i := 0; i < options.Iterations; i += options.BatchSize { - if err := db.Update(func(tx *bolt.Tx) error { - b, _ := tx.CreateBucketIfNotExists(benchBucketName) - b.FillPercent = options.FillPercent - - for j := 0; j < options.BatchSize; j++ { - key := make([]byte, options.KeySize) - value := make([]byte, options.ValueSize) - - // Write key as uint32. - binary.BigEndian.PutUint32(key, keySource()) - - // Insert key/value. - if err := b.Put(key, value); err != nil { - return err - } - } - - return nil - }); err != nil { - return err - } - } - return nil -} - -func (cmd *BenchCommand) runWritesNestedWithSource(db *bolt.DB, options *BenchOptions, results *BenchResults, keySource func() uint32) error { - results.WriteOps = options.Iterations - - for i := 0; i < options.Iterations; i += options.BatchSize { - if err := db.Update(func(tx *bolt.Tx) error { - top, err := tx.CreateBucketIfNotExists(benchBucketName) - if err != nil { - return err - } - top.FillPercent = options.FillPercent - - // Create bucket key. - name := make([]byte, options.KeySize) - binary.BigEndian.PutUint32(name, keySource()) - - // Create bucket. - b, err := top.CreateBucketIfNotExists(name) - if err != nil { - return err - } - b.FillPercent = options.FillPercent - - for j := 0; j < options.BatchSize; j++ { - var key = make([]byte, options.KeySize) - var value = make([]byte, options.ValueSize) - - // Generate key as uint32. - binary.BigEndian.PutUint32(key, keySource()) - - // Insert value into subbucket. - if err := b.Put(key, value); err != nil { - return err - } - } - - return nil - }); err != nil { - return err - } - } - return nil -} - -// Reads from the database. 
-func (cmd *BenchCommand) runReads(db *bolt.DB, options *BenchOptions, results *BenchResults) error { - // Start profiling for reads. - if options.ProfileMode == "r" { - cmd.startProfiling(options) - } - - t := time.Now() - - var err error - switch options.ReadMode { - case "seq": - switch options.WriteMode { - case "seq-nest", "rnd-nest": - err = cmd.runReadsSequentialNested(db, options, results) - default: - err = cmd.runReadsSequential(db, options, results) - } - default: - return fmt.Errorf("invalid read mode: %s", options.ReadMode) - } - - // Save read time. - results.ReadDuration = time.Since(t) - - // Stop profiling for reads. - if options.ProfileMode == "rw" || options.ProfileMode == "r" { - cmd.stopProfiling() - } - - return err -} - -func (cmd *BenchCommand) runReadsSequential(db *bolt.DB, options *BenchOptions, results *BenchResults) error { - return db.View(func(tx *bolt.Tx) error { - t := time.Now() - - for { - var count int - - c := tx.Bucket(benchBucketName).Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - if v == nil { - return errors.New("invalid value") - } - count++ - } - - if options.WriteMode == "seq" && count != options.Iterations { - return fmt.Errorf("read seq: iter mismatch: expected %d, got %d", options.Iterations, count) - } - - results.ReadOps += count - - // Make sure we do this for at least a second. - if time.Since(t) >= time.Second { - break - } - } - - return nil - }) -} - -func (cmd *BenchCommand) runReadsSequentialNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error { - return db.View(func(tx *bolt.Tx) error { - t := time.Now() - - for { - var count int - var top = tx.Bucket(benchBucketName) - if err := top.ForEach(func(name, _ []byte) error { - c := top.Bucket(name).Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - if v == nil { - return ErrInvalidValue - } - count++ - } - return nil - }); err != nil { - return err - } - - if options.WriteMode == "seq-nest" && count != options.Iterations { - return fmt.Errorf("read seq-nest: iter mismatch: expected %d, got %d", options.Iterations, count) - } - - results.ReadOps += count - - // Make sure we do this for at least a second. - if time.Since(t) >= time.Second { - break - } - } - - return nil - }) -} - -// File handlers for the various profiles. -var cpuprofile, memprofile, blockprofile *os.File - -// Starts all profiles set on the options. -func (cmd *BenchCommand) startProfiling(options *BenchOptions) { - var err error - - // Start CPU profiling. - if options.CPUProfile != "" { - cpuprofile, err = os.Create(options.CPUProfile) - if err != nil { - fmt.Fprintf(cmd.Stderr, "bench: could not create cpu profile %q: %v\n", options.CPUProfile, err) - os.Exit(1) - } - pprof.StartCPUProfile(cpuprofile) - } - - // Start memory profiling. - if options.MemProfile != "" { - memprofile, err = os.Create(options.MemProfile) - if err != nil { - fmt.Fprintf(cmd.Stderr, "bench: could not create memory profile %q: %v\n", options.MemProfile, err) - os.Exit(1) - } - runtime.MemProfileRate = 4096 - } - - // Start fatal profiling. - if options.BlockProfile != "" { - blockprofile, err = os.Create(options.BlockProfile) - if err != nil { - fmt.Fprintf(cmd.Stderr, "bench: could not create block profile %q: %v\n", options.BlockProfile, err) - os.Exit(1) - } - runtime.SetBlockProfileRate(1) - } -} - -// Stops all profiles. 
-func (cmd *BenchCommand) stopProfiling() { - if cpuprofile != nil { - pprof.StopCPUProfile() - cpuprofile.Close() - cpuprofile = nil - } - - if memprofile != nil { - pprof.Lookup("heap").WriteTo(memprofile, 0) - memprofile.Close() - memprofile = nil - } - - if blockprofile != nil { - pprof.Lookup("block").WriteTo(blockprofile, 0) - blockprofile.Close() - blockprofile = nil - runtime.SetBlockProfileRate(0) - } -} - -// BenchOptions represents the set of options that can be passed to "bolt bench". -type BenchOptions struct { - ProfileMode string - WriteMode string - ReadMode string - Iterations int - BatchSize int - KeySize int - ValueSize int - CPUProfile string - MemProfile string - BlockProfile string - StatsInterval time.Duration - FillPercent float64 - NoSync bool - Work bool - Path string -} - -// BenchResults represents the performance results of the benchmark. -type BenchResults struct { - WriteOps int - WriteDuration time.Duration - ReadOps int - ReadDuration time.Duration -} - -// Returns the duration for a single write operation. -func (r *BenchResults) WriteOpDuration() time.Duration { - if r.WriteOps == 0 { - return 0 - } - return r.WriteDuration / time.Duration(r.WriteOps) -} - -// Returns average number of write operations that can be performed per second. -func (r *BenchResults) WriteOpsPerSecond() int { - var op = r.WriteOpDuration() - if op == 0 { - return 0 - } - return int(time.Second) / int(op) -} - -// Returns the duration for a single read operation. -func (r *BenchResults) ReadOpDuration() time.Duration { - if r.ReadOps == 0 { - return 0 - } - return r.ReadDuration / time.Duration(r.ReadOps) -} - -// Returns average number of read operations that can be performed per second. -func (r *BenchResults) ReadOpsPerSecond() int { - var op = r.ReadOpDuration() - if op == 0 { - return 0 - } - return int(time.Second) / int(op) -} - -type PageError struct { - ID int - Err error -} - -func (e *PageError) Error() string { - return fmt.Sprintf("page error: id=%d, err=%s", e.ID, e.Err) -} - -// isPrintable returns true if the string is valid unicode and contains only printable runes. -func isPrintable(s string) bool { - if !utf8.ValidString(s) { - return false - } - for _, ch := range s { - if !unicode.IsPrint(ch) { - return false - } - } - return true -} - -// ReadPage reads page info & full page data from a path. -// This is not transactionally safe. -func ReadPage(path string, pageID int) (*page, []byte, error) { - // Find page size. - pageSize, err := ReadPageSize(path) - if err != nil { - return nil, nil, fmt.Errorf("read page size: %s", err) - } - - // Open database file. - f, err := os.Open(path) - if err != nil { - return nil, nil, err - } - defer f.Close() - - // Read one block into buffer. - buf := make([]byte, pageSize) - if n, err := f.ReadAt(buf, int64(pageID*pageSize)); err != nil { - return nil, nil, err - } else if n != len(buf) { - return nil, nil, io.ErrUnexpectedEOF - } - - // Determine total number of blocks. - p := (*page)(unsafe.Pointer(&buf[0])) - overflowN := p.overflow - - // Re-read entire page (with overflow) into buffer. - buf = make([]byte, (int(overflowN)+1)*pageSize) - if n, err := f.ReadAt(buf, int64(pageID*pageSize)); err != nil { - return nil, nil, err - } else if n != len(buf) { - return nil, nil, io.ErrUnexpectedEOF - } - p = (*page)(unsafe.Pointer(&buf[0])) - - return p, buf, nil -} - -// ReadPageSize reads page size a path. -// This is not transactionally safe. -func ReadPageSize(path string) (int, error) { - // Open database file. 
- f, err := os.Open(path) - if err != nil { - return 0, err - } - defer f.Close() - - // Read 4KB chunk. - buf := make([]byte, 4096) - if _, err := io.ReadFull(f, buf); err != nil { - return 0, err - } - - // Read page size from metadata. - m := (*meta)(unsafe.Pointer(&buf[PageHeaderSize])) - return int(m.pageSize), nil -} - -// atois parses a slice of strings into integers. -func atois(strs []string) ([]int, error) { - var a []int - for _, str := range strs { - i, err := strconv.Atoi(str) - if err != nil { - return nil, err - } - a = append(a, i) - } - return a, nil -} - -// DO NOT EDIT. Copied from the "bolt" package. -const maxAllocSize = 0xFFFFFFF - -// DO NOT EDIT. Copied from the "bolt" package. -const ( - branchPageFlag = 0x01 - leafPageFlag = 0x02 - metaPageFlag = 0x04 - freelistPageFlag = 0x10 -) - -// DO NOT EDIT. Copied from the "bolt" package. -const bucketLeafFlag = 0x01 - -// DO NOT EDIT. Copied from the "bolt" package. -type pgid uint64 - -// DO NOT EDIT. Copied from the "bolt" package. -type txid uint64 - -// DO NOT EDIT. Copied from the "bolt" package. -type meta struct { - magic uint32 - version uint32 - pageSize uint32 - flags uint32 - root bucket - freelist pgid - pgid pgid - txid txid - checksum uint64 -} - -// DO NOT EDIT. Copied from the "bolt" package. -type bucket struct { - root pgid - sequence uint64 -} - -// DO NOT EDIT. Copied from the "bolt" package. -type page struct { - id pgid - flags uint16 - count uint16 - overflow uint32 - ptr uintptr -} - -// DO NOT EDIT. Copied from the "bolt" package. -func (p *page) Type() string { - if (p.flags & branchPageFlag) != 0 { - return "branch" - } else if (p.flags & leafPageFlag) != 0 { - return "leaf" - } else if (p.flags & metaPageFlag) != 0 { - return "meta" - } else if (p.flags & freelistPageFlag) != 0 { - return "freelist" - } - return fmt.Sprintf("unknown<%02x>", p.flags) -} - -// DO NOT EDIT. Copied from the "bolt" package. -func (p *page) leafPageElement(index uint16) *leafPageElement { - n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index] - return n -} - -// DO NOT EDIT. Copied from the "bolt" package. -func (p *page) branchPageElement(index uint16) *branchPageElement { - return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index] -} - -// DO NOT EDIT. Copied from the "bolt" package. -type branchPageElement struct { - pos uint32 - ksize uint32 - pgid pgid -} - -// DO NOT EDIT. Copied from the "bolt" package. -func (n *branchPageElement) key() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return buf[n.pos : n.pos+n.ksize] -} - -// DO NOT EDIT. Copied from the "bolt" package. -type leafPageElement struct { - flags uint32 - pos uint32 - ksize uint32 - vsize uint32 -} - -// DO NOT EDIT. Copied from the "bolt" package. -func (n *leafPageElement) key() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return buf[n.pos : n.pos+n.ksize] -} - -// DO NOT EDIT. Copied from the "bolt" package. 
-func (n *leafPageElement) value() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return buf[n.pos+n.ksize : n.pos+n.ksize+n.vsize] -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main_test.go b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main_test.go deleted file mode 100644 index b9e8c671f3..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main_test.go +++ /dev/null @@ -1,145 +0,0 @@ -package main_test - -import ( - "bytes" - "io/ioutil" - "os" - "strconv" - "testing" - - "github.com/boltdb/bolt" - "github.com/boltdb/bolt/cmd/bolt" -) - -// Ensure the "info" command can print information about a database. -func TestInfoCommand_Run(t *testing.T) { - db := MustOpen(0666, nil) - db.DB.Close() - defer db.Close() - - // Run the info command. - m := NewMain() - if err := m.Run("info", db.Path); err != nil { - t.Fatal(err) - } -} - -// Ensure the "stats" command can execute correctly. -func TestStatsCommand_Run(t *testing.T) { - // Ignore - if os.Getpagesize() != 4096 { - t.Skip("system does not use 4KB page size") - } - - db := MustOpen(0666, nil) - defer db.Close() - - if err := db.Update(func(tx *bolt.Tx) error { - // Create "foo" bucket. - b, err := tx.CreateBucket([]byte("foo")) - if err != nil { - return err - } - for i := 0; i < 10; i++ { - if err := b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil { - return err - } - } - - // Create "bar" bucket. - b, err = tx.CreateBucket([]byte("bar")) - if err != nil { - return err - } - for i := 0; i < 100; i++ { - if err := b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil { - return err - } - } - - // Create "baz" bucket. - b, err = tx.CreateBucket([]byte("baz")) - if err != nil { - return err - } - if err := b.Put([]byte("key"), []byte("value")); err != nil { - return err - } - - return nil - }); err != nil { - t.Fatal(err) - } - db.DB.Close() - - // Generate expected result. - exp := "Aggregate statistics for 3 buckets\n\n" + - "Page count statistics\n" + - "\tNumber of logical branch pages: 0\n" + - "\tNumber of physical branch overflow pages: 0\n" + - "\tNumber of logical leaf pages: 1\n" + - "\tNumber of physical leaf overflow pages: 0\n" + - "Tree statistics\n" + - "\tNumber of keys/value pairs: 111\n" + - "\tNumber of levels in B+tree: 1\n" + - "Page size utilization\n" + - "\tBytes allocated for physical branch pages: 0\n" + - "\tBytes actually used for branch data: 0 (0%)\n" + - "\tBytes allocated for physical leaf pages: 4096\n" + - "\tBytes actually used for leaf data: 1996 (48%)\n" + - "Bucket statistics\n" + - "\tTotal number of buckets: 3\n" + - "\tTotal number on inlined buckets: 2 (66%)\n" + - "\tBytes used for inlined buckets: 236 (11%)\n" - - // Run the command. - m := NewMain() - if err := m.Run("stats", db.Path); err != nil { - t.Fatal(err) - } else if m.Stdout.String() != exp { - t.Fatalf("unexpected stdout:\n\n%s", m.Stdout.String()) - } -} - -// Main represents a test wrapper for main.Main that records output. -type Main struct { - *main.Main - Stdin bytes.Buffer - Stdout bytes.Buffer - Stderr bytes.Buffer -} - -// NewMain returns a new instance of Main. -func NewMain() *Main { - m := &Main{Main: main.NewMain()} - m.Main.Stdin = &m.Stdin - m.Main.Stdout = &m.Stdout - m.Main.Stderr = &m.Stderr - return m -} - -// MustOpen creates a Bolt database in a temporary location. -func MustOpen(mode os.FileMode, options *bolt.Options) *DB { - // Create temporary path. 
- f, _ := ioutil.TempFile("", "bolt-") - f.Close() - os.Remove(f.Name()) - - db, err := bolt.Open(f.Name(), mode, options) - if err != nil { - panic(err.Error()) - } - return &DB{DB: db, Path: f.Name()} -} - -// DB is a test wrapper for bolt.DB. -type DB struct { - *bolt.DB - Path string -} - -// Close closes and removes the database. -func (db *DB) Close() error { - defer os.Remove(db.Path) - return db.DB.Close() -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/cursor.go b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/cursor.go index 006c54889e..1be9f35e3e 100644 --- a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/cursor.go +++ b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/cursor.go @@ -34,6 +34,13 @@ func (c *Cursor) First() (key []byte, value []byte) { p, n := c.bucket.pageNode(c.bucket.root) c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) c.first() + + // If we land on an empty page then move to the next value. + // https://github.com/boltdb/bolt/issues/450 + if c.stack[len(c.stack)-1].count() == 0 { + c.next() + } + k, v, flags := c.keyValue() if (flags & uint32(bucketLeafFlag)) != 0 { return k, nil @@ -209,28 +216,37 @@ func (c *Cursor) last() { // next moves to the next leaf element and returns the key and value. // If the cursor is at the last leaf element then it stays there and returns nil. func (c *Cursor) next() (key []byte, value []byte, flags uint32) { - // Attempt to move over one element until we're successful. - // Move up the stack as we hit the end of each page in our stack. - var i int - for i = len(c.stack) - 1; i >= 0; i-- { - elem := &c.stack[i] - if elem.index < elem.count()-1 { - elem.index++ - break + for { + // Attempt to move over one element until we're successful. + // Move up the stack as we hit the end of each page in our stack. + var i int + for i = len(c.stack) - 1; i >= 0; i-- { + elem := &c.stack[i] + if elem.index < elem.count()-1 { + elem.index++ + break + } } - } - // If we've hit the root page then stop and return. This will leave the - // cursor on the last element of the last page. - if i == -1 { - return nil, nil, 0 - } + // If we've hit the root page then stop and return. This will leave the + // cursor on the last element of the last page. + if i == -1 { + return nil, nil, 0 + } - // Otherwise start from where we left off in the stack and find the - // first element of the first leaf page. - c.stack = c.stack[:i+1] - c.first() - return c.keyValue() + // Otherwise start from where we left off in the stack and find the + // first element of the first leaf page. + c.stack = c.stack[:i+1] + c.first() + + // If this is an empty page then restart and move back up the stack. + // https://github.com/boltdb/bolt/issues/450 + if c.stack[len(c.stack)-1].count() == 0 { + continue + } + + return c.keyValue() + } } // search recursively performs a binary search against a given page/node until it finds a given key. diff --git a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/cursor_test.go b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/cursor_test.go deleted file mode 100644 index b12e1f9153..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/cursor_test.go +++ /dev/null @@ -1,511 +0,0 @@ -package bolt_test - -import ( - "bytes" - "encoding/binary" - "fmt" - "os" - "sort" - "testing" - "testing/quick" - - "github.com/boltdb/bolt" -) - -// Ensure that a cursor can return a reference to the bucket that created it. 
-func TestCursor_Bucket(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - b, _ := tx.CreateBucket([]byte("widgets")) - c := b.Cursor() - equals(t, b, c.Bucket()) - return nil - }) -} - -// Ensure that a Tx cursor can seek to the appropriate keys. -func TestCursor_Seek(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - ok(t, err) - ok(t, b.Put([]byte("foo"), []byte("0001"))) - ok(t, b.Put([]byte("bar"), []byte("0002"))) - ok(t, b.Put([]byte("baz"), []byte("0003"))) - _, err = b.CreateBucket([]byte("bkt")) - ok(t, err) - return nil - }) - db.View(func(tx *bolt.Tx) error { - c := tx.Bucket([]byte("widgets")).Cursor() - - // Exact match should go to the key. - k, v := c.Seek([]byte("bar")) - equals(t, []byte("bar"), k) - equals(t, []byte("0002"), v) - - // Inexact match should go to the next key. - k, v = c.Seek([]byte("bas")) - equals(t, []byte("baz"), k) - equals(t, []byte("0003"), v) - - // Low key should go to the first key. - k, v = c.Seek([]byte("")) - equals(t, []byte("bar"), k) - equals(t, []byte("0002"), v) - - // High key should return no key. - k, v = c.Seek([]byte("zzz")) - assert(t, k == nil, "") - assert(t, v == nil, "") - - // Buckets should return their key but no value. - k, v = c.Seek([]byte("bkt")) - equals(t, []byte("bkt"), k) - assert(t, v == nil, "") - - return nil - }) -} - -func TestCursor_Delete(t *testing.T) { - db := NewTestDB() - defer db.Close() - - var count = 1000 - - // Insert every other key between 0 and $count. - db.Update(func(tx *bolt.Tx) error { - b, _ := tx.CreateBucket([]byte("widgets")) - for i := 0; i < count; i += 1 { - k := make([]byte, 8) - binary.BigEndian.PutUint64(k, uint64(i)) - b.Put(k, make([]byte, 100)) - } - b.CreateBucket([]byte("sub")) - return nil - }) - - db.Update(func(tx *bolt.Tx) error { - c := tx.Bucket([]byte("widgets")).Cursor() - bound := make([]byte, 8) - binary.BigEndian.PutUint64(bound, uint64(count/2)) - for key, _ := c.First(); bytes.Compare(key, bound) < 0; key, _ = c.Next() { - if err := c.Delete(); err != nil { - return err - } - } - c.Seek([]byte("sub")) - err := c.Delete() - equals(t, err, bolt.ErrIncompatibleValue) - return nil - }) - - db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - equals(t, b.Stats().KeyN, count/2+1) - return nil - }) -} - -// Ensure that a Tx cursor can seek to the appropriate keys when there are a -// large number of keys. This test also checks that seek will always move -// forward to the next key. -// -// Related: https://github.com/boltdb/bolt/pull/187 -func TestCursor_Seek_Large(t *testing.T) { - db := NewTestDB() - defer db.Close() - - var count = 10000 - - // Insert every other key between 0 and $count. - db.Update(func(tx *bolt.Tx) error { - b, _ := tx.CreateBucket([]byte("widgets")) - for i := 0; i < count; i += 100 { - for j := i; j < i+100; j += 2 { - k := make([]byte, 8) - binary.BigEndian.PutUint64(k, uint64(j)) - b.Put(k, make([]byte, 100)) - } - } - return nil - }) - - db.View(func(tx *bolt.Tx) error { - c := tx.Bucket([]byte("widgets")).Cursor() - for i := 0; i < count; i++ { - seek := make([]byte, 8) - binary.BigEndian.PutUint64(seek, uint64(i)) - - k, _ := c.Seek(seek) - - // The last seek is beyond the end of the the range so - // it should return nil. - if i == count-1 { - assert(t, k == nil, "") - continue - } - - // Otherwise we should seek to the exact key or the next key. 
- num := binary.BigEndian.Uint64(k) - if i%2 == 0 { - equals(t, uint64(i), num) - } else { - equals(t, uint64(i+1), num) - } - } - - return nil - }) -} - -// Ensure that a cursor can iterate over an empty bucket without error. -func TestCursor_EmptyBucket(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) - db.View(func(tx *bolt.Tx) error { - c := tx.Bucket([]byte("widgets")).Cursor() - k, v := c.First() - assert(t, k == nil, "") - assert(t, v == nil, "") - return nil - }) -} - -// Ensure that a Tx cursor can reverse iterate over an empty bucket without error. -func TestCursor_EmptyBucketReverse(t *testing.T) { - db := NewTestDB() - defer db.Close() - - db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) - db.View(func(tx *bolt.Tx) error { - c := tx.Bucket([]byte("widgets")).Cursor() - k, v := c.Last() - assert(t, k == nil, "") - assert(t, v == nil, "") - return nil - }) -} - -// Ensure that a Tx cursor can iterate over a single root with a couple elements. -func TestCursor_Iterate_Leaf(t *testing.T) { - db := NewTestDB() - defer db.Close() - - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte{}) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{0}) - tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{1}) - return nil - }) - tx, _ := db.Begin(false) - c := tx.Bucket([]byte("widgets")).Cursor() - - k, v := c.First() - equals(t, string(k), "bar") - equals(t, v, []byte{1}) - - k, v = c.Next() - equals(t, string(k), "baz") - equals(t, v, []byte{}) - - k, v = c.Next() - equals(t, string(k), "foo") - equals(t, v, []byte{0}) - - k, v = c.Next() - assert(t, k == nil, "") - assert(t, v == nil, "") - - k, v = c.Next() - assert(t, k == nil, "") - assert(t, v == nil, "") - - tx.Rollback() -} - -// Ensure that a Tx cursor can iterate in reverse over a single root with a couple elements. -func TestCursor_LeafRootReverse(t *testing.T) { - db := NewTestDB() - defer db.Close() - - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte{}) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{0}) - tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{1}) - return nil - }) - tx, _ := db.Begin(false) - c := tx.Bucket([]byte("widgets")).Cursor() - - k, v := c.Last() - equals(t, string(k), "foo") - equals(t, v, []byte{0}) - - k, v = c.Prev() - equals(t, string(k), "baz") - equals(t, v, []byte{}) - - k, v = c.Prev() - equals(t, string(k), "bar") - equals(t, v, []byte{1}) - - k, v = c.Prev() - assert(t, k == nil, "") - assert(t, v == nil, "") - - k, v = c.Prev() - assert(t, k == nil, "") - assert(t, v == nil, "") - - tx.Rollback() -} - -// Ensure that a Tx cursor can restart from the beginning. 
-func TestCursor_Restart(t *testing.T) { - db := NewTestDB() - defer db.Close() - - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{}) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{}) - return nil - }) - - tx, _ := db.Begin(false) - c := tx.Bucket([]byte("widgets")).Cursor() - - k, _ := c.First() - equals(t, string(k), "bar") - - k, _ = c.Next() - equals(t, string(k), "foo") - - k, _ = c.First() - equals(t, string(k), "bar") - - k, _ = c.Next() - equals(t, string(k), "foo") - - tx.Rollback() -} - -// Ensure that a Tx can iterate over all elements in a bucket. -func TestCursor_QuickCheck(t *testing.T) { - f := func(items testdata) bool { - db := NewTestDB() - defer db.Close() - - // Bulk insert all values. - tx, _ := db.Begin(true) - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - for _, item := range items { - ok(t, b.Put(item.Key, item.Value)) - } - ok(t, tx.Commit()) - - // Sort test data. - sort.Sort(items) - - // Iterate over all items and check consistency. - var index = 0 - tx, _ = db.Begin(false) - c := tx.Bucket([]byte("widgets")).Cursor() - for k, v := c.First(); k != nil && index < len(items); k, v = c.Next() { - equals(t, k, items[index].Key) - equals(t, v, items[index].Value) - index++ - } - equals(t, len(items), index) - tx.Rollback() - - return true - } - if err := quick.Check(f, qconfig()); err != nil { - t.Error(err) - } -} - -// Ensure that a transaction can iterate over all elements in a bucket in reverse. -func TestCursor_QuickCheck_Reverse(t *testing.T) { - f := func(items testdata) bool { - db := NewTestDB() - defer db.Close() - - // Bulk insert all values. - tx, _ := db.Begin(true) - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - for _, item := range items { - ok(t, b.Put(item.Key, item.Value)) - } - ok(t, tx.Commit()) - - // Sort test data. - sort.Sort(revtestdata(items)) - - // Iterate over all items and check consistency. - var index = 0 - tx, _ = db.Begin(false) - c := tx.Bucket([]byte("widgets")).Cursor() - for k, v := c.Last(); k != nil && index < len(items); k, v = c.Prev() { - equals(t, k, items[index].Key) - equals(t, v, items[index].Value) - index++ - } - equals(t, len(items), index) - tx.Rollback() - - return true - } - if err := quick.Check(f, qconfig()); err != nil { - t.Error(err) - } -} - -// Ensure that a Tx cursor can iterate over subbuckets. -func TestCursor_QuickCheck_BucketsOnly(t *testing.T) { - db := NewTestDB() - defer db.Close() - - db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - ok(t, err) - _, err = b.CreateBucket([]byte("foo")) - ok(t, err) - _, err = b.CreateBucket([]byte("bar")) - ok(t, err) - _, err = b.CreateBucket([]byte("baz")) - ok(t, err) - return nil - }) - db.View(func(tx *bolt.Tx) error { - var names []string - c := tx.Bucket([]byte("widgets")).Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - names = append(names, string(k)) - assert(t, v == nil, "") - } - equals(t, names, []string{"bar", "baz", "foo"}) - return nil - }) -} - -// Ensure that a Tx cursor can reverse iterate over subbuckets. 
-func TestCursor_QuickCheck_BucketsOnly_Reverse(t *testing.T) { - db := NewTestDB() - defer db.Close() - - db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - ok(t, err) - _, err = b.CreateBucket([]byte("foo")) - ok(t, err) - _, err = b.CreateBucket([]byte("bar")) - ok(t, err) - _, err = b.CreateBucket([]byte("baz")) - ok(t, err) - return nil - }) - db.View(func(tx *bolt.Tx) error { - var names []string - c := tx.Bucket([]byte("widgets")).Cursor() - for k, v := c.Last(); k != nil; k, v = c.Prev() { - names = append(names, string(k)) - assert(t, v == nil, "") - } - equals(t, names, []string{"foo", "baz", "bar"}) - return nil - }) -} - -func ExampleCursor() { - // Open the database. - db, _ := bolt.Open(tempfile(), 0666, nil) - defer os.Remove(db.Path()) - defer db.Close() - - // Start a read-write transaction. - db.Update(func(tx *bolt.Tx) error { - // Create a new bucket. - tx.CreateBucket([]byte("animals")) - - // Insert data into a bucket. - b := tx.Bucket([]byte("animals")) - b.Put([]byte("dog"), []byte("fun")) - b.Put([]byte("cat"), []byte("lame")) - b.Put([]byte("liger"), []byte("awesome")) - - // Create a cursor for iteration. - c := b.Cursor() - - // Iterate over items in sorted key order. This starts from the - // first key/value pair and updates the k/v variables to the - // next key/value on each iteration. - // - // The loop finishes at the end of the cursor when a nil key is returned. - for k, v := c.First(); k != nil; k, v = c.Next() { - fmt.Printf("A %s is %s.\n", k, v) - } - - return nil - }) - - // Output: - // A cat is lame. - // A dog is fun. - // A liger is awesome. -} - -func ExampleCursor_reverse() { - // Open the database. - db, _ := bolt.Open(tempfile(), 0666, nil) - defer os.Remove(db.Path()) - defer db.Close() - - // Start a read-write transaction. - db.Update(func(tx *bolt.Tx) error { - // Create a new bucket. - tx.CreateBucket([]byte("animals")) - - // Insert data into a bucket. - b := tx.Bucket([]byte("animals")) - b.Put([]byte("dog"), []byte("fun")) - b.Put([]byte("cat"), []byte("lame")) - b.Put([]byte("liger"), []byte("awesome")) - - // Create a cursor for iteration. - c := b.Cursor() - - // Iterate over items in reverse sorted key order. This starts - // from the last key/value pair and updates the k/v variables to - // the previous key/value on each iteration. - // - // The loop finishes at the beginning of the cursor when a nil key - // is returned. - for k, v := c.Last(); k != nil; k, v = c.Prev() { - fmt.Printf("A %s is %s.\n", k, v) - } - - return nil - }) - - // Output: - // A liger is awesome. - // A dog is fun. - // A cat is lame. -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/db.go b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/db.go index d39c4aa9cc..501d36aac2 100644 --- a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/db.go +++ b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/db.go @@ -1,8 +1,10 @@ package bolt import ( + "errors" "fmt" "hash/fnv" + "log" "os" "runtime" "runtime/debug" @@ -24,13 +26,14 @@ const magic uint32 = 0xED0CDAED // IgnoreNoSync specifies whether the NoSync field of a DB is ignored when // syncing changes to a file. This is required as some operating systems, // such as OpenBSD, do not have a unified buffer cache (UBC) and writes -// must be synchronzied using the msync(2) syscall. +// must be synchronized using the msync(2) syscall. const IgnoreNoSync = runtime.GOOS == "openbsd" // Default values if not set in a DB instance. 
const ( DefaultMaxBatchSize int = 1000 DefaultMaxBatchDelay = 10 * time.Millisecond + DefaultAllocSize = 16 * 1024 * 1024 ) // DB represents a collection of buckets persisted to a file on disk. @@ -63,6 +66,10 @@ type DB struct { // https://github.com/boltdb/bolt/issues/284 NoGrowSync bool + // If you want to read the entire database fast, you can set MmapFlag to + // syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead. + MmapFlags int + // MaxBatchSize is the maximum size of a batch. Default value is // copied from DefaultMaxBatchSize in Open. // @@ -79,11 +86,18 @@ type DB struct { // Do not change concurrently with calls to Batch. MaxBatchDelay time.Duration + // AllocSize is the amount of space allocated when the database + // needs to create new pages. This is done to amortize the cost + // of truncate() and fsync() when growing the data file. + AllocSize int + path string file *os.File + lockfile *os.File // windows only dataref []byte // mmap'ed readonly, write throws SEGV data *[maxMapSize]byte datasz int + filesz int // current on disk file size meta0 *meta meta1 *meta pageSize int @@ -136,10 +150,12 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { options = DefaultOptions } db.NoGrowSync = options.NoGrowSync + db.MmapFlags = options.MmapFlags // Set default values for later DB operations. db.MaxBatchSize = DefaultMaxBatchSize db.MaxBatchDelay = DefaultMaxBatchDelay + db.AllocSize = DefaultAllocSize flag := os.O_RDWR if options.ReadOnly { @@ -162,7 +178,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { // if !options.ReadOnly. // The database file is locked using the shared lock (more than one process may // hold a lock at the same time) otherwise (options.ReadOnly is set). - if err := flock(db.file, !db.readOnly, options.Timeout); err != nil { + if err := flock(db, mode, !db.readOnly, options.Timeout); err != nil { _ = db.close() return nil, err } @@ -172,7 +188,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { // Initialize the database if it doesn't exist. if info, err := db.file.Stat(); err != nil { - return nil, fmt.Errorf("stat error: %s", err) + return nil, err } else if info.Size() == 0 { // Initialize new files with meta pages. if err := db.init(); err != nil { @@ -184,14 +200,14 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { if _, err := db.file.ReadAt(buf[:], 0); err == nil { m := db.pageInBuffer(buf[:], 0).meta() if err := m.validate(); err != nil { - return nil, fmt.Errorf("meta0 error: %s", err) + return nil, err } db.pageSize = int(m.pageSize) } } // Memory map the data file. - if err := db.mmap(0); err != nil { + if err := db.mmap(options.InitialMmapSize); err != nil { _ = db.close() return nil, err } @@ -248,10 +264,10 @@ func (db *DB) mmap(minsz int) error { // Validate the meta pages. if err := db.meta0.validate(); err != nil { - return fmt.Errorf("meta0 error: %s", err) + return err } if err := db.meta1.validate(); err != nil { - return fmt.Errorf("meta1 error: %s", err) + return err } return nil @@ -266,7 +282,7 @@ func (db *DB) munmap() error { } // mmapSize determines the appropriate size for the mmap given the current size -// of the database. The minimum size is 1MB and doubles until it reaches 1GB. +// of the database. The minimum size is 32KB and doubles until it reaches 1GB. // Returns an error if the new mmap size is greater than the max allowed. 
func (db *DB) mmapSize(size int) (int, error) { // Double the size from 32KB until 1GB. @@ -364,6 +380,10 @@ func (db *DB) Close() error { } func (db *DB) close() error { + if !db.opened { + return nil + } + db.opened = false db.freelist = nil @@ -382,7 +402,9 @@ func (db *DB) close() error { // No need to unlock read-only file. if !db.readOnly { // Unlock the file. - _ = funlock(db.file) + if err := funlock(db); err != nil { + log.Printf("bolt.Close(): funlock error: %s", err) + } } // Close the file descriptor. @@ -401,11 +423,15 @@ func (db *DB) close() error { // will cause the calls to block and be serialized until the current write // transaction finishes. // -// Transactions should not be depedent on one another. Opening a read +// Transactions should not be dependent on one another. Opening a read // transaction and a write transaction in the same goroutine can cause the // writer to deadlock because the database periodically needs to re-mmap itself // as it grows and it cannot do that while a read transaction is open. // +// If a long running read transaction (for example, a snapshot transaction) is +// needed, you might want to set DB.InitialMmapSize to a large enough value +// to avoid potential blocking of write transaction. +// // IMPORTANT: You must close read-only transactions after you are finished or // else the database will not reclaim old pages. func (db *DB) Begin(writable bool) (*Tx, error) { @@ -589,6 +615,136 @@ func (db *DB) View(fn func(*Tx) error) error { return nil } +// Batch calls fn as part of a batch. It behaves similar to Update, +// except: +// +// 1. concurrent Batch calls can be combined into a single Bolt +// transaction. +// +// 2. the function passed to Batch may be called multiple times, +// regardless of whether it returns error or not. +// +// This means that Batch function side effects must be idempotent and +// take permanent effect only after a successful return is seen in +// caller. +// +// The maximum batch size and delay can be adjusted with DB.MaxBatchSize +// and DB.MaxBatchDelay, respectively. +// +// Batch is only useful when there are multiple goroutines calling it. +func (db *DB) Batch(fn func(*Tx) error) error { + errCh := make(chan error, 1) + + db.batchMu.Lock() + if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) { + // There is no existing batch, or the existing batch is full; start a new one. + db.batch = &batch{ + db: db, + } + db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger) + } + db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh}) + if len(db.batch.calls) >= db.MaxBatchSize { + // wake up batch, it's ready to run + go db.batch.trigger() + } + db.batchMu.Unlock() + + err := <-errCh + if err == trySolo { + err = db.Update(fn) + } + return err +} + +type call struct { + fn func(*Tx) error + err chan<- error +} + +type batch struct { + db *DB + timer *time.Timer + start sync.Once + calls []call +} + +// trigger runs the batch if it hasn't already been run. +func (b *batch) trigger() { + b.start.Do(b.run) +} + +// run performs the transactions in the batch and communicates results +// back to DB.Batch. +func (b *batch) run() { + b.db.batchMu.Lock() + b.timer.Stop() + // Make sure no new work is added to this batch, but don't break + // other batches. 
+ if b.db.batch == b { + b.db.batch = nil + } + b.db.batchMu.Unlock() + +retry: + for len(b.calls) > 0 { + var failIdx = -1 + err := b.db.Update(func(tx *Tx) error { + for i, c := range b.calls { + if err := safelyCall(c.fn, tx); err != nil { + failIdx = i + return err + } + } + return nil + }) + + if failIdx >= 0 { + // take the failing transaction out of the batch. it's + // safe to shorten b.calls here because db.batch no longer + // points to us, and we hold the mutex anyway. + c := b.calls[failIdx] + b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1] + // tell the submitter re-run it solo, continue with the rest of the batch + c.err <- trySolo + continue retry + } + + // pass success, or bolt internal errors, to all callers + for _, c := range b.calls { + if c.err != nil { + c.err <- err + } + } + break retry + } +} + +// trySolo is a special sentinel error value used for signaling that a +// transaction function should be re-run. It should never be seen by +// callers. +var trySolo = errors.New("batch function returned an error and should be re-run solo") + +type panicked struct { + reason interface{} +} + +func (p panicked) Error() string { + if err, ok := p.reason.(error); ok { + return err.Error() + } + return fmt.Sprintf("panic: %v", p.reason) +} + +func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { + defer func() { + if p := recover(); p != nil { + err = panicked{p} + } + }() + return fn(tx) +} + // Sync executes fdatasync() against the database file handle. // // This is not necessary under normal operation, however, if you use NoSync @@ -655,6 +811,38 @@ func (db *DB) allocate(count int) (*page, error) { return p, nil } +// grow grows the size of the database to the given sz. +func (db *DB) grow(sz int) error { + // Ignore if the new size is less than available file size. + if sz <= db.filesz { + return nil + } + + // If the data is smaller than the alloc size then only allocate what's needed. + // Once it goes over the allocation size then allocate in chunks. + if db.datasz < db.AllocSize { + sz = db.datasz + } else { + sz += db.AllocSize + } + + // Truncate and fsync to ensure file size metadata is flushed. + // https://github.com/boltdb/bolt/issues/284 + if !db.NoGrowSync && !db.readOnly { + if runtime.GOOS != "windows" { + if err := db.file.Truncate(int64(sz)); err != nil { + return fmt.Errorf("file resize error: %s", err) + } + } + if err := db.file.Sync(); err != nil { + return fmt.Errorf("file sync error: %s", err) + } + } + + db.filesz = sz + return nil +} + func (db *DB) IsReadOnly() bool { return db.readOnly } @@ -672,6 +860,19 @@ type Options struct { // Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to // grab a shared lock (UNIX). ReadOnly bool + + // Sets the DB.MmapFlags flag before memory mapping the file. + MmapFlags int + + // InitialMmapSize is the initial mmap size of the database + // in bytes. Read transactions won't block write transaction + // if the InitialMmapSize is large enough to hold database mmap + // size. (See DB.Begin for more information) + // + // If <=0, the initial map size is 0. + // If initialMmapSize is smaller than the previous database size, + // it takes no effect. + InitialMmapSize int } // DefaultOptions represent the options used if nil options are passed into Open(). 
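The Options.InitialMmapSize comment above explains that pre-sizing the mmap keeps a long-lived read transaction from blocking writers that would otherwise have to re-mmap a growing file. A minimal sketch, assuming a 1 GiB pre-map is large enough for the data set (the size and path are illustrative):

package main

import (
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	// Pre-map 1 GiB so a long-lived snapshot transaction does not force
	// writers to wait for a re-mmap while the file grows. The value is an
	// illustrative guess and should exceed the expected database size.
	db, err := bolt.Open("/tmp/snapshot.db", 0600, &bolt.Options{
		InitialMmapSize: 1 << 30,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Long-running read-only snapshot; it must still be closed eventually
	// so old pages can be reclaimed.
	tx, err := db.Begin(false)
	if err != nil {
		log.Fatal(err)
	}
	defer tx.Rollback()

	// ... walk the snapshot here while other goroutines keep writing ...
}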
diff --git a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/db_test.go b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/db_test.go deleted file mode 100644 index dddf22b46f..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/db_test.go +++ /dev/null @@ -1,903 +0,0 @@ -package bolt_test - -import ( - "encoding/binary" - "errors" - "flag" - "fmt" - "io/ioutil" - "os" - "regexp" - "runtime" - "sort" - "strings" - "testing" - "time" - - "github.com/boltdb/bolt" -) - -var statsFlag = flag.Bool("stats", false, "show performance stats") - -// Ensure that opening a database with a bad path returns an error. -func TestOpen_BadPath(t *testing.T) { - db, err := bolt.Open("", 0666, nil) - assert(t, err != nil, "err: %s", err) - assert(t, db == nil, "") -} - -// Ensure that a database can be opened without error. -func TestOpen(t *testing.T) { - path := tempfile() - defer os.Remove(path) - db, err := bolt.Open(path, 0666, nil) - assert(t, db != nil, "") - ok(t, err) - equals(t, db.Path(), path) - ok(t, db.Close()) -} - -// Ensure that opening an already open database file will timeout. -func TestOpen_Timeout(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("timeout not supported on windows") - } - - path := tempfile() - defer os.Remove(path) - - // Open a data file. - db0, err := bolt.Open(path, 0666, nil) - assert(t, db0 != nil, "") - ok(t, err) - - // Attempt to open the database again. - start := time.Now() - db1, err := bolt.Open(path, 0666, &bolt.Options{Timeout: 100 * time.Millisecond}) - assert(t, db1 == nil, "") - equals(t, bolt.ErrTimeout, err) - assert(t, time.Since(start) > 100*time.Millisecond, "") - - db0.Close() -} - -// Ensure that opening an already open database file will wait until its closed. -func TestOpen_Wait(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("timeout not supported on windows") - } - - path := tempfile() - defer os.Remove(path) - - // Open a data file. - db0, err := bolt.Open(path, 0666, nil) - assert(t, db0 != nil, "") - ok(t, err) - - // Close it in just a bit. - time.AfterFunc(100*time.Millisecond, func() { db0.Close() }) - - // Attempt to open the database again. - start := time.Now() - db1, err := bolt.Open(path, 0666, &bolt.Options{Timeout: 200 * time.Millisecond}) - assert(t, db1 != nil, "") - ok(t, err) - assert(t, time.Since(start) > 100*time.Millisecond, "") -} - -// Ensure that opening a database does not increase its size. -// https://github.com/boltdb/bolt/issues/291 -func TestOpen_Size(t *testing.T) { - // Open a data file. - db := NewTestDB() - path := db.Path() - defer db.Close() - - // Insert until we get above the minimum 4MB size. - ok(t, db.Update(func(tx *bolt.Tx) error { - b, _ := tx.CreateBucketIfNotExists([]byte("data")) - for i := 0; i < 10000; i++ { - ok(t, b.Put([]byte(fmt.Sprintf("%04d", i)), make([]byte, 1000))) - } - return nil - })) - - // Close database and grab the size. - db.DB.Close() - sz := fileSize(path) - if sz == 0 { - t.Fatalf("unexpected new file size: %d", sz) - } - - // Reopen database, update, and check size again. - db0, err := bolt.Open(path, 0666, nil) - ok(t, err) - ok(t, db0.Update(func(tx *bolt.Tx) error { return tx.Bucket([]byte("data")).Put([]byte{0}, []byte{0}) })) - ok(t, db0.Close()) - newSz := fileSize(path) - if newSz == 0 { - t.Fatalf("unexpected new file size: %d", newSz) - } - - // Compare the original size with the new size. 
- if sz != newSz { - t.Fatalf("unexpected file growth: %d => %d", sz, newSz) - } -} - -// Ensure that opening a database beyond the max step size does not increase its size. -// https://github.com/boltdb/bolt/issues/303 -func TestOpen_Size_Large(t *testing.T) { - if testing.Short() { - t.Skip("short mode") - } - - // Open a data file. - db := NewTestDB() - path := db.Path() - defer db.Close() - - // Insert until we get above the minimum 4MB size. - var index uint64 - for i := 0; i < 10000; i++ { - ok(t, db.Update(func(tx *bolt.Tx) error { - b, _ := tx.CreateBucketIfNotExists([]byte("data")) - for j := 0; j < 1000; j++ { - ok(t, b.Put(u64tob(index), make([]byte, 50))) - index++ - } - return nil - })) - } - - // Close database and grab the size. - db.DB.Close() - sz := fileSize(path) - if sz == 0 { - t.Fatalf("unexpected new file size: %d", sz) - } else if sz < (1 << 30) { - t.Fatalf("expected larger initial size: %d", sz) - } - - // Reopen database, update, and check size again. - db0, err := bolt.Open(path, 0666, nil) - ok(t, err) - ok(t, db0.Update(func(tx *bolt.Tx) error { return tx.Bucket([]byte("data")).Put([]byte{0}, []byte{0}) })) - ok(t, db0.Close()) - newSz := fileSize(path) - if newSz == 0 { - t.Fatalf("unexpected new file size: %d", newSz) - } - - // Compare the original size with the new size. - if sz != newSz { - t.Fatalf("unexpected file growth: %d => %d", sz, newSz) - } -} - -// Ensure that a re-opened database is consistent. -func TestOpen_Check(t *testing.T) { - path := tempfile() - defer os.Remove(path) - - db, err := bolt.Open(path, 0666, nil) - ok(t, err) - ok(t, db.View(func(tx *bolt.Tx) error { return <-tx.Check() })) - db.Close() - - db, err = bolt.Open(path, 0666, nil) - ok(t, err) - ok(t, db.View(func(tx *bolt.Tx) error { return <-tx.Check() })) - db.Close() -} - -// Ensure that the database returns an error if the file handle cannot be open. -func TestDB_Open_FileError(t *testing.T) { - path := tempfile() - defer os.Remove(path) - - _, err := bolt.Open(path+"/youre-not-my-real-parent", 0666, nil) - assert(t, err.(*os.PathError) != nil, "") - equals(t, path+"/youre-not-my-real-parent", err.(*os.PathError).Path) - equals(t, "open", err.(*os.PathError).Op) -} - -// Ensure that write errors to the meta file handler during initialization are returned. -func TestDB_Open_MetaInitWriteError(t *testing.T) { - t.Skip("pending") -} - -// Ensure that a database that is too small returns an error. -func TestDB_Open_FileTooSmall(t *testing.T) { - path := tempfile() - defer os.Remove(path) - - db, err := bolt.Open(path, 0666, nil) - ok(t, err) - db.Close() - - // corrupt the database - ok(t, os.Truncate(path, int64(os.Getpagesize()))) - - db, err = bolt.Open(path, 0666, nil) - equals(t, errors.New("file size too small"), err) -} - -// Ensure that a database can be opened in read-only mode by multiple processes -// and that a database can not be opened in read-write mode and in read-only -// mode at the same time. -func TestOpen_ReadOnly(t *testing.T) { - bucket, key, value := []byte(`bucket`), []byte(`key`), []byte(`value`) - - path := tempfile() - defer os.Remove(path) - - // Open in read-write mode. - db, err := bolt.Open(path, 0666, nil) - ok(t, db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket(bucket) - if err != nil { - return err - } - return b.Put(key, value) - })) - assert(t, db != nil, "") - assert(t, !db.IsReadOnly(), "") - ok(t, err) - ok(t, db.Close()) - - // Open in read-only mode. 
- db0, err := bolt.Open(path, 0666, &bolt.Options{ReadOnly: true}) - ok(t, err) - defer db0.Close() - - // Opening in read-write mode should return an error. - _, err = bolt.Open(path, 0666, &bolt.Options{Timeout: time.Millisecond * 100}) - assert(t, err != nil, "") - - // And again (in read-only mode). - db1, err := bolt.Open(path, 0666, &bolt.Options{ReadOnly: true}) - ok(t, err) - defer db1.Close() - - // Verify both read-only databases are accessible. - for _, db := range []*bolt.DB{db0, db1} { - // Verify is is in read only mode indeed. - assert(t, db.IsReadOnly(), "") - - // Read-only databases should not allow updates. - assert(t, - bolt.ErrDatabaseReadOnly == db.Update(func(*bolt.Tx) error { - panic(`should never get here`) - }), - "") - - // Read-only databases should not allow beginning writable txns. - _, err = db.Begin(true) - assert(t, bolt.ErrDatabaseReadOnly == err, "") - - // Verify the data. - ok(t, db.View(func(tx *bolt.Tx) error { - b := tx.Bucket(bucket) - if b == nil { - return fmt.Errorf("expected bucket `%s`", string(bucket)) - } - - got := string(b.Get(key)) - expected := string(value) - if got != expected { - return fmt.Errorf("expected `%s`, got `%s`", expected, got) - } - return nil - })) - } -} - -// TODO(benbjohnson): Test corruption at every byte of the first two pages. - -// Ensure that a database cannot open a transaction when it's not open. -func TestDB_Begin_DatabaseNotOpen(t *testing.T) { - var db bolt.DB - tx, err := db.Begin(false) - assert(t, tx == nil, "") - equals(t, err, bolt.ErrDatabaseNotOpen) -} - -// Ensure that a read-write transaction can be retrieved. -func TestDB_BeginRW(t *testing.T) { - db := NewTestDB() - defer db.Close() - tx, err := db.Begin(true) - assert(t, tx != nil, "") - ok(t, err) - assert(t, tx.DB() == db.DB, "") - equals(t, tx.Writable(), true) - ok(t, tx.Commit()) -} - -// Ensure that opening a transaction while the DB is closed returns an error. -func TestDB_BeginRW_Closed(t *testing.T) { - var db bolt.DB - tx, err := db.Begin(true) - equals(t, err, bolt.ErrDatabaseNotOpen) - assert(t, tx == nil, "") -} - -func TestDB_Close_PendingTx_RW(t *testing.T) { testDB_Close_PendingTx(t, true) } -func TestDB_Close_PendingTx_RO(t *testing.T) { testDB_Close_PendingTx(t, false) } - -// Ensure that a database cannot close while transactions are open. -func testDB_Close_PendingTx(t *testing.T, writable bool) { - db := NewTestDB() - defer db.Close() - - // Start transaction. - tx, err := db.Begin(true) - if err != nil { - t.Fatal(err) - } - - // Open update in separate goroutine. - done := make(chan struct{}) - go func() { - db.Close() - close(done) - }() - - // Ensure database hasn't closed. - time.Sleep(100 * time.Millisecond) - select { - case <-done: - t.Fatal("database closed too early") - default: - } - - // Commit transaction. - if err := tx.Commit(); err != nil { - t.Fatal(err) - } - - // Ensure database closed now. - time.Sleep(100 * time.Millisecond) - select { - case <-done: - default: - t.Fatal("database did not close") - } -} - -// Ensure a database can provide a transactional block. 
-func TestDB_Update(t *testing.T) { - db := NewTestDB() - defer db.Close() - err := db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - b.Put([]byte("foo"), []byte("bar")) - b.Put([]byte("baz"), []byte("bat")) - b.Delete([]byte("foo")) - return nil - }) - ok(t, err) - err = db.View(func(tx *bolt.Tx) error { - assert(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")) == nil, "") - equals(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz"))) - return nil - }) - ok(t, err) -} - -// Ensure a closed database returns an error while running a transaction block -func TestDB_Update_Closed(t *testing.T) { - var db bolt.DB - err := db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - return nil - }) - equals(t, err, bolt.ErrDatabaseNotOpen) -} - -// Ensure a panic occurs while trying to commit a managed transaction. -func TestDB_Update_ManualCommit(t *testing.T) { - db := NewTestDB() - defer db.Close() - - var ok bool - db.Update(func(tx *bolt.Tx) error { - func() { - defer func() { - if r := recover(); r != nil { - ok = true - } - }() - tx.Commit() - }() - return nil - }) - assert(t, ok, "expected panic") -} - -// Ensure a panic occurs while trying to rollback a managed transaction. -func TestDB_Update_ManualRollback(t *testing.T) { - db := NewTestDB() - defer db.Close() - - var ok bool - db.Update(func(tx *bolt.Tx) error { - func() { - defer func() { - if r := recover(); r != nil { - ok = true - } - }() - tx.Rollback() - }() - return nil - }) - assert(t, ok, "expected panic") -} - -// Ensure a panic occurs while trying to commit a managed transaction. -func TestDB_View_ManualCommit(t *testing.T) { - db := NewTestDB() - defer db.Close() - - var ok bool - db.Update(func(tx *bolt.Tx) error { - func() { - defer func() { - if r := recover(); r != nil { - ok = true - } - }() - tx.Commit() - }() - return nil - }) - assert(t, ok, "expected panic") -} - -// Ensure a panic occurs while trying to rollback a managed transaction. -func TestDB_View_ManualRollback(t *testing.T) { - db := NewTestDB() - defer db.Close() - - var ok bool - db.Update(func(tx *bolt.Tx) error { - func() { - defer func() { - if r := recover(); r != nil { - ok = true - } - }() - tx.Rollback() - }() - return nil - }) - assert(t, ok, "expected panic") -} - -// Ensure a write transaction that panics does not hold open locks. -func TestDB_Update_Panic(t *testing.T) { - db := NewTestDB() - defer db.Close() - - func() { - defer func() { - if r := recover(); r != nil { - t.Log("recover: update", r) - } - }() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - panic("omg") - }) - }() - - // Verify we can update again. - err := db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) - ok(t, err) - - // Verify that our change persisted. - err = db.Update(func(tx *bolt.Tx) error { - assert(t, tx.Bucket([]byte("widgets")) != nil, "") - return nil - }) -} - -// Ensure a database can return an error through a read-only transactional block. -func TestDB_View_Error(t *testing.T) { - db := NewTestDB() - defer db.Close() - err := db.View(func(tx *bolt.Tx) error { - return errors.New("xxx") - }) - equals(t, errors.New("xxx"), err) -} - -// Ensure a read transaction that panics does not hold open locks. 
-func TestDB_View_Panic(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - return nil - }) - - func() { - defer func() { - if r := recover(); r != nil { - t.Log("recover: view", r) - } - }() - db.View(func(tx *bolt.Tx) error { - assert(t, tx.Bucket([]byte("widgets")) != nil, "") - panic("omg") - }) - }() - - // Verify that we can still use read transactions. - db.View(func(tx *bolt.Tx) error { - assert(t, tx.Bucket([]byte("widgets")) != nil, "") - return nil - }) -} - -// Ensure that an error is returned when a database write fails. -func TestDB_Commit_WriteFail(t *testing.T) { - t.Skip("pending") // TODO(benbjohnson) -} - -// Ensure that DB stats can be returned. -func TestDB_Stats(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) - stats := db.Stats() - equals(t, 2, stats.TxStats.PageCount) - equals(t, 0, stats.FreePageN) - equals(t, 2, stats.PendingPageN) -} - -// Ensure that database pages are in expected order and type. -func TestDB_Consistency(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) - - for i := 0; i < 10; i++ { - db.Update(func(tx *bolt.Tx) error { - ok(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) - return nil - }) - } - db.Update(func(tx *bolt.Tx) error { - p, _ := tx.Page(0) - assert(t, p != nil, "") - equals(t, "meta", p.Type) - - p, _ = tx.Page(1) - assert(t, p != nil, "") - equals(t, "meta", p.Type) - - p, _ = tx.Page(2) - assert(t, p != nil, "") - equals(t, "free", p.Type) - - p, _ = tx.Page(3) - assert(t, p != nil, "") - equals(t, "free", p.Type) - - p, _ = tx.Page(4) - assert(t, p != nil, "") - equals(t, "leaf", p.Type) - - p, _ = tx.Page(5) - assert(t, p != nil, "") - equals(t, "freelist", p.Type) - - p, _ = tx.Page(6) - assert(t, p == nil, "") - return nil - }) -} - -// Ensure that DB stats can be substracted from one another. -func TestDBStats_Sub(t *testing.T) { - var a, b bolt.Stats - a.TxStats.PageCount = 3 - a.FreePageN = 4 - b.TxStats.PageCount = 10 - b.FreePageN = 14 - diff := b.Sub(&a) - equals(t, 7, diff.TxStats.PageCount) - // free page stats are copied from the receiver and not subtracted - equals(t, 14, diff.FreePageN) -} - -func ExampleDB_Update() { - // Open the database. - db, _ := bolt.Open(tempfile(), 0666, nil) - defer os.Remove(db.Path()) - defer db.Close() - - // Execute several commands within a write transaction. - err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - return err - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - return err - } - return nil - }) - - // If our transactional block didn't return an error then our data is saved. - if err == nil { - db.View(func(tx *bolt.Tx) error { - value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - fmt.Printf("The value of 'foo' is: %s\n", value) - return nil - }) - } - - // Output: - // The value of 'foo' is: bar -} - -func ExampleDB_View() { - // Open the database. - db, _ := bolt.Open(tempfile(), 0666, nil) - defer os.Remove(db.Path()) - defer db.Close() - - // Insert data into a bucket. 
- db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("people")) - b := tx.Bucket([]byte("people")) - b.Put([]byte("john"), []byte("doe")) - b.Put([]byte("susy"), []byte("que")) - return nil - }) - - // Access data from within a read-only transactional block. - db.View(func(tx *bolt.Tx) error { - v := tx.Bucket([]byte("people")).Get([]byte("john")) - fmt.Printf("John's last name is %s.\n", v) - return nil - }) - - // Output: - // John's last name is doe. -} - -func ExampleDB_Begin_ReadOnly() { - // Open the database. - db, _ := bolt.Open(tempfile(), 0666, nil) - defer os.Remove(db.Path()) - defer db.Close() - - // Create a bucket. - db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) - - // Create several keys in a transaction. - tx, _ := db.Begin(true) - b := tx.Bucket([]byte("widgets")) - b.Put([]byte("john"), []byte("blue")) - b.Put([]byte("abby"), []byte("red")) - b.Put([]byte("zephyr"), []byte("purple")) - tx.Commit() - - // Iterate over the values in sorted key order. - tx, _ = db.Begin(false) - c := tx.Bucket([]byte("widgets")).Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - fmt.Printf("%s likes %s\n", k, v) - } - tx.Rollback() - - // Output: - // abby likes red - // john likes blue - // zephyr likes purple -} - -// TestDB represents a wrapper around a Bolt DB to handle temporary file -// creation and automatic cleanup on close. -type TestDB struct { - *bolt.DB -} - -// NewTestDB returns a new instance of TestDB. -func NewTestDB() *TestDB { - db, err := bolt.Open(tempfile(), 0666, nil) - if err != nil { - panic("cannot open db: " + err.Error()) - } - return &TestDB{db} -} - -// MustView executes a read-only function. Panic on error. -func (db *TestDB) MustView(fn func(tx *bolt.Tx) error) { - if err := db.DB.View(func(tx *bolt.Tx) error { - return fn(tx) - }); err != nil { - panic(err.Error()) - } -} - -// MustUpdate executes a read-write function. Panic on error. -func (db *TestDB) MustUpdate(fn func(tx *bolt.Tx) error) { - if err := db.DB.View(func(tx *bolt.Tx) error { - return fn(tx) - }); err != nil { - panic(err.Error()) - } -} - -// MustCreateBucket creates a new bucket. Panic on error. -func (db *TestDB) MustCreateBucket(name []byte) { - if err := db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte(name)) - return err - }); err != nil { - panic(err.Error()) - } -} - -// Close closes the database and deletes the underlying file. -func (db *TestDB) Close() { - // Log statistics. - if *statsFlag { - db.PrintStats() - } - - // Check database consistency after every test. - db.MustCheck() - - // Close database and remove file. - defer os.Remove(db.Path()) - db.DB.Close() -} - -// PrintStats prints the database stats -func (db *TestDB) PrintStats() { - var stats = db.Stats() - fmt.Printf("[db] %-20s %-20s %-20s\n", - fmt.Sprintf("pg(%d/%d)", stats.TxStats.PageCount, stats.TxStats.PageAlloc), - fmt.Sprintf("cur(%d)", stats.TxStats.CursorCount), - fmt.Sprintf("node(%d/%d)", stats.TxStats.NodeCount, stats.TxStats.NodeDeref), - ) - fmt.Printf(" %-20s %-20s %-20s\n", - fmt.Sprintf("rebal(%d/%v)", stats.TxStats.Rebalance, truncDuration(stats.TxStats.RebalanceTime)), - fmt.Sprintf("spill(%d/%v)", stats.TxStats.Spill, truncDuration(stats.TxStats.SpillTime)), - fmt.Sprintf("w(%d/%v)", stats.TxStats.Write, truncDuration(stats.TxStats.WriteTime)), - ) -} - -// MustCheck runs a consistency check on the database and panics if any errors are found. 
-func (db *TestDB) MustCheck() { - db.Update(func(tx *bolt.Tx) error { - // Collect all the errors. - var errors []error - for err := range tx.Check() { - errors = append(errors, err) - if len(errors) > 10 { - break - } - } - - // If errors occurred, copy the DB and print the errors. - if len(errors) > 0 { - var path = tempfile() - tx.CopyFile(path, 0600) - - // Print errors. - fmt.Print("\n\n") - fmt.Printf("consistency check failed (%d errors)\n", len(errors)) - for _, err := range errors { - fmt.Println(err) - } - fmt.Println("") - fmt.Println("db saved to:") - fmt.Println(path) - fmt.Print("\n\n") - os.Exit(-1) - } - - return nil - }) -} - -// CopyTempFile copies a database to a temporary file. -func (db *TestDB) CopyTempFile() { - path := tempfile() - db.View(func(tx *bolt.Tx) error { return tx.CopyFile(path, 0600) }) - fmt.Println("db copied to: ", path) -} - -// tempfile returns a temporary file path. -func tempfile() string { - f, _ := ioutil.TempFile("", "bolt-") - f.Close() - os.Remove(f.Name()) - return f.Name() -} - -// mustContainKeys checks that a bucket contains a given set of keys. -func mustContainKeys(b *bolt.Bucket, m map[string]string) { - found := make(map[string]string) - b.ForEach(func(k, _ []byte) error { - found[string(k)] = "" - return nil - }) - - // Check for keys found in bucket that shouldn't be there. - var keys []string - for k, _ := range found { - if _, ok := m[string(k)]; !ok { - keys = append(keys, k) - } - } - if len(keys) > 0 { - sort.Strings(keys) - panic(fmt.Sprintf("keys found(%d): %s", len(keys), strings.Join(keys, ","))) - } - - // Check for keys not found in bucket that should be there. - for k, _ := range m { - if _, ok := found[string(k)]; !ok { - keys = append(keys, k) - } - } - if len(keys) > 0 { - sort.Strings(keys) - panic(fmt.Sprintf("keys not found(%d): %s", len(keys), strings.Join(keys, ","))) - } -} - -func trunc(b []byte, length int) []byte { - if length < len(b) { - return b[:length] - } - return b -} - -func truncDuration(d time.Duration) string { - return regexp.MustCompile(`^(\d+)(\.\d+)`).ReplaceAllString(d.String(), "$1") -} - -func fileSize(path string) int64 { - fi, err := os.Stat(path) - if err != nil { - return 0 - } - return fi.Size() -} - -func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) } -func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) } - -// u64tob converts a uint64 into an 8-byte slice. -func u64tob(v uint64) []byte { - b := make([]byte, 8) - binary.BigEndian.PutUint64(b, v) - return b -} - -// btou64 converts an 8-byte slice into an uint64. -func btou64(b []byte) uint64 { return binary.BigEndian.Uint64(b) } diff --git a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/freelist_test.go b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/freelist_test.go deleted file mode 100644 index 8caeab2ec4..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/freelist_test.go +++ /dev/null @@ -1,156 +0,0 @@ -package bolt - -import ( - "math/rand" - "reflect" - "sort" - "testing" - "unsafe" -) - -// Ensure that a page is added to a transaction's freelist. -func TestFreelist_free(t *testing.T) { - f := newFreelist() - f.free(100, &page{id: 12}) - if !reflect.DeepEqual([]pgid{12}, f.pending[100]) { - t.Fatalf("exp=%v; got=%v", []pgid{12}, f.pending[100]) - } -} - -// Ensure that a page and its overflow is added to a transaction's freelist. 
-func TestFreelist_free_overflow(t *testing.T) { - f := newFreelist() - f.free(100, &page{id: 12, overflow: 3}) - if exp := []pgid{12, 13, 14, 15}; !reflect.DeepEqual(exp, f.pending[100]) { - t.Fatalf("exp=%v; got=%v", exp, f.pending[100]) - } -} - -// Ensure that a transaction's free pages can be released. -func TestFreelist_release(t *testing.T) { - f := newFreelist() - f.free(100, &page{id: 12, overflow: 1}) - f.free(100, &page{id: 9}) - f.free(102, &page{id: 39}) - f.release(100) - f.release(101) - if exp := []pgid{9, 12, 13}; !reflect.DeepEqual(exp, f.ids) { - t.Fatalf("exp=%v; got=%v", exp, f.ids) - } - - f.release(102) - if exp := []pgid{9, 12, 13, 39}; !reflect.DeepEqual(exp, f.ids) { - t.Fatalf("exp=%v; got=%v", exp, f.ids) - } -} - -// Ensure that a freelist can find contiguous blocks of pages. -func TestFreelist_allocate(t *testing.T) { - f := &freelist{ids: []pgid{3, 4, 5, 6, 7, 9, 12, 13, 18}} - if id := int(f.allocate(3)); id != 3 { - t.Fatalf("exp=3; got=%v", id) - } - if id := int(f.allocate(1)); id != 6 { - t.Fatalf("exp=6; got=%v", id) - } - if id := int(f.allocate(3)); id != 0 { - t.Fatalf("exp=0; got=%v", id) - } - if id := int(f.allocate(2)); id != 12 { - t.Fatalf("exp=12; got=%v", id) - } - if id := int(f.allocate(1)); id != 7 { - t.Fatalf("exp=7; got=%v", id) - } - if id := int(f.allocate(0)); id != 0 { - t.Fatalf("exp=0; got=%v", id) - } - if id := int(f.allocate(0)); id != 0 { - t.Fatalf("exp=0; got=%v", id) - } - if exp := []pgid{9, 18}; !reflect.DeepEqual(exp, f.ids) { - t.Fatalf("exp=%v; got=%v", exp, f.ids) - } - - if id := int(f.allocate(1)); id != 9 { - t.Fatalf("exp=9; got=%v", id) - } - if id := int(f.allocate(1)); id != 18 { - t.Fatalf("exp=18; got=%v", id) - } - if id := int(f.allocate(1)); id != 0 { - t.Fatalf("exp=0; got=%v", id) - } - if exp := []pgid{}; !reflect.DeepEqual(exp, f.ids) { - t.Fatalf("exp=%v; got=%v", exp, f.ids) - } -} - -// Ensure that a freelist can deserialize from a freelist page. -func TestFreelist_read(t *testing.T) { - // Create a page. - var buf [4096]byte - page := (*page)(unsafe.Pointer(&buf[0])) - page.flags = freelistPageFlag - page.count = 2 - - // Insert 2 page ids. - ids := (*[3]pgid)(unsafe.Pointer(&page.ptr)) - ids[0] = 23 - ids[1] = 50 - - // Deserialize page into a freelist. - f := newFreelist() - f.read(page) - - // Ensure that there are two page ids in the freelist. - if exp := []pgid{23, 50}; !reflect.DeepEqual(exp, f.ids) { - t.Fatalf("exp=%v; got=%v", exp, f.ids) - } -} - -// Ensure that a freelist can serialize into a freelist page. -func TestFreelist_write(t *testing.T) { - // Create a freelist and write it to a page. - var buf [4096]byte - f := &freelist{ids: []pgid{12, 39}, pending: make(map[txid][]pgid)} - f.pending[100] = []pgid{28, 11} - f.pending[101] = []pgid{3} - p := (*page)(unsafe.Pointer(&buf[0])) - f.write(p) - - // Read the page back out. - f2 := newFreelist() - f2.read(p) - - // Ensure that the freelist is correct. - // All pages should be present and in reverse order. 
- if exp := []pgid{3, 11, 12, 28, 39}; !reflect.DeepEqual(exp, f2.ids) { - t.Fatalf("exp=%v; got=%v", exp, f2.ids) - } -} - -func Benchmark_FreelistRelease10K(b *testing.B) { benchmark_FreelistRelease(b, 10000) } -func Benchmark_FreelistRelease100K(b *testing.B) { benchmark_FreelistRelease(b, 100000) } -func Benchmark_FreelistRelease1000K(b *testing.B) { benchmark_FreelistRelease(b, 1000000) } -func Benchmark_FreelistRelease10000K(b *testing.B) { benchmark_FreelistRelease(b, 10000000) } - -func benchmark_FreelistRelease(b *testing.B, size int) { - ids := randomPgids(size) - pending := randomPgids(len(ids) / 400) - b.ResetTimer() - for i := 0; i < b.N; i++ { - f := &freelist{ids: ids, pending: map[txid][]pgid{1: pending}} - f.release(1) - } -} - -func randomPgids(n int) []pgid { - rand.Seed(42) - pgids := make(pgids, n) - for i := range pgids { - pgids[i] = pgid(rand.Int63()) - } - sort.Sort(pgids) - return pgids -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/node.go b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/node.go index c9fb21c731..e9d64af81e 100644 --- a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/node.go +++ b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/node.go @@ -463,43 +463,6 @@ func (n *node) rebalance() { target = n.prevSibling() } - // If target node has extra nodes then just move one over. - if target.numChildren() > target.minKeys() { - if useNextSibling { - // Reparent and move node. - if child, ok := n.bucket.nodes[target.inodes[0].pgid]; ok { - child.parent.removeChild(child) - child.parent = n - child.parent.children = append(child.parent.children, child) - } - n.inodes = append(n.inodes, target.inodes[0]) - target.inodes = target.inodes[1:] - - // Update target key on parent. - target.parent.put(target.key, target.inodes[0].key, nil, target.pgid, 0) - target.key = target.inodes[0].key - _assert(len(target.key) > 0, "rebalance(1): zero-length node key") - } else { - // Reparent and move node. - if child, ok := n.bucket.nodes[target.inodes[len(target.inodes)-1].pgid]; ok { - child.parent.removeChild(child) - child.parent = n - child.parent.children = append(child.parent.children, child) - } - n.inodes = append(n.inodes, inode{}) - copy(n.inodes[1:], n.inodes) - n.inodes[0] = target.inodes[len(target.inodes)-1] - target.inodes = target.inodes[:len(target.inodes)-1] - } - - // Update parent key for node. - n.parent.put(n.key, n.inodes[0].key, nil, n.pgid, 0) - n.key = n.inodes[0].key - _assert(len(n.key) > 0, "rebalance(2): zero-length node key") - - return - } - // If both this node and the target node are too small then merge them. if useNextSibling { // Reparent all child nodes being moved. diff --git a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/node_test.go b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/node_test.go deleted file mode 100644 index fa5d10f999..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/node_test.go +++ /dev/null @@ -1,156 +0,0 @@ -package bolt - -import ( - "testing" - "unsafe" -) - -// Ensure that a node can insert a key/value. 
-func TestNode_put(t *testing.T) { - n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{meta: &meta{pgid: 1}}}} - n.put([]byte("baz"), []byte("baz"), []byte("2"), 0, 0) - n.put([]byte("foo"), []byte("foo"), []byte("0"), 0, 0) - n.put([]byte("bar"), []byte("bar"), []byte("1"), 0, 0) - n.put([]byte("foo"), []byte("foo"), []byte("3"), 0, leafPageFlag) - - if len(n.inodes) != 3 { - t.Fatalf("exp=3; got=%d", len(n.inodes)) - } - if k, v := n.inodes[0].key, n.inodes[0].value; string(k) != "bar" || string(v) != "1" { - t.Fatalf("exp=; got=<%s,%s>", k, v) - } - if k, v := n.inodes[1].key, n.inodes[1].value; string(k) != "baz" || string(v) != "2" { - t.Fatalf("exp=; got=<%s,%s>", k, v) - } - if k, v := n.inodes[2].key, n.inodes[2].value; string(k) != "foo" || string(v) != "3" { - t.Fatalf("exp=; got=<%s,%s>", k, v) - } - if n.inodes[2].flags != uint32(leafPageFlag) { - t.Fatalf("not a leaf: %d", n.inodes[2].flags) - } -} - -// Ensure that a node can deserialize from a leaf page. -func TestNode_read_LeafPage(t *testing.T) { - // Create a page. - var buf [4096]byte - page := (*page)(unsafe.Pointer(&buf[0])) - page.flags = leafPageFlag - page.count = 2 - - // Insert 2 elements at the beginning. sizeof(leafPageElement) == 16 - nodes := (*[3]leafPageElement)(unsafe.Pointer(&page.ptr)) - nodes[0] = leafPageElement{flags: 0, pos: 32, ksize: 3, vsize: 4} // pos = sizeof(leafPageElement) * 2 - nodes[1] = leafPageElement{flags: 0, pos: 23, ksize: 10, vsize: 3} // pos = sizeof(leafPageElement) + 3 + 4 - - // Write data for the nodes at the end. - data := (*[4096]byte)(unsafe.Pointer(&nodes[2])) - copy(data[:], []byte("barfooz")) - copy(data[7:], []byte("helloworldbye")) - - // Deserialize page into a leaf. - n := &node{} - n.read(page) - - // Check that there are two inodes with correct data. - if !n.isLeaf { - t.Fatal("expected leaf") - } - if len(n.inodes) != 2 { - t.Fatalf("exp=2; got=%d", len(n.inodes)) - } - if k, v := n.inodes[0].key, n.inodes[0].value; string(k) != "bar" || string(v) != "fooz" { - t.Fatalf("exp=; got=<%s,%s>", k, v) - } - if k, v := n.inodes[1].key, n.inodes[1].value; string(k) != "helloworld" || string(v) != "bye" { - t.Fatalf("exp=; got=<%s,%s>", k, v) - } -} - -// Ensure that a node can serialize into a leaf page. -func TestNode_write_LeafPage(t *testing.T) { - // Create a node. - n := &node{isLeaf: true, inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}} - n.put([]byte("susy"), []byte("susy"), []byte("que"), 0, 0) - n.put([]byte("ricki"), []byte("ricki"), []byte("lake"), 0, 0) - n.put([]byte("john"), []byte("john"), []byte("johnson"), 0, 0) - - // Write it to a page. - var buf [4096]byte - p := (*page)(unsafe.Pointer(&buf[0])) - n.write(p) - - // Read the page back in. - n2 := &node{} - n2.read(p) - - // Check that the two pages are the same. - if len(n2.inodes) != 3 { - t.Fatalf("exp=3; got=%d", len(n2.inodes)) - } - if k, v := n2.inodes[0].key, n2.inodes[0].value; string(k) != "john" || string(v) != "johnson" { - t.Fatalf("exp=; got=<%s,%s>", k, v) - } - if k, v := n2.inodes[1].key, n2.inodes[1].value; string(k) != "ricki" || string(v) != "lake" { - t.Fatalf("exp=; got=<%s,%s>", k, v) - } - if k, v := n2.inodes[2].key, n2.inodes[2].value; string(k) != "susy" || string(v) != "que" { - t.Fatalf("exp=; got=<%s,%s>", k, v) - } -} - -// Ensure that a node can split into appropriate subgroups. -func TestNode_split(t *testing.T) { - // Create a node. 
- n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}} - n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000003"), []byte("00000003"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000004"), []byte("00000004"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000005"), []byte("00000005"), []byte("0123456701234567"), 0, 0) - - // Split between 2 & 3. - n.split(100) - - var parent = n.parent - if len(parent.children) != 2 { - t.Fatalf("exp=2; got=%d", len(parent.children)) - } - if len(parent.children[0].inodes) != 2 { - t.Fatalf("exp=2; got=%d", len(parent.children[0].inodes)) - } - if len(parent.children[1].inodes) != 3 { - t.Fatalf("exp=3; got=%d", len(parent.children[1].inodes)) - } -} - -// Ensure that a page with the minimum number of inodes just returns a single node. -func TestNode_split_MinKeys(t *testing.T) { - // Create a node. - n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}} - n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0) - - // Split. - n.split(20) - if n.parent != nil { - t.Fatalf("expected nil parent") - } -} - -// Ensure that a node that has keys that all fit on a page just returns one leaf. -func TestNode_split_SinglePage(t *testing.T) { - // Create a node. - n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}} - n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000003"), []byte("00000003"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000004"), []byte("00000004"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000005"), []byte("00000005"), []byte("0123456701234567"), 0, 0) - - // Split. - n.split(4096) - if n.parent != nil { - t.Fatalf("expected nil parent") - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/page_test.go b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/page_test.go deleted file mode 100644 index 59f4a30ed8..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/page_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package bolt - -import ( - "reflect" - "sort" - "testing" - "testing/quick" -) - -// Ensure that the page type can be returned in human readable format. -func TestPage_typ(t *testing.T) { - if typ := (&page{flags: branchPageFlag}).typ(); typ != "branch" { - t.Fatalf("exp=branch; got=%v", typ) - } - if typ := (&page{flags: leafPageFlag}).typ(); typ != "leaf" { - t.Fatalf("exp=leaf; got=%v", typ) - } - if typ := (&page{flags: metaPageFlag}).typ(); typ != "meta" { - t.Fatalf("exp=meta; got=%v", typ) - } - if typ := (&page{flags: freelistPageFlag}).typ(); typ != "freelist" { - t.Fatalf("exp=freelist; got=%v", typ) - } - if typ := (&page{flags: 20000}).typ(); typ != "unknown<4e20>" { - t.Fatalf("exp=unknown<4e20>; got=%v", typ) - } -} - -// Ensure that the hexdump debugging function doesn't blow up. 
-func TestPage_dump(t *testing.T) { - (&page{id: 256}).hexdump(16) -} - -func TestPgids_merge(t *testing.T) { - a := pgids{4, 5, 6, 10, 11, 12, 13, 27} - b := pgids{1, 3, 8, 9, 25, 30} - c := a.merge(b) - if !reflect.DeepEqual(c, pgids{1, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 25, 27, 30}) { - t.Errorf("mismatch: %v", c) - } - - a = pgids{4, 5, 6, 10, 11, 12, 13, 27, 35, 36} - b = pgids{8, 9, 25, 30} - c = a.merge(b) - if !reflect.DeepEqual(c, pgids{4, 5, 6, 8, 9, 10, 11, 12, 13, 25, 27, 30, 35, 36}) { - t.Errorf("mismatch: %v", c) - } -} - -func TestPgids_merge_quick(t *testing.T) { - if err := quick.Check(func(a, b pgids) bool { - // Sort incoming lists. - sort.Sort(a) - sort.Sort(b) - - // Merge the two lists together. - got := a.merge(b) - - // The expected value should be the two lists combined and sorted. - exp := append(a, b...) - sort.Sort(exp) - - if !reflect.DeepEqual(exp, got) { - t.Errorf("\nexp=%+v\ngot=%+v\n", exp, got) - return false - } - - return true - }, nil); err != nil { - t.Fatal(err) - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/quick_test.go b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/quick_test.go deleted file mode 100644 index 4da581775a..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/quick_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package bolt_test - -import ( - "bytes" - "flag" - "fmt" - "math/rand" - "os" - "reflect" - "testing/quick" - "time" -) - -// testing/quick defaults to 5 iterations and a random seed. -// You can override these settings from the command line: -// -// -quick.count The number of iterations to perform. -// -quick.seed The seed to use for randomizing. -// -quick.maxitems The maximum number of items to insert into a DB. -// -quick.maxksize The maximum size of a key. -// -quick.maxvsize The maximum size of a value. 
-// - -var qcount, qseed, qmaxitems, qmaxksize, qmaxvsize int - -func init() { - flag.IntVar(&qcount, "quick.count", 5, "") - flag.IntVar(&qseed, "quick.seed", int(time.Now().UnixNano())%100000, "") - flag.IntVar(&qmaxitems, "quick.maxitems", 1000, "") - flag.IntVar(&qmaxksize, "quick.maxksize", 1024, "") - flag.IntVar(&qmaxvsize, "quick.maxvsize", 1024, "") - flag.Parse() - fmt.Fprintln(os.Stderr, "seed:", qseed) - fmt.Fprintf(os.Stderr, "quick settings: count=%v, items=%v, ksize=%v, vsize=%v\n", qcount, qmaxitems, qmaxksize, qmaxvsize) -} - -func qconfig() *quick.Config { - return &quick.Config{ - MaxCount: qcount, - Rand: rand.New(rand.NewSource(int64(qseed))), - } -} - -type testdata []testdataitem - -func (t testdata) Len() int { return len(t) } -func (t testdata) Swap(i, j int) { t[i], t[j] = t[j], t[i] } -func (t testdata) Less(i, j int) bool { return bytes.Compare(t[i].Key, t[j].Key) == -1 } - -func (t testdata) Generate(rand *rand.Rand, size int) reflect.Value { - n := rand.Intn(qmaxitems-1) + 1 - items := make(testdata, n) - for i := 0; i < n; i++ { - item := &items[i] - item.Key = randByteSlice(rand, 1, qmaxksize) - item.Value = randByteSlice(rand, 0, qmaxvsize) - } - return reflect.ValueOf(items) -} - -type revtestdata []testdataitem - -func (t revtestdata) Len() int { return len(t) } -func (t revtestdata) Swap(i, j int) { t[i], t[j] = t[j], t[i] } -func (t revtestdata) Less(i, j int) bool { return bytes.Compare(t[i].Key, t[j].Key) == 1 } - -type testdataitem struct { - Key []byte - Value []byte -} - -func randByteSlice(rand *rand.Rand, minSize, maxSize int) []byte { - n := rand.Intn(maxSize-minSize) + minSize - b := make([]byte, n) - for i := 0; i < n; i++ { - b[i] = byte(rand.Intn(255)) - } - return b -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/simulation_test.go b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/simulation_test.go deleted file mode 100644 index ceb8baef0c..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/simulation_test.go +++ /dev/null @@ -1,327 +0,0 @@ -package bolt_test - -import ( - "bytes" - "fmt" - "math/rand" - "sync" - "testing" - - "github.com/boltdb/bolt" -) - -func TestSimulate_1op_1p(t *testing.T) { testSimulate(t, 100, 1) } -func TestSimulate_10op_1p(t *testing.T) { testSimulate(t, 10, 1) } -func TestSimulate_100op_1p(t *testing.T) { testSimulate(t, 100, 1) } -func TestSimulate_1000op_1p(t *testing.T) { testSimulate(t, 1000, 1) } -func TestSimulate_10000op_1p(t *testing.T) { testSimulate(t, 10000, 1) } - -func TestSimulate_10op_10p(t *testing.T) { testSimulate(t, 10, 10) } -func TestSimulate_100op_10p(t *testing.T) { testSimulate(t, 100, 10) } -func TestSimulate_1000op_10p(t *testing.T) { testSimulate(t, 1000, 10) } -func TestSimulate_10000op_10p(t *testing.T) { testSimulate(t, 10000, 10) } - -func TestSimulate_100op_100p(t *testing.T) { testSimulate(t, 100, 100) } -func TestSimulate_1000op_100p(t *testing.T) { testSimulate(t, 1000, 100) } -func TestSimulate_10000op_100p(t *testing.T) { testSimulate(t, 10000, 100) } - -func TestSimulate_10000op_1000p(t *testing.T) { testSimulate(t, 10000, 1000) } - -// Randomly generate operations on a given database with multiple clients to ensure consistency and thread safety. -func testSimulate(t *testing.T, threadCount, parallelism int) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - - rand.Seed(int64(qseed)) - - // A list of operations that readers and writers can perform. 
- var readerHandlers = []simulateHandler{simulateGetHandler} - var writerHandlers = []simulateHandler{simulateGetHandler, simulatePutHandler} - - var versions = make(map[int]*QuickDB) - versions[1] = NewQuickDB() - - db := NewTestDB() - defer db.Close() - - var mutex sync.Mutex - - // Run n threads in parallel, each with their own operation. - var wg sync.WaitGroup - var threads = make(chan bool, parallelism) - var i int - for { - threads <- true - wg.Add(1) - writable := ((rand.Int() % 100) < 20) // 20% writers - - // Choose an operation to execute. - var handler simulateHandler - if writable { - handler = writerHandlers[rand.Intn(len(writerHandlers))] - } else { - handler = readerHandlers[rand.Intn(len(readerHandlers))] - } - - // Execute a thread for the given operation. - go func(writable bool, handler simulateHandler) { - defer wg.Done() - - // Start transaction. - tx, err := db.Begin(writable) - if err != nil { - t.Fatal("tx begin: ", err) - } - - // Obtain current state of the dataset. - mutex.Lock() - var qdb = versions[tx.ID()] - if writable { - qdb = versions[tx.ID()-1].Copy() - } - mutex.Unlock() - - // Make sure we commit/rollback the tx at the end and update the state. - if writable { - defer func() { - mutex.Lock() - versions[tx.ID()] = qdb - mutex.Unlock() - - ok(t, tx.Commit()) - }() - } else { - defer tx.Rollback() - } - - // Ignore operation if we don't have data yet. - if qdb == nil { - return - } - - // Execute handler. - handler(tx, qdb) - - // Release a thread back to the scheduling loop. - <-threads - }(writable, handler) - - i++ - if i > threadCount { - break - } - } - - // Wait until all threads are done. - wg.Wait() -} - -type simulateHandler func(tx *bolt.Tx, qdb *QuickDB) - -// Retrieves a key from the database and verifies that it is what is expected. -func simulateGetHandler(tx *bolt.Tx, qdb *QuickDB) { - // Randomly retrieve an existing exist. - keys := qdb.Rand() - if len(keys) == 0 { - return - } - - // Retrieve root bucket. - b := tx.Bucket(keys[0]) - if b == nil { - panic(fmt.Sprintf("bucket[0] expected: %08x\n", trunc(keys[0], 4))) - } - - // Drill into nested buckets. - for _, key := range keys[1 : len(keys)-1] { - b = b.Bucket(key) - if b == nil { - panic(fmt.Sprintf("bucket[n] expected: %v -> %v\n", keys, key)) - } - } - - // Verify key/value on the final bucket. - expected := qdb.Get(keys) - actual := b.Get(keys[len(keys)-1]) - if !bytes.Equal(actual, expected) { - fmt.Println("=== EXPECTED ===") - fmt.Println(expected) - fmt.Println("=== ACTUAL ===") - fmt.Println(actual) - fmt.Println("=== END ===") - panic("value mismatch") - } -} - -// Inserts a key into the database. -func simulatePutHandler(tx *bolt.Tx, qdb *QuickDB) { - var err error - keys, value := randKeys(), randValue() - - // Retrieve root bucket. - b := tx.Bucket(keys[0]) - if b == nil { - b, err = tx.CreateBucket(keys[0]) - if err != nil { - panic("create bucket: " + err.Error()) - } - } - - // Create nested buckets, if necessary. - for _, key := range keys[1 : len(keys)-1] { - child := b.Bucket(key) - if child != nil { - b = child - } else { - b, err = b.CreateBucket(key) - if err != nil { - panic("create bucket: " + err.Error()) - } - } - } - - // Insert into database. - if err := b.Put(keys[len(keys)-1], value); err != nil { - panic("put: " + err.Error()) - } - - // Insert into in-memory database. - qdb.Put(keys, value) -} - -// QuickDB is an in-memory database that replicates the functionality of the -// Bolt DB type except that it is entirely in-memory. 
It is meant for testing -// that the Bolt database is consistent. -type QuickDB struct { - sync.RWMutex - m map[string]interface{} -} - -// NewQuickDB returns an instance of QuickDB. -func NewQuickDB() *QuickDB { - return &QuickDB{m: make(map[string]interface{})} -} - -// Get retrieves the value at a key path. -func (db *QuickDB) Get(keys [][]byte) []byte { - db.RLock() - defer db.RUnlock() - - m := db.m - for _, key := range keys[:len(keys)-1] { - value := m[string(key)] - if value == nil { - return nil - } - switch value := value.(type) { - case map[string]interface{}: - m = value - case []byte: - return nil - } - } - - // Only return if it's a simple value. - if value, ok := m[string(keys[len(keys)-1])].([]byte); ok { - return value - } - return nil -} - -// Put inserts a value into a key path. -func (db *QuickDB) Put(keys [][]byte, value []byte) { - db.Lock() - defer db.Unlock() - - // Build buckets all the way down the key path. - m := db.m - for _, key := range keys[:len(keys)-1] { - if _, ok := m[string(key)].([]byte); ok { - return // Keypath intersects with a simple value. Do nothing. - } - - if m[string(key)] == nil { - m[string(key)] = make(map[string]interface{}) - } - m = m[string(key)].(map[string]interface{}) - } - - // Insert value into the last key. - m[string(keys[len(keys)-1])] = value -} - -// Rand returns a random key path that points to a simple value. -func (db *QuickDB) Rand() [][]byte { - db.RLock() - defer db.RUnlock() - if len(db.m) == 0 { - return nil - } - var keys [][]byte - db.rand(db.m, &keys) - return keys -} - -func (db *QuickDB) rand(m map[string]interface{}, keys *[][]byte) { - i, index := 0, rand.Intn(len(m)) - for k, v := range m { - if i == index { - *keys = append(*keys, []byte(k)) - if v, ok := v.(map[string]interface{}); ok { - db.rand(v, keys) - } - return - } - i++ - } - panic("quickdb rand: out-of-range") -} - -// Copy copies the entire database. -func (db *QuickDB) Copy() *QuickDB { - db.RLock() - defer db.RUnlock() - return &QuickDB{m: db.copy(db.m)} -} - -func (db *QuickDB) copy(m map[string]interface{}) map[string]interface{} { - clone := make(map[string]interface{}, len(m)) - for k, v := range m { - switch v := v.(type) { - case map[string]interface{}: - clone[k] = db.copy(v) - default: - clone[k] = v - } - } - return clone -} - -func randKey() []byte { - var min, max = 1, 1024 - n := rand.Intn(max-min) + min - b := make([]byte, n) - for i := 0; i < n; i++ { - b[i] = byte(rand.Intn(255)) - } - return b -} - -func randKeys() [][]byte { - var keys [][]byte - var count = rand.Intn(2) + 2 - for i := 0; i < count; i++ { - keys = append(keys, randKey()) - } - return keys -} - -func randValue() []byte { - n := rand.Intn(8192) - b := make([]byte, n) - for i := 0; i < n; i++ { - b[i] = byte(rand.Intn(255)) - } - return b -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/tx.go b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/tx.go index 6b52b2c896..b8510fdb87 100644 --- a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/tx.go +++ b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/tx.go @@ -5,6 +5,7 @@ import ( "io" "os" "sort" + "strings" "time" "unsafe" ) @@ -29,6 +30,14 @@ type Tx struct { pages map[pgid]*page stats TxStats commitHandlers []func() + + // WriteFlag specifies the flag for write-related methods like WriteTo(). + // Tx opens the database file with the specified flag to copy the data. + // + // By default, the flag is unset, which works well for mostly in-memory + // workloads. 
For databases that are much larger than available RAM, + // set the flag to syscall.O_DIRECT to avoid trashing the page cache. + WriteFlag int } // init initializes the transaction. @@ -87,18 +96,21 @@ func (tx *Tx) Stats() TxStats { // Bucket retrieves a bucket by name. // Returns nil if the bucket does not exist. +// The bucket instance is only valid for the lifetime of the transaction. func (tx *Tx) Bucket(name []byte) *Bucket { return tx.root.Bucket(name) } // CreateBucket creates a new bucket. // Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) { return tx.root.CreateBucket(name) } // CreateBucketIfNotExists creates a new bucket if it doesn't already exist. // Returns an error if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) { return tx.root.CreateBucketIfNotExists(name) } @@ -157,6 +169,8 @@ func (tx *Tx) Commit() error { // Free the old root bucket. tx.meta.root.root = tx.root.root + opgid := tx.meta.pgid + // Free the freelist and allocate new pages for it. This will overestimate // the size of the freelist but not underestimate the size (which would be bad). tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) @@ -171,6 +185,14 @@ func (tx *Tx) Commit() error { } tx.meta.freelist = p.id + // If the high water mark has moved up then attempt to grow the database. + if tx.meta.pgid > opgid { + if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { + tx.rollback() + return err + } + } + // Write dirty pages to disk. startTime = time.Now() if err := tx.write(); err != nil { @@ -181,8 +203,17 @@ func (tx *Tx) Commit() error { // If strict mode is enabled then perform a consistency check. // Only the first consistency error is reported in the panic. if tx.db.StrictMode { - if err, ok := <-tx.Check(); ok { - panic("check fail: " + err.Error()) + ch := tx.Check() + var errs []string + for { + err, ok := <-ch + if !ok { + break + } + errs = append(errs, err.Error()) + } + if len(errs) > 0 { + panic("check fail: " + strings.Join(errs, "\n")) } } @@ -236,7 +267,8 @@ func (tx *Tx) close() { var freelistPendingN = tx.db.freelist.pending_count() var freelistAlloc = tx.db.freelist.size() - // Remove writer lock. + // Remove transaction ref & writer lock. + tx.db.rwtx = nil tx.db.rwlock.Unlock() // Merge statistics. @@ -250,11 +282,16 @@ func (tx *Tx) close() { } else { tx.db.removeTx(tx) } + + // Clear all references. tx.db = nil + tx.meta = nil + tx.root = Bucket{tx: tx} + tx.pages = nil } // Copy writes the entire database to a writer. -// This function exists for backwards compatibility. Use WriteTo() in +// This function exists for backwards compatibility. Use WriteTo() instead. func (tx *Tx) Copy(w io.Writer) error { _, err := tx.WriteTo(w) return err @@ -263,29 +300,47 @@ func (tx *Tx) Copy(w io.Writer) error { // WriteTo writes the entire database to a writer. // If err == nil then exactly tx.Size() bytes will be written into the writer. func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { - // Attempt to open reader directly. - var f *os.File - if f, err = os.OpenFile(tx.db.path, os.O_RDONLY|odirect, 0); err != nil { - // Fallback to a regular open if that doesn't work. 
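With the Commit change above, a StrictMode failure now reports every consistency error returned by Tx.Check rather than only the first. A small, assumed-typical way to enable it in a test or development build; the path and bucket name are placeholders:

package main

import (
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("/tmp/dev.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// StrictMode runs Tx.Check after every commit; with the change above a
	// failure now panics with all consistency errors joined, not just the
	// first one. Only worth the cost in tests and development builds.
	db.StrictMode = true

	if err := db.Update(func(tx *bolt.Tx) error {
		_, err := tx.CreateBucketIfNotExists([]byte("config"))
		return err
	}); err != nil {
		log.Fatal(err)
	}
}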
- if f, err = os.OpenFile(tx.db.path, os.O_RDONLY, 0); err != nil { - return 0, err - } + // Attempt to open reader with WriteFlag + f, err := os.OpenFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0) + if err != nil { + return 0, err + } + defer func() { _ = f.Close() }() + + // Generate a meta page. We use the same page data for both meta pages. + buf := make([]byte, tx.db.pageSize) + page := (*page)(unsafe.Pointer(&buf[0])) + page.flags = metaPageFlag + *page.meta() = *tx.meta + + // Write meta 0. + page.id = 0 + page.meta().checksum = page.meta().sum64() + nn, err := w.Write(buf) + n += int64(nn) + if err != nil { + return n, fmt.Errorf("meta 0 copy: %s", err) } - // Copy the meta pages. - tx.db.metalock.Lock() - n, err = io.CopyN(w, f, int64(tx.db.pageSize*2)) - tx.db.metalock.Unlock() + // Write meta 1 with a lower transaction id. + page.id = 1 + page.meta().txid -= 1 + page.meta().checksum = page.meta().sum64() + nn, err = w.Write(buf) + n += int64(nn) if err != nil { - _ = f.Close() - return n, fmt.Errorf("meta copy: %s", err) + return n, fmt.Errorf("meta 1 copy: %s", err) + } + + // Move past the meta pages in the file. + if _, err := f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET); err != nil { + return n, fmt.Errorf("seek: %s", err) } // Copy data pages. wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2)) n += wn if err != nil { - _ = f.Close() return n, err } @@ -492,7 +547,7 @@ func (tx *Tx) writeMeta() error { } // page returns a reference to the page with a given id. -// If page has been written to then a temporary bufferred page is returned. +// If page has been written to then a temporary buffered page is returned. func (tx *Tx) page(id pgid) *page { // Check the dirty pages first. if tx.pages != nil { diff --git a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/tx_test.go b/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/tx_test.go deleted file mode 100644 index 6c8271a608..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/boltdb/bolt/tx_test.go +++ /dev/null @@ -1,456 +0,0 @@ -package bolt_test - -import ( - "errors" - "fmt" - "os" - "testing" - - "github.com/boltdb/bolt" -) - -// Ensure that committing a closed transaction returns an error. -func TestTx_Commit_Closed(t *testing.T) { - db := NewTestDB() - defer db.Close() - tx, _ := db.Begin(true) - tx.CreateBucket([]byte("foo")) - ok(t, tx.Commit()) - equals(t, tx.Commit(), bolt.ErrTxClosed) -} - -// Ensure that rolling back a closed transaction returns an error. -func TestTx_Rollback_Closed(t *testing.T) { - db := NewTestDB() - defer db.Close() - tx, _ := db.Begin(true) - ok(t, tx.Rollback()) - equals(t, tx.Rollback(), bolt.ErrTxClosed) -} - -// Ensure that committing a read-only transaction returns an error. -func TestTx_Commit_ReadOnly(t *testing.T) { - db := NewTestDB() - defer db.Close() - tx, _ := db.Begin(false) - equals(t, tx.Commit(), bolt.ErrTxNotWritable) -} - -// Ensure that a transaction can retrieve a cursor on the root bucket. -func TestTx_Cursor(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.CreateBucket([]byte("woojits")) - c := tx.Cursor() - - k, v := c.First() - equals(t, "widgets", string(k)) - assert(t, v == nil, "") - - k, v = c.Next() - equals(t, "woojits", string(k)) - assert(t, v == nil, "") - - k, v = c.Next() - assert(t, k == nil, "") - assert(t, v == nil, "") - - return nil - }) -} - -// Ensure that creating a bucket with a read-only transaction returns an error. 
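The rewritten Tx.WriteTo above generates both meta pages itself and then streams the remaining pages from a file handle opened with Tx.WriteFlag. A hedged sketch of a hot-backup HTTP handler in that spirit; the route, path, and the Linux-only syscall.O_DIRECT hint are illustrative choices, not part of the diff:

package main

import (
	"log"
	"net/http"
	"strconv"
	"syscall"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("/tmp/live.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	http.HandleFunc("/backup", func(w http.ResponseWriter, r *http.Request) {
		err := db.View(func(tx *bolt.Tx) error {
			// For databases much larger than RAM, O_DIRECT keeps the backup
			// read from evicting the page cache (Linux-only; illustrative).
			tx.WriteFlag = syscall.O_DIRECT
			w.Header().Set("Content-Type", "application/octet-stream")
			w.Header().Set("Content-Length", strconv.FormatInt(tx.Size(), 10))
			_, err := tx.WriteTo(w)
			return err
		})
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}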
-func TestTx_CreateBucket_ReadOnly(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.View(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("foo")) - assert(t, b == nil, "") - equals(t, bolt.ErrTxNotWritable, err) - return nil - }) -} - -// Ensure that creating a bucket on a closed transaction returns an error. -func TestTx_CreateBucket_Closed(t *testing.T) { - db := NewTestDB() - defer db.Close() - tx, _ := db.Begin(true) - tx.Commit() - b, err := tx.CreateBucket([]byte("foo")) - assert(t, b == nil, "") - equals(t, bolt.ErrTxClosed, err) -} - -// Ensure that a Tx can retrieve a bucket. -func TestTx_Bucket(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - assert(t, b != nil, "") - return nil - }) -} - -// Ensure that a Tx retrieving a non-existent key returns nil. -func TestTx_Get_Missing(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - value := tx.Bucket([]byte("widgets")).Get([]byte("no_such_key")) - assert(t, value == nil, "") - return nil - }) -} - -// Ensure that a bucket can be created and retrieved. -func TestTx_CreateBucket(t *testing.T) { - db := NewTestDB() - defer db.Close() - - // Create a bucket. - db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - assert(t, b != nil, "") - ok(t, err) - return nil - }) - - // Read the bucket through a separate transaction. - db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - assert(t, b != nil, "") - return nil - }) -} - -// Ensure that a bucket can be created if it doesn't already exist. -func TestTx_CreateBucketIfNotExists(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucketIfNotExists([]byte("widgets")) - assert(t, b != nil, "") - ok(t, err) - - b, err = tx.CreateBucketIfNotExists([]byte("widgets")) - assert(t, b != nil, "") - ok(t, err) - - b, err = tx.CreateBucketIfNotExists([]byte{}) - assert(t, b == nil, "") - equals(t, bolt.ErrBucketNameRequired, err) - - b, err = tx.CreateBucketIfNotExists(nil) - assert(t, b == nil, "") - equals(t, bolt.ErrBucketNameRequired, err) - return nil - }) - - // Read the bucket through a separate transaction. - db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - assert(t, b != nil, "") - return nil - }) -} - -// Ensure that a bucket cannot be created twice. -func TestTx_CreateBucket_Exists(t *testing.T) { - db := NewTestDB() - defer db.Close() - // Create a bucket. - db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - assert(t, b != nil, "") - ok(t, err) - return nil - }) - - // Create the same bucket again. - db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - assert(t, b == nil, "") - equals(t, bolt.ErrBucketExists, err) - return nil - }) -} - -// Ensure that a bucket is created with a non-blank name. -func TestTx_CreateBucket_NameRequired(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket(nil) - assert(t, b == nil, "") - equals(t, bolt.ErrBucketNameRequired, err) - return nil - }) -} - -// Ensure that a bucket can be deleted. -func TestTx_DeleteBucket(t *testing.T) { - db := NewTestDB() - defer db.Close() - - // Create a bucket and add a value. 
- db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - return nil - }) - - // Delete the bucket and make sure we can't get the value. - db.Update(func(tx *bolt.Tx) error { - ok(t, tx.DeleteBucket([]byte("widgets"))) - assert(t, tx.Bucket([]byte("widgets")) == nil, "") - return nil - }) - - db.Update(func(tx *bolt.Tx) error { - // Create the bucket again and make sure there's not a phantom value. - b, err := tx.CreateBucket([]byte("widgets")) - assert(t, b != nil, "") - ok(t, err) - assert(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")) == nil, "") - return nil - }) -} - -// Ensure that deleting a bucket on a closed transaction returns an error. -func TestTx_DeleteBucket_Closed(t *testing.T) { - db := NewTestDB() - defer db.Close() - tx, _ := db.Begin(true) - tx.Commit() - equals(t, tx.DeleteBucket([]byte("foo")), bolt.ErrTxClosed) -} - -// Ensure that deleting a bucket with a read-only transaction returns an error. -func TestTx_DeleteBucket_ReadOnly(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.View(func(tx *bolt.Tx) error { - equals(t, tx.DeleteBucket([]byte("foo")), bolt.ErrTxNotWritable) - return nil - }) -} - -// Ensure that nothing happens when deleting a bucket that doesn't exist. -func TestTx_DeleteBucket_NotFound(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - equals(t, bolt.ErrBucketNotFound, tx.DeleteBucket([]byte("widgets"))) - return nil - }) -} - -// Ensure that no error is returned when a tx.ForEach function does not return -// an error. -func TestTx_ForEach_NoError(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - - equals(t, nil, tx.ForEach(func(name []byte, b *bolt.Bucket) error { - return nil - })) - return nil - }) -} - -// Ensure that an error is returned when a tx.ForEach function returns an error. -func TestTx_ForEach_WithError(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - - err := errors.New("foo") - equals(t, err, tx.ForEach(func(name []byte, b *bolt.Bucket) error { - return err - })) - return nil - }) -} - -// Ensure that Tx commit handlers are called after a transaction successfully commits. -func TestTx_OnCommit(t *testing.T) { - var x int - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.OnCommit(func() { x += 1 }) - tx.OnCommit(func() { x += 2 }) - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) - equals(t, 3, x) -} - -// Ensure that Tx commit handlers are NOT called after a transaction rolls back. -func TestTx_OnCommit_Rollback(t *testing.T) { - var x int - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.OnCommit(func() { x += 1 }) - tx.OnCommit(func() { x += 2 }) - tx.CreateBucket([]byte("widgets")) - return errors.New("rollback this commit") - }) - equals(t, 0, x) -} - -// Ensure that the database can be copied to a file path. 
-func TestTx_CopyFile(t *testing.T) { - db := NewTestDB() - defer db.Close() - var dest = tempfile() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat")) - return nil - }) - - ok(t, db.View(func(tx *bolt.Tx) error { return tx.CopyFile(dest, 0600) })) - - db2, err := bolt.Open(dest, 0600, nil) - ok(t, err) - defer db2.Close() - - db2.View(func(tx *bolt.Tx) error { - equals(t, []byte("bar"), tx.Bucket([]byte("widgets")).Get([]byte("foo"))) - equals(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz"))) - return nil - }) -} - -type failWriterError struct{} - -func (failWriterError) Error() string { - return "error injected for tests" -} - -type failWriter struct { - // fail after this many bytes - After int -} - -func (f *failWriter) Write(p []byte) (n int, err error) { - n = len(p) - if n > f.After { - n = f.After - err = failWriterError{} - } - f.After -= n - return n, err -} - -// Ensure that Copy handles write errors right. -func TestTx_CopyFile_Error_Meta(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat")) - return nil - }) - - err := db.View(func(tx *bolt.Tx) error { return tx.Copy(&failWriter{}) }) - equals(t, err.Error(), "meta copy: error injected for tests") -} - -// Ensure that Copy handles write errors right. -func TestTx_CopyFile_Error_Normal(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat")) - return nil - }) - - err := db.View(func(tx *bolt.Tx) error { return tx.Copy(&failWriter{3 * db.Info().PageSize}) }) - equals(t, err.Error(), "error injected for tests") -} - -func ExampleTx_Rollback() { - // Open the database. - db, _ := bolt.Open(tempfile(), 0666, nil) - defer os.Remove(db.Path()) - defer db.Close() - - // Create a bucket. - db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) - - // Set a value for a key. - db.Update(func(tx *bolt.Tx) error { - return tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - }) - - // Update the key but rollback the transaction so it never saves. - tx, _ := db.Begin(true) - b := tx.Bucket([]byte("widgets")) - b.Put([]byte("foo"), []byte("baz")) - tx.Rollback() - - // Ensure that our original value is still set. - db.View(func(tx *bolt.Tx) error { - value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - fmt.Printf("The value for 'foo' is still: %s\n", value) - return nil - }) - - // Output: - // The value for 'foo' is still: bar -} - -func ExampleTx_CopyFile() { - // Open the database. - db, _ := bolt.Open(tempfile(), 0666, nil) - defer os.Remove(db.Path()) - defer db.Close() - - // Create a bucket and a key. - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - return nil - }) - - // Copy the database to another file. - toFile := tempfile() - db.View(func(tx *bolt.Tx) error { return tx.CopyFile(toFile, 0666) }) - defer os.Remove(toFile) - - // Open the cloned database. 
- db2, _ := bolt.Open(toFile, 0666, nil) - defer db2.Close() - - // Ensure that the key exists in the copy. - db2.View(func(tx *bolt.Tx) error { - value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - fmt.Printf("The value for 'foo' in the clone is: %s\n", value) - return nil - }) - - // Output: - // The value for 'foo' in the clone is: bar -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/codegangsta/cli/app_test.go b/libnetwork/Godeps/_workspace/src/github.com/codegangsta/cli/app_test.go deleted file mode 100644 index 4c6787aebd..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/codegangsta/cli/app_test.go +++ /dev/null @@ -1,867 +0,0 @@ -package cli - -import ( - "bytes" - "flag" - "fmt" - "io" - "os" - "strings" - "testing" -) - -func ExampleApp() { - // set args for examples sake - os.Args = []string{"greet", "--name", "Jeremy"} - - app := NewApp() - app.Name = "greet" - app.Flags = []Flag{ - StringFlag{Name: "name", Value: "bob", Usage: "a name to say"}, - } - app.Action = func(c *Context) { - fmt.Printf("Hello %v\n", c.String("name")) - } - app.Author = "Harrison" - app.Email = "harrison@lolwut.com" - app.Authors = []Author{Author{Name: "Oliver Allen", Email: "oliver@toyshop.com"}} - app.Run(os.Args) - // Output: - // Hello Jeremy -} - -func ExampleAppSubcommand() { - // set args for examples sake - os.Args = []string{"say", "hi", "english", "--name", "Jeremy"} - app := NewApp() - app.Name = "say" - app.Commands = []Command{ - { - Name: "hello", - Aliases: []string{"hi"}, - Usage: "use it to see a description", - Description: "This is how we describe hello the function", - Subcommands: []Command{ - { - Name: "english", - Aliases: []string{"en"}, - Usage: "sends a greeting in english", - Description: "greets someone in english", - Flags: []Flag{ - StringFlag{ - Name: "name", - Value: "Bob", - Usage: "Name of the person to greet", - }, - }, - Action: func(c *Context) { - fmt.Println("Hello,", c.String("name")) - }, - }, - }, - }, - } - - app.Run(os.Args) - // Output: - // Hello, Jeremy -} - -func ExampleAppHelp() { - // set args for examples sake - os.Args = []string{"greet", "h", "describeit"} - - app := NewApp() - app.Name = "greet" - app.Flags = []Flag{ - StringFlag{Name: "name", Value: "bob", Usage: "a name to say"}, - } - app.Commands = []Command{ - { - Name: "describeit", - Aliases: []string{"d"}, - Usage: "use it to see a description", - Description: "This is how we describe describeit the function", - Action: func(c *Context) { - fmt.Printf("i like to describe things") - }, - }, - } - app.Run(os.Args) - // Output: - // NAME: - // describeit - use it to see a description - // - // USAGE: - // command describeit [arguments...] 
- // - // DESCRIPTION: - // This is how we describe describeit the function -} - -func ExampleAppBashComplete() { - // set args for examples sake - os.Args = []string{"greet", "--generate-bash-completion"} - - app := NewApp() - app.Name = "greet" - app.EnableBashCompletion = true - app.Commands = []Command{ - { - Name: "describeit", - Aliases: []string{"d"}, - Usage: "use it to see a description", - Description: "This is how we describe describeit the function", - Action: func(c *Context) { - fmt.Printf("i like to describe things") - }, - }, { - Name: "next", - Usage: "next example", - Description: "more stuff to see when generating bash completion", - Action: func(c *Context) { - fmt.Printf("the next example") - }, - }, - } - - app.Run(os.Args) - // Output: - // describeit - // d - // next - // help - // h -} - -func TestApp_Run(t *testing.T) { - s := "" - - app := NewApp() - app.Action = func(c *Context) { - s = s + c.Args().First() - } - - err := app.Run([]string{"command", "foo"}) - expect(t, err, nil) - err = app.Run([]string{"command", "bar"}) - expect(t, err, nil) - expect(t, s, "foobar") -} - -var commandAppTests = []struct { - name string - expected bool -}{ - {"foobar", true}, - {"batbaz", true}, - {"b", true}, - {"f", true}, - {"bat", false}, - {"nothing", false}, -} - -func TestApp_Command(t *testing.T) { - app := NewApp() - fooCommand := Command{Name: "foobar", Aliases: []string{"f"}} - batCommand := Command{Name: "batbaz", Aliases: []string{"b"}} - app.Commands = []Command{ - fooCommand, - batCommand, - } - - for _, test := range commandAppTests { - expect(t, app.Command(test.name) != nil, test.expected) - } -} - -func TestApp_CommandWithArgBeforeFlags(t *testing.T) { - var parsedOption, firstArg string - - app := NewApp() - command := Command{ - Name: "cmd", - Flags: []Flag{ - StringFlag{Name: "option", Value: "", Usage: "some option"}, - }, - Action: func(c *Context) { - parsedOption = c.String("option") - firstArg = c.Args().First() - }, - } - app.Commands = []Command{command} - - app.Run([]string{"", "cmd", "my-arg", "--option", "my-option"}) - - expect(t, parsedOption, "my-option") - expect(t, firstArg, "my-arg") -} - -func TestApp_RunAsSubcommandParseFlags(t *testing.T) { - var context *Context - - a := NewApp() - a.Commands = []Command{ - { - Name: "foo", - Action: func(c *Context) { - context = c - }, - Flags: []Flag{ - StringFlag{ - Name: "lang", - Value: "english", - Usage: "language for the greeting", - }, - }, - Before: func(_ *Context) error { return nil }, - }, - } - a.Run([]string{"", "foo", "--lang", "spanish", "abcd"}) - - expect(t, context.Args().Get(0), "abcd") - expect(t, context.String("lang"), "spanish") -} - -func TestApp_CommandWithFlagBeforeTerminator(t *testing.T) { - var parsedOption string - var args []string - - app := NewApp() - command := Command{ - Name: "cmd", - Flags: []Flag{ - StringFlag{Name: "option", Value: "", Usage: "some option"}, - }, - Action: func(c *Context) { - parsedOption = c.String("option") - args = c.Args() - }, - } - app.Commands = []Command{command} - - app.Run([]string{"", "cmd", "my-arg", "--option", "my-option", "--", "--notARealFlag"}) - - expect(t, parsedOption, "my-option") - expect(t, args[0], "my-arg") - expect(t, args[1], "--") - expect(t, args[2], "--notARealFlag") -} - -func TestApp_CommandWithNoFlagBeforeTerminator(t *testing.T) { - var args []string - - app := NewApp() - command := Command{ - Name: "cmd", - Action: func(c *Context) { - args = c.Args() - }, - } - app.Commands = []Command{command} - - 
app.Run([]string{"", "cmd", "my-arg", "--", "notAFlagAtAll"}) - - expect(t, args[0], "my-arg") - expect(t, args[1], "--") - expect(t, args[2], "notAFlagAtAll") -} - -func TestApp_Float64Flag(t *testing.T) { - var meters float64 - - app := NewApp() - app.Flags = []Flag{ - Float64Flag{Name: "height", Value: 1.5, Usage: "Set the height, in meters"}, - } - app.Action = func(c *Context) { - meters = c.Float64("height") - } - - app.Run([]string{"", "--height", "1.93"}) - expect(t, meters, 1.93) -} - -func TestApp_ParseSliceFlags(t *testing.T) { - var parsedOption, firstArg string - var parsedIntSlice []int - var parsedStringSlice []string - - app := NewApp() - command := Command{ - Name: "cmd", - Flags: []Flag{ - IntSliceFlag{Name: "p", Value: &IntSlice{}, Usage: "set one or more ip addr"}, - StringSliceFlag{Name: "ip", Value: &StringSlice{}, Usage: "set one or more ports to open"}, - }, - Action: func(c *Context) { - parsedIntSlice = c.IntSlice("p") - parsedStringSlice = c.StringSlice("ip") - parsedOption = c.String("option") - firstArg = c.Args().First() - }, - } - app.Commands = []Command{command} - - app.Run([]string{"", "cmd", "my-arg", "-p", "22", "-p", "80", "-ip", "8.8.8.8", "-ip", "8.8.4.4"}) - - IntsEquals := func(a, b []int) bool { - if len(a) != len(b) { - return false - } - for i, v := range a { - if v != b[i] { - return false - } - } - return true - } - - StrsEquals := func(a, b []string) bool { - if len(a) != len(b) { - return false - } - for i, v := range a { - if v != b[i] { - return false - } - } - return true - } - var expectedIntSlice = []int{22, 80} - var expectedStringSlice = []string{"8.8.8.8", "8.8.4.4"} - - if !IntsEquals(parsedIntSlice, expectedIntSlice) { - t.Errorf("%v does not match %v", parsedIntSlice, expectedIntSlice) - } - - if !StrsEquals(parsedStringSlice, expectedStringSlice) { - t.Errorf("%v does not match %v", parsedStringSlice, expectedStringSlice) - } -} - -func TestApp_ParseSliceFlagsWithMissingValue(t *testing.T) { - var parsedIntSlice []int - var parsedStringSlice []string - - app := NewApp() - command := Command{ - Name: "cmd", - Flags: []Flag{ - IntSliceFlag{Name: "a", Usage: "set numbers"}, - StringSliceFlag{Name: "str", Usage: "set strings"}, - }, - Action: func(c *Context) { - parsedIntSlice = c.IntSlice("a") - parsedStringSlice = c.StringSlice("str") - }, - } - app.Commands = []Command{command} - - app.Run([]string{"", "cmd", "my-arg", "-a", "2", "-str", "A"}) - - var expectedIntSlice = []int{2} - var expectedStringSlice = []string{"A"} - - if parsedIntSlice[0] != expectedIntSlice[0] { - t.Errorf("%v does not match %v", parsedIntSlice[0], expectedIntSlice[0]) - } - - if parsedStringSlice[0] != expectedStringSlice[0] { - t.Errorf("%v does not match %v", parsedIntSlice[0], expectedIntSlice[0]) - } -} - -func TestApp_DefaultStdout(t *testing.T) { - app := NewApp() - - if app.Writer != os.Stdout { - t.Error("Default output writer not set.") - } -} - -type mockWriter struct { - written []byte -} - -func (fw *mockWriter) Write(p []byte) (n int, err error) { - if fw.written == nil { - fw.written = p - } else { - fw.written = append(fw.written, p...) 
- } - - return len(p), nil -} - -func (fw *mockWriter) GetWritten() (b []byte) { - return fw.written -} - -func TestApp_SetStdout(t *testing.T) { - w := &mockWriter{} - - app := NewApp() - app.Name = "test" - app.Writer = w - - err := app.Run([]string{"help"}) - - if err != nil { - t.Fatalf("Run error: %s", err) - } - - if len(w.written) == 0 { - t.Error("App did not write output to desired writer.") - } -} - -func TestApp_BeforeFunc(t *testing.T) { - beforeRun, subcommandRun := false, false - beforeError := fmt.Errorf("fail") - var err error - - app := NewApp() - - app.Before = func(c *Context) error { - beforeRun = true - s := c.String("opt") - if s == "fail" { - return beforeError - } - - return nil - } - - app.Commands = []Command{ - Command{ - Name: "sub", - Action: func(c *Context) { - subcommandRun = true - }, - }, - } - - app.Flags = []Flag{ - StringFlag{Name: "opt"}, - } - - // run with the Before() func succeeding - err = app.Run([]string{"command", "--opt", "succeed", "sub"}) - - if err != nil { - t.Fatalf("Run error: %s", err) - } - - if beforeRun == false { - t.Errorf("Before() not executed when expected") - } - - if subcommandRun == false { - t.Errorf("Subcommand not executed when expected") - } - - // reset - beforeRun, subcommandRun = false, false - - // run with the Before() func failing - err = app.Run([]string{"command", "--opt", "fail", "sub"}) - - // should be the same error produced by the Before func - if err != beforeError { - t.Errorf("Run error expected, but not received") - } - - if beforeRun == false { - t.Errorf("Before() not executed when expected") - } - - if subcommandRun == true { - t.Errorf("Subcommand executed when NOT expected") - } - -} - -func TestApp_AfterFunc(t *testing.T) { - afterRun, subcommandRun := false, false - afterError := fmt.Errorf("fail") - var err error - - app := NewApp() - - app.After = func(c *Context) error { - afterRun = true - s := c.String("opt") - if s == "fail" { - return afterError - } - - return nil - } - - app.Commands = []Command{ - Command{ - Name: "sub", - Action: func(c *Context) { - subcommandRun = true - }, - }, - } - - app.Flags = []Flag{ - StringFlag{Name: "opt"}, - } - - // run with the After() func succeeding - err = app.Run([]string{"command", "--opt", "succeed", "sub"}) - - if err != nil { - t.Fatalf("Run error: %s", err) - } - - if afterRun == false { - t.Errorf("After() not executed when expected") - } - - if subcommandRun == false { - t.Errorf("Subcommand not executed when expected") - } - - // reset - afterRun, subcommandRun = false, false - - // run with the Before() func failing - err = app.Run([]string{"command", "--opt", "fail", "sub"}) - - // should be the same error produced by the Before func - if err != afterError { - t.Errorf("Run error expected, but not received") - } - - if afterRun == false { - t.Errorf("After() not executed when expected") - } - - if subcommandRun == false { - t.Errorf("Subcommand not executed when expected") - } -} - -func TestAppNoHelpFlag(t *testing.T) { - oldFlag := HelpFlag - defer func() { - HelpFlag = oldFlag - }() - - HelpFlag = BoolFlag{} - - app := NewApp() - err := app.Run([]string{"test", "-h"}) - - if err != flag.ErrHelp { - t.Errorf("expected error about missing help flag, but got: %s (%T)", err, err) - } -} - -func TestAppHelpPrinter(t *testing.T) { - oldPrinter := HelpPrinter - defer func() { - HelpPrinter = oldPrinter - }() - - var wasCalled = false - HelpPrinter = func(w io.Writer, template string, data interface{}) { - wasCalled = true - } - - app := NewApp() - 
app.Run([]string{"-h"}) - - if wasCalled == false { - t.Errorf("Help printer expected to be called, but was not") - } -} - -func TestAppVersionPrinter(t *testing.T) { - oldPrinter := VersionPrinter - defer func() { - VersionPrinter = oldPrinter - }() - - var wasCalled = false - VersionPrinter = func(c *Context) { - wasCalled = true - } - - app := NewApp() - ctx := NewContext(app, nil, nil) - ShowVersion(ctx) - - if wasCalled == false { - t.Errorf("Version printer expected to be called, but was not") - } -} - -func TestAppCommandNotFound(t *testing.T) { - beforeRun, subcommandRun := false, false - app := NewApp() - - app.CommandNotFound = func(c *Context, command string) { - beforeRun = true - } - - app.Commands = []Command{ - Command{ - Name: "bar", - Action: func(c *Context) { - subcommandRun = true - }, - }, - } - - app.Run([]string{"command", "foo"}) - - expect(t, beforeRun, true) - expect(t, subcommandRun, false) -} - -func TestGlobalFlag(t *testing.T) { - var globalFlag string - var globalFlagSet bool - app := NewApp() - app.Flags = []Flag{ - StringFlag{Name: "global, g", Usage: "global"}, - } - app.Action = func(c *Context) { - globalFlag = c.GlobalString("global") - globalFlagSet = c.GlobalIsSet("global") - } - app.Run([]string{"command", "-g", "foo"}) - expect(t, globalFlag, "foo") - expect(t, globalFlagSet, true) - -} - -func TestGlobalFlagsInSubcommands(t *testing.T) { - subcommandRun := false - parentFlag := false - app := NewApp() - - app.Flags = []Flag{ - BoolFlag{Name: "debug, d", Usage: "Enable debugging"}, - } - - app.Commands = []Command{ - Command{ - Name: "foo", - Flags: []Flag{ - BoolFlag{Name: "parent, p", Usage: "Parent flag"}, - }, - Subcommands: []Command{ - { - Name: "bar", - Action: func(c *Context) { - if c.GlobalBool("debug") { - subcommandRun = true - } - if c.GlobalBool("parent") { - parentFlag = true - } - }, - }, - }, - }, - } - - app.Run([]string{"command", "-d", "foo", "-p", "bar"}) - - expect(t, subcommandRun, true) - expect(t, parentFlag, true) -} - -func TestApp_Run_CommandWithSubcommandHasHelpTopic(t *testing.T) { - var subcommandHelpTopics = [][]string{ - {"command", "foo", "--help"}, - {"command", "foo", "-h"}, - {"command", "foo", "help"}, - } - - for _, flagSet := range subcommandHelpTopics { - t.Logf("==> checking with flags %v", flagSet) - - app := NewApp() - buf := new(bytes.Buffer) - app.Writer = buf - - subCmdBar := Command{ - Name: "bar", - Usage: "does bar things", - } - subCmdBaz := Command{ - Name: "baz", - Usage: "does baz things", - } - cmd := Command{ - Name: "foo", - Description: "descriptive wall of text about how it does foo things", - Subcommands: []Command{subCmdBar, subCmdBaz}, - } - - app.Commands = []Command{cmd} - err := app.Run(flagSet) - - if err != nil { - t.Error(err) - } - - output := buf.String() - t.Logf("output: %q\n", buf.Bytes()) - - if strings.Contains(output, "No help topic for") { - t.Errorf("expect a help topic, got none: \n%q", output) - } - - for _, shouldContain := range []string{ - cmd.Name, cmd.Description, - subCmdBar.Name, subCmdBar.Usage, - subCmdBaz.Name, subCmdBaz.Usage, - } { - if !strings.Contains(output, shouldContain) { - t.Errorf("want help to contain %q, did not: \n%q", shouldContain, output) - } - } - } -} - -func TestApp_Run_SubcommandFullPath(t *testing.T) { - app := NewApp() - buf := new(bytes.Buffer) - app.Writer = buf - - subCmd := Command{ - Name: "bar", - Usage: "does bar things", - } - cmd := Command{ - Name: "foo", - Description: "foo commands", - Subcommands: []Command{subCmd}, - } - 
app.Commands = []Command{cmd} - - err := app.Run([]string{"command", "foo", "bar", "--help"}) - if err != nil { - t.Error(err) - } - - output := buf.String() - if !strings.Contains(output, "foo bar - does bar things") { - t.Errorf("expected full path to subcommand: %s", output) - } - if !strings.Contains(output, "command foo bar [arguments...]") { - t.Errorf("expected full path to subcommand: %s", output) - } -} - -func TestApp_Run_Help(t *testing.T) { - var helpArguments = [][]string{{"boom", "--help"}, {"boom", "-h"}, {"boom", "help"}} - - for _, args := range helpArguments { - buf := new(bytes.Buffer) - - t.Logf("==> checking with arguments %v", args) - - app := NewApp() - app.Name = "boom" - app.Usage = "make an explosive entrance" - app.Writer = buf - app.Action = func(c *Context) { - buf.WriteString("boom I say!") - } - - err := app.Run(args) - if err != nil { - t.Error(err) - } - - output := buf.String() - t.Logf("output: %q\n", buf.Bytes()) - - if !strings.Contains(output, "boom - make an explosive entrance") { - t.Errorf("want help to contain %q, did not: \n%q", "boom - make an explosive entrance", output) - } - } -} - -func TestApp_Run_Version(t *testing.T) { - var versionArguments = [][]string{{"boom", "--version"}, {"boom", "-v"}} - - for _, args := range versionArguments { - buf := new(bytes.Buffer) - - t.Logf("==> checking with arguments %v", args) - - app := NewApp() - app.Name = "boom" - app.Usage = "make an explosive entrance" - app.Version = "0.1.0" - app.Writer = buf - app.Action = func(c *Context) { - buf.WriteString("boom I say!") - } - - err := app.Run(args) - if err != nil { - t.Error(err) - } - - output := buf.String() - t.Logf("output: %q\n", buf.Bytes()) - - if !strings.Contains(output, "0.1.0") { - t.Errorf("want version to contain %q, did not: \n%q", "0.1.0", output) - } - } -} - -func TestApp_Run_DoesNotOverwriteErrorFromBefore(t *testing.T) { - app := NewApp() - app.Action = func(c *Context) {} - app.Before = func(c *Context) error { return fmt.Errorf("before error") } - app.After = func(c *Context) error { return fmt.Errorf("after error") } - - err := app.Run([]string{"foo"}) - if err == nil { - t.Fatalf("expected to recieve error from Run, got none") - } - - if !strings.Contains(err.Error(), "before error") { - t.Errorf("expected text of error from Before method, but got none in \"%v\"", err) - } - if !strings.Contains(err.Error(), "after error") { - t.Errorf("expected text of error from After method, but got none in \"%v\"", err) - } -} - -func TestApp_Run_SubcommandDoesNotOverwriteErrorFromBefore(t *testing.T) { - app := NewApp() - app.Commands = []Command{ - Command{ - Name: "bar", - Before: func(c *Context) error { return fmt.Errorf("before error") }, - After: func(c *Context) error { return fmt.Errorf("after error") }, - }, - } - - err := app.Run([]string{"foo", "bar"}) - if err == nil { - t.Fatalf("expected to recieve error from Run, got none") - } - - if !strings.Contains(err.Error(), "before error") { - t.Errorf("expected text of error from Before method, but got none in \"%v\"", err) - } - if !strings.Contains(err.Error(), "after error") { - t.Errorf("expected text of error from After method, but got none in \"%v\"", err) - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/bash_autocomplete b/libnetwork/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/bash_autocomplete deleted file mode 100644 index d9231f4cf7..0000000000 --- 
a/libnetwork/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/bash_autocomplete +++ /dev/null @@ -1,15 +0,0 @@ -#! /bin/bash - -: ${PROG:=$(basename ${BASH_SOURCE})} - -_cli_bash_autocomplete() { - local cur prev opts base - COMPREPLY=() - cur="${COMP_WORDS[COMP_CWORD]}" - prev="${COMP_WORDS[COMP_CWORD-1]}" - opts=$( ${COMP_WORDS[@]:0:$COMP_CWORD} --generate-bash-completion ) - COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) ) - return 0 - } - - complete -F _cli_bash_autocomplete $PROG \ No newline at end of file diff --git a/libnetwork/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/zsh_autocomplete b/libnetwork/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/zsh_autocomplete deleted file mode 100644 index 5430a18f95..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/zsh_autocomplete +++ /dev/null @@ -1,5 +0,0 @@ -autoload -U compinit && compinit -autoload -U bashcompinit && bashcompinit - -script_dir=$(dirname $0) -source ${script_dir}/bash_autocomplete diff --git a/libnetwork/Godeps/_workspace/src/github.com/codegangsta/cli/cli_test.go b/libnetwork/Godeps/_workspace/src/github.com/codegangsta/cli/cli_test.go deleted file mode 100644 index e54f8e2688..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/codegangsta/cli/cli_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package cli - -import ( - "os" -) - -func Example() { - app := NewApp() - app.Name = "todo" - app.Usage = "task list on the command line" - app.Commands = []Command{ - { - Name: "add", - Aliases: []string{"a"}, - Usage: "add a task to the list", - Action: func(c *Context) { - println("added task: ", c.Args().First()) - }, - }, - { - Name: "complete", - Aliases: []string{"c"}, - Usage: "complete a task on the list", - Action: func(c *Context) { - println("completed task: ", c.Args().First()) - }, - }, - } - - app.Run(os.Args) -} - -func ExampleSubcommand() { - app := NewApp() - app.Name = "say" - app.Commands = []Command{ - { - Name: "hello", - Aliases: []string{"hi"}, - Usage: "use it to see a description", - Description: "This is how we describe hello the function", - Subcommands: []Command{ - { - Name: "english", - Aliases: []string{"en"}, - Usage: "sends a greeting in english", - Description: "greets someone in english", - Flags: []Flag{ - StringFlag{ - Name: "name", - Value: "Bob", - Usage: "Name of the person to greet", - }, - }, - Action: func(c *Context) { - println("Hello, ", c.String("name")) - }, - }, { - Name: "spanish", - Aliases: []string{"sp"}, - Usage: "sends a greeting in spanish", - Flags: []Flag{ - StringFlag{ - Name: "surname", - Value: "Jones", - Usage: "Surname of the person to greet", - }, - }, - Action: func(c *Context) { - println("Hola, ", c.String("surname")) - }, - }, { - Name: "french", - Aliases: []string{"fr"}, - Usage: "sends a greeting in french", - Flags: []Flag{ - StringFlag{ - Name: "nickname", - Value: "Stevie", - Usage: "Nickname of the person to greet", - }, - }, - Action: func(c *Context) { - println("Bonjour, ", c.String("nickname")) - }, - }, - }, - }, { - Name: "bye", - Usage: "says goodbye", - Action: func(c *Context) { - println("bye") - }, - }, - } - - app.Run(os.Args) -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/codegangsta/cli/command_test.go b/libnetwork/Godeps/_workspace/src/github.com/codegangsta/cli/command_test.go deleted file mode 100644 index 688d12c3b4..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/codegangsta/cli/command_test.go +++ /dev/null @@ -1,47 +0,0 @@ 
-package cli - -import ( - "flag" - "testing" -) - -func TestCommandDoNotIgnoreFlags(t *testing.T) { - app := NewApp() - set := flag.NewFlagSet("test", 0) - test := []string{"blah", "blah", "-break"} - set.Parse(test) - - c := NewContext(app, set, nil) - - command := Command{ - Name: "test-cmd", - Aliases: []string{"tc"}, - Usage: "this is for testing", - Description: "testing", - Action: func(_ *Context) {}, - } - err := command.Run(c) - - expect(t, err.Error(), "flag provided but not defined: -break") -} - -func TestCommandIgnoreFlags(t *testing.T) { - app := NewApp() - set := flag.NewFlagSet("test", 0) - test := []string{"blah", "blah"} - set.Parse(test) - - c := NewContext(app, set, nil) - - command := Command{ - Name: "test-cmd", - Aliases: []string{"tc"}, - Usage: "this is for testing", - Description: "testing", - Action: func(_ *Context) {}, - SkipFlagParsing: true, - } - err := command.Run(c) - - expect(t, err, nil) -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/codegangsta/cli/context_test.go b/libnetwork/Godeps/_workspace/src/github.com/codegangsta/cli/context_test.go deleted file mode 100644 index 7f8e928932..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/codegangsta/cli/context_test.go +++ /dev/null @@ -1,113 +0,0 @@ -package cli - -import ( - "flag" - "testing" - "time" -) - -func TestNewContext(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Int("myflag", 12, "doc") - globalSet := flag.NewFlagSet("test", 0) - globalSet.Int("myflag", 42, "doc") - globalCtx := NewContext(nil, globalSet, nil) - command := Command{Name: "mycommand"} - c := NewContext(nil, set, globalCtx) - c.Command = command - expect(t, c.Int("myflag"), 12) - expect(t, c.GlobalInt("myflag"), 42) - expect(t, c.Command.Name, "mycommand") -} - -func TestContext_Int(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Int("myflag", 12, "doc") - c := NewContext(nil, set, nil) - expect(t, c.Int("myflag"), 12) -} - -func TestContext_Duration(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Duration("myflag", time.Duration(12*time.Second), "doc") - c := NewContext(nil, set, nil) - expect(t, c.Duration("myflag"), time.Duration(12*time.Second)) -} - -func TestContext_String(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.String("myflag", "hello world", "doc") - c := NewContext(nil, set, nil) - expect(t, c.String("myflag"), "hello world") -} - -func TestContext_Bool(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Bool("myflag", false, "doc") - c := NewContext(nil, set, nil) - expect(t, c.Bool("myflag"), false) -} - -func TestContext_BoolT(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Bool("myflag", true, "doc") - c := NewContext(nil, set, nil) - expect(t, c.BoolT("myflag"), true) -} - -func TestContext_Args(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Bool("myflag", false, "doc") - c := NewContext(nil, set, nil) - set.Parse([]string{"--myflag", "bat", "baz"}) - expect(t, len(c.Args()), 2) - expect(t, c.Bool("myflag"), true) -} - -func TestContext_IsSet(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Bool("myflag", false, "doc") - set.String("otherflag", "hello world", "doc") - globalSet := flag.NewFlagSet("test", 0) - globalSet.Bool("myflagGlobal", true, "doc") - globalCtx := NewContext(nil, globalSet, nil) - c := NewContext(nil, set, globalCtx) - set.Parse([]string{"--myflag", "bat", "baz"}) - globalSet.Parse([]string{"--myflagGlobal", "bat", "baz"}) - expect(t, c.IsSet("myflag"), true) - expect(t, 
c.IsSet("otherflag"), false) - expect(t, c.IsSet("bogusflag"), false) - expect(t, c.IsSet("myflagGlobal"), false) -} - -func TestContext_GlobalIsSet(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Bool("myflag", false, "doc") - set.String("otherflag", "hello world", "doc") - globalSet := flag.NewFlagSet("test", 0) - globalSet.Bool("myflagGlobal", true, "doc") - globalSet.Bool("myflagGlobalUnset", true, "doc") - globalCtx := NewContext(nil, globalSet, nil) - c := NewContext(nil, set, globalCtx) - set.Parse([]string{"--myflag", "bat", "baz"}) - globalSet.Parse([]string{"--myflagGlobal", "bat", "baz"}) - expect(t, c.GlobalIsSet("myflag"), false) - expect(t, c.GlobalIsSet("otherflag"), false) - expect(t, c.GlobalIsSet("bogusflag"), false) - expect(t, c.GlobalIsSet("myflagGlobal"), true) - expect(t, c.GlobalIsSet("myflagGlobalUnset"), false) - expect(t, c.GlobalIsSet("bogusGlobal"), false) -} - -func TestContext_NumFlags(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Bool("myflag", false, "doc") - set.String("otherflag", "hello world", "doc") - globalSet := flag.NewFlagSet("test", 0) - globalSet.Bool("myflagGlobal", true, "doc") - globalCtx := NewContext(nil, globalSet, nil) - c := NewContext(nil, set, globalCtx) - set.Parse([]string{"--myflag", "--otherflag=foo"}) - globalSet.Parse([]string{"--myflagGlobal"}) - expect(t, c.NumFlags(), 2) -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/codegangsta/cli/flag_test.go b/libnetwork/Godeps/_workspace/src/github.com/codegangsta/cli/flag_test.go deleted file mode 100644 index 36061028af..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/codegangsta/cli/flag_test.go +++ /dev/null @@ -1,740 +0,0 @@ -package cli - -import ( - "fmt" - "os" - "reflect" - "strings" - "testing" -) - -var boolFlagTests = []struct { - name string - expected string -}{ - {"help", "--help\t"}, - {"h", "-h\t"}, -} - -func TestBoolFlagHelpOutput(t *testing.T) { - - for _, test := range boolFlagTests { - flag := BoolFlag{Name: test.name} - output := flag.String() - - if output != test.expected { - t.Errorf("%s does not match %s", output, test.expected) - } - } -} - -var stringFlagTests = []struct { - name string - value string - expected string -}{ - {"help", "", "--help \t"}, - {"h", "", "-h \t"}, - {"h", "", "-h \t"}, - {"test", "Something", "--test \"Something\"\t"}, -} - -func TestStringFlagHelpOutput(t *testing.T) { - - for _, test := range stringFlagTests { - flag := StringFlag{Name: test.name, Value: test.value} - output := flag.String() - - if output != test.expected { - t.Errorf("%s does not match %s", output, test.expected) - } - } -} - -func TestStringFlagWithEnvVarHelpOutput(t *testing.T) { - os.Clearenv() - os.Setenv("APP_FOO", "derp") - for _, test := range stringFlagTests { - flag := StringFlag{Name: test.name, Value: test.value, EnvVar: "APP_FOO"} - output := flag.String() - - if !strings.HasSuffix(output, " [$APP_FOO]") { - t.Errorf("%s does not end with [$APP_FOO]", output) - } - } -} - -var stringSliceFlagTests = []struct { - name string - value *StringSlice - expected string -}{ - {"help", func() *StringSlice { - s := &StringSlice{} - s.Set("") - return s - }(), "--help [--help option --help option]\t"}, - {"h", func() *StringSlice { - s := &StringSlice{} - s.Set("") - return s - }(), "-h [-h option -h option]\t"}, - {"h", func() *StringSlice { - s := &StringSlice{} - s.Set("") - return s - }(), "-h [-h option -h option]\t"}, - {"test", func() *StringSlice { - s := &StringSlice{} - s.Set("Something") - return s - }(), 
"--test [--test option --test option]\t"}, -} - -func TestStringSliceFlagHelpOutput(t *testing.T) { - - for _, test := range stringSliceFlagTests { - flag := StringSliceFlag{Name: test.name, Value: test.value} - output := flag.String() - - if output != test.expected { - t.Errorf("%q does not match %q", output, test.expected) - } - } -} - -func TestStringSliceFlagWithEnvVarHelpOutput(t *testing.T) { - os.Clearenv() - os.Setenv("APP_QWWX", "11,4") - for _, test := range stringSliceFlagTests { - flag := StringSliceFlag{Name: test.name, Value: test.value, EnvVar: "APP_QWWX"} - output := flag.String() - - if !strings.HasSuffix(output, " [$APP_QWWX]") { - t.Errorf("%q does not end with [$APP_QWWX]", output) - } - } -} - -var intFlagTests = []struct { - name string - expected string -}{ - {"help", "--help \"0\"\t"}, - {"h", "-h \"0\"\t"}, -} - -func TestIntFlagHelpOutput(t *testing.T) { - - for _, test := range intFlagTests { - flag := IntFlag{Name: test.name} - output := flag.String() - - if output != test.expected { - t.Errorf("%s does not match %s", output, test.expected) - } - } -} - -func TestIntFlagWithEnvVarHelpOutput(t *testing.T) { - os.Clearenv() - os.Setenv("APP_BAR", "2") - for _, test := range intFlagTests { - flag := IntFlag{Name: test.name, EnvVar: "APP_BAR"} - output := flag.String() - - if !strings.HasSuffix(output, " [$APP_BAR]") { - t.Errorf("%s does not end with [$APP_BAR]", output) - } - } -} - -var durationFlagTests = []struct { - name string - expected string -}{ - {"help", "--help \"0\"\t"}, - {"h", "-h \"0\"\t"}, -} - -func TestDurationFlagHelpOutput(t *testing.T) { - - for _, test := range durationFlagTests { - flag := DurationFlag{Name: test.name} - output := flag.String() - - if output != test.expected { - t.Errorf("%s does not match %s", output, test.expected) - } - } -} - -func TestDurationFlagWithEnvVarHelpOutput(t *testing.T) { - os.Clearenv() - os.Setenv("APP_BAR", "2h3m6s") - for _, test := range durationFlagTests { - flag := DurationFlag{Name: test.name, EnvVar: "APP_BAR"} - output := flag.String() - - if !strings.HasSuffix(output, " [$APP_BAR]") { - t.Errorf("%s does not end with [$APP_BAR]", output) - } - } -} - -var intSliceFlagTests = []struct { - name string - value *IntSlice - expected string -}{ - {"help", &IntSlice{}, "--help [--help option --help option]\t"}, - {"h", &IntSlice{}, "-h [-h option -h option]\t"}, - {"h", &IntSlice{}, "-h [-h option -h option]\t"}, - {"test", func() *IntSlice { - i := &IntSlice{} - i.Set("9") - return i - }(), "--test [--test option --test option]\t"}, -} - -func TestIntSliceFlagHelpOutput(t *testing.T) { - - for _, test := range intSliceFlagTests { - flag := IntSliceFlag{Name: test.name, Value: test.value} - output := flag.String() - - if output != test.expected { - t.Errorf("%q does not match %q", output, test.expected) - } - } -} - -func TestIntSliceFlagWithEnvVarHelpOutput(t *testing.T) { - os.Clearenv() - os.Setenv("APP_SMURF", "42,3") - for _, test := range intSliceFlagTests { - flag := IntSliceFlag{Name: test.name, Value: test.value, EnvVar: "APP_SMURF"} - output := flag.String() - - if !strings.HasSuffix(output, " [$APP_SMURF]") { - t.Errorf("%q does not end with [$APP_SMURF]", output) - } - } -} - -var float64FlagTests = []struct { - name string - expected string -}{ - {"help", "--help \"0\"\t"}, - {"h", "-h \"0\"\t"}, -} - -func TestFloat64FlagHelpOutput(t *testing.T) { - - for _, test := range float64FlagTests { - flag := Float64Flag{Name: test.name} - output := flag.String() - - if output != test.expected { - 
t.Errorf("%s does not match %s", output, test.expected) - } - } -} - -func TestFloat64FlagWithEnvVarHelpOutput(t *testing.T) { - os.Clearenv() - os.Setenv("APP_BAZ", "99.4") - for _, test := range float64FlagTests { - flag := Float64Flag{Name: test.name, EnvVar: "APP_BAZ"} - output := flag.String() - - if !strings.HasSuffix(output, " [$APP_BAZ]") { - t.Errorf("%s does not end with [$APP_BAZ]", output) - } - } -} - -var genericFlagTests = []struct { - name string - value Generic - expected string -}{ - {"test", &Parser{"abc", "def"}, "--test \"abc,def\"\ttest flag"}, - {"t", &Parser{"abc", "def"}, "-t \"abc,def\"\ttest flag"}, -} - -func TestGenericFlagHelpOutput(t *testing.T) { - - for _, test := range genericFlagTests { - flag := GenericFlag{Name: test.name, Value: test.value, Usage: "test flag"} - output := flag.String() - - if output != test.expected { - t.Errorf("%q does not match %q", output, test.expected) - } - } -} - -func TestGenericFlagWithEnvVarHelpOutput(t *testing.T) { - os.Clearenv() - os.Setenv("APP_ZAP", "3") - for _, test := range genericFlagTests { - flag := GenericFlag{Name: test.name, EnvVar: "APP_ZAP"} - output := flag.String() - - if !strings.HasSuffix(output, " [$APP_ZAP]") { - t.Errorf("%s does not end with [$APP_ZAP]", output) - } - } -} - -func TestParseMultiString(t *testing.T) { - (&App{ - Flags: []Flag{ - StringFlag{Name: "serve, s"}, - }, - Action: func(ctx *Context) { - if ctx.String("serve") != "10" { - t.Errorf("main name not set") - } - if ctx.String("s") != "10" { - t.Errorf("short name not set") - } - }, - }).Run([]string{"run", "-s", "10"}) -} - -func TestParseMultiStringFromEnv(t *testing.T) { - os.Clearenv() - os.Setenv("APP_COUNT", "20") - (&App{ - Flags: []Flag{ - StringFlag{Name: "count, c", EnvVar: "APP_COUNT"}, - }, - Action: func(ctx *Context) { - if ctx.String("count") != "20" { - t.Errorf("main name not set") - } - if ctx.String("c") != "20" { - t.Errorf("short name not set") - } - }, - }).Run([]string{"run"}) -} - -func TestParseMultiStringFromEnvCascade(t *testing.T) { - os.Clearenv() - os.Setenv("APP_COUNT", "20") - (&App{ - Flags: []Flag{ - StringFlag{Name: "count, c", EnvVar: "COMPAT_COUNT,APP_COUNT"}, - }, - Action: func(ctx *Context) { - if ctx.String("count") != "20" { - t.Errorf("main name not set") - } - if ctx.String("c") != "20" { - t.Errorf("short name not set") - } - }, - }).Run([]string{"run"}) -} - -func TestParseMultiStringSlice(t *testing.T) { - (&App{ - Flags: []Flag{ - StringSliceFlag{Name: "serve, s", Value: &StringSlice{}}, - }, - Action: func(ctx *Context) { - if !reflect.DeepEqual(ctx.StringSlice("serve"), []string{"10", "20"}) { - t.Errorf("main name not set") - } - if !reflect.DeepEqual(ctx.StringSlice("s"), []string{"10", "20"}) { - t.Errorf("short name not set") - } - }, - }).Run([]string{"run", "-s", "10", "-s", "20"}) -} - -func TestParseMultiStringSliceFromEnv(t *testing.T) { - os.Clearenv() - os.Setenv("APP_INTERVALS", "20,30,40") - - (&App{ - Flags: []Flag{ - StringSliceFlag{Name: "intervals, i", Value: &StringSlice{}, EnvVar: "APP_INTERVALS"}, - }, - Action: func(ctx *Context) { - if !reflect.DeepEqual(ctx.StringSlice("intervals"), []string{"20", "30", "40"}) { - t.Errorf("main name not set from env") - } - if !reflect.DeepEqual(ctx.StringSlice("i"), []string{"20", "30", "40"}) { - t.Errorf("short name not set from env") - } - }, - }).Run([]string{"run"}) -} - -func TestParseMultiStringSliceFromEnvCascade(t *testing.T) { - os.Clearenv() - os.Setenv("APP_INTERVALS", "20,30,40") - - (&App{ - Flags: []Flag{ - 
StringSliceFlag{Name: "intervals, i", Value: &StringSlice{}, EnvVar: "COMPAT_INTERVALS,APP_INTERVALS"}, - }, - Action: func(ctx *Context) { - if !reflect.DeepEqual(ctx.StringSlice("intervals"), []string{"20", "30", "40"}) { - t.Errorf("main name not set from env") - } - if !reflect.DeepEqual(ctx.StringSlice("i"), []string{"20", "30", "40"}) { - t.Errorf("short name not set from env") - } - }, - }).Run([]string{"run"}) -} - -func TestParseMultiInt(t *testing.T) { - a := App{ - Flags: []Flag{ - IntFlag{Name: "serve, s"}, - }, - Action: func(ctx *Context) { - if ctx.Int("serve") != 10 { - t.Errorf("main name not set") - } - if ctx.Int("s") != 10 { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run", "-s", "10"}) -} - -func TestParseMultiIntFromEnv(t *testing.T) { - os.Clearenv() - os.Setenv("APP_TIMEOUT_SECONDS", "10") - a := App{ - Flags: []Flag{ - IntFlag{Name: "timeout, t", EnvVar: "APP_TIMEOUT_SECONDS"}, - }, - Action: func(ctx *Context) { - if ctx.Int("timeout") != 10 { - t.Errorf("main name not set") - } - if ctx.Int("t") != 10 { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run"}) -} - -func TestParseMultiIntFromEnvCascade(t *testing.T) { - os.Clearenv() - os.Setenv("APP_TIMEOUT_SECONDS", "10") - a := App{ - Flags: []Flag{ - IntFlag{Name: "timeout, t", EnvVar: "COMPAT_TIMEOUT_SECONDS,APP_TIMEOUT_SECONDS"}, - }, - Action: func(ctx *Context) { - if ctx.Int("timeout") != 10 { - t.Errorf("main name not set") - } - if ctx.Int("t") != 10 { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run"}) -} - -func TestParseMultiIntSlice(t *testing.T) { - (&App{ - Flags: []Flag{ - IntSliceFlag{Name: "serve, s", Value: &IntSlice{}}, - }, - Action: func(ctx *Context) { - if !reflect.DeepEqual(ctx.IntSlice("serve"), []int{10, 20}) { - t.Errorf("main name not set") - } - if !reflect.DeepEqual(ctx.IntSlice("s"), []int{10, 20}) { - t.Errorf("short name not set") - } - }, - }).Run([]string{"run", "-s", "10", "-s", "20"}) -} - -func TestParseMultiIntSliceFromEnv(t *testing.T) { - os.Clearenv() - os.Setenv("APP_INTERVALS", "20,30,40") - - (&App{ - Flags: []Flag{ - IntSliceFlag{Name: "intervals, i", Value: &IntSlice{}, EnvVar: "APP_INTERVALS"}, - }, - Action: func(ctx *Context) { - if !reflect.DeepEqual(ctx.IntSlice("intervals"), []int{20, 30, 40}) { - t.Errorf("main name not set from env") - } - if !reflect.DeepEqual(ctx.IntSlice("i"), []int{20, 30, 40}) { - t.Errorf("short name not set from env") - } - }, - }).Run([]string{"run"}) -} - -func TestParseMultiIntSliceFromEnvCascade(t *testing.T) { - os.Clearenv() - os.Setenv("APP_INTERVALS", "20,30,40") - - (&App{ - Flags: []Flag{ - IntSliceFlag{Name: "intervals, i", Value: &IntSlice{}, EnvVar: "COMPAT_INTERVALS,APP_INTERVALS"}, - }, - Action: func(ctx *Context) { - if !reflect.DeepEqual(ctx.IntSlice("intervals"), []int{20, 30, 40}) { - t.Errorf("main name not set from env") - } - if !reflect.DeepEqual(ctx.IntSlice("i"), []int{20, 30, 40}) { - t.Errorf("short name not set from env") - } - }, - }).Run([]string{"run"}) -} - -func TestParseMultiFloat64(t *testing.T) { - a := App{ - Flags: []Flag{ - Float64Flag{Name: "serve, s"}, - }, - Action: func(ctx *Context) { - if ctx.Float64("serve") != 10.2 { - t.Errorf("main name not set") - } - if ctx.Float64("s") != 10.2 { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run", "-s", "10.2"}) -} - -func TestParseMultiFloat64FromEnv(t *testing.T) { - os.Clearenv() - os.Setenv("APP_TIMEOUT_SECONDS", "15.5") - a := App{ - Flags: []Flag{ - Float64Flag{Name: 
"timeout, t", EnvVar: "APP_TIMEOUT_SECONDS"}, - }, - Action: func(ctx *Context) { - if ctx.Float64("timeout") != 15.5 { - t.Errorf("main name not set") - } - if ctx.Float64("t") != 15.5 { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run"}) -} - -func TestParseMultiFloat64FromEnvCascade(t *testing.T) { - os.Clearenv() - os.Setenv("APP_TIMEOUT_SECONDS", "15.5") - a := App{ - Flags: []Flag{ - Float64Flag{Name: "timeout, t", EnvVar: "COMPAT_TIMEOUT_SECONDS,APP_TIMEOUT_SECONDS"}, - }, - Action: func(ctx *Context) { - if ctx.Float64("timeout") != 15.5 { - t.Errorf("main name not set") - } - if ctx.Float64("t") != 15.5 { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run"}) -} - -func TestParseMultiBool(t *testing.T) { - a := App{ - Flags: []Flag{ - BoolFlag{Name: "serve, s"}, - }, - Action: func(ctx *Context) { - if ctx.Bool("serve") != true { - t.Errorf("main name not set") - } - if ctx.Bool("s") != true { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run", "--serve"}) -} - -func TestParseMultiBoolFromEnv(t *testing.T) { - os.Clearenv() - os.Setenv("APP_DEBUG", "1") - a := App{ - Flags: []Flag{ - BoolFlag{Name: "debug, d", EnvVar: "APP_DEBUG"}, - }, - Action: func(ctx *Context) { - if ctx.Bool("debug") != true { - t.Errorf("main name not set from env") - } - if ctx.Bool("d") != true { - t.Errorf("short name not set from env") - } - }, - } - a.Run([]string{"run"}) -} - -func TestParseMultiBoolFromEnvCascade(t *testing.T) { - os.Clearenv() - os.Setenv("APP_DEBUG", "1") - a := App{ - Flags: []Flag{ - BoolFlag{Name: "debug, d", EnvVar: "COMPAT_DEBUG,APP_DEBUG"}, - }, - Action: func(ctx *Context) { - if ctx.Bool("debug") != true { - t.Errorf("main name not set from env") - } - if ctx.Bool("d") != true { - t.Errorf("short name not set from env") - } - }, - } - a.Run([]string{"run"}) -} - -func TestParseMultiBoolT(t *testing.T) { - a := App{ - Flags: []Flag{ - BoolTFlag{Name: "serve, s"}, - }, - Action: func(ctx *Context) { - if ctx.BoolT("serve") != true { - t.Errorf("main name not set") - } - if ctx.BoolT("s") != true { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run", "--serve"}) -} - -func TestParseMultiBoolTFromEnv(t *testing.T) { - os.Clearenv() - os.Setenv("APP_DEBUG", "0") - a := App{ - Flags: []Flag{ - BoolTFlag{Name: "debug, d", EnvVar: "APP_DEBUG"}, - }, - Action: func(ctx *Context) { - if ctx.BoolT("debug") != false { - t.Errorf("main name not set from env") - } - if ctx.BoolT("d") != false { - t.Errorf("short name not set from env") - } - }, - } - a.Run([]string{"run"}) -} - -func TestParseMultiBoolTFromEnvCascade(t *testing.T) { - os.Clearenv() - os.Setenv("APP_DEBUG", "0") - a := App{ - Flags: []Flag{ - BoolTFlag{Name: "debug, d", EnvVar: "COMPAT_DEBUG,APP_DEBUG"}, - }, - Action: func(ctx *Context) { - if ctx.BoolT("debug") != false { - t.Errorf("main name not set from env") - } - if ctx.BoolT("d") != false { - t.Errorf("short name not set from env") - } - }, - } - a.Run([]string{"run"}) -} - -type Parser [2]string - -func (p *Parser) Set(value string) error { - parts := strings.Split(value, ",") - if len(parts) != 2 { - return fmt.Errorf("invalid format") - } - - (*p)[0] = parts[0] - (*p)[1] = parts[1] - - return nil -} - -func (p *Parser) String() string { - return fmt.Sprintf("%s,%s", p[0], p[1]) -} - -func TestParseGeneric(t *testing.T) { - a := App{ - Flags: []Flag{ - GenericFlag{Name: "serve, s", Value: &Parser{}}, - }, - Action: func(ctx *Context) { - if !reflect.DeepEqual(ctx.Generic("serve"), 
&Parser{"10", "20"}) { - t.Errorf("main name not set") - } - if !reflect.DeepEqual(ctx.Generic("s"), &Parser{"10", "20"}) { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run", "-s", "10,20"}) -} - -func TestParseGenericFromEnv(t *testing.T) { - os.Clearenv() - os.Setenv("APP_SERVE", "20,30") - a := App{ - Flags: []Flag{ - GenericFlag{Name: "serve, s", Value: &Parser{}, EnvVar: "APP_SERVE"}, - }, - Action: func(ctx *Context) { - if !reflect.DeepEqual(ctx.Generic("serve"), &Parser{"20", "30"}) { - t.Errorf("main name not set from env") - } - if !reflect.DeepEqual(ctx.Generic("s"), &Parser{"20", "30"}) { - t.Errorf("short name not set from env") - } - }, - } - a.Run([]string{"run"}) -} - -func TestParseGenericFromEnvCascade(t *testing.T) { - os.Clearenv() - os.Setenv("APP_FOO", "99,2000") - a := App{ - Flags: []Flag{ - GenericFlag{Name: "foos", Value: &Parser{}, EnvVar: "COMPAT_FOO,APP_FOO"}, - }, - Action: func(ctx *Context) { - if !reflect.DeepEqual(ctx.Generic("foos"), &Parser{"99", "2000"}) { - t.Errorf("value not set from env") - } - }, - } - a.Run([]string{"run"}) -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/codegangsta/cli/help_test.go b/libnetwork/Godeps/_workspace/src/github.com/codegangsta/cli/help_test.go deleted file mode 100644 index 42d0284c91..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/codegangsta/cli/help_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package cli - -import ( - "bytes" - "testing" -) - -func Test_ShowAppHelp_NoAuthor(t *testing.T) { - output := new(bytes.Buffer) - app := NewApp() - app.Writer = output - - c := NewContext(app, nil, nil) - - ShowAppHelp(c) - - if bytes.Index(output.Bytes(), []byte("AUTHOR(S):")) != -1 { - t.Errorf("expected\n%snot to include %s", output.String(), "AUTHOR(S):") - } -} - -func Test_ShowAppHelp_NoVersion(t *testing.T) { - output := new(bytes.Buffer) - app := NewApp() - app.Writer = output - - app.Version = "" - - c := NewContext(app, nil, nil) - - ShowAppHelp(c) - - if bytes.Index(output.Bytes(), []byte("VERSION:")) != -1 { - t.Errorf("expected\n%snot to include %s", output.String(), "VERSION:") - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/codegangsta/cli/helpers_test.go b/libnetwork/Godeps/_workspace/src/github.com/codegangsta/cli/helpers_test.go deleted file mode 100644 index 3ce8e938bc..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/codegangsta/cli/helpers_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package cli - -import ( - "reflect" - "testing" -) - -/* Test Helpers */ -func expect(t *testing.T, a interface{}, b interface{}) { - if a != b { - t.Errorf("Expected %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a)) - } -} - -func refute(t *testing.T, a interface{}, b interface{}) { - if a == b { - t.Errorf("Did not expect %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a)) - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/LICENSE b/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/NOTICE b/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/NOTICE new file mode 100644 index 0000000000..b39ddfa5cb --- /dev/null +++ b/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2014 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/client/client_test.go b/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/client/client_test.go deleted file mode 100644 index b2b46d1b12..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/client/client_test.go +++ /dev/null @@ -1,896 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "errors" - "io" - "io/ioutil" - "math/rand" - "net/http" - "net/url" - "reflect" - "sort" - "strings" - "testing" - "time" - - "github.com/coreos/etcd/pkg/testutil" - "golang.org/x/net/context" -) - -type actionAssertingHTTPClient struct { - t *testing.T - num int - act httpAction - - resp http.Response - body []byte - err error -} - -func (a *actionAssertingHTTPClient) Do(_ context.Context, act httpAction) (*http.Response, []byte, error) { - if !reflect.DeepEqual(a.act, act) { - a.t.Errorf("#%d: unexpected httpAction: want=%#v got=%#v", a.num, a.act, act) - } - - return &a.resp, a.body, a.err -} - -type staticHTTPClient struct { - resp http.Response - body []byte - err error -} - -func (s *staticHTTPClient) Do(context.Context, httpAction) (*http.Response, []byte, error) { - return &s.resp, s.body, s.err -} - -type staticHTTPAction struct { - request http.Request -} - -func (s *staticHTTPAction) HTTPRequest(url.URL) *http.Request { - return &s.request -} - -type staticHTTPResponse struct { - resp http.Response - body []byte - err error -} - -type multiStaticHTTPClient struct { - responses []staticHTTPResponse - cur int -} - -func (s *multiStaticHTTPClient) Do(context.Context, httpAction) (*http.Response, []byte, error) { - r := s.responses[s.cur] - s.cur++ - return &r.resp, r.body, r.err -} - -func newStaticHTTPClientFactory(responses []staticHTTPResponse) httpClientFactory { - var cur int - return func(url.URL) httpClient { - r := responses[cur] - cur++ - return &staticHTTPClient{resp: r.resp, body: r.body, err: r.err} - } -} - -type fakeTransport struct { - respchan chan *http.Response - errchan chan error - startCancel chan struct{} - finishCancel chan struct{} -} - -func newFakeTransport() *fakeTransport { - return &fakeTransport{ - respchan: make(chan *http.Response, 1), - errchan: make(chan error, 1), - startCancel: make(chan struct{}, 1), - finishCancel: make(chan struct{}, 1), - } -} - -func (t *fakeTransport) CancelRequest(*http.Request) { - t.startCancel <- struct{}{} -} - -type fakeAction struct{} - -func (a *fakeAction) HTTPRequest(url.URL) *http.Request { - return &http.Request{} -} - -func 
TestSimpleHTTPClientDoSuccess(t *testing.T) { - tr := newFakeTransport() - c := &simpleHTTPClient{transport: tr} - - tr.respchan <- &http.Response{ - StatusCode: http.StatusTeapot, - Body: ioutil.NopCloser(strings.NewReader("foo")), - } - - resp, body, err := c.Do(context.Background(), &fakeAction{}) - if err != nil { - t.Fatalf("incorrect error value: want=nil got=%v", err) - } - - wantCode := http.StatusTeapot - if wantCode != resp.StatusCode { - t.Fatalf("invalid response code: want=%d got=%d", wantCode, resp.StatusCode) - } - - wantBody := []byte("foo") - if !reflect.DeepEqual(wantBody, body) { - t.Fatalf("invalid response body: want=%q got=%q", wantBody, body) - } -} - -func TestSimpleHTTPClientDoError(t *testing.T) { - tr := newFakeTransport() - c := &simpleHTTPClient{transport: tr} - - tr.errchan <- errors.New("fixture") - - _, _, err := c.Do(context.Background(), &fakeAction{}) - if err == nil { - t.Fatalf("expected non-nil error, got nil") - } -} - -func TestSimpleHTTPClientDoCancelContext(t *testing.T) { - tr := newFakeTransport() - c := &simpleHTTPClient{transport: tr} - - tr.startCancel <- struct{}{} - tr.finishCancel <- struct{}{} - - _, _, err := c.Do(context.Background(), &fakeAction{}) - if err == nil { - t.Fatalf("expected non-nil error, got nil") - } -} - -type checkableReadCloser struct { - io.ReadCloser - closed bool -} - -func (c *checkableReadCloser) Close() error { - if !c.closed { - c.closed = true - return c.ReadCloser.Close() - } - return nil -} - -func TestSimpleHTTPClientDoCancelContextResponseBodyClosed(t *testing.T) { - tr := newFakeTransport() - c := &simpleHTTPClient{transport: tr} - - // create an already-cancelled context - ctx, cancel := context.WithCancel(context.Background()) - cancel() - - body := &checkableReadCloser{ReadCloser: ioutil.NopCloser(strings.NewReader("foo"))} - go func() { - // wait that simpleHTTPClient knows the context is already timed out, - // and calls CancelRequest - testutil.WaitSchedule() - - // response is returned before cancel effects - tr.respchan <- &http.Response{Body: body} - }() - - _, _, err := c.Do(ctx, &fakeAction{}) - if err == nil { - t.Fatalf("expected non-nil error, got nil") - } - - if !body.closed { - t.Fatalf("expected closed body") - } -} - -type blockingBody struct { - c chan struct{} -} - -func (bb *blockingBody) Read(p []byte) (n int, err error) { - <-bb.c - return 0, errors.New("closed") -} - -func (bb *blockingBody) Close() error { - close(bb.c) - return nil -} - -func TestSimpleHTTPClientDoCancelContextResponseBodyClosedWithBlockingBody(t *testing.T) { - tr := newFakeTransport() - c := &simpleHTTPClient{transport: tr} - - ctx, cancel := context.WithCancel(context.Background()) - body := &checkableReadCloser{ReadCloser: &blockingBody{c: make(chan struct{})}} - go func() { - tr.respchan <- &http.Response{Body: body} - time.Sleep(2 * time.Millisecond) - // cancel after the body is received - cancel() - }() - - _, _, err := c.Do(ctx, &fakeAction{}) - if err != context.Canceled { - t.Fatalf("expected %+v, got %+v", context.Canceled, err) - } - - if !body.closed { - t.Fatalf("expected closed body") - } -} - -func TestSimpleHTTPClientDoCancelContextWaitForRoundTrip(t *testing.T) { - tr := newFakeTransport() - c := &simpleHTTPClient{transport: tr} - - donechan := make(chan struct{}) - ctx, cancel := context.WithCancel(context.Background()) - go func() { - c.Do(ctx, &fakeAction{}) - close(donechan) - }() - - // This should call CancelRequest and begin the cancellation process - cancel() - - select { - case 
<-donechan: - t.Fatalf("simpleHTTPClient.Do should not have exited yet") - default: - } - - tr.finishCancel <- struct{}{} - - select { - case <-donechan: - //expected behavior - return - case <-time.After(time.Second): - t.Fatalf("simpleHTTPClient.Do did not exit within 1s") - } -} - -func TestSimpleHTTPClientDoHeaderTimeout(t *testing.T) { - tr := newFakeTransport() - tr.finishCancel <- struct{}{} - c := &simpleHTTPClient{transport: tr, headerTimeout: time.Millisecond} - - errc := make(chan error) - go func() { - _, _, err := c.Do(context.Background(), &fakeAction{}) - errc <- err - }() - - select { - case err := <-errc: - if err == nil { - t.Fatalf("expected non-nil error, got nil") - } - case <-time.After(time.Second): - t.Fatalf("unexpected timeout when waitting for the test to finish") - } -} - -func TestHTTPClusterClientDo(t *testing.T) { - fakeErr := errors.New("fake!") - fakeURL := url.URL{} - tests := []struct { - client *httpClusterClient - wantCode int - wantErr error - wantPinned int - }{ - // first good response short-circuits Do - { - client: &httpClusterClient{ - endpoints: []url.URL{fakeURL, fakeURL}, - clientFactory: newStaticHTTPClientFactory( - []staticHTTPResponse{ - {resp: http.Response{StatusCode: http.StatusTeapot}}, - {err: fakeErr}, - }, - ), - rand: rand.New(rand.NewSource(0)), - }, - wantCode: http.StatusTeapot, - }, - - // fall through to good endpoint if err is arbitrary - { - client: &httpClusterClient{ - endpoints: []url.URL{fakeURL, fakeURL}, - clientFactory: newStaticHTTPClientFactory( - []staticHTTPResponse{ - {err: fakeErr}, - {resp: http.Response{StatusCode: http.StatusTeapot}}, - }, - ), - rand: rand.New(rand.NewSource(0)), - }, - wantCode: http.StatusTeapot, - wantPinned: 1, - }, - - // context.Canceled short-circuits Do - { - client: &httpClusterClient{ - endpoints: []url.URL{fakeURL, fakeURL}, - clientFactory: newStaticHTTPClientFactory( - []staticHTTPResponse{ - {err: context.Canceled}, - {resp: http.Response{StatusCode: http.StatusTeapot}}, - }, - ), - rand: rand.New(rand.NewSource(0)), - }, - wantErr: context.Canceled, - }, - - // return err if there are no endpoints - { - client: &httpClusterClient{ - endpoints: []url.URL{}, - clientFactory: newHTTPClientFactory(nil, nil, 0), - rand: rand.New(rand.NewSource(0)), - }, - wantErr: ErrNoEndpoints, - }, - - // return err if all endpoints return arbitrary errors - { - client: &httpClusterClient{ - endpoints: []url.URL{fakeURL, fakeURL}, - clientFactory: newStaticHTTPClientFactory( - []staticHTTPResponse{ - {err: fakeErr}, - {err: fakeErr}, - }, - ), - rand: rand.New(rand.NewSource(0)), - }, - wantErr: &ClusterError{Errors: []error{fakeErr, fakeErr}}, - }, - - // 500-level errors cause Do to fallthrough to next endpoint - { - client: &httpClusterClient{ - endpoints: []url.URL{fakeURL, fakeURL}, - clientFactory: newStaticHTTPClientFactory( - []staticHTTPResponse{ - {resp: http.Response{StatusCode: http.StatusBadGateway}}, - {resp: http.Response{StatusCode: http.StatusTeapot}}, - }, - ), - rand: rand.New(rand.NewSource(0)), - }, - wantCode: http.StatusTeapot, - wantPinned: 1, - }, - } - - for i, tt := range tests { - resp, _, err := tt.client.Do(context.Background(), nil) - if !reflect.DeepEqual(tt.wantErr, err) { - t.Errorf("#%d: got err=%v, want=%v", i, err, tt.wantErr) - continue - } - - if resp == nil { - if tt.wantCode != 0 { - t.Errorf("#%d: resp is nil, want=%d", i, tt.wantCode) - } - continue - } - - if resp.StatusCode != tt.wantCode { - t.Errorf("#%d: resp code=%d, want=%d", i, resp.StatusCode, 
tt.wantCode) - continue - } - - if tt.client.pinned != tt.wantPinned { - t.Errorf("#%d: pinned=%d, want=%d", i, tt.client.pinned, tt.wantPinned) - } - } -} - -func TestHTTPClusterClientDoDeadlineExceedContext(t *testing.T) { - fakeURL := url.URL{} - tr := newFakeTransport() - tr.finishCancel <- struct{}{} - c := &httpClusterClient{ - clientFactory: newHTTPClientFactory(tr, DefaultCheckRedirect, 0), - endpoints: []url.URL{fakeURL}, - } - - errc := make(chan error) - go func() { - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) - defer cancel() - _, _, err := c.Do(ctx, &fakeAction{}) - errc <- err - }() - - select { - case err := <-errc: - if err != context.DeadlineExceeded { - t.Errorf("err = %+v, want %+v", err, context.DeadlineExceeded) - } - case <-time.After(time.Second): - t.Fatalf("unexpected timeout when waitting for request to deadline exceed") - } -} - -func TestRedirectedHTTPAction(t *testing.T) { - act := &redirectedHTTPAction{ - action: &staticHTTPAction{ - request: http.Request{ - Method: "DELETE", - URL: &url.URL{ - Scheme: "https", - Host: "foo.example.com", - Path: "/ping", - }, - }, - }, - location: url.URL{ - Scheme: "https", - Host: "bar.example.com", - Path: "/pong", - }, - } - - want := &http.Request{ - Method: "DELETE", - URL: &url.URL{ - Scheme: "https", - Host: "bar.example.com", - Path: "/pong", - }, - } - got := act.HTTPRequest(url.URL{Scheme: "http", Host: "baz.example.com", Path: "/pang"}) - - if !reflect.DeepEqual(want, got) { - t.Fatalf("HTTPRequest is %#v, want %#v", want, got) - } -} - -func TestRedirectFollowingHTTPClient(t *testing.T) { - tests := []struct { - checkRedirect CheckRedirectFunc - client httpClient - wantCode int - wantErr error - }{ - // errors bubbled up - { - checkRedirect: func(int) error { return ErrTooManyRedirects }, - client: &multiStaticHTTPClient{ - responses: []staticHTTPResponse{ - { - err: errors.New("fail!"), - }, - }, - }, - wantErr: errors.New("fail!"), - }, - - // no need to follow redirect if none given - { - checkRedirect: func(int) error { return ErrTooManyRedirects }, - client: &multiStaticHTTPClient{ - responses: []staticHTTPResponse{ - { - resp: http.Response{ - StatusCode: http.StatusTeapot, - }, - }, - }, - }, - wantCode: http.StatusTeapot, - }, - - // redirects if less than max - { - checkRedirect: func(via int) error { - if via >= 2 { - return ErrTooManyRedirects - } - return nil - }, - client: &multiStaticHTTPClient{ - responses: []staticHTTPResponse{ - { - resp: http.Response{ - StatusCode: http.StatusTemporaryRedirect, - Header: http.Header{"Location": []string{"http://example.com"}}, - }, - }, - { - resp: http.Response{ - StatusCode: http.StatusTeapot, - }, - }, - }, - }, - wantCode: http.StatusTeapot, - }, - - // succeed after reaching max redirects - { - checkRedirect: func(via int) error { - if via >= 3 { - return ErrTooManyRedirects - } - return nil - }, - client: &multiStaticHTTPClient{ - responses: []staticHTTPResponse{ - { - resp: http.Response{ - StatusCode: http.StatusTemporaryRedirect, - Header: http.Header{"Location": []string{"http://example.com"}}, - }, - }, - { - resp: http.Response{ - StatusCode: http.StatusTemporaryRedirect, - Header: http.Header{"Location": []string{"http://example.com"}}, - }, - }, - { - resp: http.Response{ - StatusCode: http.StatusTeapot, - }, - }, - }, - }, - wantCode: http.StatusTeapot, - }, - - // fail if too many redirects - { - checkRedirect: func(via int) error { - if via >= 2 { - return ErrTooManyRedirects - } - return nil - }, - client: 
&multiStaticHTTPClient{ - responses: []staticHTTPResponse{ - { - resp: http.Response{ - StatusCode: http.StatusTemporaryRedirect, - Header: http.Header{"Location": []string{"http://example.com"}}, - }, - }, - { - resp: http.Response{ - StatusCode: http.StatusTemporaryRedirect, - Header: http.Header{"Location": []string{"http://example.com"}}, - }, - }, - { - resp: http.Response{ - StatusCode: http.StatusTeapot, - }, - }, - }, - }, - wantErr: ErrTooManyRedirects, - }, - - // fail if Location header not set - { - checkRedirect: func(int) error { return ErrTooManyRedirects }, - client: &multiStaticHTTPClient{ - responses: []staticHTTPResponse{ - { - resp: http.Response{ - StatusCode: http.StatusTemporaryRedirect, - }, - }, - }, - }, - wantErr: errors.New("Location header not set"), - }, - - // fail if Location header is invalid - { - checkRedirect: func(int) error { return ErrTooManyRedirects }, - client: &multiStaticHTTPClient{ - responses: []staticHTTPResponse{ - { - resp: http.Response{ - StatusCode: http.StatusTemporaryRedirect, - Header: http.Header{"Location": []string{":"}}, - }, - }, - }, - }, - wantErr: errors.New("Location header not valid URL: :"), - }, - - // fail if redirects checked way too many times - { - checkRedirect: func(int) error { return nil }, - client: &staticHTTPClient{ - resp: http.Response{ - StatusCode: http.StatusTemporaryRedirect, - Header: http.Header{"Location": []string{"http://example.com"}}, - }, - }, - wantErr: errTooManyRedirectChecks, - }, - } - - for i, tt := range tests { - client := &redirectFollowingHTTPClient{client: tt.client, checkRedirect: tt.checkRedirect} - resp, _, err := client.Do(context.Background(), nil) - if !reflect.DeepEqual(tt.wantErr, err) { - t.Errorf("#%d: got err=%v, want=%v", i, err, tt.wantErr) - continue - } - - if resp == nil { - if tt.wantCode != 0 { - t.Errorf("#%d: resp is nil, want=%d", i, tt.wantCode) - } - continue - } - - if resp.StatusCode != tt.wantCode { - t.Errorf("#%d: resp code=%d, want=%d", i, resp.StatusCode, tt.wantCode) - continue - } - } -} - -func TestDefaultCheckRedirect(t *testing.T) { - tests := []struct { - num int - err error - }{ - {0, nil}, - {5, nil}, - {10, nil}, - {11, ErrTooManyRedirects}, - {29, ErrTooManyRedirects}, - } - - for i, tt := range tests { - err := DefaultCheckRedirect(tt.num) - if !reflect.DeepEqual(tt.err, err) { - t.Errorf("#%d: want=%#v got=%#v", i, tt.err, err) - } - } -} - -func TestHTTPClusterClientSync(t *testing.T) { - cf := newStaticHTTPClientFactory([]staticHTTPResponse{ - { - resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}}, - body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`), - }, - }) - - hc := &httpClusterClient{ - clientFactory: cf, - rand: rand.New(rand.NewSource(0)), - } - err := hc.reset([]string{"http://127.0.0.1:2379"}) - if err != nil { - t.Fatalf("unexpected error during setup: %#v", err) - } - - want := []string{"http://127.0.0.1:2379"} - got := hc.Endpoints() - if !reflect.DeepEqual(want, got) { - t.Fatalf("incorrect endpoints: want=%#v got=%#v", want, got) - } - - err = hc.Sync(context.Background()) - if err != nil { - 
t.Fatalf("unexpected error during Sync: %#v", err) - } - - want = []string{"http://127.0.0.1:2379", "http://127.0.0.1:4001", "http://127.0.0.1:4002", "http://127.0.0.1:4003"} - got = hc.Endpoints() - sort.Sort(sort.StringSlice(got)) - if !reflect.DeepEqual(want, got) { - t.Fatalf("incorrect endpoints post-Sync: want=%#v got=%#v", want, got) - } - - err = hc.reset([]string{"http://127.0.0.1:4009"}) - if err != nil { - t.Fatalf("unexpected error during reset: %#v", err) - } - - want = []string{"http://127.0.0.1:4009"} - got = hc.Endpoints() - if !reflect.DeepEqual(want, got) { - t.Fatalf("incorrect endpoints post-reset: want=%#v got=%#v", want, got) - } -} - -func TestHTTPClusterClientSyncFail(t *testing.T) { - cf := newStaticHTTPClientFactory([]staticHTTPResponse{ - {err: errors.New("fail!")}, - }) - - hc := &httpClusterClient{ - clientFactory: cf, - rand: rand.New(rand.NewSource(0)), - } - err := hc.reset([]string{"http://127.0.0.1:2379"}) - if err != nil { - t.Fatalf("unexpected error during setup: %#v", err) - } - - want := []string{"http://127.0.0.1:2379"} - got := hc.Endpoints() - if !reflect.DeepEqual(want, got) { - t.Fatalf("incorrect endpoints: want=%#v got=%#v", want, got) - } - - err = hc.Sync(context.Background()) - if err == nil { - t.Fatalf("got nil error during Sync") - } - - got = hc.Endpoints() - if !reflect.DeepEqual(want, got) { - t.Fatalf("incorrect endpoints after failed Sync: want=%#v got=%#v", want, got) - } -} - -func TestHTTPClusterClientAutoSyncCancelContext(t *testing.T) { - cf := newStaticHTTPClientFactory([]staticHTTPResponse{ - { - resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}}, - body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`), - }, - }) - - hc := &httpClusterClient{ - clientFactory: cf, - rand: rand.New(rand.NewSource(0)), - } - err := hc.reset([]string{"http://127.0.0.1:2379"}) - if err != nil { - t.Fatalf("unexpected error during setup: %#v", err) - } - ctx, cancel := context.WithCancel(context.Background()) - cancel() - - err = hc.AutoSync(ctx, time.Hour) - if err != context.Canceled { - t.Fatalf("incorrect error value: want=%v got=%v", context.Canceled, err) - } -} - -func TestHTTPClusterClientAutoSyncFail(t *testing.T) { - cf := newStaticHTTPClientFactory([]staticHTTPResponse{ - {err: errors.New("fail!")}, - }) - - hc := &httpClusterClient{ - clientFactory: cf, - rand: rand.New(rand.NewSource(0)), - } - err := hc.reset([]string{"http://127.0.0.1:2379"}) - if err != nil { - t.Fatalf("unexpected error during setup: %#v", err) - } - - err = hc.AutoSync(context.Background(), time.Hour) - if err.Error() != ErrClusterUnavailable.Error() { - t.Fatalf("incorrect error value: want=%v got=%v", ErrClusterUnavailable, err) - } -} - -// TestHTTPClusterClientSyncPinEndpoint tests that Sync() pins the endpoint when -// it gets the exactly same member list as before. 
-func TestHTTPClusterClientSyncPinEndpoint(t *testing.T) { - cf := newStaticHTTPClientFactory([]staticHTTPResponse{ - { - resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}}, - body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`), - }, - { - resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}}, - body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`), - }, - { - resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}}, - body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`), - }, - }) - - hc := &httpClusterClient{ - clientFactory: cf, - rand: rand.New(rand.NewSource(0)), - } - err := hc.reset([]string{"http://127.0.0.1:4003", "http://127.0.0.1:2379", "http://127.0.0.1:4001", "http://127.0.0.1:4002"}) - if err != nil { - t.Fatalf("unexpected error during setup: %#v", err) - } - pinnedEndpoint := hc.endpoints[hc.pinned] - - for i := 0; i < 3; i++ { - err = hc.Sync(context.Background()) - if err != nil { - t.Fatalf("#%d: unexpected error during Sync: %#v", i, err) - } - - if g := hc.endpoints[hc.pinned]; g != pinnedEndpoint { - t.Errorf("#%d: pinned endpoint = %s, want %s", i, g, pinnedEndpoint) - } - } -} - -func TestHTTPClusterClientResetFail(t *testing.T) { - tests := [][]string{ - // need at least one endpoint - {}, - - // urls must be valid - {":"}, - } - - for i, tt := range tests { - hc := &httpClusterClient{rand: rand.New(rand.NewSource(0))} - err := hc.reset(tt) - if err == nil { - t.Errorf("#%d: expected non-nil error", i) - } - } -} - -func TestHTTPClusterClientResetPinRandom(t *testing.T) { - round := 2000 - pinNum := 0 - for i := 0; i < round; i++ { - hc := &httpClusterClient{rand: rand.New(rand.NewSource(int64(i)))} - err := hc.reset([]string{"http://127.0.0.1:4001", "http://127.0.0.1:4002", "http://127.0.0.1:4003"}) - if err != nil { - t.Fatalf("#%d: reset error (%v)", i, err) - } - if hc.endpoints[hc.pinned].String() == "http://127.0.0.1:4001" { - pinNum++ - } - } - - min := 1.0/3.0 - 0.05 - max := 1.0/3.0 + 0.05 - if ratio := float64(pinNum) / float64(round); ratio > max || ratio < min { - t.Errorf("pinned ratio = %v, want [%v, %v]", ratio, min, max) - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/client/fake_transport_go14_test.go 
b/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/client/fake_transport_go14_test.go deleted file mode 100644 index 4a99a7d374..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/client/fake_transport_go14_test.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !go1.5 - -package client - -import ( - "errors" - "net/http" -) - -func (t *fakeTransport) RoundTrip(req *http.Request) (*http.Response, error) { - select { - case resp := <-t.respchan: - return resp, nil - case err := <-t.errchan: - return nil, err - case <-t.startCancel: - select { - // this simulates that the request is finished before cancel effects - case resp := <-t.respchan: - return resp, nil - // wait on finishCancel to simulate taking some amount of - // time while calling CancelRequest - case <-t.finishCancel: - return nil, errors.New("cancelled") - } - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/client/fake_transport_test.go b/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/client/fake_transport_test.go deleted file mode 100644 index 06761e2668..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/client/fake_transport_test.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// +build go1.5 - -package client - -import ( - "errors" - "net/http" -) - -func (t *fakeTransport) RoundTrip(req *http.Request) (*http.Response, error) { - select { - case resp := <-t.respchan: - return resp, nil - case err := <-t.errchan: - return nil, err - case <-t.startCancel: - case <-req.Cancel: - } - select { - // this simulates that the request is finished before cancel effects - case resp := <-t.respchan: - return resp, nil - // wait on finishCancel to simulate taking some amount of - // time while calling CancelRequest - case <-t.finishCancel: - return nil, errors.New("cancelled") - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/client/keys_bench_test.go b/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/client/keys_bench_test.go deleted file mode 100644 index d871650117..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/client/keys_bench_test.go +++ /dev/null @@ -1,73 +0,0 @@ -package client - -import ( - "encoding/json" - "net/http" - "reflect" - "strings" - "testing" -) - -func createTestNode(size int) *Node { - return &Node{ - Key: strings.Repeat("a", 30), - Value: strings.Repeat("a", size), - CreatedIndex: 123456, - ModifiedIndex: 123456, - TTL: 123456789, - } -} - -func createTestNodeWithChildren(children, size int) *Node { - node := createTestNode(size) - for i := 0; i < children; i++ { - node.Nodes = append(node.Nodes, createTestNode(size)) - } - return node -} - -func createTestResponse(children, size int) *Response { - return &Response{ - Action: "aaaaa", - Node: createTestNodeWithChildren(children, size), - PrevNode: nil, - } -} - -func benchmarkResponseUnmarshalling(b *testing.B, children, size int) { - header := http.Header{} - header.Add("X-Etcd-Index", "123456") - response := createTestResponse(children, size) - body, err := json.Marshal(response) - if err != nil { - b.Fatal(err) - } - - b.ResetTimer() - newResponse := new(Response) - for i := 0; i < b.N; i++ { - if newResponse, err = unmarshalSuccessfulKeysResponse(header, body); err != nil { - b.Errorf("error unmarshaling response (%v)", err) - } - - } - if !reflect.DeepEqual(response.Node, newResponse.Node) { - b.Errorf("Unexpected difference in a parsed response: \n%+v\n%+v", response, newResponse) - } -} - -func BenchmarkSmallResponseUnmarshal(b *testing.B) { - benchmarkResponseUnmarshalling(b, 30, 20) -} - -func BenchmarkManySmallResponseUnmarshal(b *testing.B) { - benchmarkResponseUnmarshalling(b, 3000, 20) -} - -func BenchmarkMediumResponseUnmarshal(b *testing.B) { - benchmarkResponseUnmarshalling(b, 300, 200) -} - -func BenchmarkLargeResponseUnmarshal(b *testing.B) { - benchmarkResponseUnmarshalling(b, 3000, 2000) -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/client/keys_test.go b/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/client/keys_test.go deleted file mode 100644 index 80d7a554c0..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/client/keys_test.go +++ /dev/null @@ -1,1407 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "errors" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "reflect" - "testing" - "time" - - "golang.org/x/net/context" -) - -func TestV2KeysURLHelper(t *testing.T) { - tests := []struct { - endpoint url.URL - prefix string - key string - want url.URL - }{ - // key is empty, no problem - { - endpoint: url.URL{Scheme: "http", Host: "example.com", Path: "/v2/keys"}, - prefix: "", - key: "", - want: url.URL{Scheme: "http", Host: "example.com", Path: "/v2/keys"}, - }, - - // key is joined to path - { - endpoint: url.URL{Scheme: "http", Host: "example.com", Path: "/v2/keys"}, - prefix: "", - key: "/foo/bar", - want: url.URL{Scheme: "http", Host: "example.com", Path: "/v2/keys/foo/bar"}, - }, - - // key is joined to path when path is empty - { - endpoint: url.URL{Scheme: "http", Host: "example.com", Path: ""}, - prefix: "", - key: "/foo/bar", - want: url.URL{Scheme: "http", Host: "example.com", Path: "/foo/bar"}, - }, - - // Host field carries through with port - { - endpoint: url.URL{Scheme: "http", Host: "example.com:8080", Path: "/v2/keys"}, - prefix: "", - key: "", - want: url.URL{Scheme: "http", Host: "example.com:8080", Path: "/v2/keys"}, - }, - - // Scheme carries through - { - endpoint: url.URL{Scheme: "https", Host: "example.com", Path: "/v2/keys"}, - prefix: "", - key: "", - want: url.URL{Scheme: "https", Host: "example.com", Path: "/v2/keys"}, - }, - // Prefix is applied - { - endpoint: url.URL{Scheme: "https", Host: "example.com", Path: "/foo"}, - prefix: "/bar", - key: "/baz", - want: url.URL{Scheme: "https", Host: "example.com", Path: "/foo/bar/baz"}, - }, - // Prefix is joined to path - { - endpoint: url.URL{Scheme: "https", Host: "example.com", Path: "/foo"}, - prefix: "/bar", - key: "", - want: url.URL{Scheme: "https", Host: "example.com", Path: "/foo/bar"}, - }, - // Keep trailing slash - { - endpoint: url.URL{Scheme: "https", Host: "example.com", Path: "/foo"}, - prefix: "/bar", - key: "/baz/", - want: url.URL{Scheme: "https", Host: "example.com", Path: "/foo/bar/baz/"}, - }, - } - - for i, tt := range tests { - got := v2KeysURL(tt.endpoint, tt.prefix, tt.key) - if tt.want != *got { - t.Errorf("#%d: want=%#v, got=%#v", i, tt.want, *got) - } - } -} - -func TestGetAction(t *testing.T) { - ep := url.URL{Scheme: "http", Host: "example.com", Path: "/v2/keys"} - baseWantURL := &url.URL{ - Scheme: "http", - Host: "example.com", - Path: "/v2/keys/foo/bar", - } - wantHeader := http.Header{} - - tests := []struct { - recursive bool - sorted bool - quorum bool - wantQuery string - }{ - { - recursive: false, - sorted: false, - quorum: false, - wantQuery: "quorum=false&recursive=false&sorted=false", - }, - { - recursive: true, - sorted: false, - quorum: false, - wantQuery: "quorum=false&recursive=true&sorted=false", - }, - { - recursive: false, - sorted: true, - quorum: false, - wantQuery: "quorum=false&recursive=false&sorted=true", - }, - { - recursive: true, - sorted: true, - quorum: false, - wantQuery: "quorum=false&recursive=true&sorted=true", - }, - { - recursive: false, - sorted: false, - quorum: true, - wantQuery: "quorum=true&recursive=false&sorted=false", - }, - } - - for i, tt := range tests { - f := getAction{ - Key: "/foo/bar", - Recursive: tt.recursive, - Sorted: tt.sorted, - Quorum: tt.quorum, - } - got := *f.HTTPRequest(ep) - - wantURL := baseWantURL - wantURL.RawQuery = tt.wantQuery - - err := assertRequest(got, "GET", wantURL, 
wantHeader, nil) - if err != nil { - t.Errorf("#%d: %v", i, err) - } - } -} - -func TestWaitAction(t *testing.T) { - ep := url.URL{Scheme: "http", Host: "example.com", Path: "/v2/keys"} - baseWantURL := &url.URL{ - Scheme: "http", - Host: "example.com", - Path: "/v2/keys/foo/bar", - } - wantHeader := http.Header{} - - tests := []struct { - waitIndex uint64 - recursive bool - wantQuery string - }{ - { - recursive: false, - waitIndex: uint64(0), - wantQuery: "recursive=false&wait=true&waitIndex=0", - }, - { - recursive: false, - waitIndex: uint64(12), - wantQuery: "recursive=false&wait=true&waitIndex=12", - }, - { - recursive: true, - waitIndex: uint64(12), - wantQuery: "recursive=true&wait=true&waitIndex=12", - }, - } - - for i, tt := range tests { - f := waitAction{ - Key: "/foo/bar", - WaitIndex: tt.waitIndex, - Recursive: tt.recursive, - } - got := *f.HTTPRequest(ep) - - wantURL := baseWantURL - wantURL.RawQuery = tt.wantQuery - - err := assertRequest(got, "GET", wantURL, wantHeader, nil) - if err != nil { - t.Errorf("#%d: unexpected error: %#v", i, err) - } - } -} - -func TestSetAction(t *testing.T) { - wantHeader := http.Header(map[string][]string{ - "Content-Type": {"application/x-www-form-urlencoded"}, - }) - - tests := []struct { - act setAction - wantURL string - wantBody string - }{ - // default prefix - { - act: setAction{ - Prefix: defaultV2KeysPrefix, - Key: "foo", - }, - wantURL: "http://example.com/v2/keys/foo", - wantBody: "value=", - }, - - // non-default prefix - { - act: setAction{ - Prefix: "/pfx", - Key: "foo", - }, - wantURL: "http://example.com/pfx/foo", - wantBody: "value=", - }, - - // no prefix - { - act: setAction{ - Key: "foo", - }, - wantURL: "http://example.com/foo", - wantBody: "value=", - }, - - // Key with path separators - { - act: setAction{ - Prefix: defaultV2KeysPrefix, - Key: "foo/bar/baz", - }, - wantURL: "http://example.com/v2/keys/foo/bar/baz", - wantBody: "value=", - }, - - // Key with leading slash, Prefix with trailing slash - { - act: setAction{ - Prefix: "/foo/", - Key: "/bar", - }, - wantURL: "http://example.com/foo/bar", - wantBody: "value=", - }, - - // Key with trailing slash - { - act: setAction{ - Key: "/foo/", - }, - wantURL: "http://example.com/foo/", - wantBody: "value=", - }, - - // Value is set - { - act: setAction{ - Key: "foo", - Value: "baz", - }, - wantURL: "http://example.com/foo", - wantBody: "value=baz", - }, - - // PrevExist set, but still ignored - { - act: setAction{ - Key: "foo", - PrevExist: PrevIgnore, - }, - wantURL: "http://example.com/foo", - wantBody: "value=", - }, - - // PrevExist set to true - { - act: setAction{ - Key: "foo", - PrevExist: PrevExist, - }, - wantURL: "http://example.com/foo?prevExist=true", - wantBody: "value=", - }, - - // PrevExist set to false - { - act: setAction{ - Key: "foo", - PrevExist: PrevNoExist, - }, - wantURL: "http://example.com/foo?prevExist=false", - wantBody: "value=", - }, - - // PrevValue is urlencoded - { - act: setAction{ - Key: "foo", - PrevValue: "bar baz", - }, - wantURL: "http://example.com/foo?prevValue=bar+baz", - wantBody: "value=", - }, - - // PrevIndex is set - { - act: setAction{ - Key: "foo", - PrevIndex: uint64(12), - }, - wantURL: "http://example.com/foo?prevIndex=12", - wantBody: "value=", - }, - - // TTL is set - { - act: setAction{ - Key: "foo", - TTL: 3 * time.Minute, - }, - wantURL: "http://example.com/foo", - wantBody: "ttl=180&value=", - }, - // Dir is set - { - act: setAction{ - Key: "foo", - Dir: true, - }, - wantURL: "http://example.com/foo?dir=true", - 
wantBody: "", - }, - // Dir is set with a value - { - act: setAction{ - Key: "foo", - Value: "bar", - Dir: true, - }, - wantURL: "http://example.com/foo?dir=true", - wantBody: "", - }, - // Dir is set with PrevExist set to true - { - act: setAction{ - Key: "foo", - PrevExist: PrevExist, - Dir: true, - }, - wantURL: "http://example.com/foo?dir=true&prevExist=true", - wantBody: "", - }, - // Dir is set with PrevValue - { - act: setAction{ - Key: "foo", - PrevValue: "bar", - Dir: true, - }, - wantURL: "http://example.com/foo?dir=true", - wantBody: "", - }, - } - - for i, tt := range tests { - u, err := url.Parse(tt.wantURL) - if err != nil { - t.Errorf("#%d: unable to use wantURL fixture: %v", i, err) - } - - got := tt.act.HTTPRequest(url.URL{Scheme: "http", Host: "example.com"}) - if err := assertRequest(*got, "PUT", u, wantHeader, []byte(tt.wantBody)); err != nil { - t.Errorf("#%d: %v", i, err) - } - } -} - -func TestCreateInOrderAction(t *testing.T) { - wantHeader := http.Header(map[string][]string{ - "Content-Type": {"application/x-www-form-urlencoded"}, - }) - - tests := []struct { - act createInOrderAction - wantURL string - wantBody string - }{ - // default prefix - { - act: createInOrderAction{ - Prefix: defaultV2KeysPrefix, - Dir: "foo", - }, - wantURL: "http://example.com/v2/keys/foo", - wantBody: "value=", - }, - - // non-default prefix - { - act: createInOrderAction{ - Prefix: "/pfx", - Dir: "foo", - }, - wantURL: "http://example.com/pfx/foo", - wantBody: "value=", - }, - - // no prefix - { - act: createInOrderAction{ - Dir: "foo", - }, - wantURL: "http://example.com/foo", - wantBody: "value=", - }, - - // Key with path separators - { - act: createInOrderAction{ - Prefix: defaultV2KeysPrefix, - Dir: "foo/bar/baz", - }, - wantURL: "http://example.com/v2/keys/foo/bar/baz", - wantBody: "value=", - }, - - // Key with leading slash, Prefix with trailing slash - { - act: createInOrderAction{ - Prefix: "/foo/", - Dir: "/bar", - }, - wantURL: "http://example.com/foo/bar", - wantBody: "value=", - }, - - // Key with trailing slash - { - act: createInOrderAction{ - Dir: "/foo/", - }, - wantURL: "http://example.com/foo/", - wantBody: "value=", - }, - - // Value is set - { - act: createInOrderAction{ - Dir: "foo", - Value: "baz", - }, - wantURL: "http://example.com/foo", - wantBody: "value=baz", - }, - // TTL is set - { - act: createInOrderAction{ - Dir: "foo", - TTL: 3 * time.Minute, - }, - wantURL: "http://example.com/foo", - wantBody: "ttl=180&value=", - }, - } - - for i, tt := range tests { - u, err := url.Parse(tt.wantURL) - if err != nil { - t.Errorf("#%d: unable to use wantURL fixture: %v", i, err) - } - - got := tt.act.HTTPRequest(url.URL{Scheme: "http", Host: "example.com"}) - if err := assertRequest(*got, "POST", u, wantHeader, []byte(tt.wantBody)); err != nil { - t.Errorf("#%d: %v", i, err) - } - } -} - -func TestDeleteAction(t *testing.T) { - wantHeader := http.Header(map[string][]string{ - "Content-Type": {"application/x-www-form-urlencoded"}, - }) - - tests := []struct { - act deleteAction - wantURL string - }{ - // default prefix - { - act: deleteAction{ - Prefix: defaultV2KeysPrefix, - Key: "foo", - }, - wantURL: "http://example.com/v2/keys/foo", - }, - - // non-default prefix - { - act: deleteAction{ - Prefix: "/pfx", - Key: "foo", - }, - wantURL: "http://example.com/pfx/foo", - }, - - // no prefix - { - act: deleteAction{ - Key: "foo", - }, - wantURL: "http://example.com/foo", - }, - - // Key with path separators - { - act: deleteAction{ - Prefix: defaultV2KeysPrefix, - Key: 
"foo/bar/baz", - }, - wantURL: "http://example.com/v2/keys/foo/bar/baz", - }, - - // Key with leading slash, Prefix with trailing slash - { - act: deleteAction{ - Prefix: "/foo/", - Key: "/bar", - }, - wantURL: "http://example.com/foo/bar", - }, - - // Key with trailing slash - { - act: deleteAction{ - Key: "/foo/", - }, - wantURL: "http://example.com/foo/", - }, - - // Recursive set to true - { - act: deleteAction{ - Key: "foo", - Recursive: true, - }, - wantURL: "http://example.com/foo?recursive=true", - }, - - // PrevValue is urlencoded - { - act: deleteAction{ - Key: "foo", - PrevValue: "bar baz", - }, - wantURL: "http://example.com/foo?prevValue=bar+baz", - }, - - // PrevIndex is set - { - act: deleteAction{ - Key: "foo", - PrevIndex: uint64(12), - }, - wantURL: "http://example.com/foo?prevIndex=12", - }, - } - - for i, tt := range tests { - u, err := url.Parse(tt.wantURL) - if err != nil { - t.Errorf("#%d: unable to use wantURL fixture: %v", i, err) - } - - got := tt.act.HTTPRequest(url.URL{Scheme: "http", Host: "example.com"}) - if err := assertRequest(*got, "DELETE", u, wantHeader, nil); err != nil { - t.Errorf("#%d: %v", i, err) - } - } -} - -func assertRequest(got http.Request, wantMethod string, wantURL *url.URL, wantHeader http.Header, wantBody []byte) error { - if wantMethod != got.Method { - return fmt.Errorf("want.Method=%#v got.Method=%#v", wantMethod, got.Method) - } - - if !reflect.DeepEqual(wantURL, got.URL) { - return fmt.Errorf("want.URL=%#v got.URL=%#v", wantURL, got.URL) - } - - if !reflect.DeepEqual(wantHeader, got.Header) { - return fmt.Errorf("want.Header=%#v got.Header=%#v", wantHeader, got.Header) - } - - if got.Body == nil { - if wantBody != nil { - return fmt.Errorf("want.Body=%v got.Body=%v", wantBody, got.Body) - } - } else { - if wantBody == nil { - return fmt.Errorf("want.Body=%v got.Body=%s", wantBody, got.Body) - } else { - gotBytes, err := ioutil.ReadAll(got.Body) - if err != nil { - return err - } - - if !reflect.DeepEqual(wantBody, gotBytes) { - return fmt.Errorf("want.Body=%s got.Body=%s", wantBody, gotBytes) - } - } - } - - return nil -} - -func TestUnmarshalSuccessfulResponse(t *testing.T) { - var expiration time.Time - expiration.UnmarshalText([]byte("2015-04-07T04:40:23.044979686Z")) - - tests := []struct { - hdr string - body string - wantRes *Response - wantErr bool - }{ - // Neither PrevNode or Node - { - hdr: "1", - body: `{"action":"delete"}`, - wantRes: &Response{Action: "delete", Index: 1}, - wantErr: false, - }, - - // PrevNode - { - hdr: "15", - body: `{"action":"delete", "prevNode": {"key": "/foo", "value": "bar", "modifiedIndex": 12, "createdIndex": 10}}`, - wantRes: &Response{ - Action: "delete", - Index: 15, - Node: nil, - PrevNode: &Node{ - Key: "/foo", - Value: "bar", - ModifiedIndex: 12, - CreatedIndex: 10, - }, - }, - wantErr: false, - }, - - // Node - { - hdr: "15", - body: `{"action":"get", "node": {"key": "/foo", "value": "bar", "modifiedIndex": 12, "createdIndex": 10, "ttl": 10, "expiration": "2015-04-07T04:40:23.044979686Z"}}`, - wantRes: &Response{ - Action: "get", - Index: 15, - Node: &Node{ - Key: "/foo", - Value: "bar", - ModifiedIndex: 12, - CreatedIndex: 10, - TTL: 10, - Expiration: &expiration, - }, - PrevNode: nil, - }, - wantErr: false, - }, - - // Node Dir - { - hdr: "15", - body: `{"action":"get", "node": {"key": "/foo", "dir": true, "modifiedIndex": 12, "createdIndex": 10}}`, - wantRes: &Response{ - Action: "get", - Index: 15, - Node: &Node{ - Key: "/foo", - Dir: true, - ModifiedIndex: 12, - CreatedIndex: 10, - 
}, - PrevNode: nil, - }, - wantErr: false, - }, - - // PrevNode and Node - { - hdr: "15", - body: `{"action":"update", "prevNode": {"key": "/foo", "value": "baz", "modifiedIndex": 10, "createdIndex": 10}, "node": {"key": "/foo", "value": "bar", "modifiedIndex": 12, "createdIndex": 10}}`, - wantRes: &Response{ - Action: "update", - Index: 15, - PrevNode: &Node{ - Key: "/foo", - Value: "baz", - ModifiedIndex: 10, - CreatedIndex: 10, - }, - Node: &Node{ - Key: "/foo", - Value: "bar", - ModifiedIndex: 12, - CreatedIndex: 10, - }, - }, - wantErr: false, - }, - - // Garbage in body - { - hdr: "", - body: `garbage`, - wantRes: nil, - wantErr: true, - }, - - // non-integer index - { - hdr: "poo", - body: `{}`, - wantRes: nil, - wantErr: true, - }, - } - - for i, tt := range tests { - h := make(http.Header) - h.Add("X-Etcd-Index", tt.hdr) - res, err := unmarshalSuccessfulKeysResponse(h, []byte(tt.body)) - if tt.wantErr != (err != nil) { - t.Errorf("#%d: wantErr=%t, err=%v", i, tt.wantErr, err) - } - - if (res == nil) != (tt.wantRes == nil) { - t.Errorf("#%d: received res=%#v, but expected res=%#v", i, res, tt.wantRes) - continue - } else if tt.wantRes == nil { - // expected and successfully got nil response - continue - } - - if res.Action != tt.wantRes.Action { - t.Errorf("#%d: Action=%s, expected %s", i, res.Action, tt.wantRes.Action) - } - if res.Index != tt.wantRes.Index { - t.Errorf("#%d: Index=%d, expected %d", i, res.Index, tt.wantRes.Index) - } - if !reflect.DeepEqual(res.Node, tt.wantRes.Node) { - t.Errorf("#%d: Node=%v, expected %v", i, res.Node, tt.wantRes.Node) - } - } -} - -func TestUnmarshalFailedKeysResponse(t *testing.T) { - body := []byte(`{"errorCode":100,"message":"Key not found","cause":"/foo","index":18}`) - - wantErr := Error{ - Code: 100, - Message: "Key not found", - Cause: "/foo", - Index: uint64(18), - } - - gotErr := unmarshalFailedKeysResponse(body) - if !reflect.DeepEqual(wantErr, gotErr) { - t.Errorf("unexpected error: want=%#v got=%#v", wantErr, gotErr) - } -} - -func TestUnmarshalFailedKeysResponseBadJSON(t *testing.T) { - err := unmarshalFailedKeysResponse([]byte(`{"er`)) - if err == nil { - t.Errorf("got nil error") - } else if _, ok := err.(Error); ok { - t.Errorf("error is of incorrect type *Error: %#v", err) - } -} - -func TestHTTPWatcherNextWaitAction(t *testing.T) { - initAction := waitAction{ - Prefix: "/pants", - Key: "/foo/bar", - Recursive: true, - WaitIndex: 19, - } - - client := &actionAssertingHTTPClient{ - t: t, - act: &initAction, - resp: http.Response{ - StatusCode: http.StatusOK, - Header: http.Header{"X-Etcd-Index": []string{"42"}}, - }, - body: []byte(`{"action":"update","node":{"key":"/pants/foo/bar/baz","value":"snarf","modifiedIndex":21,"createdIndex":19},"prevNode":{"key":"/pants/foo/bar/baz","value":"snazz","modifiedIndex":20,"createdIndex":19}}`), - } - - wantResponse := &Response{ - Action: "update", - Node: &Node{Key: "/pants/foo/bar/baz", Value: "snarf", CreatedIndex: uint64(19), ModifiedIndex: uint64(21)}, - PrevNode: &Node{Key: "/pants/foo/bar/baz", Value: "snazz", CreatedIndex: uint64(19), ModifiedIndex: uint64(20)}, - Index: uint64(42), - } - - wantNextWait := waitAction{ - Prefix: "/pants", - Key: "/foo/bar", - Recursive: true, - WaitIndex: 22, - } - - watcher := &httpWatcher{ - client: client, - nextWait: initAction, - } - - resp, err := watcher.Next(context.Background()) - if err != nil { - t.Errorf("non-nil error: %#v", err) - } - - if !reflect.DeepEqual(wantResponse, resp) { - t.Errorf("received incorrect Response: want=%#v 
got=%#v", wantResponse, resp) - } - - if !reflect.DeepEqual(wantNextWait, watcher.nextWait) { - t.Errorf("nextWait incorrect: want=%#v got=%#v", wantNextWait, watcher.nextWait) - } -} - -func TestHTTPWatcherNextFail(t *testing.T) { - tests := []httpClient{ - // generic HTTP client failure - &staticHTTPClient{ - err: errors.New("fail!"), - }, - - // unusable status code - &staticHTTPClient{ - resp: http.Response{ - StatusCode: http.StatusTeapot, - }, - }, - - // etcd Error response - &staticHTTPClient{ - resp: http.Response{ - StatusCode: http.StatusNotFound, - }, - body: []byte(`{"errorCode":100,"message":"Key not found","cause":"/foo","index":18}`), - }, - } - - for i, tt := range tests { - act := waitAction{ - Prefix: "/pants", - Key: "/foo/bar", - Recursive: true, - WaitIndex: 19, - } - - watcher := &httpWatcher{ - client: tt, - nextWait: act, - } - - resp, err := watcher.Next(context.Background()) - if err == nil { - t.Errorf("#%d: expected non-nil error", i) - } - if resp != nil { - t.Errorf("#%d: expected nil Response, got %#v", i, resp) - } - if !reflect.DeepEqual(act, watcher.nextWait) { - t.Errorf("#%d: nextWait changed: want=%#v got=%#v", i, act, watcher.nextWait) - } - } -} - -func TestHTTPKeysAPIWatcherAction(t *testing.T) { - tests := []struct { - key string - opts *WatcherOptions - want waitAction - }{ - { - key: "/foo", - opts: nil, - want: waitAction{ - Key: "/foo", - Recursive: false, - WaitIndex: 0, - }, - }, - - { - key: "/foo", - opts: &WatcherOptions{ - Recursive: false, - AfterIndex: 0, - }, - want: waitAction{ - Key: "/foo", - Recursive: false, - WaitIndex: 0, - }, - }, - - { - key: "/foo", - opts: &WatcherOptions{ - Recursive: true, - AfterIndex: 0, - }, - want: waitAction{ - Key: "/foo", - Recursive: true, - WaitIndex: 0, - }, - }, - - { - key: "/foo", - opts: &WatcherOptions{ - Recursive: false, - AfterIndex: 19, - }, - want: waitAction{ - Key: "/foo", - Recursive: false, - WaitIndex: 20, - }, - }, - } - - for i, tt := range tests { - kAPI := &httpKeysAPI{ - client: &staticHTTPClient{err: errors.New("fail!")}, - } - - want := &httpWatcher{ - client: &staticHTTPClient{err: errors.New("fail!")}, - nextWait: tt.want, - } - - got := kAPI.Watcher(tt.key, tt.opts) - if !reflect.DeepEqual(want, got) { - t.Errorf("#%d: incorrect watcher: want=%#v got=%#v", i, want, got) - } - } -} - -func TestHTTPKeysAPISetAction(t *testing.T) { - tests := []struct { - key string - value string - opts *SetOptions - wantAction httpAction - }{ - // nil SetOptions - { - key: "/foo", - value: "bar", - opts: nil, - wantAction: &setAction{ - Key: "/foo", - Value: "bar", - PrevValue: "", - PrevIndex: 0, - PrevExist: PrevIgnore, - TTL: 0, - }, - }, - // empty SetOptions - { - key: "/foo", - value: "bar", - opts: &SetOptions{}, - wantAction: &setAction{ - Key: "/foo", - Value: "bar", - PrevValue: "", - PrevIndex: 0, - PrevExist: PrevIgnore, - TTL: 0, - }, - }, - // populated SetOptions - { - key: "/foo", - value: "bar", - opts: &SetOptions{ - PrevValue: "baz", - PrevIndex: 13, - PrevExist: PrevExist, - TTL: time.Minute, - Dir: true, - }, - wantAction: &setAction{ - Key: "/foo", - Value: "bar", - PrevValue: "baz", - PrevIndex: 13, - PrevExist: PrevExist, - TTL: time.Minute, - Dir: true, - }, - }, - } - - for i, tt := range tests { - client := &actionAssertingHTTPClient{t: t, num: i, act: tt.wantAction} - kAPI := httpKeysAPI{client: client} - kAPI.Set(context.Background(), tt.key, tt.value, tt.opts) - } -} - -func TestHTTPKeysAPISetError(t *testing.T) { - tests := []httpClient{ - // generic HTTP 
client failure - &staticHTTPClient{ - err: errors.New("fail!"), - }, - - // unusable status code - &staticHTTPClient{ - resp: http.Response{ - StatusCode: http.StatusTeapot, - }, - }, - - // etcd Error response - &staticHTTPClient{ - resp: http.Response{ - StatusCode: http.StatusInternalServerError, - }, - body: []byte(`{"errorCode":300,"message":"Raft internal error","cause":"/foo","index":18}`), - }, - } - - for i, tt := range tests { - kAPI := httpKeysAPI{client: tt} - resp, err := kAPI.Set(context.Background(), "/foo", "bar", nil) - if err == nil { - t.Errorf("#%d: received nil error", i) - } - if resp != nil { - t.Errorf("#%d: received non-nil Response: %#v", i, resp) - } - } -} - -func TestHTTPKeysAPISetResponse(t *testing.T) { - client := &staticHTTPClient{ - resp: http.Response{ - StatusCode: http.StatusOK, - Header: http.Header{"X-Etcd-Index": []string{"21"}}, - }, - body: []byte(`{"action":"set","node":{"key":"/pants/foo/bar/baz","value":"snarf","modifiedIndex":21,"createdIndex":21},"prevNode":{"key":"/pants/foo/bar/baz","value":"snazz","modifiedIndex":20,"createdIndex":19}}`), - } - - wantResponse := &Response{ - Action: "set", - Node: &Node{Key: "/pants/foo/bar/baz", Value: "snarf", CreatedIndex: uint64(21), ModifiedIndex: uint64(21)}, - PrevNode: &Node{Key: "/pants/foo/bar/baz", Value: "snazz", CreatedIndex: uint64(19), ModifiedIndex: uint64(20)}, - Index: uint64(21), - } - - kAPI := &httpKeysAPI{client: client, prefix: "/pants"} - resp, err := kAPI.Set(context.Background(), "/foo/bar/baz", "snarf", nil) - if err != nil { - t.Errorf("non-nil error: %#v", err) - } - if !reflect.DeepEqual(wantResponse, resp) { - t.Errorf("incorrect Response: want=%#v got=%#v", wantResponse, resp) - } -} - -func TestHTTPKeysAPIGetAction(t *testing.T) { - tests := []struct { - key string - opts *GetOptions - wantAction httpAction - }{ - // nil GetOptions - { - key: "/foo", - opts: nil, - wantAction: &getAction{ - Key: "/foo", - Sorted: false, - Recursive: false, - }, - }, - // empty GetOptions - { - key: "/foo", - opts: &GetOptions{}, - wantAction: &getAction{ - Key: "/foo", - Sorted: false, - Recursive: false, - }, - }, - // populated GetOptions - { - key: "/foo", - opts: &GetOptions{ - Sort: true, - Recursive: true, - Quorum: true, - }, - wantAction: &getAction{ - Key: "/foo", - Sorted: true, - Recursive: true, - Quorum: true, - }, - }, - } - - for i, tt := range tests { - client := &actionAssertingHTTPClient{t: t, num: i, act: tt.wantAction} - kAPI := httpKeysAPI{client: client} - kAPI.Get(context.Background(), tt.key, tt.opts) - } -} - -func TestHTTPKeysAPIGetError(t *testing.T) { - tests := []httpClient{ - // generic HTTP client failure - &staticHTTPClient{ - err: errors.New("fail!"), - }, - - // unusable status code - &staticHTTPClient{ - resp: http.Response{ - StatusCode: http.StatusTeapot, - }, - }, - - // etcd Error response - &staticHTTPClient{ - resp: http.Response{ - StatusCode: http.StatusInternalServerError, - }, - body: []byte(`{"errorCode":300,"message":"Raft internal error","cause":"/foo","index":18}`), - }, - } - - for i, tt := range tests { - kAPI := httpKeysAPI{client: tt} - resp, err := kAPI.Get(context.Background(), "/foo", nil) - if err == nil { - t.Errorf("#%d: received nil error", i) - } - if resp != nil { - t.Errorf("#%d: received non-nil Response: %#v", i, resp) - } - } -} - -func TestHTTPKeysAPIGetResponse(t *testing.T) { - client := &staticHTTPClient{ - resp: http.Response{ - StatusCode: http.StatusOK, - Header: http.Header{"X-Etcd-Index": []string{"42"}}, - }, - body: 
[]byte(`{"action":"get","node":{"key":"/pants/foo/bar","modifiedIndex":25,"createdIndex":19,"nodes":[{"key":"/pants/foo/bar/baz","value":"snarf","createdIndex":21,"modifiedIndex":25}]}}`), - } - - wantResponse := &Response{ - Action: "get", - Node: &Node{ - Key: "/pants/foo/bar", - Nodes: []*Node{ - {Key: "/pants/foo/bar/baz", Value: "snarf", CreatedIndex: 21, ModifiedIndex: 25}, - }, - CreatedIndex: uint64(19), - ModifiedIndex: uint64(25), - }, - Index: uint64(42), - } - - kAPI := &httpKeysAPI{client: client, prefix: "/pants"} - resp, err := kAPI.Get(context.Background(), "/foo/bar", &GetOptions{Recursive: true}) - if err != nil { - t.Errorf("non-nil error: %#v", err) - } - if !reflect.DeepEqual(wantResponse, resp) { - t.Errorf("incorrect Response: want=%#v got=%#v", wantResponse, resp) - } -} - -func TestHTTPKeysAPIDeleteAction(t *testing.T) { - tests := []struct { - key string - value string - opts *DeleteOptions - wantAction httpAction - }{ - // nil DeleteOptions - { - key: "/foo", - opts: nil, - wantAction: &deleteAction{ - Key: "/foo", - PrevValue: "", - PrevIndex: 0, - Recursive: false, - }, - }, - // empty DeleteOptions - { - key: "/foo", - opts: &DeleteOptions{}, - wantAction: &deleteAction{ - Key: "/foo", - PrevValue: "", - PrevIndex: 0, - Recursive: false, - }, - }, - // populated DeleteOptions - { - key: "/foo", - opts: &DeleteOptions{ - PrevValue: "baz", - PrevIndex: 13, - Recursive: true, - }, - wantAction: &deleteAction{ - Key: "/foo", - PrevValue: "baz", - PrevIndex: 13, - Recursive: true, - }, - }, - } - - for i, tt := range tests { - client := &actionAssertingHTTPClient{t: t, num: i, act: tt.wantAction} - kAPI := httpKeysAPI{client: client} - kAPI.Delete(context.Background(), tt.key, tt.opts) - } -} - -func TestHTTPKeysAPIDeleteError(t *testing.T) { - tests := []httpClient{ - // generic HTTP client failure - &staticHTTPClient{ - err: errors.New("fail!"), - }, - - // unusable status code - &staticHTTPClient{ - resp: http.Response{ - StatusCode: http.StatusTeapot, - }, - }, - - // etcd Error response - &staticHTTPClient{ - resp: http.Response{ - StatusCode: http.StatusInternalServerError, - }, - body: []byte(`{"errorCode":300,"message":"Raft internal error","cause":"/foo","index":18}`), - }, - } - - for i, tt := range tests { - kAPI := httpKeysAPI{client: tt} - resp, err := kAPI.Delete(context.Background(), "/foo", nil) - if err == nil { - t.Errorf("#%d: received nil error", i) - } - if resp != nil { - t.Errorf("#%d: received non-nil Response: %#v", i, resp) - } - } -} - -func TestHTTPKeysAPIDeleteResponse(t *testing.T) { - client := &staticHTTPClient{ - resp: http.Response{ - StatusCode: http.StatusOK, - Header: http.Header{"X-Etcd-Index": []string{"22"}}, - }, - body: []byte(`{"action":"delete","node":{"key":"/pants/foo/bar/baz","value":"snarf","modifiedIndex":22,"createdIndex":19},"prevNode":{"key":"/pants/foo/bar/baz","value":"snazz","modifiedIndex":20,"createdIndex":19}}`), - } - - wantResponse := &Response{ - Action: "delete", - Node: &Node{Key: "/pants/foo/bar/baz", Value: "snarf", CreatedIndex: uint64(19), ModifiedIndex: uint64(22)}, - PrevNode: &Node{Key: "/pants/foo/bar/baz", Value: "snazz", CreatedIndex: uint64(19), ModifiedIndex: uint64(20)}, - Index: uint64(22), - } - - kAPI := &httpKeysAPI{client: client, prefix: "/pants"} - resp, err := kAPI.Delete(context.Background(), "/foo/bar/baz", nil) - if err != nil { - t.Errorf("non-nil error: %#v", err) - } - if !reflect.DeepEqual(wantResponse, resp) { - t.Errorf("incorrect Response: want=%#v got=%#v", wantResponse, 
resp) - } -} - -func TestHTTPKeysAPICreateAction(t *testing.T) { - act := &setAction{ - Key: "/foo", - Value: "bar", - PrevExist: PrevNoExist, - PrevIndex: 0, - PrevValue: "", - TTL: 0, - } - - kAPI := httpKeysAPI{client: &actionAssertingHTTPClient{t: t, act: act}} - kAPI.Create(context.Background(), "/foo", "bar") -} - -func TestHTTPKeysAPICreateInOrderAction(t *testing.T) { - act := &createInOrderAction{ - Dir: "/foo", - Value: "bar", - TTL: 0, - } - kAPI := httpKeysAPI{client: &actionAssertingHTTPClient{t: t, act: act}} - kAPI.CreateInOrder(context.Background(), "/foo", "bar", nil) -} - -func TestHTTPKeysAPIUpdateAction(t *testing.T) { - act := &setAction{ - Key: "/foo", - Value: "bar", - PrevExist: PrevExist, - PrevIndex: 0, - PrevValue: "", - TTL: 0, - } - - kAPI := httpKeysAPI{client: &actionAssertingHTTPClient{t: t, act: act}} - kAPI.Update(context.Background(), "/foo", "bar") -} - -func TestNodeTTLDuration(t *testing.T) { - tests := []struct { - node *Node - want time.Duration - }{ - { - node: &Node{TTL: 0}, - want: 0, - }, - { - node: &Node{TTL: 97}, - want: 97 * time.Second, - }, - } - - for i, tt := range tests { - got := tt.node.TTLDuration() - if tt.want != got { - t.Errorf("#%d: incorrect duration: want=%v got=%v", i, tt.want, got) - } - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/client/members_test.go b/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/client/members_test.go deleted file mode 100644 index e892b76965..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/client/members_test.go +++ /dev/null @@ -1,522 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package client - -import ( - "encoding/json" - "errors" - "net/http" - "net/url" - "reflect" - "testing" - - "golang.org/x/net/context" - - "github.com/coreos/etcd/pkg/types" -) - -func TestMembersAPIActionList(t *testing.T) { - ep := url.URL{Scheme: "http", Host: "example.com"} - act := &membersAPIActionList{} - - wantURL := &url.URL{ - Scheme: "http", - Host: "example.com", - Path: "/v2/members", - } - - got := *act.HTTPRequest(ep) - err := assertRequest(got, "GET", wantURL, http.Header{}, nil) - if err != nil { - t.Error(err.Error()) - } -} - -func TestMembersAPIActionAdd(t *testing.T) { - ep := url.URL{Scheme: "http", Host: "example.com"} - act := &membersAPIActionAdd{ - peerURLs: types.URLs([]url.URL{ - {Scheme: "https", Host: "127.0.0.1:8081"}, - {Scheme: "http", Host: "127.0.0.1:8080"}, - }), - } - - wantURL := &url.URL{ - Scheme: "http", - Host: "example.com", - Path: "/v2/members", - } - wantHeader := http.Header{ - "Content-Type": []string{"application/json"}, - } - wantBody := []byte(`{"peerURLs":["https://127.0.0.1:8081","http://127.0.0.1:8080"]}`) - - got := *act.HTTPRequest(ep) - err := assertRequest(got, "POST", wantURL, wantHeader, wantBody) - if err != nil { - t.Error(err.Error()) - } -} - -func TestMembersAPIActionUpdate(t *testing.T) { - ep := url.URL{Scheme: "http", Host: "example.com"} - act := &membersAPIActionUpdate{ - memberID: "0xabcd", - peerURLs: types.URLs([]url.URL{ - {Scheme: "https", Host: "127.0.0.1:8081"}, - {Scheme: "http", Host: "127.0.0.1:8080"}, - }), - } - - wantURL := &url.URL{ - Scheme: "http", - Host: "example.com", - Path: "/v2/members/0xabcd", - } - wantHeader := http.Header{ - "Content-Type": []string{"application/json"}, - } - wantBody := []byte(`{"peerURLs":["https://127.0.0.1:8081","http://127.0.0.1:8080"]}`) - - got := *act.HTTPRequest(ep) - err := assertRequest(got, "PUT", wantURL, wantHeader, wantBody) - if err != nil { - t.Error(err.Error()) - } -} - -func TestMembersAPIActionRemove(t *testing.T) { - ep := url.URL{Scheme: "http", Host: "example.com"} - act := &membersAPIActionRemove{memberID: "XXX"} - - wantURL := &url.URL{ - Scheme: "http", - Host: "example.com", - Path: "/v2/members/XXX", - } - - got := *act.HTTPRequest(ep) - err := assertRequest(got, "DELETE", wantURL, http.Header{}, nil) - if err != nil { - t.Error(err.Error()) - } -} - -func TestAssertStatusCode(t *testing.T) { - if err := assertStatusCode(404, 400); err == nil { - t.Errorf("assertStatusCode failed to detect conflict in 400 vs 404") - } - - if err := assertStatusCode(404, 400, 404); err != nil { - t.Errorf("assertStatusCode found conflict in (404,400) vs 400: %v", err) - } -} - -func TestV2MembersURL(t *testing.T) { - got := v2MembersURL(url.URL{ - Scheme: "http", - Host: "foo.example.com:4002", - Path: "/pants", - }) - want := &url.URL{ - Scheme: "http", - Host: "foo.example.com:4002", - Path: "/pants/v2/members", - } - - if !reflect.DeepEqual(want, got) { - t.Fatalf("v2MembersURL got %#v, want %#v", got, want) - } -} - -func TestMemberUnmarshal(t *testing.T) { - tests := []struct { - body []byte - wantMember Member - wantError bool - }{ - // no URLs, just check ID & Name - { - body: []byte(`{"id": "c", "name": "dungarees"}`), - wantMember: Member{ID: "c", Name: "dungarees", PeerURLs: nil, ClientURLs: nil}, - }, - - // both client and peer URLs - { - body: []byte(`{"peerURLs": ["http://127.0.0.1:2379"], "clientURLs": ["http://127.0.0.1:2379"]}`), - wantMember: Member{ - PeerURLs: []string{ - "http://127.0.0.1:2379", - }, - ClientURLs: []string{ - 
"http://127.0.0.1:2379", - }, - }, - }, - - // multiple peer URLs - { - body: []byte(`{"peerURLs": ["http://127.0.0.1:2379", "https://example.com"]}`), - wantMember: Member{ - PeerURLs: []string{ - "http://127.0.0.1:2379", - "https://example.com", - }, - ClientURLs: nil, - }, - }, - - // multiple client URLs - { - body: []byte(`{"clientURLs": ["http://127.0.0.1:2379", "https://example.com"]}`), - wantMember: Member{ - PeerURLs: nil, - ClientURLs: []string{ - "http://127.0.0.1:2379", - "https://example.com", - }, - }, - }, - - // invalid JSON - { - body: []byte(`{"peerU`), - wantError: true, - }, - } - - for i, tt := range tests { - got := Member{} - err := json.Unmarshal(tt.body, &got) - if tt.wantError != (err != nil) { - t.Errorf("#%d: want error %t, got %v", i, tt.wantError, err) - continue - } - - if !reflect.DeepEqual(tt.wantMember, got) { - t.Errorf("#%d: incorrect output: want=%#v, got=%#v", i, tt.wantMember, got) - } - } -} - -func TestMemberCollectionUnmarshalFail(t *testing.T) { - mc := &memberCollection{} - if err := mc.UnmarshalJSON([]byte(`{`)); err == nil { - t.Errorf("got nil error") - } -} - -func TestMemberCollectionUnmarshal(t *testing.T) { - tests := []struct { - body []byte - want memberCollection - }{ - { - body: []byte(`{}`), - want: memberCollection([]Member{}), - }, - { - body: []byte(`{"members":[]}`), - want: memberCollection([]Member{}), - }, - { - body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`), - want: memberCollection( - []Member{ - { - ID: "2745e2525fce8fe", - Name: "node3", - PeerURLs: []string{ - "http://127.0.0.1:7003", - }, - ClientURLs: []string{ - "http://127.0.0.1:4003", - }, - }, - { - ID: "42134f434382925", - Name: "node1", - PeerURLs: []string{ - "http://127.0.0.1:2380", - "http://127.0.0.1:7001", - }, - ClientURLs: []string{ - "http://127.0.0.1:2379", - "http://127.0.0.1:4001", - }, - }, - { - ID: "94088180e21eb87b", - Name: "node2", - PeerURLs: []string{ - "http://127.0.0.1:7002", - }, - ClientURLs: []string{ - "http://127.0.0.1:4002", - }, - }, - }, - ), - }, - } - - for i, tt := range tests { - var got memberCollection - err := json.Unmarshal(tt.body, &got) - if err != nil { - t.Errorf("#%d: unexpected error: %v", i, err) - continue - } - - if !reflect.DeepEqual(tt.want, got) { - t.Errorf("#%d: incorrect output: want=%#v, got=%#v", i, tt.want, got) - } - } -} - -func TestMemberCreateRequestMarshal(t *testing.T) { - req := memberCreateOrUpdateRequest{ - PeerURLs: types.URLs([]url.URL{ - {Scheme: "http", Host: "127.0.0.1:8081"}, - {Scheme: "https", Host: "127.0.0.1:8080"}, - }), - } - want := []byte(`{"peerURLs":["http://127.0.0.1:8081","https://127.0.0.1:8080"]}`) - - got, err := json.Marshal(&req) - if err != nil { - t.Fatalf("Marshal returned unexpected err=%v", err) - } - - if !reflect.DeepEqual(want, got) { - t.Fatalf("Failed to marshal memberCreateRequest: want=%s, got=%s", want, got) - } -} - -func TestHTTPMembersAPIAddSuccess(t *testing.T) { - wantAction := &membersAPIActionAdd{ - peerURLs: types.URLs([]url.URL{ - {Scheme: "http", Host: "127.0.0.1:7002"}, - }), - } - - mAPI := &httpMembersAPI{ - client: &actionAssertingHTTPClient{ - t: t, - act: wantAction, - resp: 
http.Response{ - StatusCode: http.StatusCreated, - }, - body: []byte(`{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"]}`), - }, - } - - wantResponseMember := &Member{ - ID: "94088180e21eb87b", - PeerURLs: []string{"http://127.0.0.1:7002"}, - } - - m, err := mAPI.Add(context.Background(), "http://127.0.0.1:7002") - if err != nil { - t.Errorf("got non-nil err: %#v", err) - } - if !reflect.DeepEqual(wantResponseMember, m) { - t.Errorf("incorrect Member: want=%#v got=%#v", wantResponseMember, m) - } -} - -func TestHTTPMembersAPIAddError(t *testing.T) { - okPeer := "http://example.com:2379" - tests := []struct { - peerURL string - client httpClient - - // if wantErr == nil, assert that the returned error is non-nil - // if wantErr != nil, assert that the returned error matches - wantErr error - }{ - // malformed peer URL - { - peerURL: ":", - }, - - // generic httpClient failure - { - peerURL: okPeer, - client: &staticHTTPClient{err: errors.New("fail!")}, - }, - - // unrecognized HTTP status code - { - peerURL: okPeer, - client: &staticHTTPClient{ - resp: http.Response{StatusCode: http.StatusTeapot}, - }, - }, - - // unmarshal body into membersError on StatusConflict - { - peerURL: okPeer, - client: &staticHTTPClient{ - resp: http.Response{ - StatusCode: http.StatusConflict, - }, - body: []byte(`{"message":"fail!"}`), - }, - wantErr: membersError{Message: "fail!"}, - }, - - // fail to unmarshal body on StatusConflict - { - peerURL: okPeer, - client: &staticHTTPClient{ - resp: http.Response{ - StatusCode: http.StatusConflict, - }, - body: []byte(`{"`), - }, - }, - - // fail to unmarshal body on StatusCreated - { - peerURL: okPeer, - client: &staticHTTPClient{ - resp: http.Response{ - StatusCode: http.StatusCreated, - }, - body: []byte(`{"id":"XX`), - }, - }, - } - - for i, tt := range tests { - mAPI := &httpMembersAPI{client: tt.client} - m, err := mAPI.Add(context.Background(), tt.peerURL) - if err == nil { - t.Errorf("#%d: got nil err", i) - } - if tt.wantErr != nil && !reflect.DeepEqual(tt.wantErr, err) { - t.Errorf("#%d: incorrect error: want=%#v got=%#v", i, tt.wantErr, err) - } - if m != nil { - t.Errorf("#%d: got non-nil Member", i) - } - } -} - -func TestHTTPMembersAPIRemoveSuccess(t *testing.T) { - wantAction := &membersAPIActionRemove{ - memberID: "94088180e21eb87b", - } - - mAPI := &httpMembersAPI{ - client: &actionAssertingHTTPClient{ - t: t, - act: wantAction, - resp: http.Response{ - StatusCode: http.StatusNoContent, - }, - }, - } - - if err := mAPI.Remove(context.Background(), "94088180e21eb87b"); err != nil { - t.Errorf("got non-nil err: %#v", err) - } -} - -func TestHTTPMembersAPIRemoveFail(t *testing.T) { - tests := []httpClient{ - // generic error - &staticHTTPClient{ - err: errors.New("fail!"), - }, - - // unexpected HTTP status code - &staticHTTPClient{ - resp: http.Response{ - StatusCode: http.StatusInternalServerError, - }, - }, - } - - for i, tt := range tests { - mAPI := &httpMembersAPI{client: tt} - if err := mAPI.Remove(context.Background(), "94088180e21eb87b"); err == nil { - t.Errorf("#%d: got nil err", i) - } - } -} - -func TestHTTPMembersAPIListSuccess(t *testing.T) { - wantAction := &membersAPIActionList{} - mAPI := &httpMembersAPI{ - client: &actionAssertingHTTPClient{ - t: t, - act: wantAction, - resp: http.Response{ - StatusCode: http.StatusOK, - }, - body: []byte(`{"members":[{"id":"94088180e21eb87b","name":"node2","peerURLs":["http://127.0.0.1:7002"],"clientURLs":["http://127.0.0.1:4002"]}]}`), - }, - } - - wantResponseMembers := []Member{ - { - 
ID: "94088180e21eb87b", - Name: "node2", - PeerURLs: []string{"http://127.0.0.1:7002"}, - ClientURLs: []string{"http://127.0.0.1:4002"}, - }, - } - - m, err := mAPI.List(context.Background()) - if err != nil { - t.Errorf("got non-nil err: %#v", err) - } - if !reflect.DeepEqual(wantResponseMembers, m) { - t.Errorf("incorrect Members: want=%#v got=%#v", wantResponseMembers, m) - } -} - -func TestHTTPMembersAPIListError(t *testing.T) { - tests := []httpClient{ - // generic httpClient failure - &staticHTTPClient{err: errors.New("fail!")}, - - // unrecognized HTTP status code - &staticHTTPClient{ - resp: http.Response{StatusCode: http.StatusTeapot}, - }, - - // fail to unmarshal body on StatusOK - &staticHTTPClient{ - resp: http.Response{ - StatusCode: http.StatusOK, - }, - body: []byte(`[{"id":"XX`), - }, - } - - for i, tt := range tests { - mAPI := &httpMembersAPI{client: tt} - ms, err := mAPI.List(context.Background()) - if err == nil { - t.Errorf("#%d: got nil err", i) - } - if ms != nil { - t.Errorf("#%d: got non-nil Member slice", i) - } - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/client/srv_test.go b/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/client/srv_test.go deleted file mode 100644 index a4a27f108a..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/client/srv_test.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package client - -import ( - "errors" - "net" - "reflect" - "testing" -) - -func TestSRVDiscover(t *testing.T) { - defer func() { lookupSRV = net.LookupSRV }() - - tests := []struct { - withSSL []*net.SRV - withoutSSL []*net.SRV - expected []string - }{ - { - []*net.SRV{}, - []*net.SRV{}, - []string{}, - }, - { - []*net.SRV{ - {Target: "10.0.0.1", Port: 2480}, - {Target: "10.0.0.2", Port: 2480}, - {Target: "10.0.0.3", Port: 2480}, - }, - []*net.SRV{}, - []string{"https://10.0.0.1:2480", "https://10.0.0.2:2480", "https://10.0.0.3:2480"}, - }, - { - []*net.SRV{ - {Target: "10.0.0.1", Port: 2480}, - {Target: "10.0.0.2", Port: 2480}, - {Target: "10.0.0.3", Port: 2480}, - }, - []*net.SRV{ - {Target: "10.0.0.1", Port: 7001}, - }, - []string{"https://10.0.0.1:2480", "https://10.0.0.2:2480", "https://10.0.0.3:2480", "http://10.0.0.1:7001"}, - }, - { - []*net.SRV{ - {Target: "10.0.0.1", Port: 2480}, - {Target: "10.0.0.2", Port: 2480}, - {Target: "10.0.0.3", Port: 2480}, - }, - []*net.SRV{ - {Target: "10.0.0.1", Port: 7001}, - }, - []string{"https://10.0.0.1:2480", "https://10.0.0.2:2480", "https://10.0.0.3:2480", "http://10.0.0.1:7001"}, - }, - { - []*net.SRV{ - {Target: "a.example.com", Port: 2480}, - {Target: "b.example.com", Port: 2480}, - {Target: "c.example.com", Port: 2480}, - }, - []*net.SRV{}, - []string{"https://a.example.com:2480", "https://b.example.com:2480", "https://c.example.com:2480"}, - }, - } - - for i, tt := range tests { - lookupSRV = func(service string, proto string, domain string) (string, []*net.SRV, error) { - if service == "etcd-server-ssl" { - return "", tt.withSSL, nil - } - if service == "etcd-server" { - return "", tt.withoutSSL, nil - } - return "", nil, errors.New("Unkown service in mock") - } - - d := NewSRVDiscover() - - endpoints, err := d.Discover("example.com") - if err != nil { - t.Fatalf("%d: err: %#v", i, err) - } - - if !reflect.DeepEqual(endpoints, tt.expected) { - t.Errorf("#%d: endpoints = %v, want %v", i, endpoints, tt.expected) - } - - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/pkg/pathutil/path_test.go b/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/pkg/pathutil/path_test.go deleted file mode 100644 index 6d3d803cfc..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/pkg/pathutil/path_test.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package pathutil - -import "testing" - -func TestCanonicalURLPath(t *testing.T) { - tests := []struct { - p string - wp string - }{ - {"/a", "/a"}, - {"", "/"}, - {"a", "/a"}, - {"//a", "/a"}, - {"/a/.", "/a"}, - {"/a/..", "/"}, - {"/a/", "/a/"}, - {"/a//", "/a/"}, - } - for i, tt := range tests { - if g := CanonicalURLPath(tt.p); g != tt.wp { - t.Errorf("#%d: canonical path = %s, want %s", i, g, tt.wp) - } - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/pkg/types/id_test.go b/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/pkg/types/id_test.go deleted file mode 100644 index 97d168f58e..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/pkg/types/id_test.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "reflect" - "sort" - "testing" -) - -func TestIDString(t *testing.T) { - tests := []struct { - input ID - want string - }{ - { - input: 12, - want: "c", - }, - { - input: 4918257920282737594, - want: "444129853c343bba", - }, - } - - for i, tt := range tests { - got := tt.input.String() - if tt.want != got { - t.Errorf("#%d: ID.String failure: want=%v, got=%v", i, tt.want, got) - } - } -} - -func TestIDFromString(t *testing.T) { - tests := []struct { - input string - want ID - }{ - { - input: "17", - want: 23, - }, - { - input: "612840dae127353", - want: 437557308098245459, - }, - } - - for i, tt := range tests { - got, err := IDFromString(tt.input) - if err != nil { - t.Errorf("#%d: IDFromString failure: err=%v", i, err) - continue - } - if tt.want != got { - t.Errorf("#%d: IDFromString failure: want=%v, got=%v", i, tt.want, got) - } - } -} - -func TestIDFromStringFail(t *testing.T) { - tests := []string{ - "", - "XXX", - "612840dae127353612840dae127353", - } - - for i, tt := range tests { - _, err := IDFromString(tt) - if err == nil { - t.Fatalf("#%d: IDFromString expected error, but err=nil", i) - } - } -} - -func TestIDSlice(t *testing.T) { - g := []ID{10, 500, 5, 1, 100, 25} - w := []ID{1, 5, 10, 25, 100, 500} - sort.Sort(IDSlice(g)) - if !reflect.DeepEqual(g, w) { - t.Errorf("slice after sort = %#v, want %#v", g, w) - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/pkg/types/set_test.go b/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/pkg/types/set_test.go deleted file mode 100644 index ff1ecc68d3..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/pkg/types/set_test.go +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "reflect" - "sort" - "testing" -) - -func TestUnsafeSet(t *testing.T) { - driveSetTests(t, NewUnsafeSet()) -} - -func TestThreadsafeSet(t *testing.T) { - driveSetTests(t, NewThreadsafeSet()) -} - -// Check that two slices contents are equal; order is irrelevant -func equal(a, b []string) bool { - as := sort.StringSlice(a) - bs := sort.StringSlice(b) - as.Sort() - bs.Sort() - return reflect.DeepEqual(as, bs) -} - -func driveSetTests(t *testing.T, s Set) { - // Verify operations on an empty set - eValues := []string{} - values := s.Values() - if !reflect.DeepEqual(values, eValues) { - t.Fatalf("Expect values=%v got %v", eValues, values) - } - if l := s.Length(); l != 0 { - t.Fatalf("Expected length=0, got %d", l) - } - for _, v := range []string{"foo", "bar", "baz"} { - if s.Contains(v) { - t.Fatalf("Expect s.Contains(%q) to be fale, got true", v) - } - } - - // Add three items, ensure they show up - s.Add("foo") - s.Add("bar") - s.Add("baz") - - eValues = []string{"foo", "bar", "baz"} - values = s.Values() - if !equal(values, eValues) { - t.Fatalf("Expect values=%v got %v", eValues, values) - } - - for _, v := range eValues { - if !s.Contains(v) { - t.Fatalf("Expect s.Contains(%q) to be true, got false", v) - } - } - - if l := s.Length(); l != 3 { - t.Fatalf("Expected length=3, got %d", l) - } - - // Add the same item a second time, ensuring it is not duplicated - s.Add("foo") - - values = s.Values() - if !equal(values, eValues) { - t.Fatalf("Expect values=%v got %v", eValues, values) - } - if l := s.Length(); l != 3 { - t.Fatalf("Expected length=3, got %d", l) - } - - // Remove all items, ensure they are gone - s.Remove("foo") - s.Remove("bar") - s.Remove("baz") - - eValues = []string{} - values = s.Values() - if !equal(values, eValues) { - t.Fatalf("Expect values=%v got %v", eValues, values) - } - - if l := s.Length(); l != 0 { - t.Fatalf("Expected length=0, got %d", l) - } - - // Create new copies of the set, and ensure they are unlinked to the - // original Set by making modifications - s.Add("foo") - s.Add("bar") - cp1 := s.Copy() - cp2 := s.Copy() - s.Remove("foo") - cp3 := s.Copy() - cp1.Add("baz") - - for i, tt := range []struct { - want []string - got []string - }{ - {[]string{"bar"}, s.Values()}, - {[]string{"foo", "bar", "baz"}, cp1.Values()}, - {[]string{"foo", "bar"}, cp2.Values()}, - {[]string{"bar"}, cp3.Values()}, - } { - if !equal(tt.want, tt.got) { - t.Fatalf("case %d: expect values=%v got %v", i, tt.want, tt.got) - } - } - - for i, tt := range []struct { - want bool - got bool - }{ - {true, s.Equals(cp3)}, - {true, cp3.Equals(s)}, - {false, s.Equals(cp2)}, - {false, s.Equals(cp1)}, - {false, cp1.Equals(s)}, - {false, cp2.Equals(s)}, - {false, cp2.Equals(cp1)}, - } { - if tt.got != tt.want { - t.Fatalf("case %d: want %t, got %t", i, tt.want, tt.got) - - } - } - - // Subtract values from a Set, ensuring a new Set is created and - // the original Sets are unmodified - sub1 := cp1.Sub(s) - sub2 := cp2.Sub(cp1) - - for i, tt := range []struct { - want []string - got []string - }{ - {[]string{"foo", "bar", "baz"}, cp1.Values()}, - 
{[]string{"foo", "bar"}, cp2.Values()}, - {[]string{"bar"}, s.Values()}, - {[]string{"foo", "baz"}, sub1.Values()}, - {[]string{}, sub2.Values()}, - } { - if !equal(tt.want, tt.got) { - t.Fatalf("case %d: expect values=%v got %v", i, tt.want, tt.got) - } - } -} - -func TestUnsafeSetContainsAll(t *testing.T) { - vals := []string{"foo", "bar", "baz"} - s := NewUnsafeSet(vals...) - - tests := []struct { - strs []string - wcontain bool - }{ - {[]string{}, true}, - {vals[:1], true}, - {vals[:2], true}, - {vals, true}, - {[]string{"cuz"}, false}, - {[]string{vals[0], "cuz"}, false}, - } - for i, tt := range tests { - if g := s.ContainsAll(tt.strs); g != tt.wcontain { - t.Errorf("#%d: ok = %v, want %v", i, g, tt.wcontain) - } - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/pkg/types/slice_test.go b/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/pkg/types/slice_test.go deleted file mode 100644 index 95e37e04d2..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/pkg/types/slice_test.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "reflect" - "sort" - "testing" -) - -func TestUint64Slice(t *testing.T) { - g := Uint64Slice{10, 500, 5, 1, 100, 25} - w := Uint64Slice{1, 5, 10, 25, 100, 500} - sort.Sort(g) - if !reflect.DeepEqual(g, w) { - t.Errorf("slice after sort = %#v, want %#v", g, w) - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/pkg/types/urls_test.go b/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/pkg/types/urls_test.go deleted file mode 100644 index ffa2cf007b..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/pkg/types/urls_test.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package types - -import ( - "reflect" - "testing" - - "github.com/coreos/etcd/pkg/testutil" -) - -func TestNewURLs(t *testing.T) { - tests := []struct { - strs []string - wurls URLs - }{ - { - []string{"http://127.0.0.1:2379"}, - testutil.MustNewURLs(t, []string{"http://127.0.0.1:2379"}), - }, - // it can trim space - { - []string{" http://127.0.0.1:2379 "}, - testutil.MustNewURLs(t, []string{"http://127.0.0.1:2379"}), - }, - // it does sort - { - []string{ - "http://127.0.0.2:2379", - "http://127.0.0.1:2379", - }, - testutil.MustNewURLs(t, []string{ - "http://127.0.0.1:2379", - "http://127.0.0.2:2379", - }), - }, - } - for i, tt := range tests { - urls, _ := NewURLs(tt.strs) - if !reflect.DeepEqual(urls, tt.wurls) { - t.Errorf("#%d: urls = %+v, want %+v", i, urls, tt.wurls) - } - } -} - -func TestURLsString(t *testing.T) { - tests := []struct { - us URLs - wstr string - }{ - { - URLs{}, - "", - }, - { - testutil.MustNewURLs(t, []string{"http://127.0.0.1:2379"}), - "http://127.0.0.1:2379", - }, - { - testutil.MustNewURLs(t, []string{ - "http://127.0.0.1:2379", - "http://127.0.0.2:2379", - }), - "http://127.0.0.1:2379,http://127.0.0.2:2379", - }, - { - testutil.MustNewURLs(t, []string{ - "http://127.0.0.2:2379", - "http://127.0.0.1:2379", - }), - "http://127.0.0.2:2379,http://127.0.0.1:2379", - }, - } - for i, tt := range tests { - g := tt.us.String() - if g != tt.wstr { - t.Errorf("#%d: string = %s, want %s", i, g, tt.wstr) - } - } -} - -func TestURLsSort(t *testing.T) { - g := testutil.MustNewURLs(t, []string{ - "http://127.0.0.4:2379", - "http://127.0.0.2:2379", - "http://127.0.0.1:2379", - "http://127.0.0.3:2379", - }) - w := testutil.MustNewURLs(t, []string{ - "http://127.0.0.1:2379", - "http://127.0.0.2:2379", - "http://127.0.0.3:2379", - "http://127.0.0.4:2379", - }) - gurls := URLs(g) - gurls.Sort() - if !reflect.DeepEqual(g, w) { - t.Errorf("URLs after sort = %#v, want %#v", g, w) - } -} - -func TestURLsStringSlice(t *testing.T) { - tests := []struct { - us URLs - wstr []string - }{ - { - URLs{}, - []string{}, - }, - { - testutil.MustNewURLs(t, []string{"http://127.0.0.1:2379"}), - []string{"http://127.0.0.1:2379"}, - }, - { - testutil.MustNewURLs(t, []string{ - "http://127.0.0.1:2379", - "http://127.0.0.2:2379", - }), - []string{"http://127.0.0.1:2379", "http://127.0.0.2:2379"}, - }, - { - testutil.MustNewURLs(t, []string{ - "http://127.0.0.2:2379", - "http://127.0.0.1:2379", - }), - []string{"http://127.0.0.2:2379", "http://127.0.0.1:2379"}, - }, - } - for i, tt := range tests { - g := tt.us.StringSlice() - if !reflect.DeepEqual(g, tt.wstr) { - t.Errorf("#%d: string slice = %+v, want %+v", i, g, tt.wstr) - } - } -} - -func TestNewURLsFail(t *testing.T) { - tests := [][]string{ - // no urls given - {}, - // missing protocol scheme - {"://127.0.0.1:2379"}, - // unsupported scheme - {"mailto://127.0.0.1:2379"}, - // not conform to host:port - {"http://127.0.0.1"}, - // contain a path - {"http://127.0.0.1:2379/path"}, - } - for i, tt := range tests { - _, err := NewURLs(tt) - if err == nil { - t.Errorf("#%d: err = nil, but error", i) - } - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/pkg/types/urlsmap_test.go b/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/pkg/types/urlsmap_test.go deleted file mode 100644 index 8b52dc17b1..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/coreos/etcd/pkg/types/urlsmap_test.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2015 CoreOS, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "reflect" - "testing" - - "github.com/coreos/etcd/pkg/testutil" -) - -func TestParseInitialCluster(t *testing.T) { - c, err := NewURLsMap("mem1=http://10.0.0.1:2379,mem1=http://128.193.4.20:2379,mem2=http://10.0.0.2:2379,default=http://127.0.0.1:2379") - if err != nil { - t.Fatalf("unexpected parse error: %v", err) - } - wc := URLsMap(map[string]URLs{ - "mem1": testutil.MustNewURLs(t, []string{"http://10.0.0.1:2379", "http://128.193.4.20:2379"}), - "mem2": testutil.MustNewURLs(t, []string{"http://10.0.0.2:2379"}), - "default": testutil.MustNewURLs(t, []string{"http://127.0.0.1:2379"}), - }) - if !reflect.DeepEqual(c, wc) { - t.Errorf("cluster = %+v, want %+v", c, wc) - } -} - -func TestParseInitialClusterBad(t *testing.T) { - tests := []string{ - // invalid URL - "%^", - // no URL defined for member - "mem1=,mem2=http://128.193.4.20:2379,mem3=http://10.0.0.2:2379", - "mem1,mem2=http://128.193.4.20:2379,mem3=http://10.0.0.2:2379", - // bad URL for member - "default=http://localhost/", - } - for i, tt := range tests { - if _, err := NewURLsMap(tt); err == nil { - t.Errorf("#%d: unexpected successful parse, want err", i) - } - } -} - -func TestNameURLPairsString(t *testing.T) { - cls := URLsMap(map[string]URLs{ - "abc": testutil.MustNewURLs(t, []string{"http://1.1.1.1:1111", "http://0.0.0.0:0000"}), - "def": testutil.MustNewURLs(t, []string{"http://2.2.2.2:2222"}), - "ghi": testutil.MustNewURLs(t, []string{"http://3.3.3.3:1234", "http://127.0.0.1:2380"}), - // no PeerURLs = not included - "four": testutil.MustNewURLs(t, []string{}), - "five": testutil.MustNewURLs(t, nil), - }) - w := "abc=http://0.0.0.0:0000,abc=http://1.1.1.1:1111,def=http://2.2.2.2:2222,ghi=http://127.0.0.1:2380,ghi=http://3.3.3.3:1234" - if g := cls.String(); g != w { - t.Fatalf("NameURLPairs.String():\ngot %#v\nwant %#v", g, w) - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/deckarep/golang-set/bench_test.go b/libnetwork/Godeps/_workspace/src/github.com/deckarep/golang-set/bench_test.go deleted file mode 100644 index a5b5fe38d2..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/deckarep/golang-set/bench_test.go +++ /dev/null @@ -1,523 +0,0 @@ -package mapset - -import ( - "math/rand" - "testing" -) - -func nrand(n int) []int { - i := make([]int, n) - for ind := range i { - i[ind] = rand.Int() - } - return i -} - -func toInterfaces(i []int) []interface{} { - ifs := make([]interface{}, len(i)) - for ind, v := range i { - ifs[ind] = v - } - return ifs -} - -func benchAdd(b *testing.B, s Set) { - nums := nrand(b.N) - b.ResetTimer() - for _, v := range nums { - s.Add(v) - } -} - -func BenchmarkAddSafe(b *testing.B) { - benchAdd(b, NewSet()) -} - -func BenchmarkAddUnsafe(b *testing.B) { - benchAdd(b, NewThreadUnsafeSet()) -} - -func benchRemove(b *testing.B, s Set) { - nums := nrand(b.N) - for _, v := range nums { - s.Add(v) - } - - b.ResetTimer() - for _, v := range nums { - s.Remove(v) - } -} - -func 
BenchmarkRemoveSafe(b *testing.B) { - benchRemove(b, NewSet()) -} - -func BenchmarkRemoveUnsafe(b *testing.B) { - benchRemove(b, NewThreadUnsafeSet()) -} - -func benchCardinality(b *testing.B, s Set) { - for i := 0; i < b.N; i++ { - s.Cardinality() - } -} - -func BenchmarkCardinalitySafe(b *testing.B) { - benchCardinality(b, NewSet()) -} - -func BenchmarkCardinalityUnsafe(b *testing.B) { - benchCardinality(b, NewThreadUnsafeSet()) -} - -func benchClear(b *testing.B, s Set) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - s.Clear() - } -} - -func BenchmarkClearSafe(b *testing.B) { - benchClear(b, NewSet()) -} - -func BenchmarkClearUnsafe(b *testing.B) { - benchClear(b, NewThreadUnsafeSet()) -} - -func benchClone(b *testing.B, n int, s Set) { - nums := toInterfaces(nrand(n)) - for _, v := range nums { - s.Add(v) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - s.Clone() - } -} - -func BenchmarkClone1Safe(b *testing.B) { - benchClone(b, 1, NewSet()) -} - -func BenchmarkClone1Unsafe(b *testing.B) { - benchClone(b, 1, NewThreadUnsafeSet()) -} - -func BenchmarkClone10Safe(b *testing.B) { - benchClone(b, 10, NewSet()) -} - -func BenchmarkClone10Unsafe(b *testing.B) { - benchClone(b, 10, NewThreadUnsafeSet()) -} - -func BenchmarkClone100Safe(b *testing.B) { - benchClone(b, 100, NewSet()) -} - -func BenchmarkClone100Unsafe(b *testing.B) { - benchClone(b, 100, NewThreadUnsafeSet()) -} - -func benchContains(b *testing.B, n int, s Set) { - nums := toInterfaces(nrand(n)) - for _, v := range nums { - s.Add(v) - } - - nums[n-1] = -1 // Definitely not in s - - b.ResetTimer() - for i := 0; i < b.N; i++ { - s.Contains(nums...) - } -} - -func BenchmarkContains1Safe(b *testing.B) { - benchContains(b, 1, NewSet()) -} - -func BenchmarkContains1Unsafe(b *testing.B) { - benchContains(b, 1, NewThreadUnsafeSet()) -} - -func BenchmarkContains10Safe(b *testing.B) { - benchContains(b, 10, NewSet()) -} - -func BenchmarkContains10Unsafe(b *testing.B) { - benchContains(b, 10, NewThreadUnsafeSet()) -} - -func BenchmarkContains100Safe(b *testing.B) { - benchContains(b, 100, NewSet()) -} - -func BenchmarkContains100Unsafe(b *testing.B) { - benchContains(b, 100, NewThreadUnsafeSet()) -} - -func benchEqual(b *testing.B, n int, s, t Set) { - nums := nrand(n) - for _, v := range nums { - s.Add(v) - t.Add(v) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - s.Equal(t) - } -} - -func BenchmarkEqual1Safe(b *testing.B) { - benchEqual(b, 1, NewSet(), NewSet()) -} - -func BenchmarkEqual1Unsafe(b *testing.B) { - benchEqual(b, 1, NewThreadUnsafeSet(), NewThreadUnsafeSet()) -} - -func BenchmarkEqual10Safe(b *testing.B) { - benchEqual(b, 10, NewSet(), NewSet()) -} - -func BenchmarkEqual10Unsafe(b *testing.B) { - benchEqual(b, 10, NewThreadUnsafeSet(), NewThreadUnsafeSet()) -} - -func BenchmarkEqual100Safe(b *testing.B) { - benchEqual(b, 100, NewSet(), NewSet()) -} - -func BenchmarkEqual100Unsafe(b *testing.B) { - benchEqual(b, 100, NewThreadUnsafeSet(), NewThreadUnsafeSet()) -} - -func benchDifference(b *testing.B, n int, s, t Set) { - nums := nrand(n) - for _, v := range nums { - s.Add(v) - } - for _, v := range nums[:n/2] { - t.Add(v) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - s.Difference(t) - } -} - -func benchIsSubset(b *testing.B, n int, s, t Set) { - nums := nrand(n) - for _, v := range nums { - s.Add(v) - t.Add(v) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - s.IsSubset(t) - } -} - -func BenchmarkIsSubset1Safe(b *testing.B) { - benchIsSubset(b, 1, NewSet(), NewSet()) -} - -func 
BenchmarkIsSubset1Unsafe(b *testing.B) { - benchIsSubset(b, 1, NewThreadUnsafeSet(), NewThreadUnsafeSet()) -} - -func BenchmarkIsSubset10Safe(b *testing.B) { - benchIsSubset(b, 10, NewSet(), NewSet()) -} - -func BenchmarkIsSubset10Unsafe(b *testing.B) { - benchIsSubset(b, 10, NewThreadUnsafeSet(), NewThreadUnsafeSet()) -} - -func BenchmarkIsSubset100Safe(b *testing.B) { - benchIsSubset(b, 100, NewSet(), NewSet()) -} - -func BenchmarkIsSubset100Unsafe(b *testing.B) { - benchIsSubset(b, 100, NewThreadUnsafeSet(), NewThreadUnsafeSet()) -} - -func benchIsSuperset(b *testing.B, n int, s, t Set) { - nums := nrand(n) - for _, v := range nums { - s.Add(v) - t.Add(v) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - s.IsSuperset(t) - } -} - -func BenchmarkIsSuperset1Safe(b *testing.B) { - benchIsSuperset(b, 1, NewSet(), NewSet()) -} - -func BenchmarkIsSuperset1Unsafe(b *testing.B) { - benchIsSuperset(b, 1, NewThreadUnsafeSet(), NewThreadUnsafeSet()) -} - -func BenchmarkIsSuperset10Safe(b *testing.B) { - benchIsSuperset(b, 10, NewSet(), NewSet()) -} - -func BenchmarkIsSuperset10Unsafe(b *testing.B) { - benchIsSuperset(b, 10, NewThreadUnsafeSet(), NewThreadUnsafeSet()) -} - -func BenchmarkIsSuperset100Safe(b *testing.B) { - benchIsSuperset(b, 100, NewSet(), NewSet()) -} - -func BenchmarkIsSuperset100Unsafe(b *testing.B) { - benchIsSuperset(b, 100, NewThreadUnsafeSet(), NewThreadUnsafeSet()) -} - -func BenchmarkDifference1Safe(b *testing.B) { - benchDifference(b, 1, NewSet(), NewSet()) -} - -func BenchmarkDifference1Unsafe(b *testing.B) { - benchDifference(b, 1, NewThreadUnsafeSet(), NewThreadUnsafeSet()) -} - -func BenchmarkDifference10Safe(b *testing.B) { - benchDifference(b, 10, NewSet(), NewSet()) -} - -func BenchmarkDifference10Unsafe(b *testing.B) { - benchDifference(b, 10, NewThreadUnsafeSet(), NewThreadUnsafeSet()) -} - -func BenchmarkDifference100Safe(b *testing.B) { - benchDifference(b, 100, NewSet(), NewSet()) -} - -func BenchmarkDifference100Unsafe(b *testing.B) { - benchDifference(b, 100, NewThreadUnsafeSet(), NewThreadUnsafeSet()) -} - -func benchIntersect(b *testing.B, n int, s, t Set) { - nums := nrand(int(float64(n) * float64(1.5))) - for _, v := range nums[:n] { - s.Add(v) - } - for _, v := range nums[n/2:] { - t.Add(v) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - s.Intersect(t) - } -} - -func BenchmarkIntersect1Safe(b *testing.B) { - benchIntersect(b, 1, NewSet(), NewSet()) -} - -func BenchmarkIntersect1Unsafe(b *testing.B) { - benchIntersect(b, 1, NewThreadUnsafeSet(), NewThreadUnsafeSet()) -} - -func BenchmarkIntersect10Safe(b *testing.B) { - benchIntersect(b, 10, NewSet(), NewSet()) -} - -func BenchmarkIntersect10Unsafe(b *testing.B) { - benchIntersect(b, 10, NewThreadUnsafeSet(), NewThreadUnsafeSet()) -} - -func BenchmarkIntersect100Safe(b *testing.B) { - benchIntersect(b, 100, NewSet(), NewSet()) -} - -func BenchmarkIntersect100Unsafe(b *testing.B) { - benchIntersect(b, 100, NewThreadUnsafeSet(), NewThreadUnsafeSet()) -} - -func benchSymmetricDifference(b *testing.B, n int, s, t Set) { - nums := nrand(int(float64(n) * float64(1.5))) - for _, v := range nums[:n] { - s.Add(v) - } - for _, v := range nums[n/2:] { - t.Add(v) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - s.SymmetricDifference(t) - } -} - -func BenchmarkSymmetricDifference1Safe(b *testing.B) { - benchSymmetricDifference(b, 1, NewSet(), NewSet()) -} - -func BenchmarkSymmetricDifference1Unsafe(b *testing.B) { - benchSymmetricDifference(b, 1, NewThreadUnsafeSet(), NewThreadUnsafeSet()) -} - -func 
BenchmarkSymmetricDifference10Safe(b *testing.B) { - benchSymmetricDifference(b, 10, NewSet(), NewSet()) -} - -func BenchmarkSymmetricDifference10Unsafe(b *testing.B) { - benchSymmetricDifference(b, 10, NewThreadUnsafeSet(), NewThreadUnsafeSet()) -} - -func BenchmarkSymmetricDifference100Safe(b *testing.B) { - benchSymmetricDifference(b, 100, NewSet(), NewSet()) -} - -func BenchmarkSymmetricDifference100Unsafe(b *testing.B) { - benchSymmetricDifference(b, 100, NewThreadUnsafeSet(), NewThreadUnsafeSet()) -} - -func benchUnion(b *testing.B, n int, s, t Set) { - nums := nrand(n) - for _, v := range nums[:n/2] { - s.Add(v) - } - for _, v := range nums[n/2:] { - t.Add(v) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - s.Union(t) - } -} - -func BenchmarkUnion1Safe(b *testing.B) { - benchUnion(b, 1, NewSet(), NewSet()) -} - -func BenchmarkUnion1Unsafe(b *testing.B) { - benchUnion(b, 1, NewThreadUnsafeSet(), NewThreadUnsafeSet()) -} - -func BenchmarkUnion10Safe(b *testing.B) { - benchUnion(b, 10, NewSet(), NewSet()) -} - -func BenchmarkUnion10Unsafe(b *testing.B) { - benchUnion(b, 10, NewThreadUnsafeSet(), NewThreadUnsafeSet()) -} - -func BenchmarkUnion100Safe(b *testing.B) { - benchUnion(b, 100, NewSet(), NewSet()) -} - -func BenchmarkUnion100Unsafe(b *testing.B) { - benchUnion(b, 100, NewThreadUnsafeSet(), NewThreadUnsafeSet()) -} - -func benchIter(b *testing.B, n int, s Set) { - nums := nrand(n) - for _, v := range nums { - s.Add(v) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - c := s.Iter() - for _ = range c { - - } - } -} - -func BenchmarkIter1Safe(b *testing.B) { - benchIter(b, 1, NewSet()) -} - -func BenchmarkIter1Unsafe(b *testing.B) { - benchIter(b, 1, NewThreadUnsafeSet()) -} - -func BenchmarkIter10Safe(b *testing.B) { - benchIter(b, 10, NewSet()) -} - -func BenchmarkIter10Unsafe(b *testing.B) { - benchIter(b, 10, NewThreadUnsafeSet()) -} - -func BenchmarkIter100Safe(b *testing.B) { - benchIter(b, 100, NewSet()) -} - -func BenchmarkIter100Unsafe(b *testing.B) { - benchIter(b, 100, NewThreadUnsafeSet()) -} - -func benchString(b *testing.B, n int, s Set) { - nums := nrand(n) - for _, v := range nums { - s.Add(v) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - s.String() - } -} - -func BenchmarkString1Safe(b *testing.B) { - benchString(b, 1, NewSet()) -} - -func BenchmarkString1Unsafe(b *testing.B) { - benchString(b, 1, NewThreadUnsafeSet()) -} - -func BenchmarkString10Safe(b *testing.B) { - benchString(b, 10, NewSet()) -} - -func BenchmarkString10Unsafe(b *testing.B) { - benchString(b, 10, NewThreadUnsafeSet()) -} - -func BenchmarkString100Safe(b *testing.B) { - benchString(b, 100, NewSet()) -} - -func BenchmarkString100Unsafe(b *testing.B) { - benchString(b, 100, NewThreadUnsafeSet()) -} - -func benchToSlice(b *testing.B, s Set) { - nums := nrand(b.N) - for _, v := range nums { - s.Add(v) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - s.ToSlice() - } -} - -func BenchmarkToSliceSafe(b *testing.B) { - benchToSlice(b, NewSet()) -} - -func BenchmarkToSliceUnsafe(b *testing.B) { - benchToSlice(b, NewThreadUnsafeSet()) -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/deckarep/golang-set/set_test.go b/libnetwork/Godeps/_workspace/src/github.com/deckarep/golang-set/set_test.go deleted file mode 100644 index 5931002a82..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/deckarep/golang-set/set_test.go +++ /dev/null @@ -1,910 +0,0 @@ -/* -Open Source Initiative OSI - The MIT License (MIT):Licensing - -The MIT License (MIT) -Copyright (c) 2013 Ralph 
Caraveo (deckarep@gmail.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. -*/ - -package mapset - -import "testing" - -func makeSet(ints []int) Set { - set := NewSet() - for _, i := range ints { - set.Add(i) - } - return set -} - -func makeUnsafeSet(ints []int) Set { - set := NewThreadUnsafeSet() - for _, i := range ints { - set.Add(i) - } - return set -} - -func Test_NewSet(t *testing.T) { - a := NewSet() - - if a.Cardinality() != 0 { - t.Error("NewSet should start out as an empty set") - } -} - -func Test_NewUnsafeSet(t *testing.T) { - a := NewThreadUnsafeSet() - - if a.Cardinality() != 0 { - t.Error("NewSet should start out as an empty set") - } -} - -func Test_AddSet(t *testing.T) { - a := makeSet([]int{1, 2, 3}) - - if a.Cardinality() != 3 { - t.Error("AddSet does not have a size of 3 even though 3 items were added to a new set") - } -} - -func Test_AddUnsafeSet(t *testing.T) { - a := makeUnsafeSet([]int{1, 2, 3}) - - if a.Cardinality() != 3 { - t.Error("AddSet does not have a size of 3 even though 3 items were added to a new set") - } -} - -func Test_AddSetNoDuplicate(t *testing.T) { - a := makeSet([]int{7, 5, 3, 7}) - - if a.Cardinality() != 3 { - t.Error("AddSetNoDuplicate set should have 3 elements since 7 is a duplicate") - } - - if !(a.Contains(7) && a.Contains(5) && a.Contains(3)) { - t.Error("AddSetNoDuplicate set should have a 7, 5, and 3 in it.") - } -} - -func Test_AddUnsafeSetNoDuplicate(t *testing.T) { - a := makeUnsafeSet([]int{7, 5, 3, 7}) - - if a.Cardinality() != 3 { - t.Error("AddSetNoDuplicate set should have 3 elements since 7 is a duplicate") - } - - if !(a.Contains(7) && a.Contains(5) && a.Contains(3)) { - t.Error("AddSetNoDuplicate set should have a 7, 5, and 3 in it.") - } -} - -func Test_RemoveSet(t *testing.T) { - a := makeSet([]int{6, 3, 1}) - - a.Remove(3) - - if a.Cardinality() != 2 { - t.Error("RemoveSet should only have 2 items in the set") - } - - if !(a.Contains(6) && a.Contains(1)) { - t.Error("RemoveSet should have only items 6 and 1 in the set") - } - - a.Remove(6) - a.Remove(1) - - if a.Cardinality() != 0 { - t.Error("RemoveSet should be an empty set after removing 6 and 1") - } -} - -func Test_RemoveUnsafeSet(t *testing.T) { - a := makeUnsafeSet([]int{6, 3, 1}) - - a.Remove(3) - - if a.Cardinality() != 2 { - t.Error("RemoveSet should only have 2 items in the set") - } - - if !(a.Contains(6) && a.Contains(1)) { - t.Error("RemoveSet should have only items 6 and 1 in the set") - } - - a.Remove(6) - a.Remove(1) - - if a.Cardinality() != 0 { - t.Error("RemoveSet should be an 
empty set after removing 6 and 1") - } -} - -func Test_ContainsSet(t *testing.T) { - a := NewSet() - - a.Add(71) - - if !a.Contains(71) { - t.Error("ContainsSet should contain 71") - } - - a.Remove(71) - - if a.Contains(71) { - t.Error("ContainsSet should not contain 71") - } - - a.Add(13) - a.Add(7) - a.Add(1) - - if !(a.Contains(13) && a.Contains(7) && a.Contains(1)) { - t.Error("ContainsSet should contain 13, 7, 1") - } -} - -func Test_ContainsUnsafeSet(t *testing.T) { - a := NewThreadUnsafeSet() - - a.Add(71) - - if !a.Contains(71) { - t.Error("ContainsSet should contain 71") - } - - a.Remove(71) - - if a.Contains(71) { - t.Error("ContainsSet should not contain 71") - } - - a.Add(13) - a.Add(7) - a.Add(1) - - if !(a.Contains(13) && a.Contains(7) && a.Contains(1)) { - t.Error("ContainsSet should contain 13, 7, 1") - } -} - -func Test_ContainsMultipleSet(t *testing.T) { - a := makeSet([]int{8, 6, 7, 5, 3, 0, 9}) - - if !a.Contains(8, 6, 7, 5, 3, 0, 9) { - t.Error("ContainsAll should contain Jenny's phone number") - } - - if a.Contains(8, 6, 11, 5, 3, 0, 9) { - t.Error("ContainsAll should not have all of these numbers") - } -} - -func Test_ContainsMultipleUnsafeSet(t *testing.T) { - a := makeUnsafeSet([]int{8, 6, 7, 5, 3, 0, 9}) - - if !a.Contains(8, 6, 7, 5, 3, 0, 9) { - t.Error("ContainsAll should contain Jenny's phone number") - } - - if a.Contains(8, 6, 11, 5, 3, 0, 9) { - t.Error("ContainsAll should not have all of these numbers") - } -} - -func Test_ClearSet(t *testing.T) { - a := makeSet([]int{2, 5, 9, 10}) - - a.Clear() - - if a.Cardinality() != 0 { - t.Error("ClearSet should be an empty set") - } -} - -func Test_ClearUnsafeSet(t *testing.T) { - a := makeUnsafeSet([]int{2, 5, 9, 10}) - - a.Clear() - - if a.Cardinality() != 0 { - t.Error("ClearSet should be an empty set") - } -} - -func Test_CardinalitySet(t *testing.T) { - a := NewSet() - - if a.Cardinality() != 0 { - t.Error("set should be an empty set") - } - - a.Add(1) - - if a.Cardinality() != 1 { - t.Error("set should have a size of 1") - } - - a.Remove(1) - - if a.Cardinality() != 0 { - t.Error("set should be an empty set") - } - - a.Add(9) - - if a.Cardinality() != 1 { - t.Error("set should have a size of 1") - } - - a.Clear() - - if a.Cardinality() != 0 { - t.Error("set should have a size of 1") - } -} - -func Test_CardinalityUnsafeSet(t *testing.T) { - a := NewThreadUnsafeSet() - - if a.Cardinality() != 0 { - t.Error("set should be an empty set") - } - - a.Add(1) - - if a.Cardinality() != 1 { - t.Error("set should have a size of 1") - } - - a.Remove(1) - - if a.Cardinality() != 0 { - t.Error("set should be an empty set") - } - - a.Add(9) - - if a.Cardinality() != 1 { - t.Error("set should have a size of 1") - } - - a.Clear() - - if a.Cardinality() != 0 { - t.Error("set should have a size of 1") - } -} - -func Test_SetIsSubset(t *testing.T) { - a := makeSet([]int{1, 2, 3, 5, 7}) - - b := NewSet() - b.Add(3) - b.Add(5) - b.Add(7) - - if !b.IsSubset(a) { - t.Error("set b should be a subset of set a") - } - - b.Add(72) - - if b.IsSubset(a) { - t.Error("set b should not be a subset of set a because it contains 72 which is not in the set of a") - } -} - -func Test_UnsafeSetIsSubset(t *testing.T) { - a := makeUnsafeSet([]int{1, 2, 3, 5, 7}) - - b := NewThreadUnsafeSet() - b.Add(3) - b.Add(5) - b.Add(7) - - if !b.IsSubset(a) { - t.Error("set b should be a subset of set a") - } - - b.Add(72) - - if b.IsSubset(a) { - t.Error("set b should not be a subset of set a because it contains 72 which is not in the set of a") - } -} - -func 
Test_SetIsSuperSet(t *testing.T) { - a := NewSet() - a.Add(9) - a.Add(5) - a.Add(2) - a.Add(1) - a.Add(11) - - b := NewSet() - b.Add(5) - b.Add(2) - b.Add(11) - - if !a.IsSuperset(b) { - t.Error("set a should be a superset of set b") - } - - b.Add(42) - - if a.IsSuperset(b) { - t.Error("set a should not be a superset of set b because set a has a 42") - } -} - -func Test_UnsafeSetIsSuperSet(t *testing.T) { - a := NewThreadUnsafeSet() - a.Add(9) - a.Add(5) - a.Add(2) - a.Add(1) - a.Add(11) - - b := NewThreadUnsafeSet() - b.Add(5) - b.Add(2) - b.Add(11) - - if !a.IsSuperset(b) { - t.Error("set a should be a superset of set b") - } - - b.Add(42) - - if a.IsSuperset(b) { - t.Error("set a should not be a superset of set b because set a has a 42") - } -} - -func Test_SetUnion(t *testing.T) { - a := NewSet() - - b := NewSet() - b.Add(1) - b.Add(2) - b.Add(3) - b.Add(4) - b.Add(5) - - c := a.Union(b) - - if c.Cardinality() != 5 { - t.Error("set c is unioned with an empty set and therefore should have 5 elements in it") - } - - d := NewSet() - d.Add(10) - d.Add(14) - d.Add(0) - - e := c.Union(d) - if e.Cardinality() != 8 { - t.Error("set e should should have 8 elements in it after being unioned with set c to d") - } - - f := NewSet() - f.Add(14) - f.Add(3) - - g := f.Union(e) - if g.Cardinality() != 8 { - t.Error("set g should still ahve 8 elements in it after being unioned with set f that has duplicates") - } -} - -func Test_UnsafeSetUnion(t *testing.T) { - a := NewThreadUnsafeSet() - - b := NewThreadUnsafeSet() - b.Add(1) - b.Add(2) - b.Add(3) - b.Add(4) - b.Add(5) - - c := a.Union(b) - - if c.Cardinality() != 5 { - t.Error("set c is unioned with an empty set and therefore should have 5 elements in it") - } - - d := NewThreadUnsafeSet() - d.Add(10) - d.Add(14) - d.Add(0) - - e := c.Union(d) - if e.Cardinality() != 8 { - t.Error("set e should should have 8 elements in it after being unioned with set c to d") - } - - f := NewThreadUnsafeSet() - f.Add(14) - f.Add(3) - - g := f.Union(e) - if g.Cardinality() != 8 { - t.Error("set g should still ahve 8 elements in it after being unioned with set f that has duplicates") - } -} - -func Test_SetIntersect(t *testing.T) { - a := NewSet() - a.Add(1) - a.Add(3) - a.Add(5) - - b := NewSet() - a.Add(2) - a.Add(4) - a.Add(6) - - c := a.Intersect(b) - - if c.Cardinality() != 0 { - t.Error("set c should be the empty set because there is no common items to intersect") - } - - a.Add(10) - b.Add(10) - - d := a.Intersect(b) - - if !(d.Cardinality() == 1 && d.Contains(10)) { - t.Error("set d should have a size of 1 and contain the item 10") - } -} - -func Test_UnsafeSetIntersect(t *testing.T) { - a := NewThreadUnsafeSet() - a.Add(1) - a.Add(3) - a.Add(5) - - b := NewThreadUnsafeSet() - a.Add(2) - a.Add(4) - a.Add(6) - - c := a.Intersect(b) - - if c.Cardinality() != 0 { - t.Error("set c should be the empty set because there is no common items to intersect") - } - - a.Add(10) - b.Add(10) - - d := a.Intersect(b) - - if !(d.Cardinality() == 1 && d.Contains(10)) { - t.Error("set d should have a size of 1 and contain the item 10") - } -} - -func Test_SetDifference(t *testing.T) { - a := NewSet() - a.Add(1) - a.Add(2) - a.Add(3) - - b := NewSet() - b.Add(1) - b.Add(3) - b.Add(4) - b.Add(5) - b.Add(6) - b.Add(99) - - c := a.Difference(b) - - if !(c.Cardinality() == 1 && c.Contains(2)) { - t.Error("the difference of set a to b is the set of 1 item: 2") - } -} - -func Test_UnsafeSetDifference(t *testing.T) { - a := NewThreadUnsafeSet() - a.Add(1) - a.Add(2) - a.Add(3) - - b := 
NewThreadUnsafeSet() - b.Add(1) - b.Add(3) - b.Add(4) - b.Add(5) - b.Add(6) - b.Add(99) - - c := a.Difference(b) - - if !(c.Cardinality() == 1 && c.Contains(2)) { - t.Error("the difference of set a to b is the set of 1 item: 2") - } -} - -func Test_SetSymmetricDifference(t *testing.T) { - a := NewSet() - a.Add(1) - a.Add(2) - a.Add(3) - a.Add(45) - - b := NewSet() - b.Add(1) - b.Add(3) - b.Add(4) - b.Add(5) - b.Add(6) - b.Add(99) - - c := a.SymmetricDifference(b) - - if !(c.Cardinality() == 6 && c.Contains(2) && c.Contains(45) && c.Contains(4) && c.Contains(5) && c.Contains(6) && c.Contains(99)) { - t.Error("the symmetric difference of set a to b is the set of 6 items: 2, 45, 4, 5, 6, 99") - } -} - -func Test_UnsafeSetSymmetricDifference(t *testing.T) { - a := NewThreadUnsafeSet() - a.Add(1) - a.Add(2) - a.Add(3) - a.Add(45) - - b := NewThreadUnsafeSet() - b.Add(1) - b.Add(3) - b.Add(4) - b.Add(5) - b.Add(6) - b.Add(99) - - c := a.SymmetricDifference(b) - - if !(c.Cardinality() == 6 && c.Contains(2) && c.Contains(45) && c.Contains(4) && c.Contains(5) && c.Contains(6) && c.Contains(99)) { - t.Error("the symmetric difference of set a to b is the set of 6 items: 2, 45, 4, 5, 6, 99") - } -} - -func Test_SetEqual(t *testing.T) { - a := NewSet() - b := NewSet() - - if !a.Equal(b) { - t.Error("Both a and b are empty sets, and should be equal") - } - - a.Add(10) - - if a.Equal(b) { - t.Error("a should not be equal to b because b is empty and a has item 1 in it") - } - - b.Add(10) - - if !a.Equal(b) { - t.Error("a is now equal again to b because both have the item 10 in them") - } - - b.Add(8) - b.Add(3) - b.Add(47) - - if a.Equal(b) { - t.Error("b has 3 more elements in it so therefore should not be equal to a") - } - - a.Add(8) - a.Add(3) - a.Add(47) - - if !a.Equal(b) { - t.Error("a and b should be equal with the same number of elements") - } -} - -func Test_UnsafeSetEqual(t *testing.T) { - a := NewThreadUnsafeSet() - b := NewThreadUnsafeSet() - - if !a.Equal(b) { - t.Error("Both a and b are empty sets, and should be equal") - } - - a.Add(10) - - if a.Equal(b) { - t.Error("a should not be equal to b because b is empty and a has item 1 in it") - } - - b.Add(10) - - if !a.Equal(b) { - t.Error("a is now equal again to b because both have the item 10 in them") - } - - b.Add(8) - b.Add(3) - b.Add(47) - - if a.Equal(b) { - t.Error("b has 3 more elements in it so therefore should not be equal to a") - } - - a.Add(8) - a.Add(3) - a.Add(47) - - if !a.Equal(b) { - t.Error("a and b should be equal with the same number of elements") - } -} - -func Test_SetClone(t *testing.T) { - a := NewSet() - a.Add(1) - a.Add(2) - - b := a.Clone() - - if !a.Equal(b) { - t.Error("Clones should be equal") - } - - a.Add(3) - if a.Equal(b) { - t.Error("a contains one more element, they should not be equal") - } - - c := a.Clone() - c.Remove(1) - - if a.Equal(c) { - t.Error("C contains one element less, they should not be equal") - } -} - -func Test_UnsafeSetClone(t *testing.T) { - a := NewThreadUnsafeSet() - a.Add(1) - a.Add(2) - - b := a.Clone() - - if !a.Equal(b) { - t.Error("Clones should be equal") - } - - a.Add(3) - if a.Equal(b) { - t.Error("a contains one more element, they should not be equal") - } - - c := a.Clone() - c.Remove(1) - - if a.Equal(c) { - t.Error("C contains one element less, they should not be equal") - } -} - -func Test_Iterator(t *testing.T) { - a := NewSet() - - a.Add("Z") - a.Add("Y") - a.Add("X") - a.Add("W") - - b := NewSet() - for val := range a.Iter() { - b.Add(val) - } - - if !a.Equal(b) { - 
t.Error("The sets are not equal after iterating through the first set") - } -} - -func Test_UnsafeIterator(t *testing.T) { - a := NewThreadUnsafeSet() - - a.Add("Z") - a.Add("Y") - a.Add("X") - a.Add("W") - - b := NewThreadUnsafeSet() - for val := range a.Iter() { - b.Add(val) - } - - if !a.Equal(b) { - t.Error("The sets are not equal after iterating through the first set") - } -} - -func Test_PowerSet(t *testing.T) { - a := NewThreadUnsafeSet() - - a.Add(1) - a.Add("delta") - a.Add("chi") - a.Add(4) - - b := a.PowerSet() - if b.Cardinality() != 16 { - t.Error("unexpected PowerSet cardinality") - } -} - -func Test_EmptySetProperties(t *testing.T) { - empty := NewSet() - - a := NewSet() - a.Add(1) - a.Add("foo") - a.Add("bar") - - b := NewSet() - b.Add("one") - b.Add("two") - b.Add(3) - b.Add(4) - - c := NewSet() - - if !empty.IsSubset(a) || !empty.IsSubset(b) { - t.Error("The empty set is supposed to be a subset of all sets") - } - - if !a.IsSuperset(empty) || !b.IsSuperset(empty) { - t.Error("All sets are supposed to be a superset of the empty set") - } - - if !empty.IsSubset(empty) || !empty.IsSuperset(empty) { - t.Error("The empty set is supposed to be a subset and a superset of itself") - } - - c = a.Union(empty) - if !c.Equal(a) { - t.Error("The union of any set with the empty set is supposed to be equal to itself") - } - - c = a.Intersect(empty) - if !c.Equal(empty) { - t.Error("The intesection of any set with the empty set is supposed to be the empty set") - } - - c = a.CartesianProduct(empty) - if c.Cardinality() != 0 { - t.Error("Cartesian product of any set and the empty set must be the empty set") - } - - if empty.Cardinality() != 0 { - t.Error("Cardinality of the empty set is supposed to be zero") - } - - c = empty.PowerSet() - if c.Cardinality() != 1 { - t.Error("Cardinality of the power set of the empty set is supposed to be one { {} }") - } -} - -func Test_CartesianProduct(t *testing.T) { - a := NewThreadUnsafeSet() - b := NewThreadUnsafeSet() - empty := NewThreadUnsafeSet() - - a.Add(1) - a.Add(2) - a.Add(3) - - b.Add("one") - b.Add("two") - b.Add("three") - b.Add("alpha") - b.Add("gamma") - - c := a.CartesianProduct(b) - d := b.CartesianProduct(a) - - if c.Cardinality() != d.Cardinality() { - t.Error("Cardinality of AxB must be equal to BxA") - } - - if c.Cardinality() != (a.Cardinality() * b.Cardinality()) { - t.Error("Unexpected cardinality for cartesian product set") - } - - c = a.CartesianProduct(empty) - d = empty.CartesianProduct(b) - - if c.Cardinality() != 0 || d.Cardinality() != 0 { - t.Error("Cartesian product of any set and the emtpy set Ax0 || 0xA must be the empty set") - } -} - -func Test_ToSliceUnthreadsafe(t *testing.T) { - s := makeUnsafeSet([]int{1, 2, 3}) - setAsSlice := s.ToSlice() - if len(setAsSlice) != s.Cardinality() { - t.Errorf("Set length is incorrect: %v", len(setAsSlice)) - } - - for _, i := range setAsSlice { - if !s.Contains(i) { - t.Errorf("Set is missing element: %v", i) - } - } -} - -func Test_Example(t *testing.T) { - /* - requiredClasses := NewSet() - requiredClasses.Add("Cooking") - requiredClasses.Add("English") - requiredClasses.Add("Math") - requiredClasses.Add("Biology") - - scienceSlice := []interface{}{"Biology", "Chemistry"} - scienceClasses := NewSetFromSlice(scienceSlice) - - electiveClasses := NewSet() - electiveClasses.Add("Welding") - electiveClasses.Add("Music") - electiveClasses.Add("Automotive") - - bonusClasses := NewSet() - bonusClasses.Add("Go Programming") - bonusClasses.Add("Python Programming") - - //Show me all the 
available classes I can take - allClasses := requiredClasses.Union(scienceClasses).Union(electiveClasses).Union(bonusClasses) - fmt.Println(allClasses) //Set{English, Chemistry, Automotive, Cooking, Math, Biology, Welding, Music, Go Programming} - - //Is cooking considered a science class? - fmt.Println(scienceClasses.Contains("Cooking")) //false - - //Show me all classes that are not science classes, since I hate science. - fmt.Println(allClasses.Difference(scienceClasses)) //Set{English, Automotive, Cooking, Math, Welding, Music, Go Programming} - - //Which science classes are also required classes? - fmt.Println(scienceClasses.Intersect(requiredClasses)) //Set{Biology} - - //How many bonus classes do you offer? - fmt.Println(bonusClasses.Cardinality()) //2 - - //Do you have the following classes? Welding, Automotive and English? - fmt.Println(allClasses.ContainsAll("Welding", "Automotive", "English")) - */ -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/deckarep/golang-set/threadsafe_test.go b/libnetwork/Godeps/_workspace/src/github.com/deckarep/golang-set/threadsafe_test.go deleted file mode 100644 index d207660c86..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/deckarep/golang-set/threadsafe_test.go +++ /dev/null @@ -1,376 +0,0 @@ -/* -Open Source Initiative OSI - The MIT License (MIT):Licensing - -The MIT License (MIT) -Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
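The commented-out Test_Example above already walks through the mapset API; the following is a small distillation of the same calls as a standalone program. The main wrapper and the values noted in comments are illustrative; the set operations themselves (NewSetFromSlice, Union, Difference, Intersect, Contains, Cardinality) are the ones exercised by the deleted tests.

    package main

    import (
        "fmt"

        mapset "github.com/deckarep/golang-set"
    )

    func main() {
        required := mapset.NewSetFromSlice([]interface{}{"Cooking", "English", "Math", "Biology"})
        science := mapset.NewSetFromSlice([]interface{}{"Biology", "Chemistry"})

        all := required.Union(science)

        fmt.Println(all.Contains("Chemistry"))                   // true: Union keeps elements from both sets
        fmt.Println(all.Difference(science).Contains("Biology")) // false: Difference removes every science class
        fmt.Println(science.Intersect(required))                 // the one class in both sets (Biology)
        fmt.Println(required.Cardinality())                      // 4
    }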
-*/ - -package mapset - -import ( - "math/rand" - "runtime" - "sync" - "testing" -) - -const N = 1000 - -func Test_AddConcurrent(t *testing.T) { - runtime.GOMAXPROCS(2) - - s := NewSet() - ints := rand.Perm(N) - - var wg sync.WaitGroup - wg.Add(len(ints)) - for i := 0; i < len(ints); i++ { - go func(i int) { - s.Add(i) - wg.Done() - }(i) - } - - wg.Wait() - for _, i := range ints { - if !s.Contains(i) { - t.Errorf("Set is missing element: %v", i) - } - } -} - -func Test_CardinalityConcurrent(t *testing.T) { - runtime.GOMAXPROCS(2) - - s := NewSet() - - var wg sync.WaitGroup - wg.Add(1) - go func() { - elems := s.Cardinality() - for i := 0; i < N; i++ { - newElems := s.Cardinality() - if newElems < elems { - t.Errorf("Cardinality shrunk from %v to %v", elems, newElems) - } - } - wg.Done() - }() - - for i := 0; i < N; i++ { - s.Add(rand.Int()) - } - wg.Wait() -} - -func Test_ClearConcurrent(t *testing.T) { - runtime.GOMAXPROCS(2) - - s := NewSet() - ints := rand.Perm(N) - - var wg sync.WaitGroup - wg.Add(len(ints)) - for i := 0; i < len(ints); i++ { - go func() { - s.Clear() - wg.Done() - }() - go func(i int) { - s.Add(i) - }(i) - } - - wg.Wait() -} - -func Test_CloneConcurrent(t *testing.T) { - runtime.GOMAXPROCS(2) - - s := NewSet() - ints := rand.Perm(N) - - for _, v := range ints { - s.Add(v) - } - - var wg sync.WaitGroup - wg.Add(len(ints)) - for i := range ints { - go func(i int) { - s.Remove(i) - wg.Done() - }(i) - } - - s.Clone() -} - -func Test_ContainsConcurrent(t *testing.T) { - runtime.GOMAXPROCS(2) - - s := NewSet() - ints := rand.Perm(N) - interfaces := make([]interface{}, 0) - for _, v := range ints { - s.Add(v) - interfaces = append(interfaces, v) - } - - var wg sync.WaitGroup - for _ = range ints { - go func() { - s.Contains(interfaces...) 
- }() - } - wg.Wait() -} - -func Test_DifferenceConcurrent(t *testing.T) { - runtime.GOMAXPROCS(2) - - s, ss := NewSet(), NewSet() - ints := rand.Perm(N) - interfaces := make([]interface{}, 0) - for _, v := range ints { - s.Add(v) - ss.Add(v) - interfaces = append(interfaces, v) - } - - var wg sync.WaitGroup - for _ = range ints { - go func() { - s.Difference(ss) - }() - } - wg.Wait() -} - -func Test_EqualConcurrent(t *testing.T) { - runtime.GOMAXPROCS(2) - - s, ss := NewSet(), NewSet() - ints := rand.Perm(N) - interfaces := make([]interface{}, 0) - for _, v := range ints { - s.Add(v) - ss.Add(v) - interfaces = append(interfaces, v) - } - - var wg sync.WaitGroup - for _ = range ints { - go func() { - s.Equal(ss) - }() - } - wg.Wait() -} - -func Test_IntersectConcurrent(t *testing.T) { - runtime.GOMAXPROCS(2) - - s, ss := NewSet(), NewSet() - ints := rand.Perm(N) - interfaces := make([]interface{}, 0) - for _, v := range ints { - s.Add(v) - ss.Add(v) - interfaces = append(interfaces, v) - } - - var wg sync.WaitGroup - for _ = range ints { - go func() { - s.Intersect(ss) - }() - } - wg.Wait() -} - -func Test_IsSubsetConcurrent(t *testing.T) { - runtime.GOMAXPROCS(2) - - s, ss := NewSet(), NewSet() - ints := rand.Perm(N) - interfaces := make([]interface{}, 0) - for _, v := range ints { - s.Add(v) - ss.Add(v) - interfaces = append(interfaces, v) - } - - var wg sync.WaitGroup - for _ = range ints { - go func() { - s.IsSubset(ss) - }() - } - wg.Wait() -} - -func Test_IsSupersetConcurrent(t *testing.T) { - runtime.GOMAXPROCS(2) - - s, ss := NewSet(), NewSet() - ints := rand.Perm(N) - interfaces := make([]interface{}, 0) - for _, v := range ints { - s.Add(v) - ss.Add(v) - interfaces = append(interfaces, v) - } - - var wg sync.WaitGroup - for _ = range ints { - go func() { - s.IsSuperset(ss) - }() - } - wg.Wait() -} - -func Test_IterConcurrent(t *testing.T) { - runtime.GOMAXPROCS(2) - - s := NewSet() - ints := rand.Perm(N) - for _, v := range ints { - s.Add(v) - } - - cs := make([]<-chan interface{}, 0) - for _ = range ints { - cs = append(cs, s.Iter()) - } - - c := make(chan interface{}) - go func() { - for n := 0; n < len(ints)*N; { - for _, d := range cs { - select { - case <-d: - n++ - c <- nil - default: - } - } - } - close(c) - }() - - for _ = range c { - } -} - -func Test_RemoveConcurrent(t *testing.T) { - runtime.GOMAXPROCS(2) - - s := NewSet() - ints := rand.Perm(N) - for _, v := range ints { - s.Add(v) - } - - var wg sync.WaitGroup - wg.Add(len(ints)) - for _, v := range ints { - go func(i int) { - s.Remove(i) - wg.Done() - }(v) - } - wg.Wait() - - if s.Cardinality() != 0 { - t.Errorf("Expected cardinality 0; got %v", s.Cardinality()) - } -} - -func Test_StringConcurrent(t *testing.T) { - runtime.GOMAXPROCS(2) - - s := NewSet() - ints := rand.Perm(N) - for _, v := range ints { - s.Add(v) - } - - var wg sync.WaitGroup - wg.Add(len(ints)) - for _ = range ints { - go func() { - s.String() - wg.Done() - }() - } - wg.Wait() -} - -func Test_SymmetricDifferenceConcurrent(t *testing.T) { - runtime.GOMAXPROCS(2) - - s, ss := NewSet(), NewSet() - ints := rand.Perm(N) - interfaces := make([]interface{}, 0) - for _, v := range ints { - s.Add(v) - ss.Add(v) - interfaces = append(interfaces, v) - } - - var wg sync.WaitGroup - for _ = range ints { - go func() { - s.SymmetricDifference(ss) - }() - } - wg.Wait() -} - -func Test_ToSlice(t *testing.T) { - runtime.GOMAXPROCS(2) - - s := NewSet() - ints := rand.Perm(N) - - var wg sync.WaitGroup - wg.Add(len(ints)) - for i := 0; i < len(ints); i++ { - go 
func(i int) { - s.Add(i) - wg.Done() - }(i) - } - - wg.Wait() - setAsSlice := s.ToSlice() - if len(setAsSlice) != s.Cardinality() { - t.Errorf("Set length is incorrect: %v", len(setAsSlice)) - } - - for _, i := range setAsSlice { - if !s.Contains(i) { - t.Errorf("Set is missing element: %v", i) - } - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/api/types/blkiodev/blkio.go b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/api/types/blkiodev/blkio.go deleted file mode 100644 index 458a9c96bb..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/api/types/blkiodev/blkio.go +++ /dev/null @@ -1,23 +0,0 @@ -package blkiodev - -import "fmt" - -// WeightDevice is a structure that hold device:weight pair -type WeightDevice struct { - Path string - Weight uint16 -} - -func (w *WeightDevice) String() string { - return fmt.Sprintf("%s:%d", w.Path, w.Weight) -} - -// ThrottleDevice is a structure that hold device:rate_per_second pair -type ThrottleDevice struct { - Path string - Rate uint64 -} - -func (t *ThrottleDevice) String() string { - return fmt.Sprintf("%s:%d", t.Path, t.Rate) -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/opts/opts.go b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/opts/opts.go index 05aadbe74b..0b09981778 100644 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/opts/opts.go +++ b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/opts/opts.go @@ -36,7 +36,7 @@ func (opts *ListOpts) String() string { return fmt.Sprintf("%v", []string((*opts.values))) } -// Set validates if needed the input value and add it to the +// Set validates if needed the input value and adds it to the // internal slice. func (opts *ListOpts) Set(value string) error { if opts.validator != nil { @@ -240,3 +240,35 @@ func ValidateLabel(val string) (string, error) { } return val, nil } + +// ValidateSysctl validates an sysctl and returns it. +func ValidateSysctl(val string) (string, error) { + validSysctlMap := map[string]bool{ + "kernel.msgmax": true, + "kernel.msgmnb": true, + "kernel.msgmni": true, + "kernel.sem": true, + "kernel.shmall": true, + "kernel.shmmax": true, + "kernel.shmmni": true, + "kernel.shm_rmid_forced": true, + } + validSysctlPrefixes := []string{ + "net.", + "fs.mqueue.", + } + arr := strings.Split(val, "=") + if len(arr) < 2 { + return "", fmt.Errorf("sysctl '%s' is not whitelisted", val) + } + if validSysctlMap[arr[0]] { + return val, nil + } + + for _, vp := range validSysctlPrefixes { + if strings.HasPrefix(arr[0], vp) { + return val, nil + } + } + return "", fmt.Errorf("sysctl '%s' is not whitelisted", val) +} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/opts/opts_windows.go b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/opts/opts_windows.go index 2a9e2be744..ebe40c969c 100644 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/opts/opts_windows.go +++ b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/opts/opts_windows.go @@ -1,10 +1,10 @@ package opts -// TODO Windows. Identify bug in GOLang 1.5.1 and/or Windows Server 2016 TP4. +// TODO Windows. Identify bug in GOLang 1.5.1+ and/or Windows Server 2016 TP5. // @jhowardmsft, @swernli. // // On Windows, this mitigates a problem with the default options of running -// a docker client against a local docker daemon on TP4. +// a docker client against a local docker daemon on TP5. 
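For reference, a minimal sketch of how the ValidateSysctl helper added in the opts.go hunk above behaves: a value passes only if its key is in the whitelist map or starts with "net." or "fs.mqueue.". The concrete sysctl strings below are illustrative inputs, not part of the change; the import path follows the vendored tree.

    package main

    import (
        "fmt"

        "github.com/docker/docker/opts"
    )

    func main() {
        // Key matches the "net." prefix, so the value is returned unchanged.
        if v, err := opts.ValidateSysctl("net.ipv4.ip_forward=1"); err == nil {
            fmt.Println("accepted:", v)
        }

        // Key is neither in the whitelist map nor under "net." / "fs.mqueue.",
        // so the helper rejects it.
        if _, err := opts.ValidateSysctl("vm.swappiness=10"); err != nil {
            fmt.Println("rejected:", err)
        }
    }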
// // What was found that if the default host is "localhost", even if the client // (and daemon as this is local) is not physically on a network, and the DNS @@ -35,7 +35,7 @@ package opts // time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1" // // We suspect this is either a bug introduced in GOLang 1.5.1, or that a change -// in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows TP4. In theory, +// in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows. In theory, // the Windows networking stack is supposed to resolve "localhost" internally, // without hitting DNS, or even reading the hosts file (which is why localhost // is commented out in the hosts file on Windows). @@ -44,12 +44,12 @@ package opts // address does not cause the delay. // // This does not occur with the docker client built with 1.4.3 on the same -// Windows TP4 build, regardless of whether the daemon is built using 1.5.1 +// Windows build, regardless of whether the daemon is built using 1.5.1 // or 1.4.3. It does not occur on Linux. We also verified we see the same thing // on a cross-compiled Windows binary (from Linux). // // Final note: This is a mitigation, not a 'real' fix. It is still susceptible -// to the delay in TP4 if a user were to do 'docker run -H=tcp://localhost:2375...' +// to the delay if a user were to do 'docker run -H=tcp://localhost:2375...' // explicitly. // DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080 diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/buffer.go b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/buffer.go new file mode 100644 index 0000000000..3d737b3e19 --- /dev/null +++ b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/buffer.go @@ -0,0 +1,51 @@ +package ioutils + +import ( + "errors" + "io" +) + +var errBufferFull = errors.New("buffer is full") + +type fixedBuffer struct { + buf []byte + pos int + lastRead int +} + +func (b *fixedBuffer) Write(p []byte) (int, error) { + n := copy(b.buf[b.pos:cap(b.buf)], p) + b.pos += n + + if n < len(p) { + if b.pos == cap(b.buf) { + return n, errBufferFull + } + return n, io.ErrShortWrite + } + return n, nil +} + +func (b *fixedBuffer) Read(p []byte) (int, error) { + n := copy(p, b.buf[b.lastRead:b.pos]) + b.lastRead += n + return n, nil +} + +func (b *fixedBuffer) Len() int { + return b.pos - b.lastRead +} + +func (b *fixedBuffer) Cap() int { + return cap(b.buf) +} + +func (b *fixedBuffer) Reset() { + b.pos = 0 + b.lastRead = 0 + b.buf = b.buf[:0] +} + +func (b *fixedBuffer) String() string { + return string(b.buf[b.lastRead:b.pos]) +} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/bytespipe.go b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/bytespipe.go index e263c284f0..59bba96265 100644 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/bytespipe.go +++ b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/bytespipe.go @@ -9,12 +9,19 @@ import ( // maxCap is the highest capacity to use in byte slices that buffer data. 
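A short test-style sketch of the fixedBuffer semantics introduced in the new ioutils/buffer.go: a write past capacity reports errBufferFull, Read drains what was written, and Reset empties the buffer while keeping its capacity. Since the type is unexported, the sketch assumes it sits alongside buffer.go in package ioutils; the 8-byte capacity is arbitrary.

    package ioutils

    import "testing"

    // Sketch: a fixedBuffer never grows, so writing past its capacity reports
    // errBufferFull; Read drains what fits and Reset reuses the same array.
    func TestFixedBufferSketch(t *testing.T) {
        b := &fixedBuffer{buf: make([]byte, 0, 8)}

        n, err := b.Write([]byte("0123456789")) // 10 bytes into an 8-byte buffer
        if n != 8 || err != errBufferFull {
            t.Fatalf("expected 8 bytes written and errBufferFull, got n=%d err=%v", n, err)
        }

        out := make([]byte, 8)
        if n, _ := b.Read(out); n != 8 || string(out) != "01234567" {
            t.Fatalf("unexpected read: %q", out[:n])
        }

        b.Reset() // Len drops to 0, Cap stays at 8
        if b.Len() != 0 || b.Cap() != 8 {
            t.Fatalf("Reset should empty the buffer but keep its capacity")
        }
    }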
const maxCap = 1e6 +// minCap is the lowest capacity to use in byte slices that buffer data +const minCap = 64 + // blockThreshold is the minimum number of bytes in the buffer which will cause // a write to BytesPipe to block when allocating a new slice. const blockThreshold = 1e6 -// ErrClosed is returned when Write is called on a closed BytesPipe. -var ErrClosed = errors.New("write to closed BytesPipe") +var ( + // ErrClosed is returned when Write is called on a closed BytesPipe. + ErrClosed = errors.New("write to closed BytesPipe") + + bufPools = make(map[int]*sync.Pool) +) // BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue). // All written data may be read at most once. Also, BytesPipe allocates @@ -23,22 +30,17 @@ var ErrClosed = errors.New("write to closed BytesPipe") type BytesPipe struct { mu sync.Mutex wait *sync.Cond - buf [][]byte // slice of byte-slices of buffered data - lastRead int // index in the first slice to a read point - bufLen int // length of data buffered over the slices - closeErr error // error to return from next Read. set to nil if not closed. + buf []*fixedBuffer + bufLen int + closeErr error // error to return from next Read. set to nil if not closed. } // NewBytesPipe creates new BytesPipe, initialized by specified slice. // If buf is nil, then it will be initialized with slice which cap is 64. // buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf). -func NewBytesPipe(buf []byte) *BytesPipe { - if cap(buf) == 0 { - buf = make([]byte, 0, 64) - } - bp := &BytesPipe{ - buf: [][]byte{buf[:0]}, - } +func NewBytesPipe() *BytesPipe { + bp := &BytesPipe{} + bp.buf = append(bp.buf, getBuffer(minCap)) bp.wait = sync.NewCond(&bp.mu) return bp } @@ -47,22 +49,31 @@ func NewBytesPipe(buf []byte) *BytesPipe { // It can allocate new []byte slices in a process of writing. 
func (bp *BytesPipe) Write(p []byte) (int, error) { bp.mu.Lock() - defer bp.mu.Unlock() + written := 0 +loop0: for { if bp.closeErr != nil { + bp.mu.Unlock() return written, ErrClosed } - // write data to the last buffer - b := bp.buf[len(bp.buf)-1] - // copy data to the current empty allocated area - n := copy(b[len(b):cap(b)], p) - // increment buffered data length - bp.bufLen += n - // include written data in last buffer - bp.buf[len(bp.buf)-1] = b[:len(b)+n] + if len(bp.buf) == 0 { + bp.buf = append(bp.buf, getBuffer(64)) + } + // get the last buffer + b := bp.buf[len(bp.buf)-1] + + n, err := b.Write(p) written += n + bp.bufLen += n + + // errBufferFull is an error we expect to get if the buffer is full + if err != nil && err != errBufferFull { + bp.wait.Broadcast() + bp.mu.Unlock() + return written, err + } // if there was enough room to write all then break if len(p) == n { @@ -72,20 +83,23 @@ func (bp *BytesPipe) Write(p []byte) (int, error) { // more data: write to the next slice p = p[n:] - // block if too much data is still in the buffer + // make sure the buffer doesn't grow too big from this write for bp.bufLen >= blockThreshold { bp.wait.Wait() + if bp.closeErr != nil { + continue loop0 + } } - // allocate slice that has twice the size of the last unless maximum reached - nextCap := 2 * cap(bp.buf[len(bp.buf)-1]) + // add new byte slice to the buffers slice and continue writing + nextCap := b.Cap() * 2 if nextCap > maxCap { nextCap = maxCap } - // add new byte slice to the buffers slice and continue writing - bp.buf = append(bp.buf, make([]byte, 0, nextCap)) + bp.buf = append(bp.buf, getBuffer(nextCap)) } bp.wait.Broadcast() + bp.mu.Unlock() return written, nil } @@ -107,46 +121,60 @@ func (bp *BytesPipe) Close() error { return bp.CloseWithError(nil) } -func (bp *BytesPipe) len() int { - return bp.bufLen - bp.lastRead -} - // Read reads bytes from BytesPipe. // Data could be read only once. func (bp *BytesPipe) Read(p []byte) (n int, err error) { bp.mu.Lock() - defer bp.mu.Unlock() - if bp.len() == 0 { + if bp.bufLen == 0 { if bp.closeErr != nil { + bp.mu.Unlock() return 0, bp.closeErr } bp.wait.Wait() - if bp.len() == 0 && bp.closeErr != nil { + if bp.bufLen == 0 && bp.closeErr != nil { + bp.mu.Unlock() return 0, bp.closeErr } } - for { - read := copy(p, bp.buf[0][bp.lastRead:]) + + for bp.bufLen > 0 { + b := bp.buf[0] + read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error n += read - bp.lastRead += read - if bp.len() == 0 { - // we have read everything. reset to the beginning. - bp.lastRead = 0 - bp.bufLen -= len(bp.buf[0]) - bp.buf[0] = bp.buf[0][:0] - break + bp.bufLen -= read + + if b.Len() == 0 { + // it's empty so return it to the pool and move to the next one + returnBuffer(b) + bp.buf[0] = nil + bp.buf = bp.buf[1:] } - // break if everything was read + if len(p) == read { break } - // more buffered data and more asked. read from next slice. 
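The bytespipe.go rework above changes the constructor to NewBytesPipe() with no initial slice and grows the pipe through pooled fixedBuffers that double in capacity up to maxCap. A minimal usage sketch under those assumptions; the import path is taken from the vendored tree and the payload string is illustrative.

    package main

    import (
        "fmt"

        "github.com/docker/docker/pkg/ioutils"
    )

    func main() {
        // The reworked constructor takes no initial slice; the pipe starts from
        // a small pooled fixedBuffer and doubles capacities as it grows.
        bp := ioutils.NewBytesPipe()

        if _, err := bp.Write([]byte("hello, pipe")); err != nil {
            panic(err)
        }
        bp.Close() // buffered data can still be read after the write side closes

        buf := make([]byte, 32)
        n, _ := bp.Read(buf)
        fmt.Println(string(buf[:n])) // hello, pipe
    }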
+ p = p[read:] - bp.lastRead = 0 - bp.bufLen -= len(bp.buf[0]) - bp.buf[0] = nil // throw away old slice - bp.buf = bp.buf[1:] // switch to next } + bp.wait.Broadcast() + bp.mu.Unlock() return } + +func returnBuffer(b *fixedBuffer) { + b.Reset() + pool := bufPools[b.Cap()] + if pool != nil { + pool.Put(b) + } +} + +func getBuffer(size int) *fixedBuffer { + pool, ok := bufPools[size] + if !ok { + pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }} + bufPools[size] = pool + } + return pool.Get().(*fixedBuffer) +} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/scheduler.go b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/scheduler.go deleted file mode 100644 index 3c88f29e35..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/scheduler.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build !gccgo - -package ioutils - -func callSchedulerIfNecessary() { -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go deleted file mode 100644 index c11d02b947..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build gccgo - -package ioutils - -import ( - "runtime" -) - -func callSchedulerIfNecessary() { - //allow or force Go scheduler to switch context, without explicitly - //forcing this will make it hang when using gccgo implementation - runtime.Gosched() -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/listenbuffer/README.md b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/listenbuffer/README.md deleted file mode 100644 index 2273509817..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/listenbuffer/README.md +++ /dev/null @@ -1,27 +0,0 @@ -# listenbuffer - -listenbuffer uses the kernel's listening backlog functionality to queue -connections, allowing applications to start listening immediately and handle -connections later. This is signaled by closing the activation channel passed to -the constructor. - -The maximum amount of queued connections depends on the configuration of your -kernel (typically called SOMAXXCON) and cannot be configured in Go with the -net package. See `src/net/sock_platform.go` in the Go tree or consult your -kernel's manual. - - activator := make(chan struct{}) - buffer, err := NewListenBuffer("tcp", "localhost:4000", activator) - if err != nil { - panic(err) - } - - // will block until activator has been closed or is sent an event - client, err := buffer.Accept() - -Somewhere else in your application once it's been booted: - - close(activator) - -`buffer.Accept()` will return the first client in the kernel listening queue, or -continue to block until a client connects or an error occurs. diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/listenbuffer/buffer.go b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/listenbuffer/buffer.go deleted file mode 100644 index aa47471c40..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/listenbuffer/buffer.go +++ /dev/null @@ -1,76 +0,0 @@ -/* -Package listenbuffer uses the kernel's listening backlog functionality to queue -connections, allowing applications to start listening immediately and handle -connections later. 
This is signaled by closing the activation channel passed to -the constructor. - -The maximum amount of queued connections depends on the configuration of your -kernel (typically called SOMAXXCON) and cannot be configured in Go with the -net package. See `src/net/sock_platform.go` in the Go tree or consult your -kernel's manual. - - activator := make(chan struct{}) - buffer, err := NewListenBuffer("tcp", "localhost:4000", activator) - if err != nil { - panic(err) - } - - // will block until activator has been closed or is sent an event - client, err := buffer.Accept() - -Somewhere else in your application once it's been booted: - - close(activator) - -`buffer.Accept()` will return the first client in the kernel listening queue, or -continue to block until a client connects or an error occurs. -*/ -package listenbuffer - -import "net" - -// NewListenBuffer returns a net.Listener listening on addr with the protocol -// passed. The channel passed is used to activate the listenbuffer when the -// caller is ready to accept connections. -func NewListenBuffer(proto, addr string, activate <-chan struct{}) (net.Listener, error) { - wrapped, err := net.Listen(proto, addr) - if err != nil { - return nil, err - } - - return &defaultListener{ - wrapped: wrapped, - activate: activate, - }, nil -} - -// defaultListener is the buffered wrapper around the net.Listener -type defaultListener struct { - wrapped net.Listener // The net.Listener wrapped by listenbuffer - ready bool // Whether the listenbuffer has been activated - activate <-chan struct{} // Channel to control activation of the listenbuffer -} - -// Close closes the wrapped socket. -func (l *defaultListener) Close() error { - return l.wrapped.Close() -} - -// Addr returns the listening address of the wrapped socket. -func (l *defaultListener) Addr() net.Addr { - return l.wrapped.Addr() -} - -// Accept returns a client connection on the wrapped socket if the listen buffer -// has been activated. To active the listenbuffer the activation channel passed -// to NewListenBuffer must have been closed or sent an event. 
-func (l *defaultListener) Accept() (net.Conn, error) { - // if the listen has been told it is ready then we can go ahead and - // start returning connections - if l.ready { - return l.wrapped.Accept() - } - <-l.activate - l.ready = true - return l.Accept() -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/listenbuffer/listen_buffer_test.go b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/listenbuffer/listen_buffer_test.go deleted file mode 100644 index 6ffd2f7984..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/listenbuffer/listen_buffer_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package listenbuffer - -import ( - "io/ioutil" - "net" - "testing" -) - -func TestListenBufferAllowsAcceptingWhenActivated(t *testing.T) { - lock := make(chan struct{}) - buffer, err := NewListenBuffer("tcp", "", lock) - if err != nil { - t.Fatal("Unable to create listen buffer: ", err) - } - - go func() { - conn, err := net.Dial("tcp", buffer.Addr().String()) - if err != nil { - t.Fatal("Client failed to establish connection to server: ", err) - } - - conn.Write([]byte("ping")) - conn.Close() - }() - - close(lock) - - client, err := buffer.Accept() - if err != nil { - t.Fatal("Failed to accept client: ", err) - } - - response, err := ioutil.ReadAll(client) - if err != nil { - t.Fatal("Failed to read from client: ", err) - } - - if string(response) != "ping" { - t.Fatal("Expected to receive ping from client, received: ", string(response)) - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go index 7d12fcbd9d..bb9b32641e 100644 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go +++ b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go @@ -5,7 +5,7 @@ import ( ) // Utsname represents the system name structure. -// It is passthgrouh for syscall.Utsname in order to make it portable with +// It is passthrough for syscall.Utsname in order to make it portable with // other platforms where it is not available. 
type Utsname syscall.Utsname diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/plugins/discovery.go b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/plugins/discovery.go index 3f79661782..9dc64194f2 100644 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/plugins/discovery.go +++ b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/plugins/discovery.go @@ -9,6 +9,7 @@ import ( "os" "path/filepath" "strings" + "sync" ) var ( @@ -118,6 +119,7 @@ func readPluginJSONInfo(name, path string) (*Plugin, error) { if len(p.TLSConfig.CAFile) == 0 { p.TLSConfig.InsecureSkipVerify = true } + p.activateWait = sync.NewCond(&sync.Mutex{}) return &p, nil } diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/plugins/plugins.go b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/plugins/plugins.go index 4f270a4040..b83b5ae61d 100644 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/plugins/plugins.go +++ b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/plugins/plugins.go @@ -67,7 +67,7 @@ type Plugin struct { // error produced by activation activateErr error - // specifies if the activation sequence is completed (not if it is sucessful or not) + // specifies if the activation sequence is completed (not if it is successful or not) activated bool // wait for activation to finish activateWait *sync.Cond diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/reexec.go b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/reexec.go index ceb98d25fc..c56671d919 100644 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/reexec.go +++ b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/reexec/reexec.go @@ -12,7 +12,7 @@ var registeredInitializers = make(map[string]func()) // Register adds an initialization func under the specified name func Register(name string, initializer func()) { if _, exists := registeredInitializers[name]; exists { - panic(fmt.Sprintf("reexec func already registred under name %q", name)) + panic(fmt.Sprintf("reexec func already registered under name %q", name)) } registeredInitializers[name] = initializer diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/sockets/README.md b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/sockets/README.md deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/sockets/tcp_socket.go b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/sockets/tcp_socket.go deleted file mode 100644 index 6665a3bde1..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/sockets/tcp_socket.go +++ /dev/null @@ -1,44 +0,0 @@ -// Package sockets provides helper functions to create and configure Unix or TCP -// sockets. -package sockets - -import ( - "crypto/tls" - "net" - "net/http" - "time" -) - -// NewTCPSocket creates a TCP socket listener with the specified address and -// and the specified tls configuration. If TLSConfig is set, will encapsulate the -// TCP listener inside a TLS one. 
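The discovery.go hunk above initializes activateWait with sync.NewCond for plugins read from .json spec files. A small, self-contained illustration of the invariant follows; the plugin struct and helper below are hypothetical, and only the Cond initialization mirrors the change: Wait can only be called on a Cond that has been constructed, so the field must be set before any caller blocks on activation.

    package main

    import "sync"

    // Hypothetical stand-in for plugins.Plugin: only the Cond field matters here.
    type plugin struct {
        activateWait *sync.Cond
    }

    func newPluginFromJSON() *plugin {
        p := &plugin{}
        // Mirrors the readPluginJSONInfo fix: construct the Cond before anyone
        // can block on activation; calling Wait on a nil *sync.Cond panics.
        p.activateWait = sync.NewCond(&sync.Mutex{})
        return p
    }

    func main() {
        p := newPluginFromJSON()

        p.activateWait.L.Lock()
        go func() {
            p.activateWait.L.Lock()
            p.activateWait.Broadcast() // simulate activation finishing
            p.activateWait.L.Unlock()
        }()
        p.activateWait.Wait() // returns once Broadcast fires
        p.activateWait.L.Unlock()
    }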
-func NewTCPSocket(addr string, tlsConfig *tls.Config) (net.Listener, error) { - l, err := net.Listen("tcp", addr) - if err != nil { - return nil, err - } - if tlsConfig != nil { - tlsConfig.NextProtos = []string{"http/1.1"} - l = tls.NewListener(l, tlsConfig) - } - return l, nil -} - -// ConfigureTCPTransport configures the specified Transport according to the -// specified proto and addr. -// If the proto is unix (using a unix socket to communicate) the compression -// is disabled. -func ConfigureTCPTransport(tr *http.Transport, proto, addr string) { - // Why 32? See https://github.com/docker/docker/pull/8035. - timeout := 32 * time.Second - if proto == "unix" { - // No need for compression in local communications. - tr.DisableCompression = true - tr.Dial = func(_, _ string) (net.Conn, error) { - return net.DialTimeout(proto, addr, timeout) - } - } else { - tr.Proxy = http.ProxyFromEnvironment - tr.Dial = (&net.Dialer{Timeout: timeout}).Dial - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/sockets/unix_socket.go b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/sockets/unix_socket.go deleted file mode 100644 index c10acedca2..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/sockets/unix_socket.go +++ /dev/null @@ -1,80 +0,0 @@ -// +build linux freebsd - -package sockets - -import ( - "fmt" - "net" - "os" - "strconv" - "syscall" - - "github.com/Sirupsen/logrus" - "github.com/opencontainers/runc/libcontainer/user" -) - -// NewUnixSocket creates a unix socket with the specified path and group. -func NewUnixSocket(path, group string) (net.Listener, error) { - if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) { - return nil, err - } - mask := syscall.Umask(0777) - defer syscall.Umask(mask) - l, err := net.Listen("unix", path) - if err != nil { - return nil, err - } - if err := setSocketGroup(path, group); err != nil { - l.Close() - return nil, err - } - if err := os.Chmod(path, 0660); err != nil { - l.Close() - return nil, err - } - return l, nil -} - -func setSocketGroup(path, group string) error { - if group == "" { - return nil - } - if err := changeGroup(path, group); err != nil { - if group != "docker" { - return err - } - logrus.Debugf("Warning: could not change group %s to docker: %v", path, err) - } - return nil -} - -func changeGroup(path string, nameOrGid string) error { - gid, err := lookupGidByName(nameOrGid) - if err != nil { - return err - } - logrus.Debugf("%s group found. 
gid: %d", nameOrGid, gid) - return os.Chown(path, 0, gid) -} - -func lookupGidByName(nameOrGid string) (int, error) { - groupFile, err := user.GetGroupPath() - if err != nil { - return -1, err - } - groups, err := user.ParseGroupFileFilter(groupFile, func(g user.Group) bool { - return g.Name == nameOrGid || strconv.Itoa(g.Gid) == nameOrGid - }) - if err != nil { - return -1, err - } - if groups != nil && len(groups) > 0 { - return groups[0].Gid, nil - } - gid, err := strconv.Atoi(nameOrGid) - if err == nil { - logrus.Warnf("Could not find GID %d", gid) - return gid, nil - } - return -1, fmt.Errorf("Group %s not found", nameOrGid) -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/stringid/stringid.go b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/stringid/stringid.go index 02d2594e1a..161184ff8a 100644 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/stringid/stringid.go +++ b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/stringid/stringid.go @@ -24,7 +24,7 @@ func IsShortID(id string) bool { // TruncateID returns a shorthand version of a string identifier for convenience. // A collision with other shorthands is very unlikely, but possible. // In case of a collision a lookup with TruncIndex.Get() will fail, and the caller -// will need to use a langer prefix, or the full-length Id. +// will need to use a longer prefix, or the full-length Id. func TruncateID(id string) string { if i := strings.IndexRune(id, ':'); i >= 0 { id = id[i+1:] @@ -57,7 +57,7 @@ func generateID(crypto bool) string { } } -// GenerateRandomID returns an unique id. +// GenerateRandomID returns a unique id. func GenerateRandomID() string { return generateID(true) diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/system/syscall_windows.go b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/system/syscall_windows.go index 061e220f79..ef596f343f 100644 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/system/syscall_windows.go +++ b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/system/syscall_windows.go @@ -1,11 +1,14 @@ package system import ( - "fmt" "syscall" "unsafe" ) +var ( + ntuserApiset = syscall.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") +) + // OSVersion is a wrapper for Windows version information // https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx type OSVersion struct { @@ -17,17 +20,18 @@ type OSVersion struct { // GetOSVersion gets the operating system version on Windows. Note that // docker.exe must be manifested to get the correct version information. -func GetOSVersion() (OSVersion, error) { +func GetOSVersion() OSVersion { var err error osv := OSVersion{} osv.Version, err = syscall.GetVersion() if err != nil { - return osv, fmt.Errorf("Failed to call GetVersion()") + // GetVersion never fails. + panic(err) } osv.MajorVersion = uint8(osv.Version & 0xFF) osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) osv.Build = uint16(osv.Version >> 16) - return osv, nil + return osv } // Unmount is a platform-specific helper function to call @@ -58,3 +62,12 @@ func CommandLineToArgv(commandLine string) ([]string, error) { return newArgs, nil } + +// HasWin32KSupport determines whether containers that depend on win32k can +// run on this machine. Win32k is the driver used to implement windowing. +func HasWin32KSupport() bool { + // For now, check for ntuser API support on the host. 
In the future, a host + // may support win32k in containers even if the host does not support ntuser + // APIs. + return ntuserApiset.Load() == nil +} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask.go b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask.go index c670fcd758..3d0146b01a 100644 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask.go +++ b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask.go @@ -7,7 +7,7 @@ import ( ) // Umask sets current process's file mode creation mask to newmask -// and return oldmask. +// and returns oldmask. func Umask(newmask int) (oldmask int, err error) { return syscall.Umask(newmask), nil } diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_linux_cgo.go b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_linux_cgo.go index a22cd9d105..59dac5ba8e 100644 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_linux_cgo.go +++ b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_linux_cgo.go @@ -11,7 +11,7 @@ import ( import "C" // Termios is the Unix API for terminal I/O. -// It is passthgrouh for syscall.Termios in order to make it portable with +// It is passthrough for syscall.Termios in order to make it portable with // other platforms where it is not available or handled differently. type Termios syscall.Termios diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_other.go b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_other.go index 266039bac3..750d7c3f60 100644 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_other.go +++ b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_other.go @@ -1,5 +1,6 @@ // +build !windows // +build !linux !cgo +// +build !solaris !cgo package term diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_solaris_cgo.go b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_solaris_cgo.go new file mode 100644 index 0000000000..c9139d0ca8 --- /dev/null +++ b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_solaris_cgo.go @@ -0,0 +1,63 @@ +// +build solaris,cgo + +package term + +import ( + "syscall" + "unsafe" +) + +// #include +import "C" + +// Termios is the Unix API for terminal I/O. +// It is passthrough for syscall.Termios in order to make it portable with +// other platforms where it is not available or handled differently. +type Termios syscall.Termios + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. 
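The syscall_windows.go hunk above drops the error return from GetOSVersion and adds HasWin32KSupport, which probes the ext-ms-win-ntuser-window-l1-1-0 API set. A Windows-only usage sketch; the import path follows the vendored tree and the printed text is illustrative.

    package main

    import (
        "fmt"

        "github.com/docker/docker/pkg/system"
    )

    func main() {
        // GetOSVersion no longer returns an error; the later term_windows.go
        // hunk drops its error handling for the same reason.
        osv := system.GetOSVersion()
        fmt.Printf("Windows %d.%d build %d\n", osv.MajorVersion, osv.MinorVersion, osv.Build)

        // HasWin32KSupport reports whether the ntuser API set loads on this host.
        if system.HasWin32KSupport() {
            fmt.Println("win32k-dependent containers can run here")
        }
    }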
+func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if err := tcget(fd, &oldState.termios); err != 0 { + return nil, err + } + + newState := oldState.termios + + newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON | syscall.IXANY) + newState.Oflag &^= syscall.OPOST + newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN) + newState.Cflag &^= (syscall.CSIZE | syscall.PARENB) + newState.Cflag |= syscall.CS8 + + /* + VMIN is the minimum number of characters that needs to be read in non-canonical mode for it to be returned + Since VMIN is overloaded with another element in canonical mode when we switch modes it defaults to 4. It + needs to be explicitly set to 1. + */ + newState.Cc[C.VMIN] = 1 + newState.Cc[C.VTIME] = 0 + + if err := tcset(fd, &newState); err != 0 { + return nil, err + } + return &oldState, nil +} + +func tcget(fd uintptr, p *Termios) syscall.Errno { + ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p))) + if ret != 0 { + return err.(syscall.Errno) + } + return 0 +} + +func tcset(fd uintptr, p *Termios) syscall.Errno { + ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p))) + if ret != 0 { + return err.(syscall.Errno) + } + return 0 +} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term.go b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term.go index 11ed20937b..8f554847f0 100644 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term.go +++ b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term.go @@ -10,7 +10,6 @@ import ( "os" "os/signal" "syscall" - "unsafe" ) var ( @@ -47,27 +46,6 @@ func GetFdInfo(in interface{}) (uintptr, bool) { return inFd, isTerminalIn } -// GetWinsize returns the window size based on the specified file descriptor. -func GetWinsize(fd uintptr) (*Winsize, error) { - ws := &Winsize{} - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(ws))) - // Skip errno = 0 - if err == 0 { - return ws, nil - } - return ws, err -} - -// SetWinsize tries to set the specified window size for the specified file descriptor. -func SetWinsize(fd uintptr, ws *Winsize) error { - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCSWINSZ), uintptr(unsafe.Pointer(ws))) - // Skip errno = 0 - if err == 0 { - return nil - } - return err -} - // IsTerminal returns true if the given file descriptor is a terminal. func IsTerminal(fd uintptr) bool { var termios Termios diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term_solaris.go b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term_solaris.go new file mode 100644 index 0000000000..112debbec5 --- /dev/null +++ b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term_solaris.go @@ -0,0 +1,41 @@ +// +build solaris + +package term + +import ( + "syscall" + "unsafe" +) + +/* +#include +#include +#include + +// Small wrapper to get rid of variadic args of ioctl() +int my_ioctl(int fd, int cmd, struct winsize *ws) { + return ioctl(fd, cmd, ws); +} +*/ +import "C" + +// GetWinsize returns the window size based on the specified file descriptor. 
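The new tc_solaris_cgo.go routes MakeRaw through cgo tcgetattr/tcsetattr wrappers. A typical raw-mode sketch follows; RestoreTerminal is assumed here as the usual counterpart in pkg/term for putting the saved state back, and os.Stdin is just an example descriptor.

    package main

    import (
        "os"

        "github.com/docker/docker/pkg/term"
    )

    func main() {
        fd := os.Stdin.Fd()

        // On Solaris this now goes through the cgo tcget/tcset wrappers above.
        state, err := term.MakeRaw(fd)
        if err != nil {
            panic(err)
        }
        // Assumed counterpart in pkg/term: put the saved termios state back.
        defer term.RestoreTerminal(fd, state)

        // ... read input byte-by-byte with no echo or line buffering ...
    }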
+func GetWinsize(fd uintptr) (*Winsize, error) { + ws := &Winsize{} + ret, err := C.my_ioctl(C.int(fd), C.int(syscall.TIOCGWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws))) + // Skip retval = 0 + if ret == 0 { + return ws, nil + } + return ws, err +} + +// SetWinsize tries to set the specified window size for the specified file descriptor. +func SetWinsize(fd uintptr, ws *Winsize) error { + ret, err := C.my_ioctl(C.int(fd), C.int(syscall.TIOCSWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws))) + // Skip retval = 0 + if ret == 0 { + return nil + } + return err +} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term_unix.go b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term_unix.go new file mode 100644 index 0000000000..ddf87a0e58 --- /dev/null +++ b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term_unix.go @@ -0,0 +1,29 @@ +// +build !solaris,!windows + +package term + +import ( + "syscall" + "unsafe" +) + +// GetWinsize returns the window size based on the specified file descriptor. +func GetWinsize(fd uintptr) (*Winsize, error) { + ws := &Winsize{} + _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(ws))) + // Skip errno = 0 + if err == 0 { + return ws, nil + } + return ws, err +} + +// SetWinsize tries to set the specified window size for the specified file descriptor. +func SetWinsize(fd uintptr, ws *Winsize) error { + _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCSWINSZ), uintptr(unsafe.Pointer(ws))) + // Skip errno = 0 + if err == 0 { + return nil + } + return err +} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term_windows.go b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term_windows.go index a02e681f44..cd21b5fc2b 100644 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term_windows.go +++ b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term_windows.go @@ -23,8 +23,6 @@ type State struct { type Winsize struct { Height uint16 Width uint16 - x uint16 - y uint16 } const ( @@ -59,21 +57,13 @@ func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { // console which supports ANSI emulation, or fall-back to the golang emulator // (github.com/azure/go-ansiterm). func useNativeConsole() bool { - osv, err := system.GetOSVersion() - if err != nil { - return false - } + osv := system.GetOSVersion() // Native console is not available before major version 10 if osv.MajorVersion < 10 { return false } - // Must have a late pre-release TP4 build of Windows Server 2016/Windows 10 TH2 or later - if osv.Build < 10578 { - return false - } - // Get the console modes.
If this fails, we can't use the native console state, err := getNativeConsole() if err != nil { @@ -194,8 +184,7 @@ func GetWinsize(fd uintptr) (*Winsize, error) { winsize := &Winsize{ Width: uint16(info.Window.Right - info.Window.Left + 1), Height: uint16(info.Window.Bottom - info.Window.Top + 1), - x: 0, - y: 0} + } return winsize, nil } diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/ulimit/ulimit.go b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/ulimit/ulimit.go deleted file mode 100644 index 8fb0d804de..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/ulimit/ulimit.go +++ /dev/null @@ -1,111 +0,0 @@ -// Package ulimit provides structure and helper function to parse and represent -// resource limits (Rlimit and Ulimit, its human friendly version). -package ulimit - -import ( - "fmt" - "strconv" - "strings" -) - -// Ulimit is a human friendly version of Rlimit. -type Ulimit struct { - Name string - Hard int64 - Soft int64 -} - -// Rlimit specifies the resource limits, such as max open files. -type Rlimit struct { - Type int `json:"type,omitempty"` - Hard uint64 `json:"hard,omitempty"` - Soft uint64 `json:"soft,omitempty"` -} - -const ( - // magic numbers for making the syscall - // some of these are defined in the syscall package, but not all. - // Also since Windows client doesn't get access to the syscall package, need to - // define these here - rlimitAs = 9 - rlimitCore = 4 - rlimitCPU = 0 - rlimitData = 2 - rlimitFsize = 1 - rlimitLocks = 10 - rlimitMemlock = 8 - rlimitMsgqueue = 12 - rlimitNice = 13 - rlimitNofile = 7 - rlimitNproc = 6 - rlimitRss = 5 - rlimitRtprio = 14 - rlimitRttime = 15 - rlimitSigpending = 11 - rlimitStack = 3 -) - -var ulimitNameMapping = map[string]int{ - //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container. - "core": rlimitCore, - "cpu": rlimitCPU, - "data": rlimitData, - "fsize": rlimitFsize, - "locks": rlimitLocks, - "memlock": rlimitMemlock, - "msgqueue": rlimitMsgqueue, - "nice": rlimitNice, - "nofile": rlimitNofile, - "nproc": rlimitNproc, - "rss": rlimitRss, - "rtprio": rlimitRtprio, - "rttime": rlimitRttime, - "sigpending": rlimitSigpending, - "stack": rlimitStack, -} - -// Parse parses and returns a Ulimit from the specified string. -func Parse(val string) (*Ulimit, error) { - parts := strings.SplitN(val, "=", 2) - if len(parts) != 2 { - return nil, fmt.Errorf("invalid ulimit argument: %s", val) - } - - if _, exists := ulimitNameMapping[parts[0]]; !exists { - return nil, fmt.Errorf("invalid ulimit type: %s", parts[0]) - } - - limitVals := strings.SplitN(parts[1], ":", 2) - if len(limitVals) > 2 { - return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1]) - } - - soft, err := strconv.ParseInt(limitVals[0], 10, 64) - if err != nil { - return nil, err - } - - hard := soft // in case no hard was set - if len(limitVals) == 2 { - hard, err = strconv.ParseInt(limitVals[1], 10, 64) - } - if soft > hard { - return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, hard) - } - - return &Ulimit{Name: parts[0], Soft: soft, Hard: hard}, nil -} - -// GetRlimit returns the RLimit corresponding to Ulimit. 
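This vendored pkg/ulimit is being dropped; the equivalent helpers live in the already-vendored docker/go-units, whose tests are trimmed later in this diff. The sketch below is illustrative only and assumes go-units exposes the same ParseUlimit/GetRlimit surface as the file deleted here:

```go
package main

import (
	"fmt"
	"log"

	units "github.com/docker/go-units"
)

func main() {
	// Format is name=soft[:hard]; when hard is omitted it defaults to soft.
	ul, err := units.ParseUlimit("nofile=512:1024")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ul) // nofile=512:1024

	// Convert the human-friendly form into the raw setrlimit representation.
	rl, err := ul.GetRlimit()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("type=%d soft=%d hard=%d\n", rl.Type, rl.Soft, rl.Hard)
}
```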
-func (u *Ulimit) GetRlimit() (*Rlimit, error) { - t, exists := ulimitNameMapping[u.Name] - if !exists { - return nil, fmt.Errorf("invalid ulimit name %s", u.Name) - } - - return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil -} - -func (u *Ulimit) String() string { - return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard) -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/ulimit/ulimit_test.go b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/ulimit/ulimit_test.go deleted file mode 100644 index 1e8c881f51..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/ulimit/ulimit_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package ulimit - -import "testing" - -func TestParseValid(t *testing.T) { - u1 := &Ulimit{"nofile", 1024, 512} - if u2, _ := Parse("nofile=512:1024"); *u1 != *u2 { - t.Fatalf("expected %q, but got %q", u1, u2) - } -} - -func TestParseInvalidLimitType(t *testing.T) { - if _, err := Parse("notarealtype=1024:1024"); err == nil { - t.Fatalf("expected error on invalid ulimit type") - } -} - -func TestParseBadFormat(t *testing.T) { - if _, err := Parse("nofile:1024:1024"); err == nil { - t.Fatal("expected error on bad syntax") - } - - if _, err := Parse("nofile"); err == nil { - t.Fatal("expected error on bad syntax") - } - - if _, err := Parse("nofile="); err == nil { - t.Fatal("expected error on bad syntax") - } - if _, err := Parse("nofile=:"); err == nil { - t.Fatal("expected error on bad syntax") - } - if _, err := Parse("nofile=:1024"); err == nil { - t.Fatal("expected error on bad syntax") - } -} - -func TestParseHardLessThanSoft(t *testing.T) { - if _, err := Parse("nofile:1024:1"); err == nil { - t.Fatal("expected error on hard limit less than soft limit") - } -} - -func TestParseInvalidValueType(t *testing.T) { - if _, err := Parse("nofile:asdf"); err == nil { - t.Fatal("expected error on bad value type") - } -} - -func TestStringOutput(t *testing.T) { - u := &Ulimit{"nofile", 1024, 512} - if s := u.String(); s != "nofile=512:1024" { - t.Fatal("expected String to return nofile=512:1024, but got", s) - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration.go b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration.go deleted file mode 100644 index c219a8a968..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration.go +++ /dev/null @@ -1,33 +0,0 @@ -// Package units provides helper function to parse and print size and time units -// in human-readable format. -package units - -import ( - "fmt" - "time" -) - -// HumanDuration returns a human-readable approximation of a duration -// (eg. "About a minute", "4 hours ago", etc.). 
-func HumanDuration(d time.Duration) string { - if seconds := int(d.Seconds()); seconds < 1 { - return "Less than a second" - } else if seconds < 60 { - return fmt.Sprintf("%d seconds", seconds) - } else if minutes := int(d.Minutes()); minutes == 1 { - return "About a minute" - } else if minutes < 60 { - return fmt.Sprintf("%d minutes", minutes) - } else if hours := int(d.Hours()); hours == 1 { - return "About an hour" - } else if hours < 48 { - return fmt.Sprintf("%d hours", hours) - } else if hours < 24*7*2 { - return fmt.Sprintf("%d days", hours/24) - } else if hours < 24*30*3 { - return fmt.Sprintf("%d weeks", hours/24/7) - } else if hours < 24*365*2 { - return fmt.Sprintf("%d months", hours/24/30) - } - return fmt.Sprintf("%d years", int(d.Hours())/24/365) -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration_test.go b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration_test.go deleted file mode 100644 index fcfb6b7bbd..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package units - -import ( - "testing" - "time" -) - -func TestHumanDuration(t *testing.T) { - // Useful duration abstractions - day := 24 * time.Hour - week := 7 * day - month := 30 * day - year := 365 * day - - assertEquals(t, "Less than a second", HumanDuration(450*time.Millisecond)) - assertEquals(t, "47 seconds", HumanDuration(47*time.Second)) - assertEquals(t, "About a minute", HumanDuration(1*time.Minute)) - assertEquals(t, "3 minutes", HumanDuration(3*time.Minute)) - assertEquals(t, "35 minutes", HumanDuration(35*time.Minute)) - assertEquals(t, "35 minutes", HumanDuration(35*time.Minute+40*time.Second)) - assertEquals(t, "About an hour", HumanDuration(1*time.Hour)) - assertEquals(t, "About an hour", HumanDuration(1*time.Hour+45*time.Minute)) - assertEquals(t, "3 hours", HumanDuration(3*time.Hour)) - assertEquals(t, "3 hours", HumanDuration(3*time.Hour+59*time.Minute)) - assertEquals(t, "4 hours", HumanDuration(3*time.Hour+60*time.Minute)) - assertEquals(t, "24 hours", HumanDuration(24*time.Hour)) - assertEquals(t, "36 hours", HumanDuration(1*day+12*time.Hour)) - assertEquals(t, "2 days", HumanDuration(2*day)) - assertEquals(t, "7 days", HumanDuration(7*day)) - assertEquals(t, "13 days", HumanDuration(13*day+5*time.Hour)) - assertEquals(t, "2 weeks", HumanDuration(2*week)) - assertEquals(t, "2 weeks", HumanDuration(2*week+4*day)) - assertEquals(t, "3 weeks", HumanDuration(3*week)) - assertEquals(t, "4 weeks", HumanDuration(4*week)) - assertEquals(t, "4 weeks", HumanDuration(4*week+3*day)) - assertEquals(t, "4 weeks", HumanDuration(1*month)) - assertEquals(t, "6 weeks", HumanDuration(1*month+2*week)) - assertEquals(t, "8 weeks", HumanDuration(2*month)) - assertEquals(t, "3 months", HumanDuration(3*month+1*week)) - assertEquals(t, "5 months", HumanDuration(5*month+2*week)) - assertEquals(t, "13 months", HumanDuration(13*month)) - assertEquals(t, "23 months", HumanDuration(23*month)) - assertEquals(t, "24 months", HumanDuration(24*month)) - assertEquals(t, "2 years", HumanDuration(24*month+2*week)) - assertEquals(t, "3 years", HumanDuration(3*year+2*month)) -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size.go b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size.go deleted file mode 100644 index 3b59daff31..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size.go +++ /dev/null 
@@ -1,95 +0,0 @@ -package units - -import ( - "fmt" - "regexp" - "strconv" - "strings" -) - -// See: http://en.wikipedia.org/wiki/Binary_prefix -const ( - // Decimal - - KB = 1000 - MB = 1000 * KB - GB = 1000 * MB - TB = 1000 * GB - PB = 1000 * TB - - // Binary - - KiB = 1024 - MiB = 1024 * KiB - GiB = 1024 * MiB - TiB = 1024 * GiB - PiB = 1024 * TiB -) - -type unitMap map[string]int64 - -var ( - decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB} - binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB} - sizeRegex = regexp.MustCompile(`^(\d+)([kKmMgGtTpP])?[bB]?$`) -) - -var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} -var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} - -// CustomSize returns a human-readable approximation of a size -// using custom format. -func CustomSize(format string, size float64, base float64, _map []string) string { - i := 0 - for size >= base { - size = size / base - i++ - } - return fmt.Sprintf(format, size, _map[i]) -} - -// HumanSize returns a human-readable approximation of a size -// capped at 4 valid numbers (eg. "2.746 MB", "796 KB"). -func HumanSize(size float64) string { - return CustomSize("%.4g %s", size, 1000.0, decimapAbbrs) -} - -// BytesSize returns a human-readable size in bytes, kibibytes, -// mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB"). -func BytesSize(size float64) string { - return CustomSize("%.4g %s", size, 1024.0, binaryAbbrs) -} - -// FromHumanSize returns an integer from a human-readable specification of a -// size using SI standard (eg. "44kB", "17MB"). -func FromHumanSize(size string) (int64, error) { - return parseSize(size, decimalMap) -} - -// RAMInBytes parses a human-readable string representing an amount of RAM -// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and -// returns the number of bytes, or -1 if the string is unparseable. -// Units are case-insensitive, and the 'b' suffix is optional. -func RAMInBytes(size string) (int64, error) { - return parseSize(size, binaryMap) -} - -// Parses the human-readable size string into the amount it represents. 
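pkg/units is removed for the same reason, with docker/go-units carrying identical size helpers. The point of the two parse entry points above is the base: FromHumanSize uses SI units (1000), RAMInBytes uses binary units (1024). An illustrative sketch against go-units, assuming its API matches the deleted file as the go-units tests further down suggest:

```go
package main

import (
	"fmt"
	"log"

	units "github.com/docker/go-units"
)

func main() {
	// Decimal (SI) parsing: "32kb" -> 32 * 1000.
	si, err := units.FromHumanSize("32kb")
	if err != nil {
		log.Fatal(err)
	}

	// Binary parsing, as used for RAM limits: "32kb" -> 32 * 1024.
	bin, err := units.RAMInBytes("32kb")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(si, bin) // 32000 32768

	// Formatting goes the other way around.
	fmt.Println(units.HumanSize(32000)) // 32 kB
	fmt.Println(units.BytesSize(32768)) // 32 KiB
}
```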
-func parseSize(sizeStr string, uMap unitMap) (int64, error) { - matches := sizeRegex.FindStringSubmatch(sizeStr) - if len(matches) != 3 { - return -1, fmt.Errorf("invalid size: '%s'", sizeStr) - } - - size, err := strconv.ParseInt(matches[1], 10, 0) - if err != nil { - return -1, err - } - - unitPrefix := strings.ToLower(matches[2]) - if mul, ok := uMap[unitPrefix]; ok { - size *= mul - } - - return size, nil -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size_test.go b/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size_test.go deleted file mode 100644 index 67c3b81e6b..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size_test.go +++ /dev/null @@ -1,108 +0,0 @@ -package units - -import ( - "reflect" - "runtime" - "strings" - "testing" -) - -func TestBytesSize(t *testing.T) { - assertEquals(t, "1 KiB", BytesSize(1024)) - assertEquals(t, "1 MiB", BytesSize(1024*1024)) - assertEquals(t, "1 MiB", BytesSize(1048576)) - assertEquals(t, "2 MiB", BytesSize(2*MiB)) - assertEquals(t, "3.42 GiB", BytesSize(3.42*GiB)) - assertEquals(t, "5.372 TiB", BytesSize(5.372*TiB)) - assertEquals(t, "2.22 PiB", BytesSize(2.22*PiB)) -} - -func TestHumanSize(t *testing.T) { - assertEquals(t, "1 kB", HumanSize(1000)) - assertEquals(t, "1.024 kB", HumanSize(1024)) - assertEquals(t, "1 MB", HumanSize(1000000)) - assertEquals(t, "1.049 MB", HumanSize(1048576)) - assertEquals(t, "2 MB", HumanSize(2*MB)) - assertEquals(t, "3.42 GB", HumanSize(float64(3.42*GB))) - assertEquals(t, "5.372 TB", HumanSize(float64(5.372*TB))) - assertEquals(t, "2.22 PB", HumanSize(float64(2.22*PB))) -} - -func TestFromHumanSize(t *testing.T) { - assertSuccessEquals(t, 32, FromHumanSize, "32") - assertSuccessEquals(t, 32, FromHumanSize, "32b") - assertSuccessEquals(t, 32, FromHumanSize, "32B") - assertSuccessEquals(t, 32*KB, FromHumanSize, "32k") - assertSuccessEquals(t, 32*KB, FromHumanSize, "32K") - assertSuccessEquals(t, 32*KB, FromHumanSize, "32kb") - assertSuccessEquals(t, 32*KB, FromHumanSize, "32Kb") - assertSuccessEquals(t, 32*MB, FromHumanSize, "32Mb") - assertSuccessEquals(t, 32*GB, FromHumanSize, "32Gb") - assertSuccessEquals(t, 32*TB, FromHumanSize, "32Tb") - assertSuccessEquals(t, 32*PB, FromHumanSize, "32Pb") - - assertError(t, FromHumanSize, "") - assertError(t, FromHumanSize, "hello") - assertError(t, FromHumanSize, "-32") - assertError(t, FromHumanSize, "32.3") - assertError(t, FromHumanSize, " 32 ") - assertError(t, FromHumanSize, "32.3Kb") - assertError(t, FromHumanSize, "32 mb") - assertError(t, FromHumanSize, "32m b") - assertError(t, FromHumanSize, "32bm") -} - -func TestRAMInBytes(t *testing.T) { - assertSuccessEquals(t, 32, RAMInBytes, "32") - assertSuccessEquals(t, 32, RAMInBytes, "32b") - assertSuccessEquals(t, 32, RAMInBytes, "32B") - assertSuccessEquals(t, 32*KiB, RAMInBytes, "32k") - assertSuccessEquals(t, 32*KiB, RAMInBytes, "32K") - assertSuccessEquals(t, 32*KiB, RAMInBytes, "32kb") - assertSuccessEquals(t, 32*KiB, RAMInBytes, "32Kb") - assertSuccessEquals(t, 32*MiB, RAMInBytes, "32Mb") - assertSuccessEquals(t, 32*GiB, RAMInBytes, "32Gb") - assertSuccessEquals(t, 32*TiB, RAMInBytes, "32Tb") - assertSuccessEquals(t, 32*PiB, RAMInBytes, "32Pb") - assertSuccessEquals(t, 32*PiB, RAMInBytes, "32PB") - assertSuccessEquals(t, 32*PiB, RAMInBytes, "32P") - - assertError(t, RAMInBytes, "") - assertError(t, RAMInBytes, "hello") - assertError(t, RAMInBytes, "-32") - assertError(t, RAMInBytes, "32.3") - assertError(t, RAMInBytes, " 
32 ") - assertError(t, RAMInBytes, "32.3Kb") - assertError(t, RAMInBytes, "32 mb") - assertError(t, RAMInBytes, "32m b") - assertError(t, RAMInBytes, "32bm") -} - -func assertEquals(t *testing.T, expected, actual interface{}) { - if expected != actual { - t.Errorf("Expected '%v' but got '%v'", expected, actual) - } -} - -// func that maps to the parse function signatures as testing abstraction -type parseFn func(string) (int64, error) - -// Define 'String()' for pretty-print -func (fn parseFn) String() string { - fnName := runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name() - return fnName[strings.LastIndex(fnName, ".")+1:] -} - -func assertSuccessEquals(t *testing.T, expected int64, fn parseFn, arg string) { - res, err := fn(arg) - if err != nil || res != expected { - t.Errorf("%s(\"%s\") -> expected '%d' but got '%d' with error '%v'", fn, arg, expected, res, err) - } -} - -func assertError(t *testing.T, fn parseFn, arg string) { - res, err := fn(arg) - if err == nil && res != -1 { - t.Errorf("%s(\"%s\") -> expected error but got '%d'", fn, arg, res) - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/go-units/duration_test.go b/libnetwork/Godeps/_workspace/src/github.com/docker/go-units/duration_test.go deleted file mode 100644 index 63baa515bf..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/go-units/duration_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package units - -import ( - "fmt" - "testing" - "time" -) - -func ExampleHumanDuration() { - fmt.Println(HumanDuration(450 * time.Millisecond)) - fmt.Println(HumanDuration(47 * time.Second)) - fmt.Println(HumanDuration(1 * time.Minute)) - fmt.Println(HumanDuration(3 * time.Minute)) - fmt.Println(HumanDuration(35 * time.Minute)) - fmt.Println(HumanDuration(35*time.Minute + 40*time.Second)) - fmt.Println(HumanDuration(1 * time.Hour)) - fmt.Println(HumanDuration(1*time.Hour + 45*time.Minute)) - fmt.Println(HumanDuration(3 * time.Hour)) - fmt.Println(HumanDuration(3*time.Hour + 59*time.Minute)) - fmt.Println(HumanDuration(3*time.Hour + 60*time.Minute)) - fmt.Println(HumanDuration(24 * time.Hour)) - fmt.Println(HumanDuration(24*time.Hour + 12*time.Hour)) - fmt.Println(HumanDuration(2 * 24 * time.Hour)) - fmt.Println(HumanDuration(7 * 24 * time.Hour)) - fmt.Println(HumanDuration(13*24*time.Hour + 5*time.Hour)) - fmt.Println(HumanDuration(2 * 7 * 24 * time.Hour)) - fmt.Println(HumanDuration(2*7*24*time.Hour + 4*24*time.Hour)) - fmt.Println(HumanDuration(3 * 7 * 24 * time.Hour)) - fmt.Println(HumanDuration(4 * 7 * 24 * time.Hour)) - fmt.Println(HumanDuration(4*7*24*time.Hour + 3*24*time.Hour)) - fmt.Println(HumanDuration(1 * 30 * 24 * time.Hour)) - fmt.Println(HumanDuration(1*30*24*time.Hour + 2*7*24*time.Hour)) - fmt.Println(HumanDuration(2 * 30 * 24 * time.Hour)) - fmt.Println(HumanDuration(3*30*24*time.Hour + 1*7*24*time.Hour)) - fmt.Println(HumanDuration(5*30*24*time.Hour + 2*7*24*time.Hour)) - fmt.Println(HumanDuration(13 * 30 * 24 * time.Hour)) - fmt.Println(HumanDuration(23 * 30 * 24 * time.Hour)) - fmt.Println(HumanDuration(24 * 30 * 24 * time.Hour)) - fmt.Println(HumanDuration(24*30*24*time.Hour + 2*7*24*time.Hour)) - fmt.Println(HumanDuration(3*365*24*time.Hour + 2*30*24*time.Hour)) -} - -func TestHumanDuration(t *testing.T) { - // Useful duration abstractions - day := 24 * time.Hour - week := 7 * day - month := 30 * day - year := 365 * day - - assertEquals(t, "Less than a second", HumanDuration(450*time.Millisecond)) - assertEquals(t, "47 seconds", HumanDuration(47*time.Second)) - assertEquals(t, 
"About a minute", HumanDuration(1*time.Minute)) - assertEquals(t, "3 minutes", HumanDuration(3*time.Minute)) - assertEquals(t, "35 minutes", HumanDuration(35*time.Minute)) - assertEquals(t, "35 minutes", HumanDuration(35*time.Minute+40*time.Second)) - assertEquals(t, "About an hour", HumanDuration(1*time.Hour)) - assertEquals(t, "About an hour", HumanDuration(1*time.Hour+45*time.Minute)) - assertEquals(t, "3 hours", HumanDuration(3*time.Hour)) - assertEquals(t, "3 hours", HumanDuration(3*time.Hour+59*time.Minute)) - assertEquals(t, "4 hours", HumanDuration(3*time.Hour+60*time.Minute)) - assertEquals(t, "24 hours", HumanDuration(24*time.Hour)) - assertEquals(t, "36 hours", HumanDuration(1*day+12*time.Hour)) - assertEquals(t, "2 days", HumanDuration(2*day)) - assertEquals(t, "7 days", HumanDuration(7*day)) - assertEquals(t, "13 days", HumanDuration(13*day+5*time.Hour)) - assertEquals(t, "2 weeks", HumanDuration(2*week)) - assertEquals(t, "2 weeks", HumanDuration(2*week+4*day)) - assertEquals(t, "3 weeks", HumanDuration(3*week)) - assertEquals(t, "4 weeks", HumanDuration(4*week)) - assertEquals(t, "4 weeks", HumanDuration(4*week+3*day)) - assertEquals(t, "4 weeks", HumanDuration(1*month)) - assertEquals(t, "6 weeks", HumanDuration(1*month+2*week)) - assertEquals(t, "8 weeks", HumanDuration(2*month)) - assertEquals(t, "3 months", HumanDuration(3*month+1*week)) - assertEquals(t, "5 months", HumanDuration(5*month+2*week)) - assertEquals(t, "13 months", HumanDuration(13*month)) - assertEquals(t, "23 months", HumanDuration(23*month)) - assertEquals(t, "24 months", HumanDuration(24*month)) - assertEquals(t, "2 years", HumanDuration(24*month+2*week)) - assertEquals(t, "3 years", HumanDuration(3*year+2*month)) -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/go-units/size_test.go b/libnetwork/Godeps/_workspace/src/github.com/docker/go-units/size_test.go deleted file mode 100644 index a968f5c0df..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/go-units/size_test.go +++ /dev/null @@ -1,160 +0,0 @@ -package units - -import ( - "fmt" - "reflect" - "runtime" - "strings" - "testing" -) - -func ExampleBytesSize() { - fmt.Println(BytesSize(1024)) - fmt.Println(BytesSize(1024 * 1024)) - fmt.Println(BytesSize(1048576)) - fmt.Println(BytesSize(2 * MiB)) - fmt.Println(BytesSize(3.42 * GiB)) - fmt.Println(BytesSize(5.372 * TiB)) - fmt.Println(BytesSize(2.22 * PiB)) -} - -func ExampleHumanSize() { - fmt.Println(HumanSize(1000)) - fmt.Println(HumanSize(1024)) - fmt.Println(HumanSize(1000000)) - fmt.Println(HumanSize(1048576)) - fmt.Println(HumanSize(2 * MB)) - fmt.Println(HumanSize(float64(3.42 * GB))) - fmt.Println(HumanSize(float64(5.372 * TB))) - fmt.Println(HumanSize(float64(2.22 * PB))) -} - -func ExampleFromHumanSize() { - fmt.Println(FromHumanSize("32")) - fmt.Println(FromHumanSize("32b")) - fmt.Println(FromHumanSize("32B")) - fmt.Println(FromHumanSize("32k")) - fmt.Println(FromHumanSize("32K")) - fmt.Println(FromHumanSize("32kb")) - fmt.Println(FromHumanSize("32Kb")) - fmt.Println(FromHumanSize("32Mb")) - fmt.Println(FromHumanSize("32Gb")) - fmt.Println(FromHumanSize("32Tb")) - fmt.Println(FromHumanSize("32Pb")) -} - -func ExampleRAMInBytes() { - fmt.Println(RAMInBytes("32")) - fmt.Println(RAMInBytes("32b")) - fmt.Println(RAMInBytes("32B")) - fmt.Println(RAMInBytes("32k")) - fmt.Println(RAMInBytes("32K")) - fmt.Println(RAMInBytes("32kb")) - fmt.Println(RAMInBytes("32Kb")) - fmt.Println(RAMInBytes("32Mb")) - fmt.Println(RAMInBytes("32Gb")) - 
fmt.Println(RAMInBytes("32Tb")) - fmt.Println(RAMInBytes("32Pb")) - fmt.Println(RAMInBytes("32PB")) - fmt.Println(RAMInBytes("32P")) -} - -func TestBytesSize(t *testing.T) { - assertEquals(t, "1 KiB", BytesSize(1024)) - assertEquals(t, "1 MiB", BytesSize(1024*1024)) - assertEquals(t, "1 MiB", BytesSize(1048576)) - assertEquals(t, "2 MiB", BytesSize(2*MiB)) - assertEquals(t, "3.42 GiB", BytesSize(3.42*GiB)) - assertEquals(t, "5.372 TiB", BytesSize(5.372*TiB)) - assertEquals(t, "2.22 PiB", BytesSize(2.22*PiB)) -} - -func TestHumanSize(t *testing.T) { - assertEquals(t, "1 kB", HumanSize(1000)) - assertEquals(t, "1.024 kB", HumanSize(1024)) - assertEquals(t, "1 MB", HumanSize(1000000)) - assertEquals(t, "1.049 MB", HumanSize(1048576)) - assertEquals(t, "2 MB", HumanSize(2*MB)) - assertEquals(t, "3.42 GB", HumanSize(float64(3.42*GB))) - assertEquals(t, "5.372 TB", HumanSize(float64(5.372*TB))) - assertEquals(t, "2.22 PB", HumanSize(float64(2.22*PB))) -} - -func TestFromHumanSize(t *testing.T) { - assertSuccessEquals(t, 32, FromHumanSize, "32") - assertSuccessEquals(t, 32, FromHumanSize, "32b") - assertSuccessEquals(t, 32, FromHumanSize, "32B") - assertSuccessEquals(t, 32*KB, FromHumanSize, "32k") - assertSuccessEquals(t, 32*KB, FromHumanSize, "32K") - assertSuccessEquals(t, 32*KB, FromHumanSize, "32kb") - assertSuccessEquals(t, 32*KB, FromHumanSize, "32Kb") - assertSuccessEquals(t, 32*MB, FromHumanSize, "32Mb") - assertSuccessEquals(t, 32*GB, FromHumanSize, "32Gb") - assertSuccessEquals(t, 32*TB, FromHumanSize, "32Tb") - assertSuccessEquals(t, 32*PB, FromHumanSize, "32Pb") - - assertError(t, FromHumanSize, "") - assertError(t, FromHumanSize, "hello") - assertError(t, FromHumanSize, "-32") - assertError(t, FromHumanSize, "32.3") - assertError(t, FromHumanSize, " 32 ") - assertError(t, FromHumanSize, "32.3Kb") - assertError(t, FromHumanSize, "32 mb") - assertError(t, FromHumanSize, "32m b") - assertError(t, FromHumanSize, "32bm") -} - -func TestRAMInBytes(t *testing.T) { - assertSuccessEquals(t, 32, RAMInBytes, "32") - assertSuccessEquals(t, 32, RAMInBytes, "32b") - assertSuccessEquals(t, 32, RAMInBytes, "32B") - assertSuccessEquals(t, 32*KiB, RAMInBytes, "32k") - assertSuccessEquals(t, 32*KiB, RAMInBytes, "32K") - assertSuccessEquals(t, 32*KiB, RAMInBytes, "32kb") - assertSuccessEquals(t, 32*KiB, RAMInBytes, "32Kb") - assertSuccessEquals(t, 32*MiB, RAMInBytes, "32Mb") - assertSuccessEquals(t, 32*GiB, RAMInBytes, "32Gb") - assertSuccessEquals(t, 32*TiB, RAMInBytes, "32Tb") - assertSuccessEquals(t, 32*PiB, RAMInBytes, "32Pb") - assertSuccessEquals(t, 32*PiB, RAMInBytes, "32PB") - assertSuccessEquals(t, 32*PiB, RAMInBytes, "32P") - - assertError(t, RAMInBytes, "") - assertError(t, RAMInBytes, "hello") - assertError(t, RAMInBytes, "-32") - assertError(t, RAMInBytes, "32.3") - assertError(t, RAMInBytes, " 32 ") - assertError(t, RAMInBytes, "32.3Kb") - assertError(t, RAMInBytes, "32 mb") - assertError(t, RAMInBytes, "32m b") - assertError(t, RAMInBytes, "32bm") -} - -func assertEquals(t *testing.T, expected, actual interface{}) { - if expected != actual { - t.Errorf("Expected '%v' but got '%v'", expected, actual) - } -} - -// func that maps to the parse function signatures as testing abstraction -type parseFn func(string) (int64, error) - -// Define 'String()' for pretty-print -func (fn parseFn) String() string { - fnName := runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name() - return fnName[strings.LastIndex(fnName, ".")+1:] -} - -func assertSuccessEquals(t *testing.T, expected int64, fn parseFn, 
arg string) { - res, err := fn(arg) - if err != nil || res != expected { - t.Errorf("%s(\"%s\") -> expected '%d' but got '%d' with error '%v'", fn, arg, expected, res, err) - } -} - -func assertError(t *testing.T, fn parseFn, arg string) { - res, err := fn(arg) - if err == nil && res != -1 { - t.Errorf("%s(\"%s\") -> expected error but got '%d'", fn, arg, res) - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/go-units/ulimit_test.go b/libnetwork/Godeps/_workspace/src/github.com/docker/go-units/ulimit_test.go deleted file mode 100644 index 09e258eed2..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/go-units/ulimit_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package units - -import ( - "fmt" - "testing" -) - -func ExampleParseUlimit() { - fmt.Println(ParseUlimit("nofile=512:1024")) - fmt.Println(ParseUlimit("nofile=1024")) - fmt.Println(ParseUlimit("cpu=2:4")) - fmt.Println(ParseUlimit("cpu=6")) -} - -func TestParseUlimitValid(t *testing.T) { - u1 := &Ulimit{"nofile", 1024, 512} - if u2, _ := ParseUlimit("nofile=512:1024"); *u1 != *u2 { - t.Fatalf("expected %q, but got %q", u1, u2) - } -} - -func TestParseUlimitInvalidLimitType(t *testing.T) { - if _, err := ParseUlimit("notarealtype=1024:1024"); err == nil { - t.Fatalf("expected error on invalid ulimit type") - } -} - -func TestParseUlimitBadFormat(t *testing.T) { - if _, err := ParseUlimit("nofile:1024:1024"); err == nil { - t.Fatal("expected error on bad syntax") - } - - if _, err := ParseUlimit("nofile"); err == nil { - t.Fatal("expected error on bad syntax") - } - - if _, err := ParseUlimit("nofile="); err == nil { - t.Fatal("expected error on bad syntax") - } - if _, err := ParseUlimit("nofile=:"); err == nil { - t.Fatal("expected error on bad syntax") - } - if _, err := ParseUlimit("nofile=:1024"); err == nil { - t.Fatal("expected error on bad syntax") - } -} - -func TestParseUlimitHardLessThanSoft(t *testing.T) { - if _, err := ParseUlimit("nofile:1024:1"); err == nil { - t.Fatal("expected error on hard limit less than soft limit") - } -} - -func TestParseUlimitInvalidValueType(t *testing.T) { - if _, err := ParseUlimit("nofile:asdf"); err == nil { - t.Fatal("expected error on bad value type") - } -} - -func TestUlimitStringOutput(t *testing.T) { - u := &Ulimit{"nofile", 1024, 512} - if s := u.String(); s != "nofile=512:1024" { - t.Fatal("expected String to return nofile=512:1024, but got", s) - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/README.md b/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/README.md deleted file mode 100644 index 3c9feeda83..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/README.md +++ /dev/null @@ -1,274 +0,0 @@ ---- -page_title: Docker Swarm discovery -page_description: Swarm discovery -page_keywords: docker, swarm, clustering, discovery ---- - -# Discovery - -Docker Swarm comes with multiple Discovery backends. - -## Backends - -### Hosted Discovery with Docker Hub - -First we create a cluster. - -```bash -# create a cluster -$ swarm create -6856663cdefdec325839a4b7e1de38e8 # <- this is your unique -``` - -Then we create each node and join them to the cluster. - -```bash -# on each of your nodes, start the swarm agent -# doesn't have to be public (eg. 192.168.0.X), -# as long as the swarm manager can access it. -$ swarm join --advertise= token:// -``` - -Finally, we start the Swarm manager. This can be on any machine or even -your laptop. 
- -```bash -$ swarm manage -H tcp:// token:// -``` - -You can then use regular Docker commands to interact with your swarm. - -```bash -docker -H tcp:// info -docker -H tcp:// run ... -docker -H tcp:// ps -docker -H tcp:// logs ... -... -``` - -You can also list the nodes in your cluster. - -```bash -swarm list token:// - -``` - -### Using a static file describing the cluster - -For each of your nodes, add a line to a file. The node IP address -doesn't need to be public as long the Swarm manager can access it. - -```bash -echo >> /tmp/my_cluster -echo >> /tmp/my_cluster -echo >> /tmp/my_cluster -``` - -Then start the Swarm manager on any machine. - -```bash -swarm manage -H tcp:// file:///tmp/my_cluster -``` - -And then use the regular Docker commands. - -```bash -docker -H tcp:// info -docker -H tcp:// run ... -docker -H tcp:// ps -docker -H tcp:// logs ... -... -``` - -You can list the nodes in your cluster. - -```bash -$ swarm list file:///tmp/my_cluster - - - -``` - -### Using etcd - -On each of your nodes, start the Swarm agent. The node IP address -doesn't have to be public as long as the swarm manager can access it. - -```bash -swarm join --advertise= etcd:/// -``` - -Start the manager on any machine or your laptop. - -```bash -swarm manage -H tcp:// etcd:/// -``` - -And then use the regular Docker commands. - -```bash -docker -H tcp:// info -docker -H tcp:// run ... -docker -H tcp:// ps -docker -H tcp:// logs ... -... -``` - -You can list the nodes in your cluster. - -```bash -swarm list etcd:/// - -``` - -### Using consul - -On each of your nodes, start the Swarm agent. The node IP address -doesn't need to be public as long as the Swarm manager can access it. - -```bash -swarm join --advertise= consul:/// -``` - -Start the manager on any machine or your laptop. - -```bash -swarm manage -H tcp:// consul:/// -``` - -And then use the regular Docker commands. - -```bash -docker -H tcp:// info -docker -H tcp:// run ... -docker -H tcp:// ps -docker -H tcp:// logs ... -... -``` - -You can list the nodes in your cluster. - -```bash -swarm list consul:/// - -``` - -### Using zookeeper - -On each of your nodes, start the Swarm agent. The node IP doesn't have -to be public as long as the swarm manager can access it. - -```bash -swarm join --advertise= zk://,/ -``` - -Start the manager on any machine or your laptop. - -```bash -swarm manage -H tcp:// zk://,/ -``` - -You can then use the regular Docker commands. - -```bash -docker -H tcp:// info -docker -H tcp:// run ... -docker -H tcp:// ps -docker -H tcp:// logs ... -... -``` - -You can list the nodes in the cluster. - -```bash -swarm list zk://,/ - -``` - -### Using a static list of IP addresses - -Start the manager on any machine or your laptop - -```bash -swarm manage -H nodes://, -``` - -Or - -```bash -swarm manage -H , -``` - -Then use the regular Docker commands. - -```bash -docker -H info -docker -H run ... -docker -H ps -docker -H logs ... -... -``` - -### Range pattern for IP addresses - -The `file` and `nodes` discoveries support a range pattern to specify IP -addresses, i.e., `10.0.0.[10:200]` will be a list of nodes starting from -`10.0.0.10` to `10.0.0.200`. - -For example for the `file` discovery method. - -```bash -$ echo "10.0.0.[11:100]:2375" >> /tmp/my_cluster -$ echo "10.0.1.[15:20]:2375" >> /tmp/my_cluster -$ echo "192.168.1.2:[2:20]375" >> /tmp/my_cluster -``` - -Then start the manager. - -```bash -swarm manage -H tcp:// file:///tmp/my_cluster -``` - -And for the `nodes` discovery method. 
- -```bash -swarm manage -H "nodes://10.0.0.[10:200]:2375,10.0.1.[2:250]:2375" -``` - -## Contributing a new discovery backend - -Contributing a new discovery backend is easy, simply implement this -interface: - -```go -type Discovery interface { - Initialize(string, int) error - Fetch() ([]string, error) - Watch(WatchCallback) - Register(string) error -} -``` - -### Initialize - -The parameters are `discovery` location without the scheme and a heartbeat (in seconds). - -### Fetch - -Returns the list of all the nodes from the discovery. - -### Watch - -Triggers an update (`Fetch`). This can happen either via a timer (like -`token`) or use backend specific features (like `etcd`). - -### Register - -Add a new node to the discovery service. - -## Docker Swarm documentation index - -- [User guide](./index.md) -- [Sheduler strategies](./scheduler/strategy.md) -- [Sheduler filters](./scheduler/filter.md) -- [Swarm API](./API.md) diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/discovery.go b/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/discovery.go deleted file mode 100644 index 9942de7cef..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/discovery.go +++ /dev/null @@ -1,166 +0,0 @@ -package discovery - -import ( - "errors" - "fmt" - "net" - "strings" - "time" - - log "github.com/Sirupsen/logrus" -) - -// An Entry represents a swarm host. -type Entry struct { - Host string - Port string -} - -// NewEntry creates a new entry. -func NewEntry(url string) (*Entry, error) { - host, port, err := net.SplitHostPort(url) - if err != nil { - return nil, err - } - return &Entry{host, port}, nil -} - -// String returns the string form of an entry. -func (e *Entry) String() string { - return fmt.Sprintf("%s:%s", e.Host, e.Port) -} - -// Equals returns true if cmp contains the same data. -func (e *Entry) Equals(cmp *Entry) bool { - return e.Host == cmp.Host && e.Port == cmp.Port -} - -// Entries is a list of *Entry with some helpers. -type Entries []*Entry - -// Equals returns true if cmp contains the same data. -func (e Entries) Equals(cmp Entries) bool { - // Check if the file has really changed. - if len(e) != len(cmp) { - return false - } - for i := range e { - if !e[i].Equals(cmp[i]) { - return false - } - } - return true -} - -// Contains returns true if the Entries contain a given Entry. -func (e Entries) Contains(entry *Entry) bool { - for _, curr := range e { - if curr.Equals(entry) { - return true - } - } - return false -} - -// Diff compares two entries and returns the added and removed entries. -func (e Entries) Diff(cmp Entries) (Entries, Entries) { - added := Entries{} - for _, entry := range cmp { - if !e.Contains(entry) { - added = append(added, entry) - } - } - - removed := Entries{} - for _, entry := range e { - if !cmp.Contains(entry) { - removed = append(removed, entry) - } - } - - return added, removed -} - -// The Discovery interface is implemented by Discovery backends which -// manage swarm host entries. -type Discovery interface { - // Initialize the discovery with URIs, a heartbeat and a ttl. - Initialize(string, time.Duration, time.Duration) error - - // Watch the discovery for entry changes. - // Returns a channel that will receive changes or an error. - // Providing a non-nil stopCh can be used to stop watching. 
- Watch(stopCh <-chan struct{}) (<-chan Entries, <-chan error) - - // Register to the discovery - Register(string) error -} - -var ( - discoveries map[string]Discovery - // ErrNotSupported is returned when a discovery service is not supported. - ErrNotSupported = errors.New("discovery service not supported") - // ErrNotImplemented is returned when discovery feature is not implemented - // by discovery backend. - ErrNotImplemented = errors.New("not implemented in this discovery service") -) - -func init() { - discoveries = make(map[string]Discovery) -} - -// Register makes a discovery backend available by the provided scheme. -// If Register is called twice with the same scheme an error is returned. -func Register(scheme string, d Discovery) error { - if _, exists := discoveries[scheme]; exists { - return fmt.Errorf("scheme already registered %s", scheme) - } - log.WithField("name", scheme).Debug("Registering discovery service") - discoveries[scheme] = d - - return nil -} - -func parse(rawurl string) (string, string) { - parts := strings.SplitN(rawurl, "://", 2) - - // nodes:port,node2:port => nodes://node1:port,node2:port - if len(parts) == 1 { - return "nodes", parts[0] - } - return parts[0], parts[1] -} - -// New returns a new Discovery given a URL, heartbeat and ttl settings. -// Returns an error if the URL scheme is not supported. -func New(rawurl string, heartbeat time.Duration, ttl time.Duration) (Discovery, error) { - scheme, uri := parse(rawurl) - - if discovery, exists := discoveries[scheme]; exists { - log.WithFields(log.Fields{"name": scheme, "uri": uri}).Debug("Initializing discovery service") - err := discovery.Initialize(uri, heartbeat, ttl) - return discovery, err - } - - return nil, ErrNotSupported -} - -// CreateEntries returns an array of entries based on the given addresses. 
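A new backend only has to satisfy the three-method Discovery interface above and register a URL scheme. The sketch below is an illustrative static backend in the style of the nodes discovery that appears later in this diff; it is not vendored code:

```go
package static

import (
	"strings"
	"time"

	"github.com/docker/swarm/discovery"
)

// Discovery serves a fixed, comma-separated list of hosts.
type Discovery struct {
	entries discovery.Entries
}

func init() {
	// Makes "static://host1:2375,host2:2375" resolvable through discovery.New.
	discovery.Register("static", &Discovery{})
}

// Initialize parses the host list out of the URI part of the URL.
func (s *Discovery) Initialize(uris string, _ time.Duration, _ time.Duration) error {
	entries, err := discovery.CreateEntries(strings.Split(uris, ","))
	if err != nil {
		return err
	}
	s.entries = entries
	return nil
}

// Watch emits the static list once and then waits for the stop signal.
func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) {
	ch := make(chan discovery.Entries)
	go func() {
		defer close(ch)
		ch <- s.entries
		<-stopCh
	}()
	return ch, nil
}

// Register is a no-op for a static list.
func (s *Discovery) Register(addr string) error {
	return discovery.ErrNotImplemented
}
```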
-func CreateEntries(addrs []string) (Entries, error) { - entries := Entries{} - if addrs == nil { - return entries, nil - } - - for _, addr := range addrs { - if len(addr) == 0 { - continue - } - entry, err := NewEntry(addr) - if err != nil { - return nil, err - } - entries = append(entries, entry) - } - return entries, nil -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/discovery_test.go b/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/discovery_test.go deleted file mode 100644 index b7128ff258..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/discovery_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package discovery - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestNewEntry(t *testing.T) { - entry, err := NewEntry("127.0.0.1:2375") - assert.NoError(t, err) - assert.True(t, entry.Equals(&Entry{Host: "127.0.0.1", Port: "2375"})) - assert.Equal(t, entry.String(), "127.0.0.1:2375") - - _, err = NewEntry("127.0.0.1") - assert.Error(t, err) -} - -func TestParse(t *testing.T) { - scheme, uri := parse("127.0.0.1:2375") - assert.Equal(t, scheme, "nodes") - assert.Equal(t, uri, "127.0.0.1:2375") - - scheme, uri = parse("localhost:2375") - assert.Equal(t, scheme, "nodes") - assert.Equal(t, uri, "localhost:2375") - - scheme, uri = parse("scheme://127.0.0.1:2375") - assert.Equal(t, scheme, "scheme") - assert.Equal(t, uri, "127.0.0.1:2375") - - scheme, uri = parse("scheme://localhost:2375") - assert.Equal(t, scheme, "scheme") - assert.Equal(t, uri, "localhost:2375") - - scheme, uri = parse("") - assert.Equal(t, scheme, "nodes") - assert.Equal(t, uri, "") -} - -func TestCreateEntries(t *testing.T) { - entries, err := CreateEntries(nil) - assert.Equal(t, entries, Entries{}) - assert.NoError(t, err) - - entries, err = CreateEntries([]string{"127.0.0.1:2375", "127.0.0.2:2375", ""}) - assert.NoError(t, err) - expected := Entries{ - &Entry{Host: "127.0.0.1", Port: "2375"}, - &Entry{Host: "127.0.0.2", Port: "2375"}, - } - assert.True(t, entries.Equals(expected)) - - _, err = CreateEntries([]string{"127.0.0.1", "127.0.0.2"}) - assert.Error(t, err) -} - -func TestContainsEntry(t *testing.T) { - entries, err := CreateEntries([]string{"127.0.0.1:2375", "127.0.0.2:2375", ""}) - assert.NoError(t, err) - assert.True(t, entries.Contains(&Entry{Host: "127.0.0.1", Port: "2375"})) - assert.False(t, entries.Contains(&Entry{Host: "127.0.0.3", Port: "2375"})) -} - -func TestEntriesEquality(t *testing.T) { - entries := Entries{ - &Entry{Host: "127.0.0.1", Port: "2375"}, - &Entry{Host: "127.0.0.2", Port: "2375"}, - } - - // Same - assert.True(t, entries.Equals(Entries{ - &Entry{Host: "127.0.0.1", Port: "2375"}, - &Entry{Host: "127.0.0.2", Port: "2375"}, - })) - - // Different size - assert.False(t, entries.Equals(Entries{ - &Entry{Host: "127.0.0.1", Port: "2375"}, - &Entry{Host: "127.0.0.2", Port: "2375"}, - &Entry{Host: "127.0.0.3", Port: "2375"}, - })) - - // Different content - assert.False(t, entries.Equals(Entries{ - &Entry{Host: "127.0.0.1", Port: "2375"}, - &Entry{Host: "127.0.0.42", Port: "2375"}, - })) -} - -func TestEntriesDiff(t *testing.T) { - entry1 := &Entry{Host: "1.1.1.1", Port: "1111"} - entry2 := &Entry{Host: "2.2.2.2", Port: "2222"} - entry3 := &Entry{Host: "3.3.3.3", Port: "3333"} - entries := Entries{entry1, entry2} - - // No diff - added, removed := entries.Diff(Entries{entry2, entry1}) - assert.Empty(t, added) - assert.Empty(t, removed) - - // Add - added, removed = 
entries.Diff(Entries{entry2, entry3, entry1}) - assert.Len(t, added, 1) - assert.True(t, added.Contains(entry3)) - assert.Empty(t, removed) - - // Remove - added, removed = entries.Diff(Entries{entry2}) - assert.Empty(t, added) - assert.Len(t, removed, 1) - assert.True(t, removed.Contains(entry1)) - - // Add and remove - added, removed = entries.Diff(Entries{entry1, entry3}) - assert.Len(t, added, 1) - assert.True(t, added.Contains(entry3)) - assert.Len(t, removed, 1) - assert.True(t, removed.Contains(entry2)) -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/file/file.go b/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/file/file.go deleted file mode 100644 index 3e1566ea59..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/file/file.go +++ /dev/null @@ -1,109 +0,0 @@ -package file - -import ( - "fmt" - "io/ioutil" - "strings" - "time" - - "github.com/docker/swarm/discovery" -) - -// Discovery is exported -type Discovery struct { - heartbeat time.Duration - path string -} - -func init() { - Init() -} - -// Init is exported -func Init() { - discovery.Register("file", &Discovery{}) -} - -// Initialize is exported -func (s *Discovery) Initialize(path string, heartbeat time.Duration, ttl time.Duration) error { - s.path = path - s.heartbeat = heartbeat - return nil -} - -func parseFileContent(content []byte) []string { - var result []string - for _, line := range strings.Split(strings.TrimSpace(string(content)), "\n") { - line = strings.TrimSpace(line) - // Ignoring line starts with # - if strings.HasPrefix(line, "#") { - continue - } - // Inlined # comment also ignored. - if strings.Contains(line, "#") { - line = line[0:strings.Index(line, "#")] - // Trim additional spaces caused by above stripping. - line = strings.TrimSpace(line) - } - for _, ip := range discovery.Generate(line) { - result = append(result, ip) - } - } - return result -} - -func (s *Discovery) fetch() (discovery.Entries, error) { - fileContent, err := ioutil.ReadFile(s.path) - if err != nil { - return nil, fmt.Errorf("failed to read '%s': %v", s.path, err) - } - return discovery.CreateEntries(parseFileContent(fileContent)) -} - -// Watch is exported -func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { - ch := make(chan discovery.Entries) - errCh := make(chan error) - ticker := time.NewTicker(s.heartbeat) - - go func() { - defer close(errCh) - defer close(ch) - - // Send the initial entries if available. - currentEntries, err := s.fetch() - if err != nil { - errCh <- err - } else { - ch <- currentEntries - } - - // Periodically send updates. - for { - select { - case <-ticker.C: - newEntries, err := s.fetch() - if err != nil { - errCh <- err - continue - } - - // Check if the file has really changed. 
- if !newEntries.Equals(currentEntries) { - ch <- newEntries - } - currentEntries = newEntries - case <-stopCh: - ticker.Stop() - return - } - } - }() - - return ch, errCh -} - -// Register is exported -func (s *Discovery) Register(addr string) error { - return discovery.ErrNotImplemented -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/file/file_test.go b/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/file/file_test.go deleted file mode 100644 index 6e861f9a82..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/file/file_test.go +++ /dev/null @@ -1,106 +0,0 @@ -package file - -import ( - "io/ioutil" - "os" - "testing" - - "github.com/docker/swarm/discovery" - "github.com/stretchr/testify/assert" -) - -func TestInitialize(t *testing.T) { - d := &Discovery{} - d.Initialize("/path/to/file", 1000, 0) - assert.Equal(t, d.path, "/path/to/file") -} - -func TestNew(t *testing.T) { - d, err := discovery.New("file:///path/to/file", 0, 0) - assert.NoError(t, err) - assert.Equal(t, d.(*Discovery).path, "/path/to/file") -} - -func TestContent(t *testing.T) { - data := ` -1.1.1.[1:2]:1111 -2.2.2.[2:4]:2222 -` - ips := parseFileContent([]byte(data)) - assert.Len(t, ips, 5) - assert.Equal(t, ips[0], "1.1.1.1:1111") - assert.Equal(t, ips[1], "1.1.1.2:1111") - assert.Equal(t, ips[2], "2.2.2.2:2222") - assert.Equal(t, ips[3], "2.2.2.3:2222") - assert.Equal(t, ips[4], "2.2.2.4:2222") -} - -func TestRegister(t *testing.T) { - discovery := &Discovery{path: "/path/to/file"} - assert.Error(t, discovery.Register("0.0.0.0")) -} - -func TestParsingContentsWithComments(t *testing.T) { - data := ` -### test ### -1.1.1.1:1111 # inline comment -# 2.2.2.2:2222 - ### empty line with comment - 3.3.3.3:3333 -### test ### -` - ips := parseFileContent([]byte(data)) - assert.Len(t, ips, 2) - assert.Equal(t, "1.1.1.1:1111", ips[0]) - assert.Equal(t, "3.3.3.3:3333", ips[1]) -} - -func TestWatch(t *testing.T) { - data := ` -1.1.1.1:1111 -2.2.2.2:2222 -` - expected := discovery.Entries{ - &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, - &discovery.Entry{Host: "2.2.2.2", Port: "2222"}, - } - - // Create a temporary file and remove it. - tmp, err := ioutil.TempFile(os.TempDir(), "discovery-file-test") - assert.NoError(t, err) - assert.NoError(t, tmp.Close()) - assert.NoError(t, os.Remove(tmp.Name())) - - // Set up file discovery. - d := &Discovery{} - d.Initialize(tmp.Name(), 1000, 0) - stopCh := make(chan struct{}) - ch, errCh := d.Watch(stopCh) - - // Make sure it fires errors since the file doesn't exist. - assert.Error(t, <-errCh) - // We have to drain the error channel otherwise Watch will get stuck. - go func() { - for _ = range errCh { - } - }() - - // Write the file and make sure we get the expected value back. - assert.NoError(t, ioutil.WriteFile(tmp.Name(), []byte(data), 0600)) - assert.Equal(t, expected, <-ch) - - // Add a new entry and look it up. - expected = append(expected, &discovery.Entry{Host: "3.3.3.3", Port: "3333"}) - f, err := os.OpenFile(tmp.Name(), os.O_APPEND|os.O_WRONLY, 0600) - assert.NoError(t, err) - assert.NotNil(t, f) - _, err = f.WriteString("\n3.3.3.3:3333\n") - assert.NoError(t, err) - f.Close() - assert.Equal(t, expected, <-ch) - - // Stop and make sure it closes all channels. 
- close(stopCh) - assert.Nil(t, <-ch) - assert.Nil(t, <-errCh) -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/generator.go b/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/generator.go deleted file mode 100644 index d22298298f..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/generator.go +++ /dev/null @@ -1,35 +0,0 @@ -package discovery - -import ( - "fmt" - "regexp" - "strconv" -) - -// Generate takes care of IP generation -func Generate(pattern string) []string { - re, _ := regexp.Compile(`\[(.+):(.+)\]`) - submatch := re.FindStringSubmatch(pattern) - if submatch == nil { - return []string{pattern} - } - - from, err := strconv.Atoi(submatch[1]) - if err != nil { - return []string{pattern} - } - to, err := strconv.Atoi(submatch[2]) - if err != nil { - return []string{pattern} - } - - template := re.ReplaceAllString(pattern, "%d") - - var result []string - for val := from; val <= to; val++ { - entry := fmt.Sprintf(template, val) - result = append(result, entry) - } - - return result -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/generator_test.go b/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/generator_test.go deleted file mode 100644 index 747334452f..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/generator_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package discovery - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestGeneratorNotGenerate(t *testing.T) { - ips := Generate("127.0.0.1") - assert.Equal(t, len(ips), 1) - assert.Equal(t, ips[0], "127.0.0.1") -} - -func TestGeneratorWithPortNotGenerate(t *testing.T) { - ips := Generate("127.0.0.1:8080") - assert.Equal(t, len(ips), 1) - assert.Equal(t, ips[0], "127.0.0.1:8080") -} - -func TestGeneratorMatchFailedNotGenerate(t *testing.T) { - ips := Generate("127.0.0.[1]") - assert.Equal(t, len(ips), 1) - assert.Equal(t, ips[0], "127.0.0.[1]") -} - -func TestGeneratorWithPort(t *testing.T) { - ips := Generate("127.0.0.[1:11]:2375") - assert.Equal(t, len(ips), 11) - assert.Equal(t, ips[0], "127.0.0.1:2375") - assert.Equal(t, ips[1], "127.0.0.2:2375") - assert.Equal(t, ips[2], "127.0.0.3:2375") - assert.Equal(t, ips[3], "127.0.0.4:2375") - assert.Equal(t, ips[4], "127.0.0.5:2375") - assert.Equal(t, ips[5], "127.0.0.6:2375") - assert.Equal(t, ips[6], "127.0.0.7:2375") - assert.Equal(t, ips[7], "127.0.0.8:2375") - assert.Equal(t, ips[8], "127.0.0.9:2375") - assert.Equal(t, ips[9], "127.0.0.10:2375") - assert.Equal(t, ips[10], "127.0.0.11:2375") -} - -func TestGenerateWithMalformedInputAtRangeStart(t *testing.T) { - malformedInput := "127.0.0.[x:11]:2375" - ips := Generate(malformedInput) - assert.Equal(t, len(ips), 1) - assert.Equal(t, ips[0], malformedInput) -} - -func TestGenerateWithMalformedInputAtRangeEnd(t *testing.T) { - malformedInput := "127.0.0.[1:x]:2375" - ips := Generate(malformedInput) - assert.Equal(t, len(ips), 1) - assert.Equal(t, ips[0], malformedInput) -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/kv/kv.go b/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/kv/kv.go deleted file mode 100644 index cd5f6506cf..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/kv/kv.go +++ /dev/null @@ -1,140 +0,0 @@ -package kv - -import ( - "fmt" - "path" - "strings" - "time" - - log "github.com/Sirupsen/logrus" - "github.com/docker/swarm/discovery" - 
"github.com/docker/swarm/pkg/store" -) - -const ( - discoveryPath = "docker/swarm/nodes" -) - -// Discovery is exported -type Discovery struct { - backend store.Backend - store store.Store - heartbeat time.Duration - ttl time.Duration - path string -} - -func init() { - Init() -} - -// Init is exported -func Init() { - discovery.Register("zk", &Discovery{backend: store.ZK}) - discovery.Register("consul", &Discovery{backend: store.CONSUL}) - discovery.Register("etcd", &Discovery{backend: store.ETCD}) -} - -// Initialize is exported -func (s *Discovery) Initialize(uris string, heartbeat time.Duration, ttl time.Duration) error { - var ( - parts = strings.SplitN(uris, "/", 2) - addrs = strings.Split(parts[0], ",") - prefix = "" - err error - ) - - // A custom prefix to the path can be optionally used. - if len(parts) == 2 { - prefix = parts[1] - } - - s.heartbeat = heartbeat - s.ttl = ttl - s.path = path.Join(prefix, discoveryPath) - - // Creates a new store, will ignore options given - // if not supported by the chosen store - s.store, err = store.NewStore( - s.backend, - addrs, - &store.Config{ - EphemeralTTL: s.ttl, - }, - ) - - return err -} - -// Watch the store until either there's a store error or we receive a stop request. -// Returns false if we shouldn't attempt watching the store anymore (stop request received). -func (s *Discovery) watchOnce(stopCh <-chan struct{}, watchCh <-chan []*store.KVPair, discoveryCh chan discovery.Entries, errCh chan error) bool { - for { - select { - case pairs := <-watchCh: - if pairs == nil { - return true - } - - log.WithField("discovery", s.backend).Debugf("Watch triggered with %d nodes", len(pairs)) - - // Convert `KVPair` into `discovery.Entry`. - addrs := make([]string, len(pairs)) - for _, pair := range pairs { - addrs = append(addrs, string(pair.Value)) - } - - entries, err := discovery.CreateEntries(addrs) - if err != nil { - errCh <- err - } else { - discoveryCh <- entries - } - case <-stopCh: - // We were requested to stop watching. - return false - } - } -} - -// Watch is exported -func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { - ch := make(chan discovery.Entries) - errCh := make(chan error) - - go func() { - defer close(ch) - defer close(errCh) - - // Forever: Create a store watch, watch until we get an error and then try again. - // Will only stop if we receive a stopCh request. - for { - // Set up a watch. - watchCh, err := s.store.WatchTree(s.path, stopCh) - if err != nil { - errCh <- err - } else { - if !s.watchOnce(stopCh, watchCh, ch, errCh) { - return - } - } - - // If we get here it means the store watch channel was closed. This - // is unexpected so let's retry later. - errCh <- fmt.Errorf("Unexpected watch error") - time.Sleep(s.heartbeat) - } - }() - return ch, errCh -} - -// Register is exported -func (s *Discovery) Register(addr string) error { - opts := &store.WriteOptions{Ephemeral: true, Heartbeat: s.heartbeat} - return s.store.Put(path.Join(s.path, addr), []byte(addr), opts) -} - -// Store returns the underlying store used by KV discovery. 
-func (s *Discovery) Store() store.Store { - return s.store -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/kv/kv_test.go b/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/kv/kv_test.go deleted file mode 100644 index 39d5d74063..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/kv/kv_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package kv - -import ( - "errors" - "path" - "testing" - "time" - - "github.com/docker/swarm/discovery" - "github.com/docker/swarm/pkg/store" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" -) - -func TestInitialize(t *testing.T) { - d := &Discovery{backend: store.MOCK} - assert.NoError(t, d.Initialize("127.0.0.1", 0, 0)) - s := d.store.(*store.Mock) - assert.Len(t, s.Endpoints, 1) - assert.Equal(t, s.Endpoints[0], "127.0.0.1") - assert.Equal(t, d.path, discoveryPath) - - d = &Discovery{backend: store.MOCK} - assert.NoError(t, d.Initialize("127.0.0.1:1234/path", 0, 0)) - s = d.store.(*store.Mock) - assert.Len(t, s.Endpoints, 1) - assert.Equal(t, s.Endpoints[0], "127.0.0.1:1234") - assert.Equal(t, d.path, "path/"+discoveryPath) - - d = &Discovery{backend: store.MOCK} - assert.NoError(t, d.Initialize("127.0.0.1:1234,127.0.0.2:1234,127.0.0.3:1234/path", 0, 0)) - s = d.store.(*store.Mock) - assert.Len(t, s.Endpoints, 3) - assert.Equal(t, s.Endpoints[0], "127.0.0.1:1234") - assert.Equal(t, s.Endpoints[1], "127.0.0.2:1234") - assert.Equal(t, s.Endpoints[2], "127.0.0.3:1234") - assert.Equal(t, d.path, "path/"+discoveryPath) -} - -func TestWatch(t *testing.T) { - d := &Discovery{backend: store.MOCK} - assert.NoError(t, d.Initialize("127.0.0.1:1234/path", 0, 0)) - s := d.store.(*store.Mock) - - mockCh := make(chan []*store.KVPair) - - // The first watch will fail. - s.On("WatchTree", "path/"+discoveryPath, mock.Anything).Return(mockCh, errors.New("test error")).Once() - // The second one will succeed. - s.On("WatchTree", "path/"+discoveryPath, mock.Anything).Return(mockCh, nil).Once() - expected := discovery.Entries{ - &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, - &discovery.Entry{Host: "2.2.2.2", Port: "2222"}, - } - kvs := []*store.KVPair{ - {Key: path.Join("path", discoveryPath, "1.1.1.1"), Value: []byte("1.1.1.1:1111")}, - {Key: path.Join("path", discoveryPath, "2.2.2.2"), Value: []byte("2.2.2.2:2222")}, - } - - stopCh := make(chan struct{}) - ch, errCh := d.Watch(stopCh) - - // It should fire an error since the first WatchRange call failed. - assert.EqualError(t, <-errCh, "test error") - // We have to drain the error channel otherwise Watch will get stuck. - go func() { - for _ = range errCh { - } - }() - - // Push the entries into the store channel and make sure discovery emits. - mockCh <- kvs - assert.Equal(t, <-ch, expected) - - // Add a new entry. - expected = append(expected, &discovery.Entry{Host: "3.3.3.3", Port: "3333"}) - kvs = append(kvs, &store.KVPair{Key: path.Join("path", discoveryPath, "3.3.3.3"), Value: []byte("3.3.3.3:3333")}) - mockCh <- kvs - assert.Equal(t, <-ch, expected) - - // Make sure that if an error occurs it retries. - // This third call to WatchTree will be checked later by AssertExpectations. - s.On("WatchTree", "path/"+discoveryPath, mock.Anything).Return(mockCh, nil) - close(mockCh) - // Give it enough time to call WatchTree. - time.Sleep(3) - - // Stop and make sure it closes all channels. 
- close(stopCh) - assert.Nil(t, <-ch) - assert.Nil(t, <-errCh) - - s.AssertExpectations(t) -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/nodes/nodes.go b/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/nodes/nodes.go deleted file mode 100644 index 7de7ae2f57..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/nodes/nodes.go +++ /dev/null @@ -1,53 +0,0 @@ -package nodes - -import ( - "strings" - "time" - - "github.com/docker/swarm/discovery" -) - -// Discovery is exported -type Discovery struct { - entries discovery.Entries -} - -func init() { - Init() -} - -// Init is exported -func Init() { - discovery.Register("nodes", &Discovery{}) -} - -// Initialize is exported -func (s *Discovery) Initialize(uris string, _ time.Duration, _ time.Duration) error { - for _, input := range strings.Split(uris, ",") { - for _, ip := range discovery.Generate(input) { - entry, err := discovery.NewEntry(ip) - if err != nil { - return err - } - s.entries = append(s.entries, entry) - } - } - - return nil -} - -// Watch is exported -func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { - ch := make(chan discovery.Entries) - go func() { - defer close(ch) - ch <- s.entries - <-stopCh - }() - return ch, nil -} - -// Register is exported -func (s *Discovery) Register(addr string) error { - return discovery.ErrNotImplemented -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/nodes/nodes_test.go b/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/nodes/nodes_test.go deleted file mode 100644 index d59e38621d..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/nodes/nodes_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package nodes - -import ( - "testing" - - "github.com/docker/swarm/discovery" - "github.com/stretchr/testify/assert" -) - -func TestInitialize(t *testing.T) { - d := &Discovery{} - d.Initialize("1.1.1.1:1111,2.2.2.2:2222", 0, 0) - assert.Equal(t, len(d.entries), 2) - assert.Equal(t, d.entries[0].String(), "1.1.1.1:1111") - assert.Equal(t, d.entries[1].String(), "2.2.2.2:2222") -} - -func TestInitializeWithPattern(t *testing.T) { - d := &Discovery{} - d.Initialize("1.1.1.[1:2]:1111,2.2.2.[2:4]:2222", 0, 0) - assert.Equal(t, len(d.entries), 5) - assert.Equal(t, d.entries[0].String(), "1.1.1.1:1111") - assert.Equal(t, d.entries[1].String(), "1.1.1.2:1111") - assert.Equal(t, d.entries[2].String(), "2.2.2.2:2222") - assert.Equal(t, d.entries[3].String(), "2.2.2.3:2222") - assert.Equal(t, d.entries[4].String(), "2.2.2.4:2222") -} - -func TestWatch(t *testing.T) { - d := &Discovery{} - d.Initialize("1.1.1.1:1111,2.2.2.2:2222", 0, 0) - expected := discovery.Entries{ - &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, - &discovery.Entry{Host: "2.2.2.2", Port: "2222"}, - } - ch, _ := d.Watch(nil) - assert.True(t, expected.Equals(<-ch)) -} - -func TestRegister(t *testing.T) { - d := &Discovery{} - assert.Error(t, d.Register("0.0.0.0")) -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/token/README.md b/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/token/README.md deleted file mode 100644 index 78d6cc7f2c..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/token/README.md +++ /dev/null @@ -1,31 +0,0 @@ -#discovery-stage.hub.docker.com - -Docker Swarm comes with a simple discovery service built into the [Docker 
Hub](http://hub.docker.com) - -The discovery service is still in alpha stage and currently hosted at `https://discovery-stage.hub.docker.com` - -#####Create a new cluster -`-> POST https://discovery-stage.hub.docker.com/v1/clusters` - -`<- <token>` - -#####Add new nodes to a cluster -`-> POST https://discovery-stage.hub.docker.com/v1/clusters/<token> Request body: "<ip>:<port>"` - -`<- OK` - -`-> POST https://discovery-stage.hub.docker.com/v1/clusters/<token> Request body: "<ip>:<port>")` - -`<- OK` - - -#####List nodes in a cluster -`-> GET https://discovery-stage.hub.docker.com/v1/clusters/<token>` - -`<- ["<ip>:<port>", "<ip>:<port>"]` - - -#####Delete a cluster (all the nodes in a cluster) -`-> DELETE https://discovery-stage.hub.docker.com/v1/clusters/<token>` - -`<- OK` diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/token/token.go b/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/token/token.go deleted file mode 100644 index 71f03b7179..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/token/token.go +++ /dev/null @@ -1,143 +0,0 @@ -package token - -import ( - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "net/http" - "strings" - "time" - - "github.com/docker/swarm/discovery" -) - -// DiscoveryUrl is exported -const DiscoveryURL = "https://discovery-stage.hub.docker.com/v1" - -// Discovery is exported -type Discovery struct { - heartbeat time.Duration - ttl time.Duration - url string - token string -} - -func init() { - Init() -} - -// Init is exported -func Init() { - discovery.Register("token", &Discovery{}) -} - -// Initialize is exported -func (s *Discovery) Initialize(urltoken string, heartbeat time.Duration, ttl time.Duration) error { - if i := strings.LastIndex(urltoken, "/"); i != -1 { - s.url = "https://" + urltoken[:i] - s.token = urltoken[i+1:] - } else { - s.url = DiscoveryURL - s.token = urltoken - } - - if s.token == "" { - return errors.New("token is empty") - } - s.heartbeat = heartbeat - s.ttl = ttl - - return nil -} - -// Fetch returns the list of entries for the discovery service at the specified endpoint -func (s *Discovery) fetch() (discovery.Entries, error) { - resp, err := http.Get(fmt.Sprintf("%s/%s/%s", s.url, "clusters", s.token)) - if err != nil { - return nil, err - } - - defer resp.Body.Close() - - var addrs []string - if resp.StatusCode == http.StatusOK { - if err := json.NewDecoder(resp.Body).Decode(&addrs); err != nil { - return nil, fmt.Errorf("Failed to decode response: %v", err) - } - } else { - return nil, fmt.Errorf("Failed to fetch entries, Discovery service returned %d HTTP status code", resp.StatusCode) - } - - return discovery.CreateEntries(addrs) -} - -// Watch is exported -func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { - ch := make(chan discovery.Entries) - ticker := time.NewTicker(s.heartbeat) - errCh := make(chan error) - - go func() { - defer close(ch) - defer close(errCh) - - // Send the initial entries if available. - currentEntries, err := s.fetch() - if err != nil { - errCh <- err - } else { - ch <- currentEntries - } - - // Periodically send updates. - for { - select { - case <-ticker.C: - newEntries, err := s.fetch() - if err != nil { - errCh <- err - continue - } - - // Check if the file has really changed.
- if !newEntries.Equals(currentEntries) { - ch <- newEntries - } - currentEntries = newEntries - case <-stopCh: - ticker.Stop() - return - } - } - }() - - return ch, nil -} - -// Register adds a new entry identified by the into the discovery service -func (s *Discovery) Register(addr string) error { - buf := strings.NewReader(addr) - - resp, err := http.Post(fmt.Sprintf("%s/%s/%s", s.url, - "clusters", s.token), "application/json", buf) - - if err != nil { - return err - } - - resp.Body.Close() - return nil -} - -// CreateCluster returns a unique cluster token -func (s *Discovery) CreateCluster() (string, error) { - resp, err := http.Post(fmt.Sprintf("%s/%s", s.url, "clusters"), "", nil) - if err != nil { - return "", err - } - - defer resp.Body.Close() - token, err := ioutil.ReadAll(resp.Body) - return string(token), err -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/token/token_test.go b/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/token/token_test.go deleted file mode 100644 index 16e3fa6ab9..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/token/token_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package token - -import ( - "testing" - "time" - - "github.com/docker/swarm/discovery" - "github.com/stretchr/testify/assert" -) - -func TestInitialize(t *testing.T) { - discovery := &Discovery{} - err := discovery.Initialize("token", 0, 0) - assert.NoError(t, err) - assert.Equal(t, discovery.token, "token") - assert.Equal(t, discovery.url, DiscoveryURL) - - err = discovery.Initialize("custom/path/token", 0, 0) - assert.NoError(t, err) - assert.Equal(t, discovery.token, "token") - assert.Equal(t, discovery.url, "https://custom/path") - - err = discovery.Initialize("", 0, 0) - assert.Error(t, err) -} - -func TestRegister(t *testing.T) { - d := &Discovery{token: "TEST_TOKEN", url: DiscoveryURL, heartbeat: 1} - expected := "127.0.0.1:2675" - expectedEntries, err := discovery.CreateEntries([]string{expected}) - assert.NoError(t, err) - - // Register - assert.NoError(t, d.Register(expected)) - - // Watch - ch, errCh := d.Watch(nil) - select { - case entries := <-ch: - assert.True(t, entries.Equals(expectedEntries)) - case err := <-errCh: - t.Fatal(err) - case <-time.After(5 * time.Second): - t.Fatal("Timed out") - } - - assert.NoError(t, d.Register(expected)) -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/README.md b/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/README.md deleted file mode 100644 index 23fd0f5f14..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/README.md +++ /dev/null @@ -1,83 +0,0 @@ -# Storage - -The goal of `pkg/store` is to abstract common store operations for multiple Key/Value backends. - -For example, you can use it to store your metadata or for service discovery to register machines and endpoints inside your cluster. - -As of now, `pkg/store` offers support for `Consul`, `Etcd` and `Zookeeper`. 
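For discovery-style usage the same interface is typically combined with an ephemeral key per node and a watch on a shared prefix. The sketch below is illustrative only: the Consul address, the node address and the `docker/swarm/nodes` prefix are placeholder values (the prefix simply mirrors `discoveryPath` in `discovery/kv`), and error handling is reduced to panics for brevity.

```go
package main

import (
	"fmt"
	"path"
	"time"

	"github.com/docker/swarm/pkg/store"
)

func main() {
	// Create a Consul-backed store. EphemeralTTL controls how long an
	// ephemeral key survives once its owner stops renewing it.
	kv, err := store.NewStore(
		store.CONSUL,
		[]string{"localhost:8500"},
		&store.Config{
			ConnectionTimeout: 3 * time.Second,
			EphemeralTTL:      10 * time.Second,
		},
	)
	if err != nil {
		panic(err)
	}

	// Register this node under the discovery prefix as an ephemeral key,
	// the same pattern discovery/kv uses with its discoveryPath.
	addr := "192.168.0.1:2375" // placeholder node address
	key := path.Join("docker/swarm/nodes", addr)
	opts := &store.WriteOptions{Ephemeral: true, Heartbeat: 10 * time.Second}
	if err := kv.Put(key, []byte(addr), opts); err != nil {
		panic(err)
	}

	// Watch the whole prefix; every update delivers the full list of nodes.
	stopCh := make(chan struct{})
	updates, err := kv.WatchTree("docker/swarm/nodes", stopCh)
	if err != nil {
		panic(err)
	}
	for pairs := range updates {
		fmt.Printf("cluster now has %d node(s)\n", len(pairs))
	}
}
```

Because the key is written with `Ephemeral: true`, it expires once the owning node stops renewing it, so watchers converge on the live membership without any explicit deregistration step.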
- -## Example of usage - -### Create a new store and use Put/Get - -```go -package main - -import ( - "fmt" - "time" - - log "github.com/Sirupsen/logrus" - "github.com/docker/swarm/store" -) - -func main() { - var ( - // Consul local address - client = "localhost:8500" - ) - - // Initialize a new store with consul - kv, err = store.NewStore( - store.CONSUL, // or "consul" - []string{client}, - &store.Config{ - Timeout: 10*time.Second, - }, - ) - if err != nil { - log.Error("Cannot create store consul") - } - - key := "foo" - err = kv.Put(key, []byte("bar"), nil) - if err != nil { - log.Error("Error trying to put value at key `", key, "`") - } - - pair, err := kv.Get(key) - if err != nil { - log.Error("Error trying accessing value at key `", key, "`") - } - - log.Info("value: ", string(pair.Value)) -} -``` - - - -## Contributing to a new storage backend - -A new **storage backend** should include those calls: - -```go -type Store interface { - Put(key string, value []byte, options *WriteOptions) error - Get(key string) (*KVPair, error) - Delete(key string) error - Exists(key string) (bool, error) - Watch(key string, stopCh <-chan struct{}) (<-chan *KVPair, error) - WatchTree(prefix string, stopCh <-chan struct{}) (<-chan []*KVPair, error) - NewLock(key string, options *LockOptions) (Locker, error) - List(prefix string) ([]*KVPair, error) - DeleteTree(prefix string) error - AtomicPut(key string, value []byte, previous *KVPair, options *WriteOptions) (bool, *KVPair, error) - AtomicDelete(key string, previous *KVPair) (bool, error) -} -``` - -In the case of Swarm and to be eligible as a **discovery backend** only, a K/V store implementation should at least offer `Get`, `Put`, `WatchTree` and `List`. - -`Put` should support usage of `ttl` to be able to remove entries in case of a node failure. - -You can get inspiration from existing backends to create a new one. This interface could be subject to changes to improve the experience of using the library and contributing to a new backend. diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/consul.go b/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/consul.go deleted file mode 100644 index 159840a6b0..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/consul.go +++ /dev/null @@ -1,403 +0,0 @@ -package store - -import ( - "crypto/tls" - "net/http" - "strings" - "sync" - "time" - - log "github.com/Sirupsen/logrus" - api "github.com/hashicorp/consul/api" -) - -const ( - // DefaultWatchWaitTime is how long we block for at a time to check if the - // watched key has changed. This affects the minimum time it takes to - // cancel a watch. 
- DefaultWatchWaitTime = 15 * time.Second -) - -// Consul embeds the client and watches -type Consul struct { - sync.Mutex - config *api.Config - client *api.Client - ephemeralTTL time.Duration - ephemeralSession string -} - -type consulLock struct { - lock *api.Lock -} - -// InitializeConsul creates a new Consul client given -// a list of endpoints and optional tls config -func InitializeConsul(endpoints []string, options *Config) (Store, error) { - s := &Consul{} - - // Create Consul client - config := api.DefaultConfig() - s.config = config - config.HttpClient = http.DefaultClient - config.Address = endpoints[0] - config.Scheme = "http" - - // Set options - if options != nil { - if options.TLS != nil { - s.setTLS(options.TLS) - } - if options.ConnectionTimeout != 0 { - s.setTimeout(options.ConnectionTimeout) - } - if options.EphemeralTTL != 0 { - s.setEphemeralTTL(options.EphemeralTTL) - } - } - - // Creates a new client - client, err := api.NewClient(config) - if err != nil { - log.Errorf("Couldn't initialize consul client..") - return nil, err - } - s.client = client - - return s, nil -} - -// SetTLS sets Consul TLS options -func (s *Consul) setTLS(tls *tls.Config) { - s.config.HttpClient.Transport = &http.Transport{ - TLSClientConfig: tls, - } - s.config.Scheme = "https" -} - -// SetTimeout sets the timout for connecting to Consul -func (s *Consul) setTimeout(time time.Duration) { - s.config.WaitTime = time -} - -// SetEphemeralTTL sets the ttl for ephemeral nodes -func (s *Consul) setEphemeralTTL(ttl time.Duration) { - s.ephemeralTTL = ttl -} - -// createEphemeralSession creates the global session -// once that is used to delete keys at node failure -func (s *Consul) createEphemeralSession() error { - s.Lock() - defer s.Unlock() - - // Create new session - if s.ephemeralSession == "" { - entry := &api.SessionEntry{ - Behavior: api.SessionBehaviorDelete, - TTL: s.ephemeralTTL.String(), - } - // Create global ephemeral keys session - session, _, err := s.client.Session().Create(entry, nil) - if err != nil { - return err - } - s.ephemeralSession = session - } - return nil -} - -// checkActiveSession checks if the key already has a session attached -func (s *Consul) checkActiveSession(key string) (string, error) { - pair, _, err := s.client.KV().Get(key, nil) - if err != nil { - return "", err - } - if pair != nil && pair.Session != "" { - return pair.Session, nil - } - return "", nil -} - -// Normalize the key for usage in Consul -func (s *Consul) normalize(key string) string { - key = normalize(key) - return strings.TrimPrefix(key, "/") -} - -// Get the value at "key", returns the last modified index -// to use in conjunction to CAS calls -func (s *Consul) Get(key string) (*KVPair, error) { - options := &api.QueryOptions{ - AllowStale: false, - RequireConsistent: true, - } - pair, meta, err := s.client.KV().Get(s.normalize(key), options) - if err != nil { - return nil, err - } - if pair == nil { - return nil, ErrKeyNotFound - } - return &KVPair{pair.Key, pair.Value, meta.LastIndex}, nil -} - -// Put a value at "key" -func (s *Consul) Put(key string, value []byte, opts *WriteOptions) error { - - key = s.normalize(key) - - p := &api.KVPair{ - Key: key, - Value: value, - } - - if opts != nil && opts.Ephemeral { - // Check if there is any previous session with an active TTL - previous, err := s.checkActiveSession(key) - if err != nil { - return err - } - - // Create the global ephemeral session if it does not exist yet - if s.ephemeralSession == "" { - if err = s.createEphemeralSession(); 
err != nil { - return err - } - } - - // If a previous session is still active for that key, use it - // else we use the global ephemeral session - if previous != "" { - p.Session = previous - } else { - p.Session = s.ephemeralSession - } - - // Create lock option with the - // EphemeralSession - lockOpts := &api.LockOptions{ - Key: key, - Session: p.Session, - } - - // Lock and ignore if lock is held - // It's just a placeholder for the - // ephemeral behavior - lock, _ := s.client.LockOpts(lockOpts) - if lock != nil { - lock.Lock(nil) - } - - // Renew the session - _, _, err = s.client.Session().Renew(p.Session, nil) - if err != nil { - s.ephemeralSession = "" - return err - } - } - - _, err := s.client.KV().Put(p, nil) - return err -} - -// Delete a value at "key" -func (s *Consul) Delete(key string) error { - _, err := s.client.KV().Delete(s.normalize(key), nil) - return err -} - -// Exists checks that the key exists inside the store -func (s *Consul) Exists(key string) (bool, error) { - _, err := s.Get(key) - if err != nil && err == ErrKeyNotFound { - return false, err - } - return true, nil -} - -// List the content of a given prefix -func (s *Consul) List(prefix string) ([]*KVPair, error) { - pairs, _, err := s.client.KV().List(s.normalize(prefix), nil) - if err != nil { - return nil, err - } - if len(pairs) == 0 { - return nil, ErrKeyNotFound - } - kv := []*KVPair{} - for _, pair := range pairs { - if pair.Key == prefix { - continue - } - kv = append(kv, &KVPair{pair.Key, pair.Value, pair.ModifyIndex}) - } - return kv, nil -} - -// DeleteTree deletes a range of keys based on prefix -func (s *Consul) DeleteTree(prefix string) error { - _, err := s.client.KV().DeleteTree(s.normalize(prefix), nil) - return err -} - -// Watch changes on a key. -// Returns a channel that will receive changes or an error. -// Upon creating a watch, the current value will be sent to the channel. -// Providing a non-nil stopCh can be used to stop watching. -func (s *Consul) Watch(key string, stopCh <-chan struct{}) (<-chan *KVPair, error) { - key = s.normalize(key) - kv := s.client.KV() - watchCh := make(chan *KVPair) - - go func() { - defer close(watchCh) - - // Use a wait time in order to check if we should quit from time to - // time. - opts := &api.QueryOptions{WaitTime: DefaultWatchWaitTime} - for { - // Check if we should quit - select { - case <-stopCh: - return - default: - } - pair, meta, err := kv.Get(key, opts) - if err != nil { - log.Errorf("consul: %v", err) - return - } - // If LastIndex didn't change then it means `Get` returned because - // of the WaitTime and the key didn't change. - if opts.WaitIndex == meta.LastIndex { - continue - } - opts.WaitIndex = meta.LastIndex - // FIXME: What happens when a key is deleted? - if pair != nil { - watchCh <- &KVPair{pair.Key, pair.Value, pair.ModifyIndex} - } - } - }() - - return watchCh, nil -} - -// WatchTree watches changes on a "directory" -// Returns a channel that will receive changes or an error. -// Upon creating a watch, the current value will be sent to the channel. -// Providing a non-nil stopCh can be used to stop watching. -func (s *Consul) WatchTree(prefix string, stopCh <-chan struct{}) (<-chan []*KVPair, error) { - prefix = s.normalize(prefix) - kv := s.client.KV() - watchCh := make(chan []*KVPair) - - go func() { - defer close(watchCh) - - // Use a wait time in order to check if we should quit from time to - // time. 
- opts := &api.QueryOptions{WaitTime: DefaultWatchWaitTime} - for { - // Check if we should quit - select { - case <-stopCh: - return - default: - } - - pairs, meta, err := kv.List(prefix, opts) - if err != nil { - log.Errorf("consul: %v", err) - return - } - // If LastIndex didn't change then it means `Get` returned because - // of the WaitTime and the key didn't change. - if opts.WaitIndex == meta.LastIndex { - continue - } - opts.WaitIndex = meta.LastIndex - kv := []*KVPair{} - for _, pair := range pairs { - if pair.Key == prefix { - continue - } - kv = append(kv, &KVPair{pair.Key, pair.Value, pair.ModifyIndex}) - } - watchCh <- kv - } - }() - - return watchCh, nil -} - -// NewLock returns a handle to a lock struct which can be used to acquire and -// release the mutex. -func (s *Consul) NewLock(key string, options *LockOptions) (Locker, error) { - consulOpts := &api.LockOptions{ - Key: s.normalize(key), - } - if options != nil { - consulOpts.Value = options.Value - } - l, err := s.client.LockOpts(consulOpts) - if err != nil { - return nil, err - } - return &consulLock{lock: l}, nil -} - -// Lock attempts to acquire the lock and blocks while doing so. -// Returns a channel that is closed if our lock is lost or an error. -func (l *consulLock) Lock() (<-chan struct{}, error) { - return l.lock.Lock(nil) -} - -// Unlock released the lock. It is an error to call this -// if the lock is not currently held. -func (l *consulLock) Unlock() error { - return l.lock.Unlock() -} - -// AtomicPut put a value at "key" if the key has not been -// modified in the meantime, throws an error if this is the case -func (s *Consul) AtomicPut(key string, value []byte, previous *KVPair, options *WriteOptions) (bool, *KVPair, error) { - if previous == nil { - return false, nil, ErrPreviousNotSpecified - } - - p := &api.KVPair{Key: s.normalize(key), Value: value, ModifyIndex: previous.LastIndex} - if work, _, err := s.client.KV().CAS(p, nil); err != nil { - return false, nil, err - } else if !work { - return false, nil, ErrKeyModified - } - - pair, err := s.Get(key) - if err != nil { - return false, nil, err - } - return true, pair, nil -} - -// AtomicDelete deletes a value at "key" if the key has not -// been modified in the meantime, throws an error if this is the case -func (s *Consul) AtomicDelete(key string, previous *KVPair) (bool, error) { - if previous == nil { - return false, ErrPreviousNotSpecified - } - - p := &api.KVPair{Key: s.normalize(key), ModifyIndex: previous.LastIndex} - if work, _, err := s.client.KV().DeleteCAS(p, nil); err != nil { - return false, err - } else if !work { - return false, ErrKeyModified - } - return true, nil -} - -// Close closes the client connection -func (s *Consul) Close() { - return -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/consul_test.go b/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/consul_test.go deleted file mode 100644 index 380497f531..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/consul_test.go +++ /dev/null @@ -1,69 +0,0 @@ -package store - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -func makeConsulClient(t *testing.T) Store { - client := "localhost:8500" - - kv, err := NewStore( - CONSUL, - []string{client}, - &Config{ - ConnectionTimeout: 3 * time.Second, - EphemeralTTL: 2 * time.Second, - }, - ) - if err != nil { - t.Fatalf("cannot create store: %v", err) - } - - return kv -} - -func TestConsulStore(t *testing.T) { - kv := 
makeConsulClient(t) - - testStore(t, kv) -} - -func TestCreateEphemeralSession(t *testing.T) { - kv := makeConsulClient(t) - - consul := kv.(*Consul) - - err := consul.createEphemeralSession() - assert.NoError(t, err) - assert.NotEqual(t, consul.ephemeralSession, "") -} - -func TestCheckActiveSession(t *testing.T) { - kv := makeConsulClient(t) - - consul := kv.(*Consul) - - key := "foo" - value := []byte("bar") - - // Put the first key with the Ephemeral flag - err := kv.Put(key, value, &WriteOptions{Ephemeral: true}) - assert.NoError(t, err) - - // Session should not be empty - session, err := consul.checkActiveSession(key) - assert.NoError(t, err) - assert.NotEqual(t, session, "") - - // Delete the key - err = kv.Delete(key) - assert.NoError(t, err) - - // Check the session again, it should return nothing - session, err = consul.checkActiveSession(key) - assert.NoError(t, err) - assert.Equal(t, session, "") -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/etcd.go b/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/etcd.go deleted file mode 100644 index 6b6460ebde..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/etcd.go +++ /dev/null @@ -1,444 +0,0 @@ -package store - -import ( - "crypto/tls" - "net" - "net/http" - "strings" - "time" - - etcd "github.com/coreos/go-etcd/etcd" -) - -// Etcd embeds the client -type Etcd struct { - client *etcd.Client - ephemeralTTL time.Duration -} - -type etcdLock struct { - client *etcd.Client - stopLock chan struct{} - key string - value string - last *etcd.Response - ttl uint64 -} - -const ( - defaultLockTTL = 20 * time.Second - defaultUpdateTime = 5 * time.Second - - // periodicSync is the time between each call to SyncCluster - periodicSync = 10 * time.Minute -) - -// InitializeEtcd creates a new Etcd client given -// a list of endpoints and optional tls config -func InitializeEtcd(addrs []string, options *Config) (Store, error) { - s := &Etcd{} - - entries := createEndpoints(addrs, "http") - s.client = etcd.NewClient(entries) - - // Set options - if options != nil { - if options.TLS != nil { - s.setTLS(options.TLS) - } - if options.ConnectionTimeout != 0 { - s.setTimeout(options.ConnectionTimeout) - } - if options.EphemeralTTL != 0 { - s.setEphemeralTTL(options.EphemeralTTL) - } - } - - go func() { - for { - s.client.SyncCluster() - time.Sleep(periodicSync) - } - }() - return s, nil -} - -// SetTLS sets the tls configuration given the path -// of certificate files -func (s *Etcd) setTLS(tls *tls.Config) { - // Change to https scheme - var addrs []string - entries := s.client.GetCluster() - for _, entry := range entries { - addrs = append(addrs, strings.Replace(entry, "http", "https", -1)) - } - s.client.SetCluster(addrs) - - // Set transport - t := http.Transport{ - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, // default timeout - KeepAlive: 30 * time.Second, - }).Dial, - TLSHandshakeTimeout: 10 * time.Second, - TLSClientConfig: tls, - } - s.client.SetTransport(&t) -} - -// SetTimeout sets the timeout used for connecting to the store -func (s *Etcd) setTimeout(time time.Duration) { - s.client.SetDialTimeout(time) -} - -// SetHeartbeat sets the heartbeat value to notify we are alive -func (s *Etcd) setEphemeralTTL(time time.Duration) { - s.ephemeralTTL = time -} - -// Create the entire path for a directory that does not exist -func (s *Etcd) createDirectory(path string) error { - if _, err := s.client.CreateDir(normalize(path), 10); err != nil { - if etcdError, 
ok := err.(*etcd.EtcdError); ok { - if etcdError.ErrorCode != 105 { // Skip key already exists - return err - } - } else { - return err - } - } - return nil -} - -// Get the value at "key", returns the last modified index -// to use in conjunction to CAS calls -func (s *Etcd) Get(key string) (*KVPair, error) { - result, err := s.client.Get(normalize(key), false, false) - if err != nil { - if etcdError, ok := err.(*etcd.EtcdError); ok { - // Not a Directory or Not a file - if etcdError.ErrorCode == 102 || etcdError.ErrorCode == 104 { - return nil, ErrKeyNotFound - } - } - return nil, err - } - return &KVPair{key, []byte(result.Node.Value), result.Node.ModifiedIndex}, nil -} - -// Put a value at "key" -func (s *Etcd) Put(key string, value []byte, opts *WriteOptions) error { - - // Default TTL = 0 means no expiration - var ttl uint64 - if opts != nil && opts.Ephemeral { - ttl = uint64(s.ephemeralTTL.Seconds()) - } - - if _, err := s.client.Set(key, string(value), ttl); err != nil { - if etcdError, ok := err.(*etcd.EtcdError); ok { - if etcdError.ErrorCode == 104 { // Not a directory - // Remove the last element (the actual key) and set the prefix as a dir - err = s.createDirectory(getDirectory(key)) - if _, err := s.client.Set(key, string(value), ttl); err != nil { - return err - } - } - } - return err - } - return nil -} - -// Delete a value at "key" -func (s *Etcd) Delete(key string) error { - if _, err := s.client.Delete(normalize(key), false); err != nil { - return err - } - return nil -} - -// Exists checks if the key exists inside the store -func (s *Etcd) Exists(key string) (bool, error) { - entry, err := s.Get(key) - if err != nil { - if err == ErrKeyNotFound || entry.Value == nil { - return false, nil - } - return false, err - } - return true, nil -} - -// Watch changes on a key. -// Returns a channel that will receive changes or an error. -// Upon creating a watch, the current value will be sent to the channel. -// Providing a non-nil stopCh can be used to stop watching. -func (s *Etcd) Watch(key string, stopCh <-chan struct{}) (<-chan *KVPair, error) { - // Get the current value - current, err := s.Get(key) - if err != nil { - return nil, err - } - - // Start an etcd watch. - // Note: etcd will send the current value through the channel. - etcdWatchCh := make(chan *etcd.Response) - etcdStopCh := make(chan bool) - go s.client.Watch(normalize(key), 0, false, etcdWatchCh, etcdStopCh) - - // Adapter goroutine: The goal here is to convert wathever format etcd is - // using into our interface. - watchCh := make(chan *KVPair) - go func() { - defer close(watchCh) - - // Push the current value through the channel. - watchCh <- current - - for { - select { - case result := <-etcdWatchCh: - watchCh <- &KVPair{ - key, - []byte(result.Node.Value), - result.Node.ModifiedIndex, - } - case <-stopCh: - etcdStopCh <- true - return - } - } - }() - return watchCh, nil -} - -// WatchTree watches changes on a "directory" -// Returns a channel that will receive changes or an error. -// Upon creating a watch, the current value will be sent to the channel. -// Providing a non-nil stopCh can be used to stop watching. -func (s *Etcd) WatchTree(prefix string, stopCh <-chan struct{}) (<-chan []*KVPair, error) { - // Get the current value - current, err := s.List(prefix) - if err != nil { - return nil, err - } - - // Start an etcd watch. 
- etcdWatchCh := make(chan *etcd.Response) - etcdStopCh := make(chan bool) - go s.client.Watch(normalize(prefix), 0, true, etcdWatchCh, etcdStopCh) - - // Adapter goroutine: The goal here is to convert wathever format etcd is - // using into our interface. - watchCh := make(chan []*KVPair) - go func() { - defer close(watchCh) - - // Push the current value through the channel. - watchCh <- current - - for { - select { - case <-etcdWatchCh: - // FIXME: We should probably use the value pushed by the channel. - // However, .Node.Nodes seems to be empty. - if list, err := s.List(prefix); err == nil { - watchCh <- list - } - case <-stopCh: - etcdStopCh <- true - return - } - } - }() - return watchCh, nil -} - -// AtomicPut put a value at "key" if the key has not been -// modified in the meantime, throws an error if this is the case -func (s *Etcd) AtomicPut(key string, value []byte, previous *KVPair, options *WriteOptions) (bool, *KVPair, error) { - if previous == nil { - return false, nil, ErrPreviousNotSpecified - } - - meta, err := s.client.CompareAndSwap(normalize(key), string(value), 0, "", previous.LastIndex) - if err != nil { - if etcdError, ok := err.(*etcd.EtcdError); ok { - if etcdError.ErrorCode == 101 { // Compare failed - return false, nil, ErrKeyModified - } - } - return false, nil, err - } - return true, &KVPair{Key: key, Value: value, LastIndex: meta.Node.ModifiedIndex}, nil -} - -// AtomicDelete deletes a value at "key" if the key has not -// been modified in the meantime, throws an error if this is the case -func (s *Etcd) AtomicDelete(key string, previous *KVPair) (bool, error) { - if previous == nil { - return false, ErrPreviousNotSpecified - } - - _, err := s.client.CompareAndDelete(normalize(key), "", previous.LastIndex) - if err != nil { - if etcdError, ok := err.(*etcd.EtcdError); ok { - if etcdError.ErrorCode == 101 { // Compare failed - return false, ErrKeyModified - } - } - return false, err - } - return true, nil -} - -// List the content of a given prefix -func (s *Etcd) List(prefix string) ([]*KVPair, error) { - resp, err := s.client.Get(normalize(prefix), true, true) - if err != nil { - return nil, err - } - kv := []*KVPair{} - for _, n := range resp.Node.Nodes { - key := strings.TrimLeft(n.Key, "/") - kv = append(kv, &KVPair{key, []byte(n.Value), n.ModifiedIndex}) - } - return kv, nil -} - -// DeleteTree deletes a range of keys based on prefix -func (s *Etcd) DeleteTree(prefix string) error { - if _, err := s.client.Delete(normalize(prefix), true); err != nil { - return err - } - return nil -} - -// NewLock returns a handle to a lock struct which can be used to acquire and -// release the mutex. -func (s *Etcd) NewLock(key string, options *LockOptions) (Locker, error) { - var value string - ttl := uint64(time.Duration(defaultLockTTL).Seconds()) - - // Apply options - if options != nil { - if options.Value != nil { - value = string(options.Value) - } - if options.TTL != 0 { - ttl = uint64(options.TTL.Seconds()) - } - } - - // Create lock object - lock := &etcdLock{ - client: s.client, - key: key, - value: value, - ttl: ttl, - } - - return lock, nil -} - -// Lock attempts to acquire the lock and blocks while doing so. -// Returns a channel that is closed if our lock is lost or an error. 
-func (l *etcdLock) Lock() (<-chan struct{}, error) { - - key := normalize(l.key) - - // Lock holder channels - lockHeld := make(chan struct{}) - stopLocking := make(chan struct{}) - - var lastIndex uint64 - - for { - resp, err := l.client.Create(key, l.value, l.ttl) - if err != nil { - if etcdError, ok := err.(*etcd.EtcdError); ok { - // Key already exists - if etcdError.ErrorCode != 105 { - lastIndex = ^uint64(0) - } - } - } else { - lastIndex = resp.Node.ModifiedIndex - } - - _, err = l.client.CompareAndSwap(key, l.value, l.ttl, "", lastIndex) - - if err == nil { - // Leader section - l.stopLock = stopLocking - go l.holdLock(key, lockHeld, stopLocking) - break - } else { - // Seeker section - chW := make(chan *etcd.Response) - chWStop := make(chan bool) - l.waitLock(key, chW, chWStop) - - // Delete or Expire event occured - // Retry - } - } - - return lockHeld, nil -} - -// Hold the lock as long as we can -// Updates the key ttl periodically until we receive -// an explicit stop signal from the Unlock method -func (l *etcdLock) holdLock(key string, lockHeld chan struct{}, stopLocking chan struct{}) { - defer close(lockHeld) - - update := time.NewTicker(defaultUpdateTime) - defer update.Stop() - - var err error - - for { - select { - case <-update.C: - l.last, err = l.client.Update(key, l.value, l.ttl) - if err != nil { - return - } - - case <-stopLocking: - return - } - } -} - -// WaitLock simply waits for the key to be available for creation -func (l *etcdLock) waitLock(key string, eventCh chan *etcd.Response, stopWatchCh chan bool) { - go l.client.Watch(key, 0, false, eventCh, stopWatchCh) - for event := range eventCh { - if event.Action == "delete" || event.Action == "expire" { - return - } - } -} - -// Unlock released the lock. It is an error to call this -// if the lock is not currently held. 
-func (l *etcdLock) Unlock() error { - if l.stopLock != nil { - l.stopLock <- struct{}{} - } - if l.last != nil { - _, err := l.client.CompareAndDelete(normalize(l.key), l.value, l.last.Node.ModifiedIndex) - if err != nil { - return err - } - } - return nil -} - -// Close closes the client connection -func (s *Etcd) Close() { - return -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/etcd_test.go b/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/etcd_test.go deleted file mode 100644 index da3bf5f4ad..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/etcd_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package store - -import ( - "testing" - "time" -) - -func makeEtcdClient(t *testing.T) Store { - client := "localhost:4001" - - kv, err := NewStore( - ETCD, - []string{client}, - &Config{ - ConnectionTimeout: 3 * time.Second, - EphemeralTTL: 2 * time.Second, - }, - ) - if err != nil { - t.Fatalf("cannot create store: %v", err) - } - - return kv -} - -func TestEtcdStore(t *testing.T) { - kv := makeEtcdClient(t) - - testStore(t, kv) -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/helpers.go b/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/helpers.go deleted file mode 100644 index a9386e31c1..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/helpers.go +++ /dev/null @@ -1,46 +0,0 @@ -package store - -import ( - "strings" -) - -// Creates a list of endpoints given the right scheme -func createEndpoints(addrs []string, scheme string) (entries []string) { - for _, addr := range addrs { - entries = append(entries, scheme+"://"+addr) - } - return entries -} - -// Normalize the key for each store to the form: -// -// /path/to/key -// -func normalize(key string) string { - return "/" + join(splitKey(key)) -} - -// Get the full directory part of the key to the form: -// -// /path/to/ -// -func getDirectory(key string) string { - parts := splitKey(key) - parts = parts[:len(parts)-1] - return "/" + join(parts) -} - -// SplitKey splits the key to extract path informations -func splitKey(key string) (path []string) { - if strings.Contains(key, "/") { - path = strings.Split(key, "/") - } else { - path = []string{key} - } - return path -} - -// Join the path parts with '/' -func join(parts []string) string { - return strings.Join(parts, "/") -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/mock.go b/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/mock.go deleted file mode 100644 index e7c3396973..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/mock.go +++ /dev/null @@ -1,109 +0,0 @@ -package store - -import "github.com/stretchr/testify/mock" - -// Mock store. Mocks all Store functions using testify.Mock. -type Mock struct { - mock.Mock - - // Endpoints passed to InitializeMock - Endpoints []string - // Options passed to InitializeMock - Options *Config -} - -// InitializeMock creates a Mock store. 
-func InitializeMock(endpoints []string, options *Config) (Store, error) { - s := &Mock{} - s.Endpoints = endpoints - s.Options = options - return s, nil -} - -// Put mock -func (s *Mock) Put(key string, value []byte, opts *WriteOptions) error { - args := s.Mock.Called(key, value, opts) - return args.Error(0) -} - -// Get mock -func (s *Mock) Get(key string) (*KVPair, error) { - args := s.Mock.Called(key) - return args.Get(0).(*KVPair), args.Error(1) -} - -// Delete mock -func (s *Mock) Delete(key string) error { - args := s.Mock.Called(key) - return args.Error(0) -} - -// Exists mock -func (s *Mock) Exists(key string) (bool, error) { - args := s.Mock.Called(key) - return args.Bool(0), args.Error(1) -} - -// Watch mock -func (s *Mock) Watch(key string, stopCh <-chan struct{}) (<-chan *KVPair, error) { - args := s.Mock.Called(key, stopCh) - return args.Get(0).(<-chan *KVPair), args.Error(1) -} - -// WatchTree mock -func (s *Mock) WatchTree(prefix string, stopCh <-chan struct{}) (<-chan []*KVPair, error) { - args := s.Mock.Called(prefix, stopCh) - return args.Get(0).(chan []*KVPair), args.Error(1) -} - -// NewLock mock -func (s *Mock) NewLock(key string, options *LockOptions) (Locker, error) { - args := s.Mock.Called(key, options) - return args.Get(0).(Locker), args.Error(1) -} - -// List mock -func (s *Mock) List(prefix string) ([]*KVPair, error) { - args := s.Mock.Called(prefix) - return args.Get(0).([]*KVPair), args.Error(1) -} - -// DeleteTree mock -func (s *Mock) DeleteTree(prefix string) error { - args := s.Mock.Called(prefix) - return args.Error(0) -} - -// AtomicPut mock -func (s *Mock) AtomicPut(key string, value []byte, previous *KVPair, opts *WriteOptions) (bool, *KVPair, error) { - args := s.Mock.Called(key, value, previous, opts) - return args.Bool(0), args.Get(1).(*KVPair), args.Error(2) -} - -// AtomicDelete mock -func (s *Mock) AtomicDelete(key string, previous *KVPair) (bool, error) { - args := s.Mock.Called(key, previous) - return args.Bool(0), args.Error(1) -} - -// MockLock mock implementation of Locker -type MockLock struct { - mock.Mock -} - -// Lock mock -func (l *MockLock) Lock() (<-chan struct{}, error) { - args := l.Mock.Called() - return args.Get(0).(<-chan struct{}), args.Error(1) -} - -// Unlock mock -func (l *MockLock) Unlock() error { - args := l.Mock.Called() - return args.Error(0) -} - -// Close mock -func (s *Mock) Close() { - return -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/store.go b/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/store.go deleted file mode 100644 index 0a4813aa34..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/store.go +++ /dev/null @@ -1,154 +0,0 @@ -package store - -import ( - "crypto/tls" - "errors" - "time" - - log "github.com/Sirupsen/logrus" -) - -// Backend represents a KV Store Backend -type Backend string - -const ( - // MOCK backend - MOCK Backend = "mock" - // CONSUL backend - CONSUL = "consul" - // ETCD backend - ETCD = "etcd" - // ZK backend - ZK = "zk" -) - -var ( - // ErrInvalidTTL is a specific error to consul - ErrInvalidTTL = errors.New("Invalid TTL, please change the value to the miminum allowed ttl for the chosen store") - // ErrNotSupported is exported - ErrNotSupported = errors.New("Backend storage not supported yet, please choose another one") - // ErrNotImplemented is exported - ErrNotImplemented = errors.New("Call not implemented in current backend") - // ErrNotReachable is exported - ErrNotReachable = errors.New("Api 
not reachable") - // ErrCannotLock is exported - ErrCannotLock = errors.New("Error acquiring the lock") - // ErrWatchDoesNotExist is exported - ErrWatchDoesNotExist = errors.New("No watch found for specified key") - // ErrKeyModified is exported - ErrKeyModified = errors.New("Unable to complete atomic operation, key modified") - // ErrKeyNotFound is exported - ErrKeyNotFound = errors.New("Key not found in store") - // ErrPreviousNotSpecified is exported - ErrPreviousNotSpecified = errors.New("Previous K/V pair should be provided for the Atomic operation") -) - -// Config contains the options for a storage client -type Config struct { - TLS *tls.Config - ConnectionTimeout time.Duration - EphemeralTTL time.Duration -} - -// Store represents the backend K/V storage -// Each store should support every call listed -// here. Or it couldn't be implemented as a K/V -// backend for libkv -type Store interface { - // Put a value at the specified key - Put(key string, value []byte, options *WriteOptions) error - - // Get a value given its key - Get(key string) (*KVPair, error) - - // Delete the value at the specified key - Delete(key string) error - - // Verify if a Key exists in the store - Exists(key string) (bool, error) - - // Watch changes on a key. - // Returns a channel that will receive changes or an error. - // Upon creating a watch, the current value will be sent to the channel. - // Providing a non-nil stopCh can be used to stop watching. - Watch(key string, stopCh <-chan struct{}) (<-chan *KVPair, error) - - // WatchTree watches changes on a "directory" - // Returns a channel that will receive changes or an error. - // Upon creating a watch, the current value will be sent to the channel. - // Providing a non-nil stopCh can be used to stop watching. - WatchTree(prefix string, stopCh <-chan struct{}) (<-chan []*KVPair, error) - - // CreateLock for a given key. - // The returned Locker is not held and must be acquired with `.Lock`. - // value is optional. - NewLock(key string, options *LockOptions) (Locker, error) - - // List the content of a given prefix - List(prefix string) ([]*KVPair, error) - - // DeleteTree deletes a range of keys based on prefix - DeleteTree(prefix string) error - - // Atomic operation on a single value - AtomicPut(key string, value []byte, previous *KVPair, options *WriteOptions) (bool, *KVPair, error) - - // Atomic delete of a single value - AtomicDelete(key string, previous *KVPair) (bool, error) - - // Close the store connection - Close() -} - -// KVPair represents {Key, Value, Lastindex} tuple -type KVPair struct { - Key string - Value []byte - LastIndex uint64 -} - -// WriteOptions contains optional request parameters -type WriteOptions struct { - Heartbeat time.Duration - Ephemeral bool -} - -// LockOptions contains optional request parameters -type LockOptions struct { - Value []byte // Optional, value to associate with the lock - TTL time.Duration // Optional, expiration ttl associated with the lock -} - -// WatchCallback is used for watch methods on keys -// and is triggered on key change -type WatchCallback func(entries ...*KVPair) - -// Locker provides locking mechanism on top of the store. -// Similar to `sync.Lock` except it may return errors. 
-type Locker interface { - Lock() (<-chan struct{}, error) - Unlock() error -} - -// Initialize creates a new Store object, initializing the client -type Initialize func(addrs []string, options *Config) (Store, error) - -var ( - // Backend initializers - initializers = map[Backend]Initialize{ - MOCK: InitializeMock, - CONSUL: InitializeConsul, - ETCD: InitializeEtcd, - ZK: InitializeZookeeper, - } -) - -// NewStore creates a an instance of store -func NewStore(backend Backend, addrs []string, options *Config) (Store, error) { - if init, exists := initializers[backend]; exists { - log.WithFields(log.Fields{"backend": backend}).Debug("Initializing store service") - return init(addrs, options) - } - - return nil, ErrNotSupported -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/store_test.go b/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/store_test.go deleted file mode 100644 index 89c982911b..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/store_test.go +++ /dev/null @@ -1,401 +0,0 @@ -package store - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -func testStore(t *testing.T, kv Store) { - testPutGetDelete(t, kv) - testWatch(t, kv) - testWatchTree(t, kv) - testAtomicPut(t, kv) - testAtomicDelete(t, kv) - testLockUnlock(t, kv) - testPutEphemeral(t, kv) - testList(t, kv) - testDeleteTree(t, kv) -} - -func testPutGetDelete(t *testing.T, kv Store) { - key := "foo" - value := []byte("bar") - - // Put the key - err := kv.Put(key, value, nil) - assert.NoError(t, err) - - // Get should return the value and an incremented index - pair, err := kv.Get(key) - assert.NoError(t, err) - if assert.NotNil(t, pair) { - assert.NotNil(t, pair.Value) - } - assert.Equal(t, pair.Value, value) - assert.NotEqual(t, pair.LastIndex, 0) - - // Delete the key - err = kv.Delete(key) - assert.NoError(t, err) - - // Get should fail - pair, err = kv.Get(key) - assert.Error(t, err) - assert.Nil(t, pair) -} - -func testWatch(t *testing.T, kv Store) { - key := "hello" - value := []byte("world") - newValue := []byte("world!") - - // Put the key - err := kv.Put(key, value, nil) - assert.NoError(t, err) - - stopCh := make(<-chan struct{}) - events, err := kv.Watch(key, stopCh) - assert.NoError(t, err) - assert.NotNil(t, events) - - // Update loop - go func() { - timeout := time.After(1 * time.Second) - tick := time.Tick(250 * time.Millisecond) - for { - select { - case <-timeout: - return - case <-tick: - err := kv.Put(key, newValue, nil) - if assert.NoError(t, err) { - continue - } - return - } - } - }() - - // Check for updates - timeout := time.After(2 * time.Second) - eventCount := 1 - for { - select { - case event := <-events: - assert.NotNil(t, event) - if eventCount == 1 { - assert.Equal(t, event.Key, key) - assert.Equal(t, event.Value, value) - } else { - assert.Equal(t, event.Key, key) - assert.Equal(t, event.Value, newValue) - } - eventCount++ - // We received all the events we wanted to check - if eventCount >= 4 { - return - } - case <-timeout: - t.Fatal("Timeout reached") - return - } - } -} - -func testWatchTree(t *testing.T, kv Store) { - dir := "tree" - - node1 := "tree/node1" - value1 := []byte("node1") - - node2 := "tree/node2" - value2 := []byte("node2") - - node3 := "tree/node3" - value3 := []byte("node3") - - err := kv.Put(node1, value1, nil) - assert.NoError(t, err) - err = kv.Put(node2, value2, nil) - assert.NoError(t, err) - err = kv.Put(node3, value3, nil) - assert.NoError(t, err) - - 
stopCh := make(<-chan struct{}) - events, err := kv.WatchTree(dir, stopCh) - assert.NoError(t, err) - assert.NotNil(t, events) - - // Update loop - go func() { - timeout := time.After(250 * time.Millisecond) - for { - select { - case <-timeout: - err := kv.Delete(node3) - assert.NoError(t, err) - return - } - } - }() - - // Check for updates - timeout := time.After(4 * time.Second) - for { - select { - case event := <-events: - assert.NotNil(t, event) - // We received the Delete event on a child node - // Exit test successfully - if len(event) == 2 { - return - } - case <-timeout: - t.Fatal("Timeout reached") - return - } - } -} - -func testAtomicPut(t *testing.T, kv Store) { - key := "hello" - value := []byte("world") - - // Put the key - err := kv.Put(key, value, nil) - assert.NoError(t, err) - - // Get should return the value and an incremented index - pair, err := kv.Get(key) - assert.NoError(t, err) - if assert.NotNil(t, pair) { - assert.NotNil(t, pair.Value) - } - assert.Equal(t, pair.Value, value) - assert.NotEqual(t, pair.LastIndex, 0) - - // This CAS should succeed - success, _, err := kv.AtomicPut("hello", []byte("WORLD"), pair, nil) - assert.NoError(t, err) - assert.True(t, success) - - // This CAS should fail - pair.LastIndex = 0 - success, _, err = kv.AtomicPut("hello", []byte("WORLDWORLD"), pair, nil) - assert.Error(t, err) - assert.False(t, success) -} - -func testAtomicDelete(t *testing.T, kv Store) { - key := "atomic" - value := []byte("world") - - // Put the key - err := kv.Put(key, value, nil) - assert.NoError(t, err) - - // Get should return the value and an incremented index - pair, err := kv.Get(key) - assert.NoError(t, err) - if assert.NotNil(t, pair) { - assert.NotNil(t, pair.Value) - } - assert.Equal(t, pair.Value, value) - assert.NotEqual(t, pair.LastIndex, 0) - - tempIndex := pair.LastIndex - - // AtomicDelete should fail - pair.LastIndex = 0 - success, err := kv.AtomicDelete(key, pair) - assert.Error(t, err) - assert.False(t, success) - - // AtomicDelete should succeed - pair.LastIndex = tempIndex - success, err = kv.AtomicDelete(key, pair) - assert.NoError(t, err) - assert.True(t, success) -} - -func testLockUnlock(t *testing.T, kv Store) { - t.Parallel() - - key := "foo" - value := []byte("bar") - - // We should be able to create a new lock on key - lock, err := kv.NewLock(key, &LockOptions{Value: value}) - assert.NoError(t, err) - assert.NotNil(t, lock) - - // Lock should successfully succeed or block - lockChan, err := lock.Lock() - assert.NoError(t, err) - assert.NotNil(t, lockChan) - - // Get should work - pair, err := kv.Get(key) - assert.NoError(t, err) - if assert.NotNil(t, pair) { - assert.NotNil(t, pair.Value) - } - assert.Equal(t, pair.Value, value) - assert.NotEqual(t, pair.LastIndex, 0) - - // Unlock should succeed - err = lock.Unlock() - assert.NoError(t, err) - - // Get should work - pair, err = kv.Get(key) - assert.NoError(t, err) - if assert.NotNil(t, pair) { - assert.NotNil(t, pair.Value) - } - assert.Equal(t, pair.Value, value) - assert.NotEqual(t, pair.LastIndex, 0) -} - -// FIXME Gracefully handle Zookeeper -func testPutEphemeral(t *testing.T, kv Store) { - // Zookeeper: initialize client here (Close() hangs otherwise) - zookeeper := false - if _, ok := kv.(*Zookeeper); ok { - zookeeper = true - kv = makeZkClient(t) - } - - firstKey := "first" - firstValue := []byte("foo") - - secondKey := "second" - secondValue := []byte("bar") - - // Put the first key with the Ephemeral flag - err := kv.Put(firstKey, firstValue, &WriteOptions{Ephemeral: 
true}) - assert.NoError(t, err) - - // Put a second key with the Ephemeral flag - err = kv.Put(secondKey, secondValue, &WriteOptions{Ephemeral: true}) - assert.NoError(t, err) - - // Get on firstKey should work - pair, err := kv.Get(firstKey) - assert.NoError(t, err) - assert.NotNil(t, pair) - - // Get on secondKey should work - pair, err = kv.Get(secondKey) - assert.NoError(t, err) - assert.NotNil(t, pair) - - // Zookeeper: close client connection - if zookeeper { - kv.Close() - } - - // Let the session expire - time.Sleep(5 * time.Second) - - // Zookeeper: re-create the client - if zookeeper { - kv = makeZkClient(t) - } - - // Get on firstKey shouldn't work - pair, err = kv.Get(firstKey) - assert.Error(t, err) - assert.Nil(t, pair) - - // Get on secondKey shouldn't work - pair, err = kv.Get(secondKey) - assert.Error(t, err) - assert.Nil(t, pair) -} - -func testList(t *testing.T, kv Store) { - prefix := "nodes" - - firstKey := "nodes/first" - firstValue := []byte("first") - - secondKey := "nodes/second" - secondValue := []byte("second") - - // Put the first key - err := kv.Put(firstKey, firstValue, nil) - assert.NoError(t, err) - - // Put the second key - err = kv.Put(secondKey, secondValue, nil) - assert.NoError(t, err) - - // List should work and return the two correct values - pairs, err := kv.List(prefix) - assert.NoError(t, err) - if assert.NotNil(t, pairs) { - assert.Equal(t, len(pairs), 2) - } - - // Check pairs, those are not necessarily in Put order - for _, pair := range pairs { - if pair.Key == firstKey { - assert.Equal(t, pair.Value, firstValue) - } - if pair.Key == secondKey { - assert.Equal(t, pair.Value, secondValue) - } - } -} - -func testDeleteTree(t *testing.T, kv Store) { - prefix := "nodes" - - firstKey := "nodes/first" - firstValue := []byte("first") - - secondKey := "nodes/second" - secondValue := []byte("second") - - // Put the first key - err := kv.Put(firstKey, firstValue, nil) - assert.NoError(t, err) - - // Put the second key - err = kv.Put(secondKey, secondValue, nil) - assert.NoError(t, err) - - // Get should work on the first Key - pair, err := kv.Get(firstKey) - assert.NoError(t, err) - if assert.NotNil(t, pair) { - assert.NotNil(t, pair.Value) - } - assert.Equal(t, pair.Value, firstValue) - assert.NotEqual(t, pair.LastIndex, 0) - - // Get should work on the second Key - pair, err = kv.Get(secondKey) - assert.NoError(t, err) - if assert.NotNil(t, pair) { - assert.NotNil(t, pair.Value) - } - assert.Equal(t, pair.Value, secondValue) - assert.NotEqual(t, pair.LastIndex, 0) - - // Delete Values under directory `nodes` - err = kv.DeleteTree(prefix) - assert.NoError(t, err) - - // Get should fail on both keys - pair, err = kv.Get(firstKey) - assert.Error(t, err) - assert.Nil(t, pair) - - pair, err = kv.Get(secondKey) - assert.Error(t, err) - assert.Nil(t, pair) -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/zookeeper.go b/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/zookeeper.go deleted file mode 100644 index 355f513332..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/zookeeper.go +++ /dev/null @@ -1,311 +0,0 @@ -package store - -import ( - "strings" - "time" - - log "github.com/Sirupsen/logrus" - zk "github.com/samuel/go-zookeeper/zk" -) - -const defaultTimeout = 10 * time.Second - -// Zookeeper embeds the zookeeper client -type Zookeeper struct { - timeout time.Duration - client *zk.Conn -} - -type zookeeperLock struct { - client *zk.Conn - lock *zk.Lock - key string - 
value []byte -} - -// InitializeZookeeper creates a new Zookeeper client -// given a list of endpoints and optional tls config -func InitializeZookeeper(endpoints []string, options *Config) (Store, error) { - s := &Zookeeper{} - s.timeout = defaultTimeout - - // Set options - if options != nil { - if options.ConnectionTimeout != 0 { - s.setTimeout(options.ConnectionTimeout) - } - } - - conn, _, err := zk.Connect(endpoints, s.timeout) - if err != nil { - log.Error(err) - return nil, err - } - s.client = conn - return s, nil -} - -// SetTimeout sets the timout for connecting to Zookeeper -func (s *Zookeeper) setTimeout(time time.Duration) { - s.timeout = time -} - -// Get the value at "key", returns the last modified index -// to use in conjunction to CAS calls -func (s *Zookeeper) Get(key string) (*KVPair, error) { - resp, meta, err := s.client.Get(normalize(key)) - if err != nil { - return nil, err - } - if resp == nil { - return nil, ErrKeyNotFound - } - return &KVPair{key, resp, uint64(meta.Version)}, nil -} - -// Create the entire path for a directory that does not exist -func (s *Zookeeper) createFullpath(path []string, ephemeral bool) error { - for i := 1; i <= len(path); i++ { - newpath := "/" + strings.Join(path[:i], "/") - if i == len(path) && ephemeral { - _, err := s.client.Create(newpath, []byte{1}, zk.FlagEphemeral, zk.WorldACL(zk.PermAll)) - return err - } - _, err := s.client.Create(newpath, []byte{1}, 0, zk.WorldACL(zk.PermAll)) - if err != nil { - // Skip if node already exists - if err != zk.ErrNodeExists { - return err - } - } - } - return nil -} - -// Put a value at "key" -func (s *Zookeeper) Put(key string, value []byte, opts *WriteOptions) error { - fkey := normalize(key) - exists, err := s.Exists(key) - if err != nil { - return err - } - if !exists { - if opts != nil && opts.Ephemeral { - s.createFullpath(splitKey(key), opts.Ephemeral) - } else { - s.createFullpath(splitKey(key), false) - } - } - _, err = s.client.Set(fkey, value, -1) - return err -} - -// Delete a value at "key" -func (s *Zookeeper) Delete(key string) error { - err := s.client.Delete(normalize(key), -1) - return err -} - -// Exists checks if the key exists inside the store -func (s *Zookeeper) Exists(key string) (bool, error) { - exists, _, err := s.client.Exists(normalize(key)) - if err != nil { - return false, err - } - return exists, nil -} - -// Watch changes on a key. -// Returns a channel that will receive changes or an error. -// Upon creating a watch, the current value will be sent to the channel. -// Providing a non-nil stopCh can be used to stop watching. -func (s *Zookeeper) Watch(key string, stopCh <-chan struct{}) (<-chan *KVPair, error) { - fkey := normalize(key) - pair, err := s.Get(key) - if err != nil { - return nil, err - } - - // Catch zk notifications and fire changes into the channel. - watchCh := make(chan *KVPair) - go func() { - defer close(watchCh) - - // Get returns the current value before setting the watch. - watchCh <- pair - for { - _, _, eventCh, err := s.client.GetW(fkey) - if err != nil { - return - } - select { - case e := <-eventCh: - if e.Type == zk.EventNodeDataChanged { - if entry, err := s.Get(key); err == nil { - watchCh <- entry - } - } - case <-stopCh: - // There is no way to stop GetW so just quit - return - } - } - }() - - return watchCh, nil -} - -// WatchTree watches changes on a "directory" -// Returns a channel that will receive changes or an error. -// Upon creating a watch, the current value will be sent to the channel. 
-// Providing a non-nil stopCh can be used to stop watching. -func (s *Zookeeper) WatchTree(prefix string, stopCh <-chan struct{}) (<-chan []*KVPair, error) { - fprefix := normalize(prefix) - entries, err := s.List(prefix) - if err != nil { - return nil, err - } - - // Catch zk notifications and fire changes into the channel. - watchCh := make(chan []*KVPair) - go func() { - defer close(watchCh) - - // List returns the current values before setting the watch. - watchCh <- entries - - for { - _, _, eventCh, err := s.client.ChildrenW(fprefix) - if err != nil { - return - } - select { - case e := <-eventCh: - if e.Type == zk.EventNodeChildrenChanged { - if kv, err := s.List(prefix); err == nil { - watchCh <- kv - } - } - case <-stopCh: - // There is no way to stop GetW so just quit - return - } - } - }() - - return watchCh, nil -} - -// List the content of a given prefix -func (s *Zookeeper) List(prefix string) ([]*KVPair, error) { - keys, stat, err := s.client.Children(normalize(prefix)) - if err != nil { - return nil, err - } - kv := []*KVPair{} - for _, key := range keys { - // FIXME Costly Get request for each child key.. - pair, err := s.Get(prefix + normalize(key)) - if err != nil { - return nil, err - } - kv = append(kv, &KVPair{key, []byte(pair.Value), uint64(stat.Version)}) - } - return kv, nil -} - -// DeleteTree deletes a range of keys based on prefix -func (s *Zookeeper) DeleteTree(prefix string) error { - pairs, err := s.List(prefix) - if err != nil { - return err - } - var reqs []interface{} - for _, pair := range pairs { - reqs = append(reqs, &zk.DeleteRequest{ - Path: normalize(prefix + "/" + pair.Key), - Version: -1, - }) - } - _, err = s.client.Multi(reqs...) - return err -} - -// AtomicPut put a value at "key" if the key has not been -// modified in the meantime, throws an error if this is the case -func (s *Zookeeper) AtomicPut(key string, value []byte, previous *KVPair, _ *WriteOptions) (bool, *KVPair, error) { - if previous == nil { - return false, nil, ErrPreviousNotSpecified - } - - meta, err := s.client.Set(normalize(key), value, int32(previous.LastIndex)) - if err != nil { - if err == zk.ErrBadVersion { - return false, nil, ErrKeyModified - } - return false, nil, err - } - return true, &KVPair{Key: key, Value: value, LastIndex: uint64(meta.Version)}, nil -} - -// AtomicDelete deletes a value at "key" if the key has not -// been modified in the meantime, throws an error if this is the case -func (s *Zookeeper) AtomicDelete(key string, previous *KVPair) (bool, error) { - if previous == nil { - return false, ErrPreviousNotSpecified - } - - err := s.client.Delete(normalize(key), int32(previous.LastIndex)) - if err != nil { - if err == zk.ErrBadVersion { - return false, ErrKeyModified - } - return false, err - } - return true, nil -} - -// NewLock returns a handle to a lock struct which can be used to acquire and -// release the mutex. -func (s *Zookeeper) NewLock(key string, options *LockOptions) (Locker, error) { - value := []byte("") - - // Apply options - if options != nil { - if options.Value != nil { - value = options.Value - } - } - - return &zookeeperLock{ - client: s.client, - key: normalize(key), - value: value, - lock: zk.NewLock(s.client, normalize(key), zk.WorldACL(zk.PermAll)), - }, nil -} - -// Lock attempts to acquire the lock and blocks while doing so. -// Returns a channel that is closed if our lock is lost or an error. 
-func (l *zookeeperLock) Lock() (<-chan struct{}, error) { - err := l.lock.Lock() - - if err == nil { - // We hold the lock, we can set our value - // FIXME: When the last leader leaves the election, this value will be left behind - _, err = l.client.Set(l.key, l.value, -1) - } - - return make(chan struct{}), err -} - -// Unlock released the lock. It is an error to call this -// if the lock is not currently held. -func (l *zookeeperLock) Unlock() error { - return l.lock.Unlock() -} - -// Close closes the client connection -func (s *Zookeeper) Close() { - s.client.Close() -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/zookeeper_test.go b/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/zookeeper_test.go deleted file mode 100644 index f5e4fec0db..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/zookeeper_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package store - -import ( - "testing" - "time" -) - -func makeZkClient(t *testing.T) Store { - client := "localhost:2181" - - kv, err := NewStore( - ZK, - []string{client}, - &Config{ - ConnectionTimeout: 3 * time.Second, - EphemeralTTL: 2 * time.Second, - }, - ) - if err != nil { - t.Fatalf("cannot create store: %v", err) - } - - return kv -} - -func TestZkStore(t *testing.T) { - kv := makeZkClient(t) - - testStore(t, kv) -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/godbus/dbus/conn.go b/libnetwork/Godeps/_workspace/src/github.com/godbus/dbus/conn.go index a4f5394010..9aa2e12809 100644 --- a/libnetwork/Godeps/_workspace/src/github.com/godbus/dbus/conn.go +++ b/libnetwork/Godeps/_workspace/src/github.com/godbus/dbus/conn.go @@ -16,6 +16,7 @@ var ( systemBusLck sync.Mutex sessionBus *Conn sessionBusLck sync.Mutex + sessionEnvLck sync.Mutex ) // ErrClosed is the error returned by calls on a closed connection. @@ -46,7 +47,7 @@ type Conn struct { calls map[uint32]*Call callsLck sync.RWMutex - handlers map[ObjectPath]map[string]exportWithMapping + handlers map[ObjectPath]map[string]exportedObj handlersLck sync.RWMutex out chan *Message @@ -91,6 +92,8 @@ func SessionBus() (conn *Conn, err error) { // SessionBusPrivate returns a new private connection to the session bus. func SessionBusPrivate() (*Conn, error) { + sessionEnvLck.Lock() + defer sessionEnvLck.Unlock() address := os.Getenv("DBUS_SESSION_BUS_ADDRESS") if address != "" && address != "autolaunch:" { return Dial(address) @@ -157,7 +160,7 @@ func newConn(tr transport) (*Conn, error) { conn.transport = tr conn.calls = make(map[uint32]*Call) conn.out = make(chan *Message, 10) - conn.handlers = make(map[ObjectPath]map[string]exportWithMapping) + conn.handlers = make(map[ObjectPath]map[string]exportedObj) conn.nextSerial = 1 conn.serialUsed = map[uint32]bool{0: true} conn.busObj = conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus") @@ -499,9 +502,7 @@ func (conn *Conn) sendReply(dest string, serial uint32, values ...interface{}) { // The caller has to make sure that ch is sufficiently buffered; if a message // arrives when a write to c is not possible, it is discarded. // -// Multiple of these channels can be registered at the same time. Passing a -// channel that already is registered will remove it from the list of the -// registered channels. +// Multiple of these channels can be registered at the same time. 
// // These channels are "overwritten" by Eavesdrop; i.e., if there currently is a // channel for eavesdropped messages, this channel receives all signals, and @@ -512,6 +513,19 @@ func (conn *Conn) Signal(ch chan<- *Signal) { conn.signalsLck.Unlock() } +// RemoveSignal removes the given channel from the list of the registered channels. +func (conn *Conn) RemoveSignal(ch chan<- *Signal) { + conn.signalsLck.Lock() + for i := len(conn.signals) - 1; i >= 0; i-- { + if ch == conn.signals[i] { + copy(conn.signals[i:], conn.signals[i+1:]) + conn.signals[len(conn.signals)-1] = nil + conn.signals = conn.signals[:len(conn.signals)-1] + } + } + conn.signalsLck.Unlock() +} + // SupportsUnixFDs returns whether the underlying transport supports passing of // unix file descriptors. If this is false, method calls containing unix file // descriptors will return an error and emitted signals containing them will @@ -610,16 +624,11 @@ func dereferenceAll(vs []interface{}) []interface{} { // getKey gets a key from a the list of keys. Returns "" on error / not found... func getKey(s, key string) string { - i := strings.Index(s, key) - if i == -1 { - return "" + for _, keyEqualsValue := range strings.Split(s, ",") { + keyValue := strings.SplitN(keyEqualsValue, "=", 2) + if len(keyValue) == 2 && keyValue[0] == key { + return keyValue[1] + } } - if i+len(key)+1 >= len(s) || s[i+len(key)] != '=' { - return "" - } - j := strings.Index(s, ",") - if j == -1 { - j = len(s) - } - return s[i+len(key)+1 : j] + return "" } diff --git a/libnetwork/Godeps/_workspace/src/github.com/godbus/dbus/conn_other.go b/libnetwork/Godeps/_workspace/src/github.com/godbus/dbus/conn_other.go index f74b8758d4..289e8c5d2b 100644 --- a/libnetwork/Godeps/_workspace/src/github.com/godbus/dbus/conn_other.go +++ b/libnetwork/Godeps/_workspace/src/github.com/godbus/dbus/conn_other.go @@ -5,6 +5,7 @@ package dbus import ( "bytes" "errors" + "os" "os/exec" ) @@ -23,5 +24,8 @@ func sessionBusPlatform() (*Conn, error) { return nil, errors.New("dbus: couldn't determine address of session bus") } - return Dial(string(b[i+1 : j])) + env, addr := string(b[0:i]), string(b[i+1:j]) + os.Setenv(env, addr) + + return Dial(addr) } diff --git a/libnetwork/Godeps/_workspace/src/github.com/godbus/dbus/export.go b/libnetwork/Godeps/_workspace/src/github.com/godbus/dbus/export.go index c6440a7416..6c33522026 100644 --- a/libnetwork/Godeps/_workspace/src/github.com/godbus/dbus/export.go +++ b/libnetwork/Godeps/_workspace/src/github.com/godbus/dbus/export.go @@ -1,6 +1,7 @@ package dbus import ( + "bytes" "errors" "fmt" "reflect" @@ -22,67 +23,60 @@ var ( } ) -// exportWithMapping represents an exported struct along with a method name -// mapping to allow for exporting lower-case methods, etc. -type exportWithMapping struct { - export interface{} - - // Method name mapping; key -> struct method, value -> dbus method. - mapping map[string]string +// exportedObj represents an exported object. It stores a precomputed +// method table that represents the methods exported on the bus. +type exportedObj struct { + methods map[string]reflect.Value // Whether or not this export is for the entire subtree includeSubtree bool } +func (obj exportedObj) Method(name string) (reflect.Value, bool) { + out, exists := obj.methods[name] + return out, exists +} + // Sender is a type which can be used in exported methods to receive the message // sender. 
type Sender string -func exportedMethod(export exportWithMapping, name string) reflect.Value { - if export.export == nil { - return reflect.Value{} +func computeMethodName(name string, mapping map[string]string) string { + newname, ok := mapping[name] + if ok { + name = newname } + return name +} - // If a mapping was included in the export, check the map to see if we - // should be looking for a different method in the export. - if export.mapping != nil { - for key, value := range export.mapping { - if value == name { - name = key - break - } - - // Catch the case where a method is aliased but the client is calling - // the original, e.g. the "Foo" method was exported mapped to - // "foo," and dbus client called the original "Foo." - if key == name { - return reflect.Value{} - } +func getMethods(in interface{}, mapping map[string]string) map[string]reflect.Value { + if in == nil { + return nil + } + methods := make(map[string]reflect.Value) + val := reflect.ValueOf(in) + typ := val.Type() + for i := 0; i < typ.NumMethod(); i++ { + methtype := typ.Method(i) + method := val.Method(i) + t := method.Type() + // only track valid methods must return *Error as last arg + // and must be exported + if t.NumOut() == 0 || + t.Out(t.NumOut()-1) != reflect.TypeOf(&errmsgInvalidArg) || + methtype.PkgPath != "" { + continue } + // map names while building table + methods[computeMethodName(methtype.Name, mapping)] = method } - - value := reflect.ValueOf(export.export) - m := value.MethodByName(name) - - // Catch the case of attempting to call an unexported method - method, ok := value.Type().MethodByName(name) - - if !m.IsValid() || !ok || method.PkgPath != "" { - return reflect.Value{} - } - t := m.Type() - if t.NumOut() == 0 || - t.Out(t.NumOut()-1) != reflect.TypeOf(&errmsgInvalidArg) { - - return reflect.Value{} - } - return m + return methods } // searchHandlers will look through all registered handlers looking for one // to handle the given path. If a verbatim one isn't found, it will check for // a subtree registration for the path as well. 
-func (conn *Conn) searchHandlers(path ObjectPath) (map[string]exportWithMapping, bool) { +func (conn *Conn) searchHandlers(path ObjectPath) (map[string]exportedObj, bool) { conn.handlersLck.RLock() defer conn.handlersLck.RUnlock() @@ -93,10 +87,10 @@ func (conn *Conn) searchHandlers(path ObjectPath) (map[string]exportWithMapping, // If handlers weren't found for this exact path, look for a matching subtree // registration - handlers = make(map[string]exportWithMapping) + handlers = make(map[string]exportedObj) path = path[:strings.LastIndex(string(path), "/")] for len(path) > 0 { - var subtreeHandlers map[string]exportWithMapping + var subtreeHandlers map[string]exportedObj subtreeHandlers, ok = conn.handlers[path] if ok { for iface, handler := range subtreeHandlers { @@ -133,6 +127,28 @@ func (conn *Conn) handleCall(msg *Message) { conn.sendError(errmsgUnknownMethod, sender, serial) } return + } else if ifaceName == "org.freedesktop.DBus.Introspectable" && name == "Introspect" { + if _, ok := conn.handlers[path]; !ok { + subpath := make(map[string]struct{}) + var xml bytes.Buffer + xml.WriteString("") + for h, _ := range conn.handlers { + p := string(path) + if p != "/" { + p += "/" + } + if strings.HasPrefix(string(h), p) { + node_name := strings.Split(string(h[len(p):]), "/")[0] + subpath[node_name] = struct{}{} + } + } + for s, _ := range subpath { + xml.WriteString("\n\t") + } + xml.WriteString("\n") + conn.sendReply(sender, serial, xml.String()) + return + } } if len(name) == 0 { conn.sendError(errmsgUnknownMethod, sender, serial) @@ -146,19 +162,20 @@ func (conn *Conn) handleCall(msg *Message) { } var m reflect.Value + var exists bool if hasIface { iface := handlers[ifaceName] - m = exportedMethod(iface, name) + m, exists = iface.Method(name) } else { for _, v := range handlers { - m = exportedMethod(v, name) - if m.IsValid() { + m, exists = v.Method(name) + if exists { break } } } - if !m.IsValid() { + if !exists { conn.sendError(errmsgUnknownMethod, sender, serial) return } @@ -303,7 +320,7 @@ func (conn *Conn) Export(v interface{}, path ObjectPath, iface string) error { // The keys in the map are the real method names (exported on the struct), and // the values are the method names to be exported on DBus. func (conn *Conn) ExportWithMap(v interface{}, mapping map[string]string, path ObjectPath, iface string) error { - return conn.exportWithMap(v, mapping, path, iface, false) + return conn.export(getMethods(v, mapping), path, iface, false) } // ExportSubtree works exactly like Export but registers the given value for @@ -326,11 +343,48 @@ func (conn *Conn) ExportSubtree(v interface{}, path ObjectPath, iface string) er // The keys in the map are the real method names (exported on the struct), and // the values are the method names to be exported on DBus. func (conn *Conn) ExportSubtreeWithMap(v interface{}, mapping map[string]string, path ObjectPath, iface string) error { - return conn.exportWithMap(v, mapping, path, iface, true) + return conn.export(getMethods(v, mapping), path, iface, true) +} + +// ExportMethodTable like Export registers the given methods as an object +// on the message bus. Unlike Export the it uses a method table to define +// the object instead of a native go object. +// +// The method table is a map from method name to function closure +// representing the method. This allows an object exported on the bus to not +// necessarily be a native go object. It can be useful for generating exposed +// methods on the fly. 
+// +// Any non-function objects in the method table are ignored. +func (conn *Conn) ExportMethodTable(methods map[string]interface{}, path ObjectPath, iface string) error { + return conn.exportMethodTable(methods, path, iface, false) +} + +// Like ExportSubtree, but with the same caveats as ExportMethodTable. +func (conn *Conn) ExportSubtreeMethodTable(methods map[string]interface{}, path ObjectPath, iface string) error { + return conn.exportMethodTable(methods, path, iface, true) +} + +func (conn *Conn) exportMethodTable(methods map[string]interface{}, path ObjectPath, iface string, includeSubtree bool) error { + out := make(map[string]reflect.Value) + for name, method := range methods { + rval := reflect.ValueOf(method) + if rval.Kind() != reflect.Func { + continue + } + t := rval.Type() + // only track valid methods must return *Error as last arg + if t.NumOut() == 0 || + t.Out(t.NumOut()-1) != reflect.TypeOf(&errmsgInvalidArg) { + continue + } + out[name] = rval + } + return conn.export(out, path, iface, includeSubtree) } // exportWithMap is the worker function for all exports/registrations. -func (conn *Conn) exportWithMap(v interface{}, mapping map[string]string, path ObjectPath, iface string, includeSubtree bool) error { +func (conn *Conn) export(methods map[string]reflect.Value, path ObjectPath, iface string, includeSubtree bool) error { if !path.IsValid() { return fmt.Errorf(`dbus: Invalid path name: "%s"`, path) } @@ -339,7 +393,7 @@ func (conn *Conn) exportWithMap(v interface{}, mapping map[string]string, path O defer conn.handlersLck.Unlock() // Remove a previous export if the interface is nil - if v == nil { + if methods == nil { if _, ok := conn.handlers[path]; ok { delete(conn.handlers[path], iface) if len(conn.handlers[path]) == 0 { @@ -353,11 +407,14 @@ func (conn *Conn) exportWithMap(v interface{}, mapping map[string]string, path O // If this is the first handler for this path, make a new map to hold all // handlers for this path. if _, ok := conn.handlers[path]; !ok { - conn.handlers[path] = make(map[string]exportWithMapping) + conn.handlers[path] = make(map[string]exportedObj) } // Finally, save this handler - conn.handlers[path][iface] = exportWithMapping{export: v, mapping: mapping, includeSubtree: includeSubtree} + conn.handlers[path][iface] = exportedObj{ + methods: methods, + includeSubtree: includeSubtree, + } return nil } diff --git a/libnetwork/Godeps/_workspace/src/github.com/godbus/dbus/introspect/call.go b/libnetwork/Godeps/_workspace/src/github.com/godbus/dbus/introspect/call.go deleted file mode 100644 index 790a23ec24..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/godbus/dbus/introspect/call.go +++ /dev/null @@ -1,27 +0,0 @@ -package introspect - -import ( - "encoding/xml" - "github.com/godbus/dbus" - "strings" -) - -// Call calls org.freedesktop.Introspectable.Introspect on a remote object -// and returns the introspection data. 
-func Call(o dbus.BusObject) (*Node, error) { - var xmldata string - var node Node - - err := o.Call("org.freedesktop.DBus.Introspectable.Introspect", 0).Store(&xmldata) - if err != nil { - return nil, err - } - err = xml.NewDecoder(strings.NewReader(xmldata)).Decode(&node) - if err != nil { - return nil, err - } - if node.Name == "" { - node.Name = string(o.Path()) - } - return &node, nil -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/godbus/dbus/introspect/introspect.go b/libnetwork/Godeps/_workspace/src/github.com/godbus/dbus/introspect/introspect.go deleted file mode 100644 index b06c3f1cf2..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/godbus/dbus/introspect/introspect.go +++ /dev/null @@ -1,86 +0,0 @@ -// Package introspect provides some utilities for dealing with the DBus -// introspection format. -package introspect - -import "encoding/xml" - -// The introspection data for the org.freedesktop.DBus.Introspectable interface. -var IntrospectData = Interface{ - Name: "org.freedesktop.DBus.Introspectable", - Methods: []Method{ - { - Name: "Introspect", - Args: []Arg{ - {"out", "s", "out"}, - }, - }, - }, -} - -// XML document type declaration of the introspection format version 1.0 -const IntrospectDeclarationString = ` - -` - -// The introspection data for the org.freedesktop.DBus.Introspectable interface, -// as a string. -const IntrospectDataString = ` - - - - - -` - -// Node is the root element of an introspection. -type Node struct { - XMLName xml.Name `xml:"node"` - Name string `xml:"name,attr,omitempty"` - Interfaces []Interface `xml:"interface"` - Children []Node `xml:"node,omitempty"` -} - -// Interface describes a DBus interface that is available on the message bus. -type Interface struct { - Name string `xml:"name,attr"` - Methods []Method `xml:"method"` - Signals []Signal `xml:"signal"` - Properties []Property `xml:"property"` - Annotations []Annotation `xml:"annotation"` -} - -// Method describes a Method on an Interface as retured by an introspection. -type Method struct { - Name string `xml:"name,attr"` - Args []Arg `xml:"arg"` - Annotations []Annotation `xml:"annotation"` -} - -// Signal describes a Signal emitted on an Interface. -type Signal struct { - Name string `xml:"name,attr"` - Args []Arg `xml:"arg"` - Annotations []Annotation `xml:"annotation"` -} - -// Property describes a property of an Interface. -type Property struct { - Name string `xml:"name,attr"` - Type string `xml:"type,attr"` - Access string `xml:"access,attr"` - Annotations []Annotation `xml:"annotation"` -} - -// Arg represents an argument of a method or a signal. -type Arg struct { - Name string `xml:"name,attr,omitempty"` - Type string `xml:"type,attr"` - Direction string `xml:"direction,attr,omitempty"` -} - -// Annotation is an annotation in the introspection format. -type Annotation struct { - Name string `xml:"name,attr"` - Value string `xml:"value,attr"` -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/godbus/dbus/introspect/introspectable.go b/libnetwork/Godeps/_workspace/src/github.com/godbus/dbus/introspect/introspectable.go deleted file mode 100644 index 2f16690b99..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/godbus/dbus/introspect/introspectable.go +++ /dev/null @@ -1,76 +0,0 @@ -package introspect - -import ( - "encoding/xml" - "github.com/godbus/dbus" - "reflect" - "strings" -) - -// Introspectable implements org.freedesktop.Introspectable. 
-// -// You can create it by converting the XML-formatted introspection data from a -// string to an Introspectable or call NewIntrospectable with a Node. Then, -// export it as org.freedesktop.Introspectable on you object. -type Introspectable string - -// NewIntrospectable returns an Introspectable that returns the introspection -// data that corresponds to the given Node. If n.Interfaces doesn't contain the -// data for org.freedesktop.DBus.Introspectable, it is added automatically. -func NewIntrospectable(n *Node) Introspectable { - found := false - for _, v := range n.Interfaces { - if v.Name == "org.freedesktop.DBus.Introspectable" { - found = true - break - } - } - if !found { - n.Interfaces = append(n.Interfaces, IntrospectData) - } - b, err := xml.Marshal(n) - if err != nil { - panic(err) - } - return Introspectable(strings.TrimSpace(IntrospectDeclarationString) + string(b)) -} - -// Introspect implements org.freedesktop.Introspectable.Introspect. -func (i Introspectable) Introspect() (string, *dbus.Error) { - return string(i), nil -} - -// Methods returns the description of the methods of v. This can be used to -// create a Node which can be passed to NewIntrospectable. -func Methods(v interface{}) []Method { - t := reflect.TypeOf(v) - ms := make([]Method, 0, t.NumMethod()) - for i := 0; i < t.NumMethod(); i++ { - if t.Method(i).PkgPath != "" { - continue - } - mt := t.Method(i).Type - if mt.NumOut() == 0 || - mt.Out(mt.NumOut()-1) != reflect.TypeOf(&dbus.Error{}) { - - continue - } - var m Method - m.Name = t.Method(i).Name - m.Args = make([]Arg, 0, mt.NumIn()+mt.NumOut()-2) - for j := 1; j < mt.NumIn(); j++ { - if mt.In(j) != reflect.TypeOf((*dbus.Sender)(nil)).Elem() && - mt.In(j) != reflect.TypeOf((*dbus.Message)(nil)).Elem() { - arg := Arg{"", dbus.SignatureOfType(mt.In(j)).String(), "in"} - m.Args = append(m.Args, arg) - } - } - for j := 0; j < mt.NumOut()-1; j++ { - arg := Arg{"", dbus.SignatureOfType(mt.Out(j)).String(), "out"} - m.Args = append(m.Args, arg) - } - m.Annotations = make([]Annotation, 0) - ms = append(ms, m) - } - return ms -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/godbus/dbus/message.go b/libnetwork/Godeps/_workspace/src/github.com/godbus/dbus/message.go index 075d6e38ba..6a925367eb 100644 --- a/libnetwork/Godeps/_workspace/src/github.com/godbus/dbus/message.go +++ b/libnetwork/Godeps/_workspace/src/github.com/godbus/dbus/message.go @@ -22,6 +22,13 @@ const ( // FlagNoAutoStart signals that the message bus should not automatically // start an application when handling this message. FlagNoAutoStart + // FlagAllowInteractiveAuthorization may be set on a method call + // message to inform the receiving side that the caller is prepared + // to wait for interactive authorization, which might take a + // considerable time to complete. For instance, if this flag is set, + // it would be appropriate to query the user for passwords or + // confirmation via Polkit or a similar framework. + FlagAllowInteractiveAuthorization ) // Type represents the possible types of a D-Bus message. @@ -248,7 +255,7 @@ func (msg *Message) EncodeTo(out io.Writer, order binary.ByteOrder) error { // IsValid checks whether msg is a valid message and returns an // InvalidMessageError if it is not. 
func (msg *Message) IsValid() error { - if msg.Flags & ^(FlagNoAutoStart|FlagNoReplyExpected) != 0 { + if msg.Flags & ^(FlagNoAutoStart|FlagNoReplyExpected|FlagAllowInteractiveAuthorization) != 0 { return InvalidMessageError("invalid flags") } if msg.Type == 0 || msg.Type >= typeMax { diff --git a/libnetwork/Godeps/_workspace/src/github.com/godbus/dbus/object.go b/libnetwork/Godeps/_workspace/src/github.com/godbus/dbus/object.go index 7ef45da4c8..9573b7095a 100644 --- a/libnetwork/Godeps/_workspace/src/github.com/godbus/dbus/object.go +++ b/libnetwork/Godeps/_workspace/src/github.com/godbus/dbus/object.go @@ -27,6 +27,16 @@ func (o *Object) Call(method string, flags Flags, args ...interface{}) *Call { return <-o.Go(method, flags, make(chan *Call, 1), args...).Done } +// AddMatchSignal subscribes BusObject to signals from specified interface and +// method (member). +func (o *Object) AddMatchSignal(iface, member string) *Call { + return o.Call( + "org.freedesktop.DBus.AddMatch", + 0, + "type='signal',interface='"+iface+"',member='"+member+"'", + ) +} + // Go calls a method with the given arguments asynchronously. It returns a // Call structure representing this method call. The passed channel will // return the same value once the call is done. If ch is nil, a new channel diff --git a/libnetwork/Godeps/_workspace/src/github.com/godbus/dbus/prop/prop.go b/libnetwork/Godeps/_workspace/src/github.com/godbus/dbus/prop/prop.go deleted file mode 100644 index 834a1fa893..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/godbus/dbus/prop/prop.go +++ /dev/null @@ -1,264 +0,0 @@ -// Package prop provides the Properties struct which can be used to implement -// org.freedesktop.DBus.Properties. -package prop - -import ( - "github.com/godbus/dbus" - "github.com/godbus/dbus/introspect" - "sync" -) - -// EmitType controls how org.freedesktop.DBus.Properties.PropertiesChanged is -// emitted for a property. If it is EmitTrue, the signal is emitted. If it is -// EmitInvalidates, the signal is also emitted, but the new value of the property -// is not disclosed. -type EmitType byte - -const ( - EmitFalse EmitType = iota - EmitTrue - EmitInvalidates -) - -// ErrIfaceNotFound is the error returned to peers who try to access properties -// on interfaces that aren't found. -var ErrIfaceNotFound = dbus.NewError("org.freedesktop.DBus.Properties.Error.InterfaceNotFound", nil) - -// ErrPropNotFound is the error returned to peers trying to access properties -// that aren't found. -var ErrPropNotFound = dbus.NewError("org.freedesktop.DBus.Properties.Error.PropertyNotFound", nil) - -// ErrReadOnly is the error returned to peers trying to set a read-only -// property. -var ErrReadOnly = dbus.NewError("org.freedesktop.DBus.Properties.Error.ReadOnly", nil) - -// ErrInvalidArg is returned to peers if the type of the property that is being -// changed and the argument don't match. -var ErrInvalidArg = dbus.NewError("org.freedesktop.DBus.Properties.Error.InvalidArg", nil) - -// The introspection data for the org.freedesktop.DBus.Properties interface. 
-var IntrospectData = introspect.Interface{ - Name: "org.freedesktop.DBus.Properties", - Methods: []introspect.Method{ - { - Name: "Get", - Args: []introspect.Arg{ - {"interface", "s", "in"}, - {"property", "s", "in"}, - {"value", "v", "out"}, - }, - }, - { - Name: "GetAll", - Args: []introspect.Arg{ - {"interface", "s", "in"}, - {"props", "a{sv}", "out"}, - }, - }, - { - Name: "Set", - Args: []introspect.Arg{ - {"interface", "s", "in"}, - {"property", "s", "in"}, - {"value", "v", "in"}, - }, - }, - }, - Signals: []introspect.Signal{ - { - Name: "PropertiesChanged", - Args: []introspect.Arg{ - {"interface", "s", "out"}, - {"changed_properties", "a{sv}", "out"}, - {"invalidates_properties", "as", "out"}, - }, - }, - }, -} - -// The introspection data for the org.freedesktop.DBus.Properties interface, as -// a string. -const IntrospectDataString = ` - - - - - - - - - - - - - - - - - - - - - -` - -// Prop represents a single property. It is used for creating a Properties -// value. -type Prop struct { - // Initial value. Must be a DBus-representable type. - Value interface{} - - // If true, the value can be modified by calls to Set. - Writable bool - - // Controls how org.freedesktop.DBus.Properties.PropertiesChanged is - // emitted if this property changes. - Emit EmitType - - // If not nil, anytime this property is changed by Set, this function is - // called with an appropiate Change as its argument. If the returned error - // is not nil, it is sent back to the caller of Set and the property is not - // changed. - Callback func(*Change) *dbus.Error -} - -// Change represents a change of a property by a call to Set. -type Change struct { - Props *Properties - Iface string - Name string - Value interface{} -} - -// Properties is a set of values that can be made available to the message bus -// using the org.freedesktop.DBus.Properties interface. It is safe for -// concurrent use by multiple goroutines. -type Properties struct { - m map[string]map[string]*Prop - mut sync.RWMutex - conn *dbus.Conn - path dbus.ObjectPath -} - -// New returns a new Properties structure that manages the given properties. -// The key for the first-level map of props is the name of the interface; the -// second-level key is the name of the property. The returned structure will be -// exported as org.freedesktop.DBus.Properties on path. -func New(conn *dbus.Conn, path dbus.ObjectPath, props map[string]map[string]*Prop) *Properties { - p := &Properties{m: props, conn: conn, path: path} - conn.Export(p, path, "org.freedesktop.DBus.Properties") - return p -} - -// Get implements org.freedesktop.DBus.Properties.Get. -func (p *Properties) Get(iface, property string) (dbus.Variant, *dbus.Error) { - p.mut.RLock() - defer p.mut.RUnlock() - m, ok := p.m[iface] - if !ok { - return dbus.Variant{}, ErrIfaceNotFound - } - prop, ok := m[property] - if !ok { - return dbus.Variant{}, ErrPropNotFound - } - return dbus.MakeVariant(prop.Value), nil -} - -// GetAll implements org.freedesktop.DBus.Properties.GetAll. -func (p *Properties) GetAll(iface string) (map[string]dbus.Variant, *dbus.Error) { - p.mut.RLock() - defer p.mut.RUnlock() - m, ok := p.m[iface] - if !ok { - return nil, ErrIfaceNotFound - } - rm := make(map[string]dbus.Variant, len(m)) - for k, v := range m { - rm[k] = dbus.MakeVariant(v.Value) - } - return rm, nil -} - -// GetMust returns the value of the given property and panics if either the -// interface or the property name are invalid. 
-func (p *Properties) GetMust(iface, property string) interface{} { - p.mut.RLock() - defer p.mut.RUnlock() - return p.m[iface][property].Value -} - -// Introspection returns the introspection data that represents the properties -// of iface. -func (p *Properties) Introspection(iface string) []introspect.Property { - p.mut.RLock() - defer p.mut.RUnlock() - m := p.m[iface] - s := make([]introspect.Property, 0, len(m)) - for k, v := range m { - p := introspect.Property{Name: k, Type: dbus.SignatureOf(v.Value).String()} - if v.Writable { - p.Access = "readwrite" - } else { - p.Access = "read" - } - s = append(s, p) - } - return s -} - -// set sets the given property and emits PropertyChanged if appropiate. p.mut -// must already be locked. -func (p *Properties) set(iface, property string, v interface{}) { - prop := p.m[iface][property] - prop.Value = v - switch prop.Emit { - case EmitFalse: - // do nothing - case EmitInvalidates: - p.conn.Emit(p.path, "org.freedesktop.DBus.Properties.PropertiesChanged", - iface, map[string]dbus.Variant{}, []string{property}) - case EmitTrue: - p.conn.Emit(p.path, "org.freedesktop.DBus.Properties.PropertiesChanged", - iface, map[string]dbus.Variant{property: dbus.MakeVariant(v)}, - []string{}) - default: - panic("invalid value for EmitType") - } -} - -// Set implements org.freedesktop.Properties.Set. -func (p *Properties) Set(iface, property string, newv dbus.Variant) *dbus.Error { - p.mut.Lock() - defer p.mut.Unlock() - m, ok := p.m[iface] - if !ok { - return ErrIfaceNotFound - } - prop, ok := m[property] - if !ok { - return ErrPropNotFound - } - if !prop.Writable { - return ErrReadOnly - } - if newv.Signature() != dbus.SignatureOf(prop.Value) { - return ErrInvalidArg - } - if prop.Callback != nil { - err := prop.Callback(&Change{p, iface, property, newv.Value()}) - if err != nil { - return err - } - } - p.set(iface, property, newv.Value()) - return nil -} - -// SetMust sets the value of the given property and panics if the interface or -// the property name are invalid. 
-func (p *Properties) SetMust(iface, property string, v interface{}) { - p.mut.Lock() - p.set(iface, property, v) - p.mut.Unlock() -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/godbus/dbus/transport_tcp.go b/libnetwork/Godeps/_workspace/src/github.com/godbus/dbus/transport_tcp.go new file mode 100644 index 0000000000..dd1c8e59c5 --- /dev/null +++ b/libnetwork/Godeps/_workspace/src/github.com/godbus/dbus/transport_tcp.go @@ -0,0 +1,43 @@ +//+build !windows + +package dbus + +import ( + "errors" + "net" +) + +func init() { + transports["tcp"] = newTcpTransport +} + +func tcpFamily(keys string) (string, error) { + switch getKey(keys, "family") { + case "": + return "tcp", nil + case "ipv4": + return "tcp4", nil + case "ipv6": + return "tcp6", nil + default: + return "", errors.New("dbus: invalid tcp family (must be ipv4 or ipv6)") + } +} + +func newTcpTransport(keys string) (transport, error) { + host := getKey(keys, "host") + port := getKey(keys, "port") + if host == "" || port == "" { + return nil, errors.New("dbus: unsupported address (must set host and port)") + } + + protocol, err := tcpFamily(keys) + if err != nil { + return nil, err + } + socket, err := net.Dial(protocol, net.JoinHostPort(host, port)) + if err != nil { + return nil, err + } + return NewConn(socket) +} diff --git a/libnetwork/Godeps/_workspace/src/github.com/godbus/dbus/transport_unix.go b/libnetwork/Godeps/_workspace/src/github.com/godbus/dbus/transport_unix.go index 3fafeabb15..a1d00cbc12 100644 --- a/libnetwork/Godeps/_workspace/src/github.com/godbus/dbus/transport_unix.go +++ b/libnetwork/Godeps/_workspace/src/github.com/godbus/dbus/transport_unix.go @@ -1,4 +1,4 @@ -//+build !windows +//+build !windows,!solaris package dbus diff --git a/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/LICENSE b/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/LICENSE new file mode 100644 index 0000000000..1b1b1921ef --- /dev/null +++ b/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/LICENSE @@ -0,0 +1,31 @@ +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. +https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/all_test.go b/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/all_test.go deleted file mode 100644 index aa67b10cde..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/all_test.go +++ /dev/null @@ -1,2060 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "math" - "math/rand" - "reflect" - "runtime/debug" - "strings" - "testing" - "time" - - . "./testdata" - . "github.com/golang/protobuf/proto" -) - -var globalO *Buffer - -func old() *Buffer { - if globalO == nil { - globalO = NewBuffer(nil) - } - globalO.Reset() - return globalO -} - -func equalbytes(b1, b2 []byte, t *testing.T) { - if len(b1) != len(b2) { - t.Errorf("wrong lengths: 2*%d != %d", len(b1), len(b2)) - return - } - for i := 0; i < len(b1); i++ { - if b1[i] != b2[i] { - t.Errorf("bad byte[%d]:%x %x: %s %s", i, b1[i], b2[i], b1, b2) - } - } -} - -func initGoTestField() *GoTestField { - f := new(GoTestField) - f.Label = String("label") - f.Type = String("type") - return f -} - -// These are all structurally equivalent but the tag numbers differ. -// (It's remarkable that required, optional, and repeated all have -// 8 letters.) 
-func initGoTest_RequiredGroup() *GoTest_RequiredGroup { - return &GoTest_RequiredGroup{ - RequiredField: String("required"), - } -} - -func initGoTest_OptionalGroup() *GoTest_OptionalGroup { - return &GoTest_OptionalGroup{ - RequiredField: String("optional"), - } -} - -func initGoTest_RepeatedGroup() *GoTest_RepeatedGroup { - return &GoTest_RepeatedGroup{ - RequiredField: String("repeated"), - } -} - -func initGoTest(setdefaults bool) *GoTest { - pb := new(GoTest) - if setdefaults { - pb.F_BoolDefaulted = Bool(Default_GoTest_F_BoolDefaulted) - pb.F_Int32Defaulted = Int32(Default_GoTest_F_Int32Defaulted) - pb.F_Int64Defaulted = Int64(Default_GoTest_F_Int64Defaulted) - pb.F_Fixed32Defaulted = Uint32(Default_GoTest_F_Fixed32Defaulted) - pb.F_Fixed64Defaulted = Uint64(Default_GoTest_F_Fixed64Defaulted) - pb.F_Uint32Defaulted = Uint32(Default_GoTest_F_Uint32Defaulted) - pb.F_Uint64Defaulted = Uint64(Default_GoTest_F_Uint64Defaulted) - pb.F_FloatDefaulted = Float32(Default_GoTest_F_FloatDefaulted) - pb.F_DoubleDefaulted = Float64(Default_GoTest_F_DoubleDefaulted) - pb.F_StringDefaulted = String(Default_GoTest_F_StringDefaulted) - pb.F_BytesDefaulted = Default_GoTest_F_BytesDefaulted - pb.F_Sint32Defaulted = Int32(Default_GoTest_F_Sint32Defaulted) - pb.F_Sint64Defaulted = Int64(Default_GoTest_F_Sint64Defaulted) - } - - pb.Kind = GoTest_TIME.Enum() - pb.RequiredField = initGoTestField() - pb.F_BoolRequired = Bool(true) - pb.F_Int32Required = Int32(3) - pb.F_Int64Required = Int64(6) - pb.F_Fixed32Required = Uint32(32) - pb.F_Fixed64Required = Uint64(64) - pb.F_Uint32Required = Uint32(3232) - pb.F_Uint64Required = Uint64(6464) - pb.F_FloatRequired = Float32(3232) - pb.F_DoubleRequired = Float64(6464) - pb.F_StringRequired = String("string") - pb.F_BytesRequired = []byte("bytes") - pb.F_Sint32Required = Int32(-32) - pb.F_Sint64Required = Int64(-64) - pb.Requiredgroup = initGoTest_RequiredGroup() - - return pb -} - -func fail(msg string, b *bytes.Buffer, s string, t *testing.T) { - data := b.Bytes() - ld := len(data) - ls := len(s) / 2 - - fmt.Printf("fail %s ld=%d ls=%d\n", msg, ld, ls) - - // find the interesting spot - n - n := ls - if ld < ls { - n = ld - } - j := 0 - for i := 0; i < n; i++ { - bs := hex(s[j])*16 + hex(s[j+1]) - j += 2 - if data[i] == bs { - continue - } - n = i - break - } - l := n - 10 - if l < 0 { - l = 0 - } - h := n + 10 - - // find the interesting spot - n - fmt.Printf("is[%d]:", l) - for i := l; i < h; i++ { - if i >= ld { - fmt.Printf(" --") - continue - } - fmt.Printf(" %.2x", data[i]) - } - fmt.Printf("\n") - - fmt.Printf("sb[%d]:", l) - for i := l; i < h; i++ { - if i >= ls { - fmt.Printf(" --") - continue - } - bs := hex(s[j])*16 + hex(s[j+1]) - j += 2 - fmt.Printf(" %.2x", bs) - } - fmt.Printf("\n") - - t.Fail() - - // t.Errorf("%s: \ngood: %s\nbad: %x", msg, s, b.Bytes()) - // Print the output in a partially-decoded format; can - // be helpful when updating the test. It produces the output - // that is pasted, with minor edits, into the argument to verify(). 
- // data := b.Bytes() - // nesting := 0 - // for b.Len() > 0 { - // start := len(data) - b.Len() - // var u uint64 - // u, err := DecodeVarint(b) - // if err != nil { - // fmt.Printf("decode error on varint:", err) - // return - // } - // wire := u & 0x7 - // tag := u >> 3 - // switch wire { - // case WireVarint: - // v, err := DecodeVarint(b) - // if err != nil { - // fmt.Printf("decode error on varint:", err) - // return - // } - // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", - // data[start:len(data)-b.Len()], tag, wire, v) - // case WireFixed32: - // v, err := DecodeFixed32(b) - // if err != nil { - // fmt.Printf("decode error on fixed32:", err) - // return - // } - // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", - // data[start:len(data)-b.Len()], tag, wire, v) - // case WireFixed64: - // v, err := DecodeFixed64(b) - // if err != nil { - // fmt.Printf("decode error on fixed64:", err) - // return - // } - // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", - // data[start:len(data)-b.Len()], tag, wire, v) - // case WireBytes: - // nb, err := DecodeVarint(b) - // if err != nil { - // fmt.Printf("decode error on bytes:", err) - // return - // } - // after_tag := len(data) - b.Len() - // str := make([]byte, nb) - // _, err = b.Read(str) - // if err != nil { - // fmt.Printf("decode error on bytes:", err) - // return - // } - // fmt.Printf("\t\t\"%x\" \"%x\" // field %d, encoding %d (FIELD)\n", - // data[start:after_tag], str, tag, wire) - // case WireStartGroup: - // nesting++ - // fmt.Printf("\t\t\"%x\"\t\t// start group field %d level %d\n", - // data[start:len(data)-b.Len()], tag, nesting) - // case WireEndGroup: - // fmt.Printf("\t\t\"%x\"\t\t// end group field %d level %d\n", - // data[start:len(data)-b.Len()], tag, nesting) - // nesting-- - // default: - // fmt.Printf("unrecognized wire type %d\n", wire) - // return - // } - // } -} - -func hex(c uint8) uint8 { - if '0' <= c && c <= '9' { - return c - '0' - } - if 'a' <= c && c <= 'f' { - return 10 + c - 'a' - } - if 'A' <= c && c <= 'F' { - return 10 + c - 'A' - } - return 0 -} - -func equal(b []byte, s string, t *testing.T) bool { - if 2*len(b) != len(s) { - // fail(fmt.Sprintf("wrong lengths: 2*%d != %d", len(b), len(s)), b, s, t) - fmt.Printf("wrong lengths: 2*%d != %d\n", len(b), len(s)) - return false - } - for i, j := 0, 0; i < len(b); i, j = i+1, j+2 { - x := hex(s[j])*16 + hex(s[j+1]) - if b[i] != x { - // fail(fmt.Sprintf("bad byte[%d]:%x %x", i, b[i], x), b, s, t) - fmt.Printf("bad byte[%d]:%x %x", i, b[i], x) - return false - } - } - return true -} - -func overify(t *testing.T, pb *GoTest, expected string) { - o := old() - err := o.Marshal(pb) - if err != nil { - fmt.Printf("overify marshal-1 err = %v", err) - o.DebugPrint("", o.Bytes()) - t.Fatalf("expected = %s", expected) - } - if !equal(o.Bytes(), expected, t) { - o.DebugPrint("overify neq 1", o.Bytes()) - t.Fatalf("expected = %s", expected) - } - - // Now test Unmarshal by recreating the original buffer. 
- pbd := new(GoTest) - err = o.Unmarshal(pbd) - if err != nil { - t.Fatalf("overify unmarshal err = %v", err) - o.DebugPrint("", o.Bytes()) - t.Fatalf("string = %s", expected) - } - o.Reset() - err = o.Marshal(pbd) - if err != nil { - t.Errorf("overify marshal-2 err = %v", err) - o.DebugPrint("", o.Bytes()) - t.Fatalf("string = %s", expected) - } - if !equal(o.Bytes(), expected, t) { - o.DebugPrint("overify neq 2", o.Bytes()) - t.Fatalf("string = %s", expected) - } -} - -// Simple tests for numeric encode/decode primitives (varint, etc.) -func TestNumericPrimitives(t *testing.T) { - for i := uint64(0); i < 1e6; i += 111 { - o := old() - if o.EncodeVarint(i) != nil { - t.Error("EncodeVarint") - break - } - x, e := o.DecodeVarint() - if e != nil { - t.Fatal("DecodeVarint") - } - if x != i { - t.Fatal("varint decode fail:", i, x) - } - - o = old() - if o.EncodeFixed32(i) != nil { - t.Fatal("encFixed32") - } - x, e = o.DecodeFixed32() - if e != nil { - t.Fatal("decFixed32") - } - if x != i { - t.Fatal("fixed32 decode fail:", i, x) - } - - o = old() - if o.EncodeFixed64(i*1234567) != nil { - t.Error("encFixed64") - break - } - x, e = o.DecodeFixed64() - if e != nil { - t.Error("decFixed64") - break - } - if x != i*1234567 { - t.Error("fixed64 decode fail:", i*1234567, x) - break - } - - o = old() - i32 := int32(i - 12345) - if o.EncodeZigzag32(uint64(i32)) != nil { - t.Fatal("EncodeZigzag32") - } - x, e = o.DecodeZigzag32() - if e != nil { - t.Fatal("DecodeZigzag32") - } - if x != uint64(uint32(i32)) { - t.Fatal("zigzag32 decode fail:", i32, x) - } - - o = old() - i64 := int64(i - 12345) - if o.EncodeZigzag64(uint64(i64)) != nil { - t.Fatal("EncodeZigzag64") - } - x, e = o.DecodeZigzag64() - if e != nil { - t.Fatal("DecodeZigzag64") - } - if x != uint64(i64) { - t.Fatal("zigzag64 decode fail:", i64, x) - } - } -} - -// fakeMarshaler is a simple struct implementing Marshaler and Message interfaces. -type fakeMarshaler struct { - b []byte - err error -} - -func (f fakeMarshaler) Marshal() ([]byte, error) { - return f.b, f.err -} - -func (f fakeMarshaler) String() string { - return fmt.Sprintf("Bytes: %v Error: %v", f.b, f.err) -} - -func (f fakeMarshaler) ProtoMessage() {} - -func (f fakeMarshaler) Reset() {} - -// Simple tests for proto messages that implement the Marshaler interface. -func TestMarshalerEncoding(t *testing.T) { - tests := []struct { - name string - m Message - want []byte - wantErr error - }{ - { - name: "Marshaler that fails", - m: fakeMarshaler{ - err: errors.New("some marshal err"), - b: []byte{5, 6, 7}, - }, - // Since there's an error, nothing should be written to buffer. 
- want: nil, - wantErr: errors.New("some marshal err"), - }, - { - name: "Marshaler that succeeds", - m: fakeMarshaler{ - b: []byte{0, 1, 2, 3, 4, 127, 255}, - }, - want: []byte{0, 1, 2, 3, 4, 127, 255}, - wantErr: nil, - }, - } - for _, test := range tests { - b := NewBuffer(nil) - err := b.Marshal(test.m) - if !reflect.DeepEqual(test.wantErr, err) { - t.Errorf("%s: got err %v wanted %v", test.name, err, test.wantErr) - } - if !reflect.DeepEqual(test.want, b.Bytes()) { - t.Errorf("%s: got bytes %v wanted %v", test.name, b.Bytes(), test.want) - } - } -} - -// Simple tests for bytes -func TestBytesPrimitives(t *testing.T) { - o := old() - bytes := []byte{'n', 'o', 'w', ' ', 'i', 's', ' ', 't', 'h', 'e', ' ', 't', 'i', 'm', 'e'} - if o.EncodeRawBytes(bytes) != nil { - t.Error("EncodeRawBytes") - } - decb, e := o.DecodeRawBytes(false) - if e != nil { - t.Error("DecodeRawBytes") - } - equalbytes(bytes, decb, t) -} - -// Simple tests for strings -func TestStringPrimitives(t *testing.T) { - o := old() - s := "now is the time" - if o.EncodeStringBytes(s) != nil { - t.Error("enc_string") - } - decs, e := o.DecodeStringBytes() - if e != nil { - t.Error("dec_string") - } - if s != decs { - t.Error("string encode/decode fail:", s, decs) - } -} - -// Do we catch the "required bit not set" case? -func TestRequiredBit(t *testing.T) { - o := old() - pb := new(GoTest) - err := o.Marshal(pb) - if err == nil { - t.Error("did not catch missing required fields") - } else if strings.Index(err.Error(), "Kind") < 0 { - t.Error("wrong error type:", err) - } -} - -// Check that all fields are nil. -// Clearly silly, and a residue from a more interesting test with an earlier, -// different initialization property, but it once caught a compiler bug so -// it lives. -func checkInitialized(pb *GoTest, t *testing.T) { - if pb.F_BoolDefaulted != nil { - t.Error("New or Reset did not set boolean:", *pb.F_BoolDefaulted) - } - if pb.F_Int32Defaulted != nil { - t.Error("New or Reset did not set int32:", *pb.F_Int32Defaulted) - } - if pb.F_Int64Defaulted != nil { - t.Error("New or Reset did not set int64:", *pb.F_Int64Defaulted) - } - if pb.F_Fixed32Defaulted != nil { - t.Error("New or Reset did not set fixed32:", *pb.F_Fixed32Defaulted) - } - if pb.F_Fixed64Defaulted != nil { - t.Error("New or Reset did not set fixed64:", *pb.F_Fixed64Defaulted) - } - if pb.F_Uint32Defaulted != nil { - t.Error("New or Reset did not set uint32:", *pb.F_Uint32Defaulted) - } - if pb.F_Uint64Defaulted != nil { - t.Error("New or Reset did not set uint64:", *pb.F_Uint64Defaulted) - } - if pb.F_FloatDefaulted != nil { - t.Error("New or Reset did not set float:", *pb.F_FloatDefaulted) - } - if pb.F_DoubleDefaulted != nil { - t.Error("New or Reset did not set double:", *pb.F_DoubleDefaulted) - } - if pb.F_StringDefaulted != nil { - t.Error("New or Reset did not set string:", *pb.F_StringDefaulted) - } - if pb.F_BytesDefaulted != nil { - t.Error("New or Reset did not set bytes:", string(pb.F_BytesDefaulted)) - } - if pb.F_Sint32Defaulted != nil { - t.Error("New or Reset did not set int32:", *pb.F_Sint32Defaulted) - } - if pb.F_Sint64Defaulted != nil { - t.Error("New or Reset did not set int64:", *pb.F_Sint64Defaulted) - } -} - -// Does Reset() reset? 
-func TestReset(t *testing.T) { - pb := initGoTest(true) - // muck with some values - pb.F_BoolDefaulted = Bool(false) - pb.F_Int32Defaulted = Int32(237) - pb.F_Int64Defaulted = Int64(12346) - pb.F_Fixed32Defaulted = Uint32(32000) - pb.F_Fixed64Defaulted = Uint64(666) - pb.F_Uint32Defaulted = Uint32(323232) - pb.F_Uint64Defaulted = nil - pb.F_FloatDefaulted = nil - pb.F_DoubleDefaulted = Float64(0) - pb.F_StringDefaulted = String("gotcha") - pb.F_BytesDefaulted = []byte("asdfasdf") - pb.F_Sint32Defaulted = Int32(123) - pb.F_Sint64Defaulted = Int64(789) - pb.Reset() - checkInitialized(pb, t) -} - -// All required fields set, no defaults provided. -func TestEncodeDecode1(t *testing.T) { - pb := initGoTest(false) - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 0x20 - "714000000000000000"+ // field 14, encoding 1, value 0x40 - "78a019"+ // field 15, encoding 0, value 0xca0 = 3232 - "8001c032"+ // field 16, encoding 0, value 0x1940 = 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2, string "string" - "b304"+ // field 70, encoding 3, start group - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // field 70, encoding 4, end group - "aa0605"+"6279746573"+ // field 101, encoding 2, string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f") // field 103, encoding 0, 0x7f zigzag64 -} - -// All required fields set, defaults provided. 
-func TestEncodeDecode2(t *testing.T) { - pb := initGoTest(true) - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "c00201"+ // field 40, encoding 0, value 1 - "c80220"+ // field 41, encoding 0, value 32 - "d00240"+ // field 42, encoding 0, value 64 - "dd0240010000"+ // field 43, encoding 5, value 320 - "e1028002000000000000"+ // field 44, encoding 1, value 640 - "e8028019"+ // field 45, encoding 0, value 3200 - "f0028032"+ // field 46, encoding 0, value 6400 - "fd02e0659948"+ // field 47, encoding 5, value 314159.0 - "81030000000050971041"+ // field 48, encoding 1, value 271828.0 - "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" - "90193f"+ // field 402, encoding 0, value 63 - "98197f") // field 403, encoding 0, value 127 - -} - -// All default fields set to their default value by hand -func TestEncodeDecode3(t *testing.T) { - pb := initGoTest(false) - pb.F_BoolDefaulted = Bool(true) - pb.F_Int32Defaulted = Int32(32) - pb.F_Int64Defaulted = Int64(64) - pb.F_Fixed32Defaulted = Uint32(320) - pb.F_Fixed64Defaulted = Uint64(640) - pb.F_Uint32Defaulted = Uint32(3200) - pb.F_Uint64Defaulted = Uint64(6400) - pb.F_FloatDefaulted = Float32(314159) - pb.F_DoubleDefaulted = Float64(271828) - pb.F_StringDefaulted = String("hello, \"world!\"\n") - pb.F_BytesDefaulted = []byte("Bignose") - pb.F_Sint32Defaulted = Int32(-32) - pb.F_Sint64Defaulted = Int64(-64) - - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "c00201"+ // field 40, encoding 0, value 1 - "c80220"+ // field 41, encoding 0, value 32 - "d00240"+ // field 42, encoding 0, value 64 - "dd0240010000"+ // field 43, encoding 5, value 320 - "e1028002000000000000"+ // field 44, encoding 1, value 640 - "e8028019"+ // field 45, encoding 0, value 3200 - "f0028032"+ // field 46, encoding 0, value 6400 - "fd02e0659948"+ // field 47, encoding 5, value 314159.0 - "81030000000050971041"+ // field 48, encoding 1, value 
271828.0 - "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" - "90193f"+ // field 402, encoding 0, value 63 - "98197f") // field 403, encoding 0, value 127 - -} - -// All required fields set, defaults provided, all non-defaulted optional fields have values. -func TestEncodeDecode4(t *testing.T) { - pb := initGoTest(true) - pb.Table = String("hello") - pb.Param = Int32(7) - pb.OptionalField = initGoTestField() - pb.F_BoolOptional = Bool(true) - pb.F_Int32Optional = Int32(32) - pb.F_Int64Optional = Int64(64) - pb.F_Fixed32Optional = Uint32(3232) - pb.F_Fixed64Optional = Uint64(6464) - pb.F_Uint32Optional = Uint32(323232) - pb.F_Uint64Optional = Uint64(646464) - pb.F_FloatOptional = Float32(32.) - pb.F_DoubleOptional = Float64(64.) - pb.F_StringOptional = String("hello") - pb.F_BytesOptional = []byte("Bignose") - pb.F_Sint32Optional = Int32(-32) - pb.F_Sint64Optional = Int64(-64) - pb.Optionalgroup = initGoTest_OptionalGroup() - - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "1205"+"68656c6c6f"+ // field 2, encoding 2, string "hello" - "1807"+ // field 3, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "320d"+"0a056c6162656c120474797065"+ // field 6, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "f00101"+ // field 30, encoding 0, value 1 - "f80120"+ // field 31, encoding 0, value 32 - "800240"+ // field 32, encoding 0, value 64 - "8d02a00c0000"+ // field 33, encoding 5, value 3232 - "91024019000000000000"+ // field 34, encoding 1, value 6464 - "9802a0dd13"+ // field 35, encoding 0, value 323232 - "a002c0ba27"+ // field 36, encoding 0, value 646464 - "ad0200000042"+ // field 37, encoding 5, value 32.0 - "b1020000000000005040"+ // field 38, encoding 1, value 64.0 - "ba0205"+"68656c6c6f"+ // field 39, encoding 2, string "hello" - "c00201"+ // field 40, encoding 0, value 1 - "c80220"+ // field 41, encoding 0, value 32 - "d00240"+ // field 42, encoding 0, value 64 - "dd0240010000"+ // field 43, encoding 5, value 320 - "e1028002000000000000"+ // field 44, encoding 1, value 640 - "e8028019"+ // field 45, encoding 0, value 3200 - "f0028032"+ // field 46, encoding 0, value 6400 - "fd02e0659948"+ // field 47, encoding 5, value 314159.0 - "81030000000050971041"+ // field 48, encoding 1, value 271828.0 - "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "d305"+ // start group field 90 level 1 - 
"da0508"+"6f7074696f6e616c"+ // field 91, encoding 2, string "optional" - "d405"+ // end group field 90 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "ea1207"+"4269676e6f7365"+ // field 301, encoding 2, string "Bignose" - "f0123f"+ // field 302, encoding 0, value 63 - "f8127f"+ // field 303, encoding 0, value 127 - "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" - "90193f"+ // field 402, encoding 0, value 63 - "98197f") // field 403, encoding 0, value 127 - -} - -// All required fields set, defaults provided, all repeated fields given two values. -func TestEncodeDecode5(t *testing.T) { - pb := initGoTest(true) - pb.RepeatedField = []*GoTestField{initGoTestField(), initGoTestField()} - pb.F_BoolRepeated = []bool{false, true} - pb.F_Int32Repeated = []int32{32, 33} - pb.F_Int64Repeated = []int64{64, 65} - pb.F_Fixed32Repeated = []uint32{3232, 3333} - pb.F_Fixed64Repeated = []uint64{6464, 6565} - pb.F_Uint32Repeated = []uint32{323232, 333333} - pb.F_Uint64Repeated = []uint64{646464, 656565} - pb.F_FloatRepeated = []float32{32., 33.} - pb.F_DoubleRepeated = []float64{64., 65.} - pb.F_StringRepeated = []string{"hello", "sailor"} - pb.F_BytesRepeated = [][]byte{[]byte("big"), []byte("nose")} - pb.F_Sint32Repeated = []int32{32, -32} - pb.F_Sint64Repeated = []int64{64, -64} - pb.Repeatedgroup = []*GoTest_RepeatedGroup{initGoTest_RepeatedGroup(), initGoTest_RepeatedGroup()} - - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField) - "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "a00100"+ // field 20, encoding 0, value 0 - "a00101"+ // field 20, encoding 0, value 1 - "a80120"+ // field 21, encoding 0, value 32 - "a80121"+ // field 21, encoding 0, value 33 - "b00140"+ // field 22, encoding 0, value 64 - "b00141"+ // field 22, encoding 0, value 65 - "bd01a00c0000"+ // field 23, encoding 5, value 3232 - "bd01050d0000"+ // field 23, encoding 5, value 3333 - "c1014019000000000000"+ // field 24, encoding 1, value 6464 - "c101a519000000000000"+ // field 24, encoding 1, value 6565 - "c801a0dd13"+ // field 25, encoding 0, value 323232 - "c80195ac14"+ // field 25, encoding 0, value 333333 - "d001c0ba27"+ // field 26, encoding 0, value 646464 - "d001b58928"+ // field 26, encoding 0, value 656565 - "dd0100000042"+ // field 27, encoding 5, value 32.0 - "dd0100000442"+ // field 27, encoding 5, value 33.0 - "e1010000000000005040"+ // field 28, encoding 1, value 64.0 - "e1010000000000405040"+ // field 28, encoding 1, value 65.0 - "ea0105"+"68656c6c6f"+ // field 29, encoding 2, string "hello" - "ea0106"+"7361696c6f72"+ // field 29, encoding 2, string "sailor" - "c00201"+ // field 40, encoding 0, value 1 - "c80220"+ // field 41, encoding 0, value 32 - "d00240"+ // field 
42, encoding 0, value 64 - "dd0240010000"+ // field 43, encoding 5, value 320 - "e1028002000000000000"+ // field 44, encoding 1, value 640 - "e8028019"+ // field 45, encoding 0, value 3200 - "f0028032"+ // field 46, encoding 0, value 6400 - "fd02e0659948"+ // field 47, encoding 5, value 314159.0 - "81030000000050971041"+ // field 48, encoding 1, value 271828.0 - "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "8305"+ // start group field 80 level 1 - "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated" - "8405"+ // end group field 80 level 1 - "8305"+ // start group field 80 level 1 - "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated" - "8405"+ // end group field 80 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "ca0c03"+"626967"+ // field 201, encoding 2, string "big" - "ca0c04"+"6e6f7365"+ // field 201, encoding 2, string "nose" - "d00c40"+ // field 202, encoding 0, value 32 - "d00c3f"+ // field 202, encoding 0, value -32 - "d80c8001"+ // field 203, encoding 0, value 64 - "d80c7f"+ // field 203, encoding 0, value -64 - "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" - "90193f"+ // field 402, encoding 0, value 63 - "98197f") // field 403, encoding 0, value 127 - -} - -// All required fields set, all packed repeated fields given two values. -func TestEncodeDecode6(t *testing.T) { - pb := initGoTest(false) - pb.F_BoolRepeatedPacked = []bool{false, true} - pb.F_Int32RepeatedPacked = []int32{32, 33} - pb.F_Int64RepeatedPacked = []int64{64, 65} - pb.F_Fixed32RepeatedPacked = []uint32{3232, 3333} - pb.F_Fixed64RepeatedPacked = []uint64{6464, 6565} - pb.F_Uint32RepeatedPacked = []uint32{323232, 333333} - pb.F_Uint64RepeatedPacked = []uint64{646464, 656565} - pb.F_FloatRepeatedPacked = []float32{32., 33.} - pb.F_DoubleRepeatedPacked = []float64{64., 65.} - pb.F_Sint32RepeatedPacked = []int32{32, -32} - pb.F_Sint64RepeatedPacked = []int64{64, -64} - - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "9203020001"+ // field 50, encoding 2, 2 bytes, value 0, value 1 - "9a03022021"+ // field 51, encoding 2, 2 bytes, value 32, value 33 - "a203024041"+ // field 52, encoding 2, 2 bytes, value 64, value 65 - "aa0308"+ // field 53, encoding 2, 8 bytes - "a00c0000050d0000"+ // value 3232, value 3333 - "b20310"+ // field 54, encoding 2, 16 bytes - "4019000000000000a519000000000000"+ // value 6464, value 6565 - "ba0306"+ // field 55, encoding 2, 6 bytes - "a0dd1395ac14"+ // value 323232, value 333333 - "c20306"+ // field 56, encoding 2, 6 bytes - "c0ba27b58928"+ // value 646464, value 656565 - "ca0308"+ // field 57, 
encoding 2, 8 bytes - "0000004200000442"+ // value 32.0, value 33.0 - "d20310"+ // field 58, encoding 2, 16 bytes - "00000000000050400000000000405040"+ // value 64.0, value 65.0 - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "b21f02"+ // field 502, encoding 2, 2 bytes - "403f"+ // value 32, value -32 - "ba1f03"+ // field 503, encoding 2, 3 bytes - "80017f") // value 64, value -64 -} - -// Test that we can encode empty bytes fields. -func TestEncodeDecodeBytes1(t *testing.T) { - pb := initGoTest(false) - - // Create our bytes - pb.F_BytesRequired = []byte{} - pb.F_BytesRepeated = [][]byte{{}} - pb.F_BytesOptional = []byte{} - - d, err := Marshal(pb) - if err != nil { - t.Error(err) - } - - pbd := new(GoTest) - if err := Unmarshal(d, pbd); err != nil { - t.Error(err) - } - - if pbd.F_BytesRequired == nil || len(pbd.F_BytesRequired) != 0 { - t.Error("required empty bytes field is incorrect") - } - if pbd.F_BytesRepeated == nil || len(pbd.F_BytesRepeated) == 1 && pbd.F_BytesRepeated[0] == nil { - t.Error("repeated empty bytes field is incorrect") - } - if pbd.F_BytesOptional == nil || len(pbd.F_BytesOptional) != 0 { - t.Error("optional empty bytes field is incorrect") - } -} - -// Test that we encode nil-valued fields of a repeated bytes field correctly. -// Since entries in a repeated field cannot be nil, nil must mean empty value. -func TestEncodeDecodeBytes2(t *testing.T) { - pb := initGoTest(false) - - // Create our bytes - pb.F_BytesRepeated = [][]byte{nil} - - d, err := Marshal(pb) - if err != nil { - t.Error(err) - } - - pbd := new(GoTest) - if err := Unmarshal(d, pbd); err != nil { - t.Error(err) - } - - if len(pbd.F_BytesRepeated) != 1 || pbd.F_BytesRepeated[0] == nil { - t.Error("Unexpected value for repeated bytes field") - } -} - -// All required fields set, defaults provided, all repeated fields given two values. -func TestSkippingUnrecognizedFields(t *testing.T) { - o := old() - pb := initGoTestField() - - // Marshal it normally. - o.Marshal(pb) - - // Now new a GoSkipTest record. - skip := &GoSkipTest{ - SkipInt32: Int32(32), - SkipFixed32: Uint32(3232), - SkipFixed64: Uint64(6464), - SkipString: String("skipper"), - Skipgroup: &GoSkipTest_SkipGroup{ - GroupInt32: Int32(75), - GroupString: String("wxyz"), - }, - } - - // Marshal it into same buffer. - o.Marshal(skip) - - pbd := new(GoTestField) - o.Unmarshal(pbd) - - // The __unrecognized field should be a marshaling of GoSkipTest - skipd := new(GoSkipTest) - - o.SetBuf(pbd.XXX_unrecognized) - o.Unmarshal(skipd) - - if *skipd.SkipInt32 != *skip.SkipInt32 { - t.Error("skip int32", skipd.SkipInt32) - } - if *skipd.SkipFixed32 != *skip.SkipFixed32 { - t.Error("skip fixed32", skipd.SkipFixed32) - } - if *skipd.SkipFixed64 != *skip.SkipFixed64 { - t.Error("skip fixed64", skipd.SkipFixed64) - } - if *skipd.SkipString != *skip.SkipString { - t.Error("skip string", *skipd.SkipString) - } - if *skipd.Skipgroup.GroupInt32 != *skip.Skipgroup.GroupInt32 { - t.Error("skip group int32", skipd.Skipgroup.GroupInt32) - } - if *skipd.Skipgroup.GroupString != *skip.Skipgroup.GroupString { - t.Error("skip group string", *skipd.Skipgroup.GroupString) - } -} - -// Check that unrecognized fields of a submessage are preserved. 
-func TestSubmessageUnrecognizedFields(t *testing.T) { - nm := &NewMessage{ - Nested: &NewMessage_Nested{ - Name: String("Nigel"), - FoodGroup: String("carbs"), - }, - } - b, err := Marshal(nm) - if err != nil { - t.Fatalf("Marshal of NewMessage: %v", err) - } - - // Unmarshal into an OldMessage. - om := new(OldMessage) - if err := Unmarshal(b, om); err != nil { - t.Fatalf("Unmarshal to OldMessage: %v", err) - } - exp := &OldMessage{ - Nested: &OldMessage_Nested{ - Name: String("Nigel"), - // normal protocol buffer users should not do this - XXX_unrecognized: []byte("\x12\x05carbs"), - }, - } - if !Equal(om, exp) { - t.Errorf("om = %v, want %v", om, exp) - } - - // Clone the OldMessage. - om = Clone(om).(*OldMessage) - if !Equal(om, exp) { - t.Errorf("Clone(om) = %v, want %v", om, exp) - } - - // Marshal the OldMessage, then unmarshal it into an empty NewMessage. - if b, err = Marshal(om); err != nil { - t.Fatalf("Marshal of OldMessage: %v", err) - } - t.Logf("Marshal(%v) -> %q", om, b) - nm2 := new(NewMessage) - if err := Unmarshal(b, nm2); err != nil { - t.Fatalf("Unmarshal to NewMessage: %v", err) - } - if !Equal(nm, nm2) { - t.Errorf("NewMessage round-trip: %v => %v", nm, nm2) - } -} - -// Check that an int32 field can be upgraded to an int64 field. -func TestNegativeInt32(t *testing.T) { - om := &OldMessage{ - Num: Int32(-1), - } - b, err := Marshal(om) - if err != nil { - t.Fatalf("Marshal of OldMessage: %v", err) - } - - // Check the size. It should be 11 bytes; - // 1 for the field/wire type, and 10 for the negative number. - if len(b) != 11 { - t.Errorf("%v marshaled as %q, wanted 11 bytes", om, b) - } - - // Unmarshal into a NewMessage. - nm := new(NewMessage) - if err := Unmarshal(b, nm); err != nil { - t.Fatalf("Unmarshal to NewMessage: %v", err) - } - want := &NewMessage{ - Num: Int64(-1), - } - if !Equal(nm, want) { - t.Errorf("nm = %v, want %v", nm, want) - } -} - -// Check that we can grow an array (repeated field) to have many elements. -// This test doesn't depend only on our encoding; for variety, it makes sure -// we create, encode, and decode the correct contents explicitly. It's therefore -// a bit messier. -// This test also uses (and hence tests) the Marshal/Unmarshal functions -// instead of the methods. -func TestBigRepeated(t *testing.T) { - pb := initGoTest(true) - - // Create the arrays - const N = 50 // Internally the library starts much smaller. - pb.Repeatedgroup = make([]*GoTest_RepeatedGroup, N) - pb.F_Sint64Repeated = make([]int64, N) - pb.F_Sint32Repeated = make([]int32, N) - pb.F_BytesRepeated = make([][]byte, N) - pb.F_StringRepeated = make([]string, N) - pb.F_DoubleRepeated = make([]float64, N) - pb.F_FloatRepeated = make([]float32, N) - pb.F_Uint64Repeated = make([]uint64, N) - pb.F_Uint32Repeated = make([]uint32, N) - pb.F_Fixed64Repeated = make([]uint64, N) - pb.F_Fixed32Repeated = make([]uint32, N) - pb.F_Int64Repeated = make([]int64, N) - pb.F_Int32Repeated = make([]int32, N) - pb.F_BoolRepeated = make([]bool, N) - pb.RepeatedField = make([]*GoTestField, N) - - // Fill in the arrays with checkable values. 
- igtf := initGoTestField() - igtrg := initGoTest_RepeatedGroup() - for i := 0; i < N; i++ { - pb.Repeatedgroup[i] = igtrg - pb.F_Sint64Repeated[i] = int64(i) - pb.F_Sint32Repeated[i] = int32(i) - s := fmt.Sprint(i) - pb.F_BytesRepeated[i] = []byte(s) - pb.F_StringRepeated[i] = s - pb.F_DoubleRepeated[i] = float64(i) - pb.F_FloatRepeated[i] = float32(i) - pb.F_Uint64Repeated[i] = uint64(i) - pb.F_Uint32Repeated[i] = uint32(i) - pb.F_Fixed64Repeated[i] = uint64(i) - pb.F_Fixed32Repeated[i] = uint32(i) - pb.F_Int64Repeated[i] = int64(i) - pb.F_Int32Repeated[i] = int32(i) - pb.F_BoolRepeated[i] = i%2 == 0 - pb.RepeatedField[i] = igtf - } - - // Marshal. - buf, _ := Marshal(pb) - - // Now test Unmarshal by recreating the original buffer. - pbd := new(GoTest) - Unmarshal(buf, pbd) - - // Check the checkable values - for i := uint64(0); i < N; i++ { - if pbd.Repeatedgroup[i] == nil { // TODO: more checking? - t.Error("pbd.Repeatedgroup bad") - } - var x uint64 - x = uint64(pbd.F_Sint64Repeated[i]) - if x != i { - t.Error("pbd.F_Sint64Repeated bad", x, i) - } - x = uint64(pbd.F_Sint32Repeated[i]) - if x != i { - t.Error("pbd.F_Sint32Repeated bad", x, i) - } - s := fmt.Sprint(i) - equalbytes(pbd.F_BytesRepeated[i], []byte(s), t) - if pbd.F_StringRepeated[i] != s { - t.Error("pbd.F_Sint32Repeated bad", pbd.F_StringRepeated[i], i) - } - x = uint64(pbd.F_DoubleRepeated[i]) - if x != i { - t.Error("pbd.F_DoubleRepeated bad", x, i) - } - x = uint64(pbd.F_FloatRepeated[i]) - if x != i { - t.Error("pbd.F_FloatRepeated bad", x, i) - } - x = pbd.F_Uint64Repeated[i] - if x != i { - t.Error("pbd.F_Uint64Repeated bad", x, i) - } - x = uint64(pbd.F_Uint32Repeated[i]) - if x != i { - t.Error("pbd.F_Uint32Repeated bad", x, i) - } - x = pbd.F_Fixed64Repeated[i] - if x != i { - t.Error("pbd.F_Fixed64Repeated bad", x, i) - } - x = uint64(pbd.F_Fixed32Repeated[i]) - if x != i { - t.Error("pbd.F_Fixed32Repeated bad", x, i) - } - x = uint64(pbd.F_Int64Repeated[i]) - if x != i { - t.Error("pbd.F_Int64Repeated bad", x, i) - } - x = uint64(pbd.F_Int32Repeated[i]) - if x != i { - t.Error("pbd.F_Int32Repeated bad", x, i) - } - if pbd.F_BoolRepeated[i] != (i%2 == 0) { - t.Error("pbd.F_BoolRepeated bad", x, i) - } - if pbd.RepeatedField[i] == nil { // TODO: more checking? - t.Error("pbd.RepeatedField bad") - } - } -} - -// Verify we give a useful message when decoding to the wrong structure type. -func TestTypeMismatch(t *testing.T) { - pb1 := initGoTest(true) - - // Marshal - o := old() - o.Marshal(pb1) - - // Now Unmarshal it to the wrong type. 
- pb2 := initGoTestField() - err := o.Unmarshal(pb2) - if err == nil { - t.Error("expected error, got no error") - } else if !strings.Contains(err.Error(), "bad wiretype") { - t.Error("expected bad wiretype error, got", err) - } -} - -func encodeDecode(t *testing.T, in, out Message, msg string) { - buf, err := Marshal(in) - if err != nil { - t.Fatalf("failed marshaling %v: %v", msg, err) - } - if err := Unmarshal(buf, out); err != nil { - t.Fatalf("failed unmarshaling %v: %v", msg, err) - } -} - -func TestPackedNonPackedDecoderSwitching(t *testing.T) { - np, p := new(NonPackedTest), new(PackedTest) - - // non-packed -> packed - np.A = []int32{0, 1, 1, 2, 3, 5} - encodeDecode(t, np, p, "non-packed -> packed") - if !reflect.DeepEqual(np.A, p.B) { - t.Errorf("failed non-packed -> packed; np.A=%+v, p.B=%+v", np.A, p.B) - } - - // packed -> non-packed - np.Reset() - p.B = []int32{3, 1, 4, 1, 5, 9} - encodeDecode(t, p, np, "packed -> non-packed") - if !reflect.DeepEqual(p.B, np.A) { - t.Errorf("failed packed -> non-packed; p.B=%+v, np.A=%+v", p.B, np.A) - } -} - -func TestProto1RepeatedGroup(t *testing.T) { - pb := &MessageList{ - Message: []*MessageList_Message{ - { - Name: String("blah"), - Count: Int32(7), - }, - // NOTE: pb.Message[1] is a nil - nil, - }, - } - - o := old() - err := o.Marshal(pb) - if err == nil || !strings.Contains(err.Error(), "repeated field Message has nil") { - t.Fatalf("unexpected or no error when marshaling: %v", err) - } -} - -// Test that enums work. Checks for a bug introduced by making enums -// named types instead of int32: newInt32FromUint64 would crash with -// a type mismatch in reflect.PointTo. -func TestEnum(t *testing.T) { - pb := new(GoEnum) - pb.Foo = FOO_FOO1.Enum() - o := old() - if err := o.Marshal(pb); err != nil { - t.Fatal("error encoding enum:", err) - } - pb1 := new(GoEnum) - if err := o.Unmarshal(pb1); err != nil { - t.Fatal("error decoding enum:", err) - } - if *pb1.Foo != FOO_FOO1 { - t.Error("expected 7 but got ", *pb1.Foo) - } -} - -// Enum types have String methods. Check that enum fields can be printed. -// We don't care what the value actually is, just as long as it doesn't crash. -func TestPrintingNilEnumFields(t *testing.T) { - pb := new(GoEnum) - fmt.Sprintf("%+v", pb) -} - -// Verify that absent required fields cause Marshal/Unmarshal to return errors. -func TestRequiredFieldEnforcement(t *testing.T) { - pb := new(GoTestField) - _, err := Marshal(pb) - if err == nil { - t.Error("marshal: expected error, got nil") - } else if strings.Index(err.Error(), "Label") < 0 { - t.Errorf("marshal: bad error type: %v", err) - } - - // A slightly sneaky, yet valid, proto. It encodes the same required field twice, - // so simply counting the required fields is insufficient. - // field 1, encoding 2, value "hi" - buf := []byte("\x0A\x02hi\x0A\x02hi") - err = Unmarshal(buf, pb) - if err == nil { - t.Error("unmarshal: expected error, got nil") - } else if strings.Index(err.Error(), "{Unknown}") < 0 { - t.Errorf("unmarshal: bad error type: %v", err) - } -} - -func TestTypedNilMarshal(t *testing.T) { - // A typed nil should return ErrNil and not crash. - _, err := Marshal((*GoEnum)(nil)) - if err != ErrNil { - t.Errorf("Marshal: got err %v, want ErrNil", err) - } -} - -// A type that implements the Marshaler interface, but is not nillable. 
-type nonNillableInt uint64 - -func (nni nonNillableInt) Marshal() ([]byte, error) { - return EncodeVarint(uint64(nni)), nil -} - -type NNIMessage struct { - nni nonNillableInt -} - -func (*NNIMessage) Reset() {} -func (*NNIMessage) String() string { return "" } -func (*NNIMessage) ProtoMessage() {} - -// A type that implements the Marshaler interface and is nillable. -type nillableMessage struct { - x uint64 -} - -func (nm *nillableMessage) Marshal() ([]byte, error) { - return EncodeVarint(nm.x), nil -} - -type NMMessage struct { - nm *nillableMessage -} - -func (*NMMessage) Reset() {} -func (*NMMessage) String() string { return "" } -func (*NMMessage) ProtoMessage() {} - -// Verify a type that uses the Marshaler interface, but has a nil pointer. -func TestNilMarshaler(t *testing.T) { - // Try a struct with a Marshaler field that is nil. - // It should be directly marshable. - nmm := new(NMMessage) - if _, err := Marshal(nmm); err != nil { - t.Error("unexpected error marshaling nmm: ", err) - } - - // Try a struct with a Marshaler field that is not nillable. - nnim := new(NNIMessage) - nnim.nni = 7 - var _ Marshaler = nnim.nni // verify it is truly a Marshaler - if _, err := Marshal(nnim); err != nil { - t.Error("unexpected error marshaling nnim: ", err) - } -} - -func TestAllSetDefaults(t *testing.T) { - // Exercise SetDefaults with all scalar field types. - m := &Defaults{ - // NaN != NaN, so override that here. - F_Nan: Float32(1.7), - } - expected := &Defaults{ - F_Bool: Bool(true), - F_Int32: Int32(32), - F_Int64: Int64(64), - F_Fixed32: Uint32(320), - F_Fixed64: Uint64(640), - F_Uint32: Uint32(3200), - F_Uint64: Uint64(6400), - F_Float: Float32(314159), - F_Double: Float64(271828), - F_String: String(`hello, "world!"` + "\n"), - F_Bytes: []byte("Bignose"), - F_Sint32: Int32(-32), - F_Sint64: Int64(-64), - F_Enum: Defaults_GREEN.Enum(), - F_Pinf: Float32(float32(math.Inf(1))), - F_Ninf: Float32(float32(math.Inf(-1))), - F_Nan: Float32(1.7), - StrZero: String(""), - } - SetDefaults(m) - if !Equal(m, expected) { - t.Errorf("SetDefaults failed\n got %v\nwant %v", m, expected) - } -} - -func TestSetDefaultsWithSetField(t *testing.T) { - // Check that a set value is not overridden. 
- m := &Defaults{ - F_Int32: Int32(12), - } - SetDefaults(m) - if v := m.GetF_Int32(); v != 12 { - t.Errorf("m.FInt32 = %v, want 12", v) - } -} - -func TestSetDefaultsWithSubMessage(t *testing.T) { - m := &OtherMessage{ - Key: Int64(123), - Inner: &InnerMessage{ - Host: String("gopher"), - }, - } - expected := &OtherMessage{ - Key: Int64(123), - Inner: &InnerMessage{ - Host: String("gopher"), - Port: Int32(4000), - }, - } - SetDefaults(m) - if !Equal(m, expected) { - t.Errorf("\n got %v\nwant %v", m, expected) - } -} - -func TestSetDefaultsWithRepeatedSubMessage(t *testing.T) { - m := &MyMessage{ - RepInner: []*InnerMessage{{}}, - } - expected := &MyMessage{ - RepInner: []*InnerMessage{{ - Port: Int32(4000), - }}, - } - SetDefaults(m) - if !Equal(m, expected) { - t.Errorf("\n got %v\nwant %v", m, expected) - } -} - -func TestMaximumTagNumber(t *testing.T) { - m := &MaxTag{ - LastField: String("natural goat essence"), - } - buf, err := Marshal(m) - if err != nil { - t.Fatalf("proto.Marshal failed: %v", err) - } - m2 := new(MaxTag) - if err := Unmarshal(buf, m2); err != nil { - t.Fatalf("proto.Unmarshal failed: %v", err) - } - if got, want := m2.GetLastField(), *m.LastField; got != want { - t.Errorf("got %q, want %q", got, want) - } -} - -func TestJSON(t *testing.T) { - m := &MyMessage{ - Count: Int32(4), - Pet: []string{"bunny", "kitty"}, - Inner: &InnerMessage{ - Host: String("cauchy"), - }, - Bikeshed: MyMessage_GREEN.Enum(), - } - const expected = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":1}` - - b, err := json.Marshal(m) - if err != nil { - t.Fatalf("json.Marshal failed: %v", err) - } - s := string(b) - if s != expected { - t.Errorf("got %s\nwant %s", s, expected) - } - - received := new(MyMessage) - if err := json.Unmarshal(b, received); err != nil { - t.Fatalf("json.Unmarshal failed: %v", err) - } - if !Equal(received, m) { - t.Fatalf("got %s, want %s", received, m) - } - - // Test unmarshalling of JSON with symbolic enum name. - const old = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":"GREEN"}` - received.Reset() - if err := json.Unmarshal([]byte(old), received); err != nil { - t.Fatalf("json.Unmarshal failed: %v", err) - } - if !Equal(received, m) { - t.Fatalf("got %s, want %s", received, m) - } -} - -func TestBadWireType(t *testing.T) { - b := []byte{7<<3 | 6} // field 7, wire type 6 - pb := new(OtherMessage) - if err := Unmarshal(b, pb); err == nil { - t.Errorf("Unmarshal did not fail") - } else if !strings.Contains(err.Error(), "unknown wire type") { - t.Errorf("wrong error: %v", err) - } -} - -func TestBytesWithInvalidLength(t *testing.T) { - // If a byte sequence has an invalid (negative) length, Unmarshal should not panic. - b := []byte{2<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0} - Unmarshal(b, new(MyMessage)) -} - -func TestLengthOverflow(t *testing.T) { - // Overflowing a length should not panic. - b := []byte{2<<3 | WireBytes, 1, 1, 3<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x01} - Unmarshal(b, new(MyMessage)) -} - -func TestVarintOverflow(t *testing.T) { - // Overflowing a 64-bit length should not be allowed. 
- b := []byte{1<<3 | WireVarint, 0x01, 3<<3 | WireBytes, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x01} - if err := Unmarshal(b, new(MyMessage)); err == nil { - t.Fatalf("Overflowed uint64 length without error") - } -} - -func TestUnmarshalFuzz(t *testing.T) { - const N = 1000 - seed := time.Now().UnixNano() - t.Logf("RNG seed is %d", seed) - rng := rand.New(rand.NewSource(seed)) - buf := make([]byte, 20) - for i := 0; i < N; i++ { - for j := range buf { - buf[j] = byte(rng.Intn(256)) - } - fuzzUnmarshal(t, buf) - } -} - -func TestMergeMessages(t *testing.T) { - pb := &MessageList{Message: []*MessageList_Message{{Name: String("x"), Count: Int32(1)}}} - data, err := Marshal(pb) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - - pb1 := new(MessageList) - if err := Unmarshal(data, pb1); err != nil { - t.Fatalf("first Unmarshal: %v", err) - } - if err := Unmarshal(data, pb1); err != nil { - t.Fatalf("second Unmarshal: %v", err) - } - if len(pb1.Message) != 1 { - t.Errorf("two Unmarshals produced %d Messages, want 1", len(pb1.Message)) - } - - pb2 := new(MessageList) - if err := UnmarshalMerge(data, pb2); err != nil { - t.Fatalf("first UnmarshalMerge: %v", err) - } - if err := UnmarshalMerge(data, pb2); err != nil { - t.Fatalf("second UnmarshalMerge: %v", err) - } - if len(pb2.Message) != 2 { - t.Errorf("two UnmarshalMerges produced %d Messages, want 2", len(pb2.Message)) - } -} - -func TestExtensionMarshalOrder(t *testing.T) { - m := &MyMessage{Count: Int(123)} - if err := SetExtension(m, E_Ext_More, &Ext{Data: String("alpha")}); err != nil { - t.Fatalf("SetExtension: %v", err) - } - if err := SetExtension(m, E_Ext_Text, String("aleph")); err != nil { - t.Fatalf("SetExtension: %v", err) - } - if err := SetExtension(m, E_Ext_Number, Int32(1)); err != nil { - t.Fatalf("SetExtension: %v", err) - } - - // Serialize m several times, and check we get the same bytes each time. - var orig []byte - for i := 0; i < 100; i++ { - b, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - if i == 0 { - orig = b - continue - } - if !bytes.Equal(b, orig) { - t.Errorf("Bytes differ on attempt #%d", i) - } - } -} - -// Many extensions, because small maps might not iterate differently on each iteration. -var exts = []*ExtensionDesc{ - E_X201, - E_X202, - E_X203, - E_X204, - E_X205, - E_X206, - E_X207, - E_X208, - E_X209, - E_X210, - E_X211, - E_X212, - E_X213, - E_X214, - E_X215, - E_X216, - E_X217, - E_X218, - E_X219, - E_X220, - E_X221, - E_X222, - E_X223, - E_X224, - E_X225, - E_X226, - E_X227, - E_X228, - E_X229, - E_X230, - E_X231, - E_X232, - E_X233, - E_X234, - E_X235, - E_X236, - E_X237, - E_X238, - E_X239, - E_X240, - E_X241, - E_X242, - E_X243, - E_X244, - E_X245, - E_X246, - E_X247, - E_X248, - E_X249, - E_X250, -} - -func TestMessageSetMarshalOrder(t *testing.T) { - m := &MyMessageSet{} - for _, x := range exts { - if err := SetExtension(m, x, &Empty{}); err != nil { - t.Fatalf("SetExtension: %v", err) - } - } - - buf, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - - // Serialize m several times, and check we get the same bytes each time. 
- for i := 0; i < 10; i++ { - b1, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - if !bytes.Equal(b1, buf) { - t.Errorf("Bytes differ on re-Marshal #%d", i) - } - - m2 := &MyMessageSet{} - if err := Unmarshal(buf, m2); err != nil { - t.Errorf("Unmarshal: %v", err) - } - b2, err := Marshal(m2) - if err != nil { - t.Errorf("re-Marshal: %v", err) - } - if !bytes.Equal(b2, buf) { - t.Errorf("Bytes differ on round-trip #%d", i) - } - } -} - -func TestUnmarshalMergesMessages(t *testing.T) { - // If a nested message occurs twice in the input, - // the fields should be merged when decoding. - a := &OtherMessage{ - Key: Int64(123), - Inner: &InnerMessage{ - Host: String("polhode"), - Port: Int32(1234), - }, - } - aData, err := Marshal(a) - if err != nil { - t.Fatalf("Marshal(a): %v", err) - } - b := &OtherMessage{ - Weight: Float32(1.2), - Inner: &InnerMessage{ - Host: String("herpolhode"), - Connected: Bool(true), - }, - } - bData, err := Marshal(b) - if err != nil { - t.Fatalf("Marshal(b): %v", err) - } - want := &OtherMessage{ - Key: Int64(123), - Weight: Float32(1.2), - Inner: &InnerMessage{ - Host: String("herpolhode"), - Port: Int32(1234), - Connected: Bool(true), - }, - } - got := new(OtherMessage) - if err := Unmarshal(append(aData, bData...), got); err != nil { - t.Fatalf("Unmarshal: %v", err) - } - if !Equal(got, want) { - t.Errorf("\n got %v\nwant %v", got, want) - } -} - -func TestEncodingSizes(t *testing.T) { - tests := []struct { - m Message - n int - }{ - {&Defaults{F_Int32: Int32(math.MaxInt32)}, 6}, - {&Defaults{F_Int32: Int32(math.MinInt32)}, 11}, - {&Defaults{F_Uint32: Uint32(uint32(math.MaxInt32) + 1)}, 6}, - {&Defaults{F_Uint32: Uint32(math.MaxUint32)}, 6}, - } - for _, test := range tests { - b, err := Marshal(test.m) - if err != nil { - t.Errorf("Marshal(%v): %v", test.m, err) - continue - } - if len(b) != test.n { - t.Errorf("Marshal(%v) yielded %d bytes, want %d bytes", test.m, len(b), test.n) - } - } -} - -func TestRequiredNotSetError(t *testing.T) { - pb := initGoTest(false) - pb.RequiredField.Label = nil - pb.F_Int32Required = nil - pb.F_Int64Required = nil - - expected := "0807" + // field 1, encoding 0, value 7 - "2206" + "120474797065" + // field 4, encoding 2 (GoTestField) - "5001" + // field 10, encoding 0, value 1 - "6d20000000" + // field 13, encoding 5, value 0x20 - "714000000000000000" + // field 14, encoding 1, value 0x40 - "78a019" + // field 15, encoding 0, value 0xca0 = 3232 - "8001c032" + // field 16, encoding 0, value 0x1940 = 6464 - "8d0100004a45" + // field 17, encoding 5, value 3232.0 - "9101000000000040b940" + // field 18, encoding 1, value 6464.0 - "9a0106" + "737472696e67" + // field 19, encoding 2, string "string" - "b304" + // field 70, encoding 3, start group - "ba0408" + "7265717569726564" + // field 71, encoding 2, string "required" - "b404" + // field 70, encoding 4, end group - "aa0605" + "6279746573" + // field 101, encoding 2, string "bytes" - "b0063f" + // field 102, encoding 0, 0x3f zigzag32 - "b8067f" // field 103, encoding 0, 0x7f zigzag64 - - o := old() - bytes, err := Marshal(pb) - if _, ok := err.(*RequiredNotSetError); !ok { - fmt.Printf("marshal-1 err = %v, want *RequiredNotSetError", err) - o.DebugPrint("", bytes) - t.Fatalf("expected = %s", expected) - } - if strings.Index(err.Error(), "RequiredField.Label") < 0 { - t.Errorf("marshal-1 wrong err msg: %v", err) - } - if !equal(bytes, expected, t) { - o.DebugPrint("neq 1", bytes) - t.Fatalf("expected = %s", expected) - } - - // Now test Unmarshal by 
recreating the original buffer. - pbd := new(GoTest) - err = Unmarshal(bytes, pbd) - if _, ok := err.(*RequiredNotSetError); !ok { - t.Fatalf("unmarshal err = %v, want *RequiredNotSetError", err) - o.DebugPrint("", bytes) - t.Fatalf("string = %s", expected) - } - if strings.Index(err.Error(), "RequiredField.{Unknown}") < 0 { - t.Errorf("unmarshal wrong err msg: %v", err) - } - bytes, err = Marshal(pbd) - if _, ok := err.(*RequiredNotSetError); !ok { - t.Errorf("marshal-2 err = %v, want *RequiredNotSetError", err) - o.DebugPrint("", bytes) - t.Fatalf("string = %s", expected) - } - if strings.Index(err.Error(), "RequiredField.Label") < 0 { - t.Errorf("marshal-2 wrong err msg: %v", err) - } - if !equal(bytes, expected, t) { - o.DebugPrint("neq 2", bytes) - t.Fatalf("string = %s", expected) - } -} - -func fuzzUnmarshal(t *testing.T, data []byte) { - defer func() { - if e := recover(); e != nil { - t.Errorf("These bytes caused a panic: %+v", data) - t.Logf("Stack:\n%s", debug.Stack()) - t.FailNow() - } - }() - - pb := new(MyMessage) - Unmarshal(data, pb) -} - -func TestMapFieldMarshal(t *testing.T) { - m := &MessageWithMap{ - NameMapping: map[int32]string{ - 1: "Rob", - 4: "Ian", - 8: "Dave", - }, - } - b, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - - // b should be the concatenation of these three byte sequences in some order. - parts := []string{ - "\n\a\b\x01\x12\x03Rob", - "\n\a\b\x04\x12\x03Ian", - "\n\b\b\x08\x12\x04Dave", - } - ok := false - for i := range parts { - for j := range parts { - if j == i { - continue - } - for k := range parts { - if k == i || k == j { - continue - } - try := parts[i] + parts[j] + parts[k] - if bytes.Equal(b, []byte(try)) { - ok = true - break - } - } - } - } - if !ok { - t.Fatalf("Incorrect Marshal output.\n got %q\nwant %q (or a permutation of that)", b, parts[0]+parts[1]+parts[2]) - } - t.Logf("FYI b: %q", b) - - (new(Buffer)).DebugPrint("Dump of b", b) -} - -func TestMapFieldRoundTrips(t *testing.T) { - m := &MessageWithMap{ - NameMapping: map[int32]string{ - 1: "Rob", - 4: "Ian", - 8: "Dave", - }, - MsgMapping: map[int64]*FloatingPoint{ - 0x7001: &FloatingPoint{F: Float64(2.0)}, - }, - ByteMapping: map[bool][]byte{ - false: []byte("that's not right!"), - true: []byte("aye, 'tis true!"), - }, - } - b, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - t.Logf("FYI b: %q", b) - m2 := new(MessageWithMap) - if err := Unmarshal(b, m2); err != nil { - t.Fatalf("Unmarshal: %v", err) - } - for _, pair := range [][2]interface{}{ - {m.NameMapping, m2.NameMapping}, - {m.MsgMapping, m2.MsgMapping}, - {m.ByteMapping, m2.ByteMapping}, - } { - if !reflect.DeepEqual(pair[0], pair[1]) { - t.Errorf("Map did not survive a round trip.\ninitial: %v\n final: %v", pair[0], pair[1]) - } - } -} - -// Benchmarks - -func testMsg() *GoTest { - pb := initGoTest(true) - const N = 1000 // Internally the library starts much smaller. 
- pb.F_Int32Repeated = make([]int32, N) - pb.F_DoubleRepeated = make([]float64, N) - for i := 0; i < N; i++ { - pb.F_Int32Repeated[i] = int32(i) - pb.F_DoubleRepeated[i] = float64(i) - } - return pb -} - -func bytesMsg() *GoTest { - pb := initGoTest(true) - buf := make([]byte, 4000) - for i := range buf { - buf[i] = byte(i) - } - pb.F_BytesDefaulted = buf - return pb -} - -func benchmarkMarshal(b *testing.B, pb Message, marshal func(Message) ([]byte, error)) { - d, _ := marshal(pb) - b.SetBytes(int64(len(d))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - marshal(pb) - } -} - -func benchmarkBufferMarshal(b *testing.B, pb Message) { - p := NewBuffer(nil) - benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { - p.Reset() - err := p.Marshal(pb0) - return p.Bytes(), err - }) -} - -func benchmarkSize(b *testing.B, pb Message) { - benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { - Size(pb) - return nil, nil - }) -} - -func newOf(pb Message) Message { - in := reflect.ValueOf(pb) - if in.IsNil() { - return pb - } - return reflect.New(in.Type().Elem()).Interface().(Message) -} - -func benchmarkUnmarshal(b *testing.B, pb Message, unmarshal func([]byte, Message) error) { - d, _ := Marshal(pb) - b.SetBytes(int64(len(d))) - pbd := newOf(pb) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - unmarshal(d, pbd) - } -} - -func benchmarkBufferUnmarshal(b *testing.B, pb Message) { - p := NewBuffer(nil) - benchmarkUnmarshal(b, pb, func(d []byte, pb0 Message) error { - p.SetBuf(d) - return p.Unmarshal(pb0) - }) -} - -// Benchmark{Marshal,BufferMarshal,Size,Unmarshal,BufferUnmarshal}{,Bytes} - -func BenchmarkMarshal(b *testing.B) { - benchmarkMarshal(b, testMsg(), Marshal) -} - -func BenchmarkBufferMarshal(b *testing.B) { - benchmarkBufferMarshal(b, testMsg()) -} - -func BenchmarkSize(b *testing.B) { - benchmarkSize(b, testMsg()) -} - -func BenchmarkUnmarshal(b *testing.B) { - benchmarkUnmarshal(b, testMsg(), Unmarshal) -} - -func BenchmarkBufferUnmarshal(b *testing.B) { - benchmarkBufferUnmarshal(b, testMsg()) -} - -func BenchmarkMarshalBytes(b *testing.B) { - benchmarkMarshal(b, bytesMsg(), Marshal) -} - -func BenchmarkBufferMarshalBytes(b *testing.B) { - benchmarkBufferMarshal(b, bytesMsg()) -} - -func BenchmarkSizeBytes(b *testing.B) { - benchmarkSize(b, bytesMsg()) -} - -func BenchmarkUnmarshalBytes(b *testing.B) { - benchmarkUnmarshal(b, bytesMsg(), Unmarshal) -} - -func BenchmarkBufferUnmarshalBytes(b *testing.B) { - benchmarkBufferUnmarshal(b, bytesMsg()) -} - -func BenchmarkUnmarshalUnrecognizedFields(b *testing.B) { - b.StopTimer() - pb := initGoTestField() - skip := &GoSkipTest{ - SkipInt32: Int32(32), - SkipFixed32: Uint32(3232), - SkipFixed64: Uint64(6464), - SkipString: String("skipper"), - Skipgroup: &GoSkipTest_SkipGroup{ - GroupInt32: Int32(75), - GroupString: String("wxyz"), - }, - } - - pbd := new(GoTestField) - p := NewBuffer(nil) - p.Marshal(pb) - p.Marshal(skip) - p2 := NewBuffer(nil) - - b.StartTimer() - for i := 0; i < b.N; i++ { - p2.SetBuf(p.Bytes()) - p2.Unmarshal(pbd) - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone_test.go b/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone_test.go deleted file mode 100644 index 1ac177d216..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone_test.go +++ /dev/null @@ -1,227 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "testing" - - "github.com/golang/protobuf/proto" - - pb "./testdata" -) - -var cloneTestMessage = &pb.MyMessage{ - Count: proto.Int32(42), - Name: proto.String("Dave"), - Pet: []string{"bunny", "kitty", "horsey"}, - Inner: &pb.InnerMessage{ - Host: proto.String("niles"), - Port: proto.Int32(9099), - Connected: proto.Bool(true), - }, - Others: []*pb.OtherMessage{ - { - Value: []byte("some bytes"), - }, - }, - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: proto.Int32(6), - }, - RepBytes: [][]byte{[]byte("sham"), []byte("wow")}, -} - -func init() { - ext := &pb.Ext{ - Data: proto.String("extension"), - } - if err := proto.SetExtension(cloneTestMessage, pb.E_Ext_More, ext); err != nil { - panic("SetExtension: " + err.Error()) - } -} - -func TestClone(t *testing.T) { - m := proto.Clone(cloneTestMessage).(*pb.MyMessage) - if !proto.Equal(m, cloneTestMessage) { - t.Errorf("Clone(%v) = %v", cloneTestMessage, m) - } - - // Verify it was a deep copy. - *m.Inner.Port++ - if proto.Equal(m, cloneTestMessage) { - t.Error("Mutating clone changed the original") - } - // Byte fields and repeated fields should be copied. 
- if &m.Pet[0] == &cloneTestMessage.Pet[0] { - t.Error("Pet: repeated field not copied") - } - if &m.Others[0] == &cloneTestMessage.Others[0] { - t.Error("Others: repeated field not copied") - } - if &m.Others[0].Value[0] == &cloneTestMessage.Others[0].Value[0] { - t.Error("Others[0].Value: bytes field not copied") - } - if &m.RepBytes[0] == &cloneTestMessage.RepBytes[0] { - t.Error("RepBytes: repeated field not copied") - } - if &m.RepBytes[0][0] == &cloneTestMessage.RepBytes[0][0] { - t.Error("RepBytes[0]: bytes field not copied") - } -} - -func TestCloneNil(t *testing.T) { - var m *pb.MyMessage - if c := proto.Clone(m); !proto.Equal(m, c) { - t.Errorf("Clone(%v) = %v", m, c) - } -} - -var mergeTests = []struct { - src, dst, want proto.Message -}{ - { - src: &pb.MyMessage{ - Count: proto.Int32(42), - }, - dst: &pb.MyMessage{ - Name: proto.String("Dave"), - }, - want: &pb.MyMessage{ - Count: proto.Int32(42), - Name: proto.String("Dave"), - }, - }, - { - src: &pb.MyMessage{ - Inner: &pb.InnerMessage{ - Host: proto.String("hey"), - Connected: proto.Bool(true), - }, - Pet: []string{"horsey"}, - Others: []*pb.OtherMessage{ - { - Value: []byte("some bytes"), - }, - }, - }, - dst: &pb.MyMessage{ - Inner: &pb.InnerMessage{ - Host: proto.String("niles"), - Port: proto.Int32(9099), - }, - Pet: []string{"bunny", "kitty"}, - Others: []*pb.OtherMessage{ - { - Key: proto.Int64(31415926535), - }, - { - // Explicitly test a src=nil field - Inner: nil, - }, - }, - }, - want: &pb.MyMessage{ - Inner: &pb.InnerMessage{ - Host: proto.String("hey"), - Connected: proto.Bool(true), - Port: proto.Int32(9099), - }, - Pet: []string{"bunny", "kitty", "horsey"}, - Others: []*pb.OtherMessage{ - { - Key: proto.Int64(31415926535), - }, - {}, - { - Value: []byte("some bytes"), - }, - }, - }, - }, - { - src: &pb.MyMessage{ - RepBytes: [][]byte{[]byte("wow")}, - }, - dst: &pb.MyMessage{ - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: proto.Int32(6), - }, - RepBytes: [][]byte{[]byte("sham")}, - }, - want: &pb.MyMessage{ - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: proto.Int32(6), - }, - RepBytes: [][]byte{[]byte("sham"), []byte("wow")}, - }, - }, - // Check that a scalar bytes field replaces rather than appends. 
- { - src: &pb.OtherMessage{Value: []byte("foo")}, - dst: &pb.OtherMessage{Value: []byte("bar")}, - want: &pb.OtherMessage{Value: []byte("foo")}, - }, - { - src: &pb.MessageWithMap{ - NameMapping: map[int32]string{6: "Nigel"}, - MsgMapping: map[int64]*pb.FloatingPoint{ - 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)}, - }, - ByteMapping: map[bool][]byte{true: []byte("wowsa")}, - }, - dst: &pb.MessageWithMap{ - NameMapping: map[int32]string{ - 6: "Bruce", // should be overwritten - 7: "Andrew", - }, - }, - want: &pb.MessageWithMap{ - NameMapping: map[int32]string{ - 6: "Nigel", - 7: "Andrew", - }, - MsgMapping: map[int64]*pb.FloatingPoint{ - 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)}, - }, - ByteMapping: map[bool][]byte{true: []byte("wowsa")}, - }, - }, -} - -func TestMerge(t *testing.T) { - for _, m := range mergeTests { - got := proto.Clone(m.dst) - proto.Merge(got, m.src) - if !proto.Equal(got, m.want) { - t.Errorf("Merge(%v, %v)\n got %v\nwant %v\n", m.dst, m.src, got, m.want) - } - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal_test.go b/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal_test.go deleted file mode 100644 index cc25833ca4..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal_test.go +++ /dev/null @@ -1,191 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "testing" - - pb "./testdata" - . "github.com/golang/protobuf/proto" -) - -// Four identical base messages. -// The init function adds extensions to some of them. -var messageWithoutExtension = &pb.MyMessage{Count: Int32(7)} -var messageWithExtension1a = &pb.MyMessage{Count: Int32(7)} -var messageWithExtension1b = &pb.MyMessage{Count: Int32(7)} -var messageWithExtension2 = &pb.MyMessage{Count: Int32(7)} - -// Two messages with non-message extensions. 
-var messageWithInt32Extension1 = &pb.MyMessage{Count: Int32(8)} -var messageWithInt32Extension2 = &pb.MyMessage{Count: Int32(8)} - -func init() { - ext1 := &pb.Ext{Data: String("Kirk")} - ext2 := &pb.Ext{Data: String("Picard")} - - // messageWithExtension1a has ext1, but never marshals it. - if err := SetExtension(messageWithExtension1a, pb.E_Ext_More, ext1); err != nil { - panic("SetExtension on 1a failed: " + err.Error()) - } - - // messageWithExtension1b is the unmarshaled form of messageWithExtension1a. - if err := SetExtension(messageWithExtension1b, pb.E_Ext_More, ext1); err != nil { - panic("SetExtension on 1b failed: " + err.Error()) - } - buf, err := Marshal(messageWithExtension1b) - if err != nil { - panic("Marshal of 1b failed: " + err.Error()) - } - messageWithExtension1b.Reset() - if err := Unmarshal(buf, messageWithExtension1b); err != nil { - panic("Unmarshal of 1b failed: " + err.Error()) - } - - // messageWithExtension2 has ext2. - if err := SetExtension(messageWithExtension2, pb.E_Ext_More, ext2); err != nil { - panic("SetExtension on 2 failed: " + err.Error()) - } - - if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(23)); err != nil { - panic("SetExtension on Int32-1 failed: " + err.Error()) - } - if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(24)); err != nil { - panic("SetExtension on Int32-2 failed: " + err.Error()) - } -} - -var EqualTests = []struct { - desc string - a, b Message - exp bool -}{ - {"different types", &pb.GoEnum{}, &pb.GoTestField{}, false}, - {"equal empty", &pb.GoEnum{}, &pb.GoEnum{}, true}, - {"nil vs nil", nil, nil, true}, - {"typed nil vs typed nil", (*pb.GoEnum)(nil), (*pb.GoEnum)(nil), true}, - {"typed nil vs empty", (*pb.GoEnum)(nil), &pb.GoEnum{}, false}, - {"different typed nil", (*pb.GoEnum)(nil), (*pb.GoTestField)(nil), false}, - - {"one set field, one unset field", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{}, false}, - {"one set field zero, one unset field", &pb.GoTest{Param: Int32(0)}, &pb.GoTest{}, false}, - {"different set fields", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("bar")}, false}, - {"equal set", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("foo")}, true}, - - {"repeated, one set", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{}, false}, - {"repeated, different length", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{F_Int32Repeated: []int32{2}}, false}, - {"repeated, different value", &pb.GoTest{F_Int32Repeated: []int32{2}}, &pb.GoTest{F_Int32Repeated: []int32{3}}, false}, - {"repeated, equal", &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, true}, - {"repeated, nil equal nil", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: nil}, true}, - {"repeated, nil equal empty", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: []int32{}}, true}, - {"repeated, empty equal nil", &pb.GoTest{F_Int32Repeated: []int32{}}, &pb.GoTest{F_Int32Repeated: nil}, true}, - - { - "nested, different", - &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("foo")}}, - &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("bar")}}, - false, - }, - { - "nested, equal", - &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}}, - &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}}, - true, - }, - - {"bytes", &pb.OtherMessage{Value: []byte("foo")}, &pb.OtherMessage{Value: []byte("foo")}, true}, - {"bytes, empty", 
&pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: []byte{}}, true}, - {"bytes, empty vs nil", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: nil}, false}, - { - "repeated bytes", - &pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}}, - &pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}}, - true, - }, - - {"extension vs. no extension", messageWithoutExtension, messageWithExtension1a, false}, - {"extension vs. same extension", messageWithExtension1a, messageWithExtension1b, true}, - {"extension vs. different extension", messageWithExtension1a, messageWithExtension2, false}, - - {"int32 extension vs. itself", messageWithInt32Extension1, messageWithInt32Extension1, true}, - {"int32 extension vs. a different int32", messageWithInt32Extension1, messageWithInt32Extension2, false}, - - { - "message with group", - &pb.MyMessage{ - Count: Int32(1), - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: Int32(5), - }, - }, - &pb.MyMessage{ - Count: Int32(1), - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: Int32(5), - }, - }, - true, - }, - - { - "map same", - &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, - &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, - true, - }, - { - "map different entry", - &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, - &pb.MessageWithMap{NameMapping: map[int32]string{2: "Rob"}}, - false, - }, - { - "map different key only", - &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, - &pb.MessageWithMap{NameMapping: map[int32]string{2: "Ken"}}, - false, - }, - { - "map different value only", - &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, - &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob"}}, - false, - }, -} - -func TestEqual(t *testing.T) { - for _, tc := range EqualTests { - if res := Equal(tc.a, tc.b); res != tc.exp { - t.Errorf("%v: Equal(%v, %v) = %v, want %v", tc.desc, tc.a, tc.b, res, tc.exp) - } - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions_test.go b/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions_test.go deleted file mode 100644 index 6495f5679e..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions_test.go +++ /dev/null @@ -1,153 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2014 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "testing" - - pb "./testdata" - "github.com/golang/protobuf/proto" -) - -func TestGetExtensionsWithMissingExtensions(t *testing.T) { - msg := &pb.MyMessage{} - ext1 := &pb.Ext{} - if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil { - t.Fatalf("Could not set ext1: %s", ext1) - } - exts, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{ - pb.E_Ext_More, - pb.E_Ext_Text, - }) - if err != nil { - t.Fatalf("GetExtensions() failed: %s", err) - } - if exts[0] != ext1 { - t.Errorf("ext1 not in returned extensions: %T %v", exts[0], exts[0]) - } - if exts[1] != nil { - t.Errorf("ext2 in returned extensions: %T %v", exts[1], exts[1]) - } -} - -func TestGetExtensionStability(t *testing.T) { - check := func(m *pb.MyMessage) bool { - ext1, err := proto.GetExtension(m, pb.E_Ext_More) - if err != nil { - t.Fatalf("GetExtension() failed: %s", err) - } - ext2, err := proto.GetExtension(m, pb.E_Ext_More) - if err != nil { - t.Fatalf("GetExtension() failed: %s", err) - } - return ext1 == ext2 - } - msg := &pb.MyMessage{Count: proto.Int32(4)} - ext0 := &pb.Ext{} - if err := proto.SetExtension(msg, pb.E_Ext_More, ext0); err != nil { - t.Fatalf("Could not set ext1: %s", ext0) - } - if !check(msg) { - t.Errorf("GetExtension() not stable before marshaling") - } - bb, err := proto.Marshal(msg) - if err != nil { - t.Fatalf("Marshal() failed: %s", err) - } - msg1 := &pb.MyMessage{} - err = proto.Unmarshal(bb, msg1) - if err != nil { - t.Fatalf("Unmarshal() failed: %s", err) - } - if !check(msg1) { - t.Errorf("GetExtension() not stable after unmarshaling") - } -} - -func TestExtensionsRoundTrip(t *testing.T) { - msg := &pb.MyMessage{} - ext1 := &pb.Ext{ - Data: proto.String("hi"), - } - ext2 := &pb.Ext{ - Data: proto.String("there"), - } - exists := proto.HasExtension(msg, pb.E_Ext_More) - if exists { - t.Error("Extension More present unexpectedly") - } - if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil { - t.Error(err) - } - if err := proto.SetExtension(msg, pb.E_Ext_More, ext2); err != nil { - t.Error(err) - } - e, err := proto.GetExtension(msg, pb.E_Ext_More) - if err != nil { - t.Error(err) - } - x, ok := e.(*pb.Ext) - if !ok { - t.Errorf("e has type %T, expected testdata.Ext", e) - } else if *x.Data != "there" { - t.Errorf("SetExtension failed to overwrite, got %+v, not 'there'", x) - } - proto.ClearExtension(msg, pb.E_Ext_More) - if _, err = proto.GetExtension(msg, pb.E_Ext_More); err != proto.ErrMissingExtension { - t.Errorf("got %v, expected ErrMissingExtension", e) - } - if _, err := proto.GetExtension(msg, pb.E_X215); err == nil { - t.Error("expected bad extension error, got nil") - } - if err := proto.SetExtension(msg, pb.E_X215, 12); err == nil { - t.Error("expected extension err") - } - if err := proto.SetExtension(msg, pb.E_Ext_More, 12); err == nil { - t.Error("expected some sort of type mismatch error, got nil") - } -} - -func TestNilExtension(t *testing.T) { - msg := &pb.MyMessage{ - Count: 
proto.Int32(1), - } - if err := proto.SetExtension(msg, pb.E_Ext_Text, proto.String("hello")); err != nil { - t.Fatal(err) - } - if err := proto.SetExtension(msg, pb.E_Ext_More, (*pb.Ext)(nil)); err == nil { - t.Error("expected SetExtension to fail due to a nil extension") - } else if want := "proto: SetExtension called with nil value of type *testdata.Ext"; err.Error() != want { - t.Errorf("expected error %v, got %v", want, err) - } - // Note: if the behavior of Marshal is ever changed to ignore nil extensions, update - // this test to verify that E_Ext_Text is properly propagated through marshal->unmarshal. -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set_test.go b/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set_test.go deleted file mode 100644 index 7c29bccf4b..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set_test.go +++ /dev/null @@ -1,66 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2014 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "bytes" - "testing" -) - -func TestUnmarshalMessageSetWithDuplicate(t *testing.T) { - // Check that a repeated message set entry will be concatenated. - in := &MessageSet{ - Item: []*_MessageSet_Item{ - {TypeId: Int32(12345), Message: []byte("hoo")}, - {TypeId: Int32(12345), Message: []byte("hah")}, - }, - } - b, err := Marshal(in) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - t.Logf("Marshaled bytes: %q", b) - - m := make(map[int32]Extension) - if err := UnmarshalMessageSet(b, m); err != nil { - t.Fatalf("UnmarshalMessageSet: %v", err) - } - ext, ok := m[12345] - if !ok { - t.Fatalf("Didn't retrieve extension 12345; map is %v", m) - } - // Skip wire type/field number and length varints. 
- got := skipVarint(skipVarint(ext.enc)) - if want := []byte("hoohah"); !bytes.Equal(got, want) { - t.Errorf("Combined extension is %q, want %q", got, want) - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/Makefile b/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/Makefile deleted file mode 100644 index 75144b582e..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/Makefile +++ /dev/null @@ -1,44 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2014 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -include ../../Make.protobuf - -all: regenerate - -regenerate: - rm -f proto3.pb.go - make proto3.pb.go - -# The following rules are just aids to development. Not needed for typical testing. - -diff: regenerate - git diff proto3.pb.go diff --git a/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto b/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto deleted file mode 100644 index 3e327ded1d..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto +++ /dev/null @@ -1,58 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2014 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package proto3_proto; - -message Message { - enum Humour { - UNKNOWN = 0; - PUNS = 1; - SLAPSTICK = 2; - BILL_BAILEY = 3; - } - - string name = 1; - Humour hilarity = 2; - uint32 height_in_cm = 3; - bytes data = 4; - int64 result_count = 7; - bool true_scotsman = 8; - float score = 9; - - repeated uint64 key = 5; - Nested nested = 6; -} - -message Nested { - string bunny = 1; -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_test.go b/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_test.go deleted file mode 100644 index d4c96a9e73..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_test.go +++ /dev/null @@ -1,93 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2014 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto_test - -import ( - "testing" - - pb "./proto3_proto" - "github.com/golang/protobuf/proto" -) - -func TestProto3ZeroValues(t *testing.T) { - tests := []struct { - desc string - m proto.Message - }{ - {"zero message", &pb.Message{}}, - {"empty bytes field", &pb.Message{Data: []byte{}}}, - } - for _, test := range tests { - b, err := proto.Marshal(test.m) - if err != nil { - t.Errorf("%s: proto.Marshal: %v", test.desc, err) - continue - } - if len(b) > 0 { - t.Errorf("%s: Encoding is non-empty: %q", test.desc, b) - } - } -} - -func TestRoundTripProto3(t *testing.T) { - m := &pb.Message{ - Name: "David", // (2 | 1<<3): 0x0a 0x05 "David" - Hilarity: pb.Message_PUNS, // (0 | 2<<3): 0x10 0x01 - HeightInCm: 178, // (0 | 3<<3): 0x18 0xb2 0x01 - Data: []byte("roboto"), // (2 | 4<<3): 0x20 0x06 "roboto" - ResultCount: 47, // (0 | 7<<3): 0x38 0x2f - TrueScotsman: true, // (0 | 8<<3): 0x40 0x01 - Score: 8.1, // (5 | 9<<3): 0x4d <8.1> - - Key: []uint64{1, 0xdeadbeef}, - Nested: &pb.Nested{ - Bunny: "Monty", - }, - } - t.Logf(" m: %v", m) - - b, err := proto.Marshal(m) - if err != nil { - t.Fatalf("proto.Marshal: %v", err) - } - t.Logf(" b: %q", b) - - m2 := new(pb.Message) - if err := proto.Unmarshal(b, m2); err != nil { - t.Fatalf("proto.Unmarshal: %v", err) - } - t.Logf("m2: %v", m2) - - if !proto.Equal(m, m2) { - t.Errorf("proto.Equal returned false:\n m: %v\nm2: %v", m, m2) - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/size2_test.go b/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/size2_test.go deleted file mode 100644 index a2729c39a1..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/size2_test.go +++ /dev/null @@ -1,63 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "testing" -) - -// This is a separate file and package from size_test.go because that one uses -// generated messages and thus may not be in package proto without having a circular -// dependency, whereas this file tests unexported details of size.go. - -func TestVarintSize(t *testing.T) { - // Check the edge cases carefully. - testCases := []struct { - n uint64 - size int - }{ - {0, 1}, - {1, 1}, - {127, 1}, - {128, 2}, - {16383, 2}, - {16384, 3}, - {1<<63 - 1, 9}, - {1 << 63, 10}, - } - for _, tc := range testCases { - size := sizeVarint(tc.n) - if size != tc.size { - t.Errorf("sizeVarint(%d) = %d, want %d", tc.n, size, tc.size) - } - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/size_test.go b/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/size_test.go deleted file mode 100644 index e5f92d6b90..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/size_test.go +++ /dev/null @@ -1,135 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "log" - "testing" - - proto3pb "./proto3_proto" - pb "./testdata" - . "github.com/golang/protobuf/proto" -) - -var messageWithExtension1 = &pb.MyMessage{Count: Int32(7)} - -// messageWithExtension2 is in equal_test.go. -var messageWithExtension3 = &pb.MyMessage{Count: Int32(8)} - -func init() { - if err := SetExtension(messageWithExtension1, pb.E_Ext_More, &pb.Ext{Data: String("Abbott")}); err != nil { - log.Panicf("SetExtension: %v", err) - } - if err := SetExtension(messageWithExtension3, pb.E_Ext_More, &pb.Ext{Data: String("Costello")}); err != nil { - log.Panicf("SetExtension: %v", err) - } - - // Force messageWithExtension3 to have the extension encoded. 
- Marshal(messageWithExtension3) - -} - -var SizeTests = []struct { - desc string - pb Message -}{ - {"empty", &pb.OtherMessage{}}, - // Basic types. - {"bool", &pb.Defaults{F_Bool: Bool(true)}}, - {"int32", &pb.Defaults{F_Int32: Int32(12)}}, - {"negative int32", &pb.Defaults{F_Int32: Int32(-1)}}, - {"small int64", &pb.Defaults{F_Int64: Int64(1)}}, - {"big int64", &pb.Defaults{F_Int64: Int64(1 << 20)}}, - {"negative int64", &pb.Defaults{F_Int64: Int64(-1)}}, - {"fixed32", &pb.Defaults{F_Fixed32: Uint32(71)}}, - {"fixed64", &pb.Defaults{F_Fixed64: Uint64(72)}}, - {"uint32", &pb.Defaults{F_Uint32: Uint32(123)}}, - {"uint64", &pb.Defaults{F_Uint64: Uint64(124)}}, - {"float", &pb.Defaults{F_Float: Float32(12.6)}}, - {"double", &pb.Defaults{F_Double: Float64(13.9)}}, - {"string", &pb.Defaults{F_String: String("niles")}}, - {"bytes", &pb.Defaults{F_Bytes: []byte("wowsa")}}, - {"bytes, empty", &pb.Defaults{F_Bytes: []byte{}}}, - {"sint32", &pb.Defaults{F_Sint32: Int32(65)}}, - {"sint64", &pb.Defaults{F_Sint64: Int64(67)}}, - {"enum", &pb.Defaults{F_Enum: pb.Defaults_BLUE.Enum()}}, - // Repeated. - {"empty repeated bool", &pb.MoreRepeated{Bools: []bool{}}}, - {"repeated bool", &pb.MoreRepeated{Bools: []bool{false, true, true, false}}}, - {"packed repeated bool", &pb.MoreRepeated{BoolsPacked: []bool{false, true, true, false, true, true, true}}}, - {"repeated int32", &pb.MoreRepeated{Ints: []int32{1, 12203, 1729, -1}}}, - {"repeated int32 packed", &pb.MoreRepeated{IntsPacked: []int32{1, 12203, 1729}}}, - {"repeated int64 packed", &pb.MoreRepeated{Int64SPacked: []int64{ - // Need enough large numbers to verify that the header is counting the number of bytes - // for the field, not the number of elements. - 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, - 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, - }}}, - {"repeated string", &pb.MoreRepeated{Strings: []string{"r", "ken", "gri"}}}, - {"repeated fixed", &pb.MoreRepeated{Fixeds: []uint32{1, 2, 3, 4}}}, - // Nested. - {"nested", &pb.OldMessage{Nested: &pb.OldMessage_Nested{Name: String("whatever")}}}, - {"group", &pb.GroupOld{G: &pb.GroupOld_G{X: Int32(12345)}}}, - // Other things. 
- {"unrecognized", &pb.MoreRepeated{XXX_unrecognized: []byte{13<<3 | 0, 4}}}, - {"extension (unencoded)", messageWithExtension1}, - {"extension (encoded)", messageWithExtension3}, - // proto3 message - {"proto3 empty", &proto3pb.Message{}}, - {"proto3 bool", &proto3pb.Message{TrueScotsman: true}}, - {"proto3 int64", &proto3pb.Message{ResultCount: 1}}, - {"proto3 uint32", &proto3pb.Message{HeightInCm: 123}}, - {"proto3 float", &proto3pb.Message{Score: 12.6}}, - {"proto3 string", &proto3pb.Message{Name: "Snezana"}}, - {"proto3 bytes", &proto3pb.Message{Data: []byte("wowsa")}}, - {"proto3 bytes, empty", &proto3pb.Message{Data: []byte{}}}, - {"proto3 enum", &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}}, - - {"map field", &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob", 7: "Andrew"}}}, - {"map field with message", &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{0x7001: &pb.FloatingPoint{F: Float64(2.0)}}}}, - {"map field with bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte("this time for sure")}}}, -} - -func TestSize(t *testing.T) { - for _, tc := range SizeTests { - size := Size(tc.pb) - b, err := Marshal(tc.pb) - if err != nil { - t.Errorf("%v: Marshal failed: %v", tc.desc, err) - continue - } - if size != len(b) { - t.Errorf("%v: Size(%v) = %d, want %d", tc.desc, tc.pb, size, len(b)) - t.Logf("%v: bytes: %#v", tc.desc, b) - } - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/Makefile b/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/Makefile deleted file mode 100644 index fc288628a7..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/Makefile +++ /dev/null @@ -1,50 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - -include ../../Make.protobuf - -all: regenerate - -regenerate: - rm -f test.pb.go - make test.pb.go - -# The following rules are just aids to development. Not needed for typical testing. - -diff: regenerate - git diff test.pb.go - -restore: - cp test.pb.go.golden test.pb.go - -preserve: - cp test.pb.go test.pb.go.golden diff --git a/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/golden_test.go b/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/golden_test.go deleted file mode 100644 index 7172d0e969..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/golden_test.go +++ /dev/null @@ -1,86 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Verify that the compiler output for test.proto is unchanged. - -package testdata - -import ( - "crypto/sha1" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "testing" -) - -// sum returns in string form (for easy comparison) the SHA-1 hash of the named file. -func sum(t *testing.T, name string) string { - data, err := ioutil.ReadFile(name) - if err != nil { - t.Fatal(err) - } - t.Logf("sum(%q): length is %d", name, len(data)) - hash := sha1.New() - _, err = hash.Write(data) - if err != nil { - t.Fatal(err) - } - return fmt.Sprintf("% x", hash.Sum(nil)) -} - -func run(t *testing.T, name string, args ...string) { - cmd := exec.Command(name, args...) - cmd.Stdin = os.Stdin - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - err := cmd.Run() - if err != nil { - t.Fatal(err) - } -} - -func TestGolden(t *testing.T) { - // Compute the original checksum. - goldenSum := sum(t, "test.pb.go") - // Run the proto compiler. - run(t, "protoc", "--go_out="+os.TempDir(), "test.proto") - newFile := filepath.Join(os.TempDir(), "test.pb.go") - defer os.Remove(newFile) - // Compute the new checksum. 
- newSum := sum(t, newFile) - // Verify - if newSum != goldenSum { - run(t, "diff", "-u", "test.pb.go", newFile) - t.Fatal("Code generated by protoc-gen-go has changed; update test.pb.go") - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.pb.go b/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.pb.go deleted file mode 100644 index f47d9e0e39..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.pb.go +++ /dev/null @@ -1,2389 +0,0 @@ -// Code generated by protoc-gen-go. -// source: test.proto -// DO NOT EDIT! - -/* -Package testdata is a generated protocol buffer package. - -It is generated from these files: - test.proto - -It has these top-level messages: - GoEnum - GoTestField - GoTest - GoSkipTest - NonPackedTest - PackedTest - MaxTag - OldMessage - NewMessage - InnerMessage - OtherMessage - MyMessage - Ext - MyMessageSet - Empty - MessageList - Strings - Defaults - SubDefaults - RepeatedEnum - MoreRepeated - GroupOld - GroupNew - FloatingPoint - MessageWithMap -*/ -package testdata - -import proto "github.com/golang/protobuf/proto" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = math.Inf - -type FOO int32 - -const ( - FOO_FOO1 FOO = 1 -) - -var FOO_name = map[int32]string{ - 1: "FOO1", -} -var FOO_value = map[string]int32{ - "FOO1": 1, -} - -func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p -} -func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) -} -func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO") - if err != nil { - return err - } - *x = FOO(value) - return nil -} - -// An enum, for completeness. 
-type GoTest_KIND int32 - -const ( - GoTest_VOID GoTest_KIND = 0 - // Basic types - GoTest_BOOL GoTest_KIND = 1 - GoTest_BYTES GoTest_KIND = 2 - GoTest_FINGERPRINT GoTest_KIND = 3 - GoTest_FLOAT GoTest_KIND = 4 - GoTest_INT GoTest_KIND = 5 - GoTest_STRING GoTest_KIND = 6 - GoTest_TIME GoTest_KIND = 7 - // Groupings - GoTest_TUPLE GoTest_KIND = 8 - GoTest_ARRAY GoTest_KIND = 9 - GoTest_MAP GoTest_KIND = 10 - // Table types - GoTest_TABLE GoTest_KIND = 11 - // Functions - GoTest_FUNCTION GoTest_KIND = 12 -) - -var GoTest_KIND_name = map[int32]string{ - 0: "VOID", - 1: "BOOL", - 2: "BYTES", - 3: "FINGERPRINT", - 4: "FLOAT", - 5: "INT", - 6: "STRING", - 7: "TIME", - 8: "TUPLE", - 9: "ARRAY", - 10: "MAP", - 11: "TABLE", - 12: "FUNCTION", -} -var GoTest_KIND_value = map[string]int32{ - "VOID": 0, - "BOOL": 1, - "BYTES": 2, - "FINGERPRINT": 3, - "FLOAT": 4, - "INT": 5, - "STRING": 6, - "TIME": 7, - "TUPLE": 8, - "ARRAY": 9, - "MAP": 10, - "TABLE": 11, - "FUNCTION": 12, -} - -func (x GoTest_KIND) Enum() *GoTest_KIND { - p := new(GoTest_KIND) - *p = x - return p -} -func (x GoTest_KIND) String() string { - return proto.EnumName(GoTest_KIND_name, int32(x)) -} -func (x *GoTest_KIND) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(GoTest_KIND_value, data, "GoTest_KIND") - if err != nil { - return err - } - *x = GoTest_KIND(value) - return nil -} - -type MyMessage_Color int32 - -const ( - MyMessage_RED MyMessage_Color = 0 - MyMessage_GREEN MyMessage_Color = 1 - MyMessage_BLUE MyMessage_Color = 2 -) - -var MyMessage_Color_name = map[int32]string{ - 0: "RED", - 1: "GREEN", - 2: "BLUE", -} -var MyMessage_Color_value = map[string]int32{ - "RED": 0, - "GREEN": 1, - "BLUE": 2, -} - -func (x MyMessage_Color) Enum() *MyMessage_Color { - p := new(MyMessage_Color) - *p = x - return p -} -func (x MyMessage_Color) String() string { - return proto.EnumName(MyMessage_Color_name, int32(x)) -} -func (x *MyMessage_Color) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MyMessage_Color_value, data, "MyMessage_Color") - if err != nil { - return err - } - *x = MyMessage_Color(value) - return nil -} - -type Defaults_Color int32 - -const ( - Defaults_RED Defaults_Color = 0 - Defaults_GREEN Defaults_Color = 1 - Defaults_BLUE Defaults_Color = 2 -) - -var Defaults_Color_name = map[int32]string{ - 0: "RED", - 1: "GREEN", - 2: "BLUE", -} -var Defaults_Color_value = map[string]int32{ - "RED": 0, - "GREEN": 1, - "BLUE": 2, -} - -func (x Defaults_Color) Enum() *Defaults_Color { - p := new(Defaults_Color) - *p = x - return p -} -func (x Defaults_Color) String() string { - return proto.EnumName(Defaults_Color_name, int32(x)) -} -func (x *Defaults_Color) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Defaults_Color_value, data, "Defaults_Color") - if err != nil { - return err - } - *x = Defaults_Color(value) - return nil -} - -type RepeatedEnum_Color int32 - -const ( - RepeatedEnum_RED RepeatedEnum_Color = 1 -) - -var RepeatedEnum_Color_name = map[int32]string{ - 1: "RED", -} -var RepeatedEnum_Color_value = map[string]int32{ - "RED": 1, -} - -func (x RepeatedEnum_Color) Enum() *RepeatedEnum_Color { - p := new(RepeatedEnum_Color) - *p = x - return p -} -func (x RepeatedEnum_Color) String() string { - return proto.EnumName(RepeatedEnum_Color_name, int32(x)) -} -func (x *RepeatedEnum_Color) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(RepeatedEnum_Color_value, data, "RepeatedEnum_Color") - if err != nil { - return err - } - *x = 
RepeatedEnum_Color(value) - return nil -} - -type GoEnum struct { - Foo *FOO `protobuf:"varint,1,req,name=foo,enum=testdata.FOO" json:"foo,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoEnum) Reset() { *m = GoEnum{} } -func (m *GoEnum) String() string { return proto.CompactTextString(m) } -func (*GoEnum) ProtoMessage() {} - -func (m *GoEnum) GetFoo() FOO { - if m != nil && m.Foo != nil { - return *m.Foo - } - return FOO_FOO1 -} - -type GoTestField struct { - Label *string `protobuf:"bytes,1,req" json:"Label,omitempty"` - Type *string `protobuf:"bytes,2,req" json:"Type,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoTestField) Reset() { *m = GoTestField{} } -func (m *GoTestField) String() string { return proto.CompactTextString(m) } -func (*GoTestField) ProtoMessage() {} - -func (m *GoTestField) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" -} - -func (m *GoTestField) GetType() string { - if m != nil && m.Type != nil { - return *m.Type - } - return "" -} - -type GoTest struct { - // Some typical parameters - Kind *GoTest_KIND `protobuf:"varint,1,req,enum=testdata.GoTest_KIND" json:"Kind,omitempty"` - Table *string `protobuf:"bytes,2,opt" json:"Table,omitempty"` - Param *int32 `protobuf:"varint,3,opt" json:"Param,omitempty"` - // Required, repeated and optional foreign fields. - RequiredField *GoTestField `protobuf:"bytes,4,req" json:"RequiredField,omitempty"` - RepeatedField []*GoTestField `protobuf:"bytes,5,rep" json:"RepeatedField,omitempty"` - OptionalField *GoTestField `protobuf:"bytes,6,opt" json:"OptionalField,omitempty"` - // Required fields of all basic types - F_BoolRequired *bool `protobuf:"varint,10,req,name=F_Bool_required" json:"F_Bool_required,omitempty"` - F_Int32Required *int32 `protobuf:"varint,11,req,name=F_Int32_required" json:"F_Int32_required,omitempty"` - F_Int64Required *int64 `protobuf:"varint,12,req,name=F_Int64_required" json:"F_Int64_required,omitempty"` - F_Fixed32Required *uint32 `protobuf:"fixed32,13,req,name=F_Fixed32_required" json:"F_Fixed32_required,omitempty"` - F_Fixed64Required *uint64 `protobuf:"fixed64,14,req,name=F_Fixed64_required" json:"F_Fixed64_required,omitempty"` - F_Uint32Required *uint32 `protobuf:"varint,15,req,name=F_Uint32_required" json:"F_Uint32_required,omitempty"` - F_Uint64Required *uint64 `protobuf:"varint,16,req,name=F_Uint64_required" json:"F_Uint64_required,omitempty"` - F_FloatRequired *float32 `protobuf:"fixed32,17,req,name=F_Float_required" json:"F_Float_required,omitempty"` - F_DoubleRequired *float64 `protobuf:"fixed64,18,req,name=F_Double_required" json:"F_Double_required,omitempty"` - F_StringRequired *string `protobuf:"bytes,19,req,name=F_String_required" json:"F_String_required,omitempty"` - F_BytesRequired []byte `protobuf:"bytes,101,req,name=F_Bytes_required" json:"F_Bytes_required,omitempty"` - F_Sint32Required *int32 `protobuf:"zigzag32,102,req,name=F_Sint32_required" json:"F_Sint32_required,omitempty"` - F_Sint64Required *int64 `protobuf:"zigzag64,103,req,name=F_Sint64_required" json:"F_Sint64_required,omitempty"` - // Repeated fields of all basic types - F_BoolRepeated []bool `protobuf:"varint,20,rep,name=F_Bool_repeated" json:"F_Bool_repeated,omitempty"` - F_Int32Repeated []int32 `protobuf:"varint,21,rep,name=F_Int32_repeated" json:"F_Int32_repeated,omitempty"` - F_Int64Repeated []int64 `protobuf:"varint,22,rep,name=F_Int64_repeated" json:"F_Int64_repeated,omitempty"` - F_Fixed32Repeated []uint32 
`protobuf:"fixed32,23,rep,name=F_Fixed32_repeated" json:"F_Fixed32_repeated,omitempty"` - F_Fixed64Repeated []uint64 `protobuf:"fixed64,24,rep,name=F_Fixed64_repeated" json:"F_Fixed64_repeated,omitempty"` - F_Uint32Repeated []uint32 `protobuf:"varint,25,rep,name=F_Uint32_repeated" json:"F_Uint32_repeated,omitempty"` - F_Uint64Repeated []uint64 `protobuf:"varint,26,rep,name=F_Uint64_repeated" json:"F_Uint64_repeated,omitempty"` - F_FloatRepeated []float32 `protobuf:"fixed32,27,rep,name=F_Float_repeated" json:"F_Float_repeated,omitempty"` - F_DoubleRepeated []float64 `protobuf:"fixed64,28,rep,name=F_Double_repeated" json:"F_Double_repeated,omitempty"` - F_StringRepeated []string `protobuf:"bytes,29,rep,name=F_String_repeated" json:"F_String_repeated,omitempty"` - F_BytesRepeated [][]byte `protobuf:"bytes,201,rep,name=F_Bytes_repeated" json:"F_Bytes_repeated,omitempty"` - F_Sint32Repeated []int32 `protobuf:"zigzag32,202,rep,name=F_Sint32_repeated" json:"F_Sint32_repeated,omitempty"` - F_Sint64Repeated []int64 `protobuf:"zigzag64,203,rep,name=F_Sint64_repeated" json:"F_Sint64_repeated,omitempty"` - // Optional fields of all basic types - F_BoolOptional *bool `protobuf:"varint,30,opt,name=F_Bool_optional" json:"F_Bool_optional,omitempty"` - F_Int32Optional *int32 `protobuf:"varint,31,opt,name=F_Int32_optional" json:"F_Int32_optional,omitempty"` - F_Int64Optional *int64 `protobuf:"varint,32,opt,name=F_Int64_optional" json:"F_Int64_optional,omitempty"` - F_Fixed32Optional *uint32 `protobuf:"fixed32,33,opt,name=F_Fixed32_optional" json:"F_Fixed32_optional,omitempty"` - F_Fixed64Optional *uint64 `protobuf:"fixed64,34,opt,name=F_Fixed64_optional" json:"F_Fixed64_optional,omitempty"` - F_Uint32Optional *uint32 `protobuf:"varint,35,opt,name=F_Uint32_optional" json:"F_Uint32_optional,omitempty"` - F_Uint64Optional *uint64 `protobuf:"varint,36,opt,name=F_Uint64_optional" json:"F_Uint64_optional,omitempty"` - F_FloatOptional *float32 `protobuf:"fixed32,37,opt,name=F_Float_optional" json:"F_Float_optional,omitempty"` - F_DoubleOptional *float64 `protobuf:"fixed64,38,opt,name=F_Double_optional" json:"F_Double_optional,omitempty"` - F_StringOptional *string `protobuf:"bytes,39,opt,name=F_String_optional" json:"F_String_optional,omitempty"` - F_BytesOptional []byte `protobuf:"bytes,301,opt,name=F_Bytes_optional" json:"F_Bytes_optional,omitempty"` - F_Sint32Optional *int32 `protobuf:"zigzag32,302,opt,name=F_Sint32_optional" json:"F_Sint32_optional,omitempty"` - F_Sint64Optional *int64 `protobuf:"zigzag64,303,opt,name=F_Sint64_optional" json:"F_Sint64_optional,omitempty"` - // Default-valued fields of all basic types - F_BoolDefaulted *bool `protobuf:"varint,40,opt,name=F_Bool_defaulted,def=1" json:"F_Bool_defaulted,omitempty"` - F_Int32Defaulted *int32 `protobuf:"varint,41,opt,name=F_Int32_defaulted,def=32" json:"F_Int32_defaulted,omitempty"` - F_Int64Defaulted *int64 `protobuf:"varint,42,opt,name=F_Int64_defaulted,def=64" json:"F_Int64_defaulted,omitempty"` - F_Fixed32Defaulted *uint32 `protobuf:"fixed32,43,opt,name=F_Fixed32_defaulted,def=320" json:"F_Fixed32_defaulted,omitempty"` - F_Fixed64Defaulted *uint64 `protobuf:"fixed64,44,opt,name=F_Fixed64_defaulted,def=640" json:"F_Fixed64_defaulted,omitempty"` - F_Uint32Defaulted *uint32 `protobuf:"varint,45,opt,name=F_Uint32_defaulted,def=3200" json:"F_Uint32_defaulted,omitempty"` - F_Uint64Defaulted *uint64 `protobuf:"varint,46,opt,name=F_Uint64_defaulted,def=6400" json:"F_Uint64_defaulted,omitempty"` - F_FloatDefaulted *float32 
`protobuf:"fixed32,47,opt,name=F_Float_defaulted,def=314159" json:"F_Float_defaulted,omitempty"` - F_DoubleDefaulted *float64 `protobuf:"fixed64,48,opt,name=F_Double_defaulted,def=271828" json:"F_Double_defaulted,omitempty"` - F_StringDefaulted *string `protobuf:"bytes,49,opt,name=F_String_defaulted,def=hello, \"world!\"\n" json:"F_String_defaulted,omitempty"` - F_BytesDefaulted []byte `protobuf:"bytes,401,opt,name=F_Bytes_defaulted,def=Bignose" json:"F_Bytes_defaulted,omitempty"` - F_Sint32Defaulted *int32 `protobuf:"zigzag32,402,opt,name=F_Sint32_defaulted,def=-32" json:"F_Sint32_defaulted,omitempty"` - F_Sint64Defaulted *int64 `protobuf:"zigzag64,403,opt,name=F_Sint64_defaulted,def=-64" json:"F_Sint64_defaulted,omitempty"` - // Packed repeated fields (no string or bytes). - F_BoolRepeatedPacked []bool `protobuf:"varint,50,rep,packed,name=F_Bool_repeated_packed" json:"F_Bool_repeated_packed,omitempty"` - F_Int32RepeatedPacked []int32 `protobuf:"varint,51,rep,packed,name=F_Int32_repeated_packed" json:"F_Int32_repeated_packed,omitempty"` - F_Int64RepeatedPacked []int64 `protobuf:"varint,52,rep,packed,name=F_Int64_repeated_packed" json:"F_Int64_repeated_packed,omitempty"` - F_Fixed32RepeatedPacked []uint32 `protobuf:"fixed32,53,rep,packed,name=F_Fixed32_repeated_packed" json:"F_Fixed32_repeated_packed,omitempty"` - F_Fixed64RepeatedPacked []uint64 `protobuf:"fixed64,54,rep,packed,name=F_Fixed64_repeated_packed" json:"F_Fixed64_repeated_packed,omitempty"` - F_Uint32RepeatedPacked []uint32 `protobuf:"varint,55,rep,packed,name=F_Uint32_repeated_packed" json:"F_Uint32_repeated_packed,omitempty"` - F_Uint64RepeatedPacked []uint64 `protobuf:"varint,56,rep,packed,name=F_Uint64_repeated_packed" json:"F_Uint64_repeated_packed,omitempty"` - F_FloatRepeatedPacked []float32 `protobuf:"fixed32,57,rep,packed,name=F_Float_repeated_packed" json:"F_Float_repeated_packed,omitempty"` - F_DoubleRepeatedPacked []float64 `protobuf:"fixed64,58,rep,packed,name=F_Double_repeated_packed" json:"F_Double_repeated_packed,omitempty"` - F_Sint32RepeatedPacked []int32 `protobuf:"zigzag32,502,rep,packed,name=F_Sint32_repeated_packed" json:"F_Sint32_repeated_packed,omitempty"` - F_Sint64RepeatedPacked []int64 `protobuf:"zigzag64,503,rep,packed,name=F_Sint64_repeated_packed" json:"F_Sint64_repeated_packed,omitempty"` - Requiredgroup *GoTest_RequiredGroup `protobuf:"group,70,req,name=RequiredGroup" json:"requiredgroup,omitempty"` - Repeatedgroup []*GoTest_RepeatedGroup `protobuf:"group,80,rep,name=RepeatedGroup" json:"repeatedgroup,omitempty"` - Optionalgroup *GoTest_OptionalGroup `protobuf:"group,90,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoTest) Reset() { *m = GoTest{} } -func (m *GoTest) String() string { return proto.CompactTextString(m) } -func (*GoTest) ProtoMessage() {} - -const Default_GoTest_F_BoolDefaulted bool = true -const Default_GoTest_F_Int32Defaulted int32 = 32 -const Default_GoTest_F_Int64Defaulted int64 = 64 -const Default_GoTest_F_Fixed32Defaulted uint32 = 320 -const Default_GoTest_F_Fixed64Defaulted uint64 = 640 -const Default_GoTest_F_Uint32Defaulted uint32 = 3200 -const Default_GoTest_F_Uint64Defaulted uint64 = 6400 -const Default_GoTest_F_FloatDefaulted float32 = 314159 -const Default_GoTest_F_DoubleDefaulted float64 = 271828 -const Default_GoTest_F_StringDefaulted string = "hello, \"world!\"\n" - -var Default_GoTest_F_BytesDefaulted []byte = []byte("Bignose") - -const Default_GoTest_F_Sint32Defaulted int32 = -32 -const 
Default_GoTest_F_Sint64Defaulted int64 = -64 - -func (m *GoTest) GetKind() GoTest_KIND { - if m != nil && m.Kind != nil { - return *m.Kind - } - return GoTest_VOID -} - -func (m *GoTest) GetTable() string { - if m != nil && m.Table != nil { - return *m.Table - } - return "" -} - -func (m *GoTest) GetParam() int32 { - if m != nil && m.Param != nil { - return *m.Param - } - return 0 -} - -func (m *GoTest) GetRequiredField() *GoTestField { - if m != nil { - return m.RequiredField - } - return nil -} - -func (m *GoTest) GetRepeatedField() []*GoTestField { - if m != nil { - return m.RepeatedField - } - return nil -} - -func (m *GoTest) GetOptionalField() *GoTestField { - if m != nil { - return m.OptionalField - } - return nil -} - -func (m *GoTest) GetF_BoolRequired() bool { - if m != nil && m.F_BoolRequired != nil { - return *m.F_BoolRequired - } - return false -} - -func (m *GoTest) GetF_Int32Required() int32 { - if m != nil && m.F_Int32Required != nil { - return *m.F_Int32Required - } - return 0 -} - -func (m *GoTest) GetF_Int64Required() int64 { - if m != nil && m.F_Int64Required != nil { - return *m.F_Int64Required - } - return 0 -} - -func (m *GoTest) GetF_Fixed32Required() uint32 { - if m != nil && m.F_Fixed32Required != nil { - return *m.F_Fixed32Required - } - return 0 -} - -func (m *GoTest) GetF_Fixed64Required() uint64 { - if m != nil && m.F_Fixed64Required != nil { - return *m.F_Fixed64Required - } - return 0 -} - -func (m *GoTest) GetF_Uint32Required() uint32 { - if m != nil && m.F_Uint32Required != nil { - return *m.F_Uint32Required - } - return 0 -} - -func (m *GoTest) GetF_Uint64Required() uint64 { - if m != nil && m.F_Uint64Required != nil { - return *m.F_Uint64Required - } - return 0 -} - -func (m *GoTest) GetF_FloatRequired() float32 { - if m != nil && m.F_FloatRequired != nil { - return *m.F_FloatRequired - } - return 0 -} - -func (m *GoTest) GetF_DoubleRequired() float64 { - if m != nil && m.F_DoubleRequired != nil { - return *m.F_DoubleRequired - } - return 0 -} - -func (m *GoTest) GetF_StringRequired() string { - if m != nil && m.F_StringRequired != nil { - return *m.F_StringRequired - } - return "" -} - -func (m *GoTest) GetF_BytesRequired() []byte { - if m != nil { - return m.F_BytesRequired - } - return nil -} - -func (m *GoTest) GetF_Sint32Required() int32 { - if m != nil && m.F_Sint32Required != nil { - return *m.F_Sint32Required - } - return 0 -} - -func (m *GoTest) GetF_Sint64Required() int64 { - if m != nil && m.F_Sint64Required != nil { - return *m.F_Sint64Required - } - return 0 -} - -func (m *GoTest) GetF_BoolRepeated() []bool { - if m != nil { - return m.F_BoolRepeated - } - return nil -} - -func (m *GoTest) GetF_Int32Repeated() []int32 { - if m != nil { - return m.F_Int32Repeated - } - return nil -} - -func (m *GoTest) GetF_Int64Repeated() []int64 { - if m != nil { - return m.F_Int64Repeated - } - return nil -} - -func (m *GoTest) GetF_Fixed32Repeated() []uint32 { - if m != nil { - return m.F_Fixed32Repeated - } - return nil -} - -func (m *GoTest) GetF_Fixed64Repeated() []uint64 { - if m != nil { - return m.F_Fixed64Repeated - } - return nil -} - -func (m *GoTest) GetF_Uint32Repeated() []uint32 { - if m != nil { - return m.F_Uint32Repeated - } - return nil -} - -func (m *GoTest) GetF_Uint64Repeated() []uint64 { - if m != nil { - return m.F_Uint64Repeated - } - return nil -} - -func (m *GoTest) GetF_FloatRepeated() []float32 { - if m != nil { - return m.F_FloatRepeated - } - return nil -} - -func (m *GoTest) GetF_DoubleRepeated() []float64 { - if m != nil { - 
return m.F_DoubleRepeated - } - return nil -} - -func (m *GoTest) GetF_StringRepeated() []string { - if m != nil { - return m.F_StringRepeated - } - return nil -} - -func (m *GoTest) GetF_BytesRepeated() [][]byte { - if m != nil { - return m.F_BytesRepeated - } - return nil -} - -func (m *GoTest) GetF_Sint32Repeated() []int32 { - if m != nil { - return m.F_Sint32Repeated - } - return nil -} - -func (m *GoTest) GetF_Sint64Repeated() []int64 { - if m != nil { - return m.F_Sint64Repeated - } - return nil -} - -func (m *GoTest) GetF_BoolOptional() bool { - if m != nil && m.F_BoolOptional != nil { - return *m.F_BoolOptional - } - return false -} - -func (m *GoTest) GetF_Int32Optional() int32 { - if m != nil && m.F_Int32Optional != nil { - return *m.F_Int32Optional - } - return 0 -} - -func (m *GoTest) GetF_Int64Optional() int64 { - if m != nil && m.F_Int64Optional != nil { - return *m.F_Int64Optional - } - return 0 -} - -func (m *GoTest) GetF_Fixed32Optional() uint32 { - if m != nil && m.F_Fixed32Optional != nil { - return *m.F_Fixed32Optional - } - return 0 -} - -func (m *GoTest) GetF_Fixed64Optional() uint64 { - if m != nil && m.F_Fixed64Optional != nil { - return *m.F_Fixed64Optional - } - return 0 -} - -func (m *GoTest) GetF_Uint32Optional() uint32 { - if m != nil && m.F_Uint32Optional != nil { - return *m.F_Uint32Optional - } - return 0 -} - -func (m *GoTest) GetF_Uint64Optional() uint64 { - if m != nil && m.F_Uint64Optional != nil { - return *m.F_Uint64Optional - } - return 0 -} - -func (m *GoTest) GetF_FloatOptional() float32 { - if m != nil && m.F_FloatOptional != nil { - return *m.F_FloatOptional - } - return 0 -} - -func (m *GoTest) GetF_DoubleOptional() float64 { - if m != nil && m.F_DoubleOptional != nil { - return *m.F_DoubleOptional - } - return 0 -} - -func (m *GoTest) GetF_StringOptional() string { - if m != nil && m.F_StringOptional != nil { - return *m.F_StringOptional - } - return "" -} - -func (m *GoTest) GetF_BytesOptional() []byte { - if m != nil { - return m.F_BytesOptional - } - return nil -} - -func (m *GoTest) GetF_Sint32Optional() int32 { - if m != nil && m.F_Sint32Optional != nil { - return *m.F_Sint32Optional - } - return 0 -} - -func (m *GoTest) GetF_Sint64Optional() int64 { - if m != nil && m.F_Sint64Optional != nil { - return *m.F_Sint64Optional - } - return 0 -} - -func (m *GoTest) GetF_BoolDefaulted() bool { - if m != nil && m.F_BoolDefaulted != nil { - return *m.F_BoolDefaulted - } - return Default_GoTest_F_BoolDefaulted -} - -func (m *GoTest) GetF_Int32Defaulted() int32 { - if m != nil && m.F_Int32Defaulted != nil { - return *m.F_Int32Defaulted - } - return Default_GoTest_F_Int32Defaulted -} - -func (m *GoTest) GetF_Int64Defaulted() int64 { - if m != nil && m.F_Int64Defaulted != nil { - return *m.F_Int64Defaulted - } - return Default_GoTest_F_Int64Defaulted -} - -func (m *GoTest) GetF_Fixed32Defaulted() uint32 { - if m != nil && m.F_Fixed32Defaulted != nil { - return *m.F_Fixed32Defaulted - } - return Default_GoTest_F_Fixed32Defaulted -} - -func (m *GoTest) GetF_Fixed64Defaulted() uint64 { - if m != nil && m.F_Fixed64Defaulted != nil { - return *m.F_Fixed64Defaulted - } - return Default_GoTest_F_Fixed64Defaulted -} - -func (m *GoTest) GetF_Uint32Defaulted() uint32 { - if m != nil && m.F_Uint32Defaulted != nil { - return *m.F_Uint32Defaulted - } - return Default_GoTest_F_Uint32Defaulted -} - -func (m *GoTest) GetF_Uint64Defaulted() uint64 { - if m != nil && m.F_Uint64Defaulted != nil { - return *m.F_Uint64Defaulted - } - return Default_GoTest_F_Uint64Defaulted 
-} - -func (m *GoTest) GetF_FloatDefaulted() float32 { - if m != nil && m.F_FloatDefaulted != nil { - return *m.F_FloatDefaulted - } - return Default_GoTest_F_FloatDefaulted -} - -func (m *GoTest) GetF_DoubleDefaulted() float64 { - if m != nil && m.F_DoubleDefaulted != nil { - return *m.F_DoubleDefaulted - } - return Default_GoTest_F_DoubleDefaulted -} - -func (m *GoTest) GetF_StringDefaulted() string { - if m != nil && m.F_StringDefaulted != nil { - return *m.F_StringDefaulted - } - return Default_GoTest_F_StringDefaulted -} - -func (m *GoTest) GetF_BytesDefaulted() []byte { - if m != nil && m.F_BytesDefaulted != nil { - return m.F_BytesDefaulted - } - return append([]byte(nil), Default_GoTest_F_BytesDefaulted...) -} - -func (m *GoTest) GetF_Sint32Defaulted() int32 { - if m != nil && m.F_Sint32Defaulted != nil { - return *m.F_Sint32Defaulted - } - return Default_GoTest_F_Sint32Defaulted -} - -func (m *GoTest) GetF_Sint64Defaulted() int64 { - if m != nil && m.F_Sint64Defaulted != nil { - return *m.F_Sint64Defaulted - } - return Default_GoTest_F_Sint64Defaulted -} - -func (m *GoTest) GetF_BoolRepeatedPacked() []bool { - if m != nil { - return m.F_BoolRepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Int32RepeatedPacked() []int32 { - if m != nil { - return m.F_Int32RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Int64RepeatedPacked() []int64 { - if m != nil { - return m.F_Int64RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Fixed32RepeatedPacked() []uint32 { - if m != nil { - return m.F_Fixed32RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Fixed64RepeatedPacked() []uint64 { - if m != nil { - return m.F_Fixed64RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Uint32RepeatedPacked() []uint32 { - if m != nil { - return m.F_Uint32RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Uint64RepeatedPacked() []uint64 { - if m != nil { - return m.F_Uint64RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_FloatRepeatedPacked() []float32 { - if m != nil { - return m.F_FloatRepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_DoubleRepeatedPacked() []float64 { - if m != nil { - return m.F_DoubleRepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Sint32RepeatedPacked() []int32 { - if m != nil { - return m.F_Sint32RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Sint64RepeatedPacked() []int64 { - if m != nil { - return m.F_Sint64RepeatedPacked - } - return nil -} - -func (m *GoTest) GetRequiredgroup() *GoTest_RequiredGroup { - if m != nil { - return m.Requiredgroup - } - return nil -} - -func (m *GoTest) GetRepeatedgroup() []*GoTest_RepeatedGroup { - if m != nil { - return m.Repeatedgroup - } - return nil -} - -func (m *GoTest) GetOptionalgroup() *GoTest_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil -} - -// Required, repeated, and optional groups. 
-type GoTest_RequiredGroup struct { - RequiredField *string `protobuf:"bytes,71,req" json:"RequiredField,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoTest_RequiredGroup) Reset() { *m = GoTest_RequiredGroup{} } -func (m *GoTest_RequiredGroup) String() string { return proto.CompactTextString(m) } -func (*GoTest_RequiredGroup) ProtoMessage() {} - -func (m *GoTest_RequiredGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" -} - -type GoTest_RepeatedGroup struct { - RequiredField *string `protobuf:"bytes,81,req" json:"RequiredField,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoTest_RepeatedGroup) Reset() { *m = GoTest_RepeatedGroup{} } -func (m *GoTest_RepeatedGroup) String() string { return proto.CompactTextString(m) } -func (*GoTest_RepeatedGroup) ProtoMessage() {} - -func (m *GoTest_RepeatedGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" -} - -type GoTest_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,91,req" json:"RequiredField,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoTest_OptionalGroup) Reset() { *m = GoTest_OptionalGroup{} } -func (m *GoTest_OptionalGroup) String() string { return proto.CompactTextString(m) } -func (*GoTest_OptionalGroup) ProtoMessage() {} - -func (m *GoTest_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" -} - -// For testing skipping of unrecognized fields. -// Numbers are all big, larger than tag numbers in GoTestField, -// the message used in the corresponding test. -type GoSkipTest struct { - SkipInt32 *int32 `protobuf:"varint,11,req,name=skip_int32" json:"skip_int32,omitempty"` - SkipFixed32 *uint32 `protobuf:"fixed32,12,req,name=skip_fixed32" json:"skip_fixed32,omitempty"` - SkipFixed64 *uint64 `protobuf:"fixed64,13,req,name=skip_fixed64" json:"skip_fixed64,omitempty"` - SkipString *string `protobuf:"bytes,14,req,name=skip_string" json:"skip_string,omitempty"` - Skipgroup *GoSkipTest_SkipGroup `protobuf:"group,15,req,name=SkipGroup" json:"skipgroup,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoSkipTest) Reset() { *m = GoSkipTest{} } -func (m *GoSkipTest) String() string { return proto.CompactTextString(m) } -func (*GoSkipTest) ProtoMessage() {} - -func (m *GoSkipTest) GetSkipInt32() int32 { - if m != nil && m.SkipInt32 != nil { - return *m.SkipInt32 - } - return 0 -} - -func (m *GoSkipTest) GetSkipFixed32() uint32 { - if m != nil && m.SkipFixed32 != nil { - return *m.SkipFixed32 - } - return 0 -} - -func (m *GoSkipTest) GetSkipFixed64() uint64 { - if m != nil && m.SkipFixed64 != nil { - return *m.SkipFixed64 - } - return 0 -} - -func (m *GoSkipTest) GetSkipString() string { - if m != nil && m.SkipString != nil { - return *m.SkipString - } - return "" -} - -func (m *GoSkipTest) GetSkipgroup() *GoSkipTest_SkipGroup { - if m != nil { - return m.Skipgroup - } - return nil -} - -type GoSkipTest_SkipGroup struct { - GroupInt32 *int32 `protobuf:"varint,16,req,name=group_int32" json:"group_int32,omitempty"` - GroupString *string `protobuf:"bytes,17,req,name=group_string" json:"group_string,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoSkipTest_SkipGroup) Reset() { *m = GoSkipTest_SkipGroup{} } -func (m *GoSkipTest_SkipGroup) String() string { return proto.CompactTextString(m) } -func (*GoSkipTest_SkipGroup) ProtoMessage() {} - -func (m 
*GoSkipTest_SkipGroup) GetGroupInt32() int32 { - if m != nil && m.GroupInt32 != nil { - return *m.GroupInt32 - } - return 0 -} - -func (m *GoSkipTest_SkipGroup) GetGroupString() string { - if m != nil && m.GroupString != nil { - return *m.GroupString - } - return "" -} - -// For testing packed/non-packed decoder switching. -// A serialized instance of one should be deserializable as the other. -type NonPackedTest struct { - A []int32 `protobuf:"varint,1,rep,name=a" json:"a,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *NonPackedTest) Reset() { *m = NonPackedTest{} } -func (m *NonPackedTest) String() string { return proto.CompactTextString(m) } -func (*NonPackedTest) ProtoMessage() {} - -func (m *NonPackedTest) GetA() []int32 { - if m != nil { - return m.A - } - return nil -} - -type PackedTest struct { - B []int32 `protobuf:"varint,1,rep,packed,name=b" json:"b,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PackedTest) Reset() { *m = PackedTest{} } -func (m *PackedTest) String() string { return proto.CompactTextString(m) } -func (*PackedTest) ProtoMessage() {} - -func (m *PackedTest) GetB() []int32 { - if m != nil { - return m.B - } - return nil -} - -type MaxTag struct { - // Maximum possible tag number. - LastField *string `protobuf:"bytes,536870911,opt,name=last_field" json:"last_field,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MaxTag) Reset() { *m = MaxTag{} } -func (m *MaxTag) String() string { return proto.CompactTextString(m) } -func (*MaxTag) ProtoMessage() {} - -func (m *MaxTag) GetLastField() string { - if m != nil && m.LastField != nil { - return *m.LastField - } - return "" -} - -type OldMessage struct { - Nested *OldMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` - Num *int32 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OldMessage) Reset() { *m = OldMessage{} } -func (m *OldMessage) String() string { return proto.CompactTextString(m) } -func (*OldMessage) ProtoMessage() {} - -func (m *OldMessage) GetNested() *OldMessage_Nested { - if m != nil { - return m.Nested - } - return nil -} - -func (m *OldMessage) GetNum() int32 { - if m != nil && m.Num != nil { - return *m.Num - } - return 0 -} - -type OldMessage_Nested struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OldMessage_Nested) Reset() { *m = OldMessage_Nested{} } -func (m *OldMessage_Nested) String() string { return proto.CompactTextString(m) } -func (*OldMessage_Nested) ProtoMessage() {} - -func (m *OldMessage_Nested) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -// NewMessage is wire compatible with OldMessage; -// imagine it as a future version. -type NewMessage struct { - Nested *NewMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` - // This is an int32 in OldMessage. 
- Num *int64 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *NewMessage) Reset() { *m = NewMessage{} } -func (m *NewMessage) String() string { return proto.CompactTextString(m) } -func (*NewMessage) ProtoMessage() {} - -func (m *NewMessage) GetNested() *NewMessage_Nested { - if m != nil { - return m.Nested - } - return nil -} - -func (m *NewMessage) GetNum() int64 { - if m != nil && m.Num != nil { - return *m.Num - } - return 0 -} - -type NewMessage_Nested struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - FoodGroup *string `protobuf:"bytes,2,opt,name=food_group" json:"food_group,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *NewMessage_Nested) Reset() { *m = NewMessage_Nested{} } -func (m *NewMessage_Nested) String() string { return proto.CompactTextString(m) } -func (*NewMessage_Nested) ProtoMessage() {} - -func (m *NewMessage_Nested) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *NewMessage_Nested) GetFoodGroup() string { - if m != nil && m.FoodGroup != nil { - return *m.FoodGroup - } - return "" -} - -type InnerMessage struct { - Host *string `protobuf:"bytes,1,req,name=host" json:"host,omitempty"` - Port *int32 `protobuf:"varint,2,opt,name=port,def=4000" json:"port,omitempty"` - Connected *bool `protobuf:"varint,3,opt,name=connected" json:"connected,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *InnerMessage) Reset() { *m = InnerMessage{} } -func (m *InnerMessage) String() string { return proto.CompactTextString(m) } -func (*InnerMessage) ProtoMessage() {} - -const Default_InnerMessage_Port int32 = 4000 - -func (m *InnerMessage) GetHost() string { - if m != nil && m.Host != nil { - return *m.Host - } - return "" -} - -func (m *InnerMessage) GetPort() int32 { - if m != nil && m.Port != nil { - return *m.Port - } - return Default_InnerMessage_Port -} - -func (m *InnerMessage) GetConnected() bool { - if m != nil && m.Connected != nil { - return *m.Connected - } - return false -} - -type OtherMessage struct { - Key *int64 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` - Weight *float32 `protobuf:"fixed32,3,opt,name=weight" json:"weight,omitempty"` - Inner *InnerMessage `protobuf:"bytes,4,opt,name=inner" json:"inner,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OtherMessage) Reset() { *m = OtherMessage{} } -func (m *OtherMessage) String() string { return proto.CompactTextString(m) } -func (*OtherMessage) ProtoMessage() {} - -func (m *OtherMessage) GetKey() int64 { - if m != nil && m.Key != nil { - return *m.Key - } - return 0 -} - -func (m *OtherMessage) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -func (m *OtherMessage) GetWeight() float32 { - if m != nil && m.Weight != nil { - return *m.Weight - } - return 0 -} - -func (m *OtherMessage) GetInner() *InnerMessage { - if m != nil { - return m.Inner - } - return nil -} - -type MyMessage struct { - Count *int32 `protobuf:"varint,1,req,name=count" json:"count,omitempty"` - Name *string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` - Quote *string `protobuf:"bytes,3,opt,name=quote" json:"quote,omitempty"` - Pet []string `protobuf:"bytes,4,rep,name=pet" json:"pet,omitempty"` - Inner *InnerMessage `protobuf:"bytes,5,opt,name=inner" json:"inner,omitempty"` - Others []*OtherMessage `protobuf:"bytes,6,rep,name=others" 
json:"others,omitempty"` - RepInner []*InnerMessage `protobuf:"bytes,12,rep,name=rep_inner" json:"rep_inner,omitempty"` - Bikeshed *MyMessage_Color `protobuf:"varint,7,opt,name=bikeshed,enum=testdata.MyMessage_Color" json:"bikeshed,omitempty"` - Somegroup *MyMessage_SomeGroup `protobuf:"group,8,opt,name=SomeGroup" json:"somegroup,omitempty"` - // This field becomes [][]byte in the generated code. - RepBytes [][]byte `protobuf:"bytes,10,rep,name=rep_bytes" json:"rep_bytes,omitempty"` - Bigfloat *float64 `protobuf:"fixed64,11,opt,name=bigfloat" json:"bigfloat,omitempty"` - XXX_extensions map[int32]proto.Extension `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MyMessage) Reset() { *m = MyMessage{} } -func (m *MyMessage) String() string { return proto.CompactTextString(m) } -func (*MyMessage) ProtoMessage() {} - -var extRange_MyMessage = []proto.ExtensionRange{ - {100, 536870911}, -} - -func (*MyMessage) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_MyMessage -} -func (m *MyMessage) ExtensionMap() map[int32]proto.Extension { - if m.XXX_extensions == nil { - m.XXX_extensions = make(map[int32]proto.Extension) - } - return m.XXX_extensions -} - -func (m *MyMessage) GetCount() int32 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -func (m *MyMessage) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *MyMessage) GetQuote() string { - if m != nil && m.Quote != nil { - return *m.Quote - } - return "" -} - -func (m *MyMessage) GetPet() []string { - if m != nil { - return m.Pet - } - return nil -} - -func (m *MyMessage) GetInner() *InnerMessage { - if m != nil { - return m.Inner - } - return nil -} - -func (m *MyMessage) GetOthers() []*OtherMessage { - if m != nil { - return m.Others - } - return nil -} - -func (m *MyMessage) GetRepInner() []*InnerMessage { - if m != nil { - return m.RepInner - } - return nil -} - -func (m *MyMessage) GetBikeshed() MyMessage_Color { - if m != nil && m.Bikeshed != nil { - return *m.Bikeshed - } - return MyMessage_RED -} - -func (m *MyMessage) GetSomegroup() *MyMessage_SomeGroup { - if m != nil { - return m.Somegroup - } - return nil -} - -func (m *MyMessage) GetRepBytes() [][]byte { - if m != nil { - return m.RepBytes - } - return nil -} - -func (m *MyMessage) GetBigfloat() float64 { - if m != nil && m.Bigfloat != nil { - return *m.Bigfloat - } - return 0 -} - -type MyMessage_SomeGroup struct { - GroupField *int32 `protobuf:"varint,9,opt,name=group_field" json:"group_field,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MyMessage_SomeGroup) Reset() { *m = MyMessage_SomeGroup{} } -func (m *MyMessage_SomeGroup) String() string { return proto.CompactTextString(m) } -func (*MyMessage_SomeGroup) ProtoMessage() {} - -func (m *MyMessage_SomeGroup) GetGroupField() int32 { - if m != nil && m.GroupField != nil { - return *m.GroupField - } - return 0 -} - -type Ext struct { - Data *string `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Ext) Reset() { *m = Ext{} } -func (m *Ext) String() string { return proto.CompactTextString(m) } -func (*Ext) ProtoMessage() {} - -func (m *Ext) GetData() string { - if m != nil && m.Data != nil { - return *m.Data - } - return "" -} - -var E_Ext_More = &proto.ExtensionDesc{ - ExtendedType: (*MyMessage)(nil), - ExtensionType: (*Ext)(nil), - Field: 103, - Name: "testdata.Ext.more", - Tag: "bytes,103,opt,name=more", -} - -var E_Ext_Text = &proto.ExtensionDesc{ - 
ExtendedType: (*MyMessage)(nil), - ExtensionType: (*string)(nil), - Field: 104, - Name: "testdata.Ext.text", - Tag: "bytes,104,opt,name=text", -} - -var E_Ext_Number = &proto.ExtensionDesc{ - ExtendedType: (*MyMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 105, - Name: "testdata.Ext.number", - Tag: "varint,105,opt,name=number", -} - -type MyMessageSet struct { - XXX_extensions map[int32]proto.Extension `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MyMessageSet) Reset() { *m = MyMessageSet{} } -func (m *MyMessageSet) String() string { return proto.CompactTextString(m) } -func (*MyMessageSet) ProtoMessage() {} - -func (m *MyMessageSet) Marshal() ([]byte, error) { - return proto.MarshalMessageSet(m.ExtensionMap()) -} -func (m *MyMessageSet) Unmarshal(buf []byte) error { - return proto.UnmarshalMessageSet(buf, m.ExtensionMap()) -} -func (m *MyMessageSet) MarshalJSON() ([]byte, error) { - return proto.MarshalMessageSetJSON(m.XXX_extensions) -} -func (m *MyMessageSet) UnmarshalJSON(buf []byte) error { - return proto.UnmarshalMessageSetJSON(buf, m.XXX_extensions) -} - -// ensure MyMessageSet satisfies proto.Marshaler and proto.Unmarshaler -var _ proto.Marshaler = (*MyMessageSet)(nil) -var _ proto.Unmarshaler = (*MyMessageSet)(nil) - -var extRange_MyMessageSet = []proto.ExtensionRange{ - {100, 2147483646}, -} - -func (*MyMessageSet) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_MyMessageSet -} -func (m *MyMessageSet) ExtensionMap() map[int32]proto.Extension { - if m.XXX_extensions == nil { - m.XXX_extensions = make(map[int32]proto.Extension) - } - return m.XXX_extensions -} - -type Empty struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *Empty) Reset() { *m = Empty{} } -func (m *Empty) String() string { return proto.CompactTextString(m) } -func (*Empty) ProtoMessage() {} - -type MessageList struct { - Message []*MessageList_Message `protobuf:"group,1,rep" json:"message,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MessageList) Reset() { *m = MessageList{} } -func (m *MessageList) String() string { return proto.CompactTextString(m) } -func (*MessageList) ProtoMessage() {} - -func (m *MessageList) GetMessage() []*MessageList_Message { - if m != nil { - return m.Message - } - return nil -} - -type MessageList_Message struct { - Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"` - Count *int32 `protobuf:"varint,3,req,name=count" json:"count,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MessageList_Message) Reset() { *m = MessageList_Message{} } -func (m *MessageList_Message) String() string { return proto.CompactTextString(m) } -func (*MessageList_Message) ProtoMessage() {} - -func (m *MessageList_Message) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *MessageList_Message) GetCount() int32 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -type Strings struct { - StringField *string `protobuf:"bytes,1,opt,name=string_field" json:"string_field,omitempty"` - BytesField []byte `protobuf:"bytes,2,opt,name=bytes_field" json:"bytes_field,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Strings) Reset() { *m = Strings{} } -func (m *Strings) String() string { return proto.CompactTextString(m) } -func (*Strings) ProtoMessage() {} - -func (m *Strings) GetStringField() string { - if m != nil && m.StringField != nil { - return *m.StringField - } - return "" -} - -func (m *Strings) GetBytesField() []byte { - if m != 
nil { - return m.BytesField - } - return nil -} - -type Defaults struct { - // Default-valued fields of all basic types. - // Same as GoTest, but copied here to make testing easier. - F_Bool *bool `protobuf:"varint,1,opt,def=1" json:"F_Bool,omitempty"` - F_Int32 *int32 `protobuf:"varint,2,opt,def=32" json:"F_Int32,omitempty"` - F_Int64 *int64 `protobuf:"varint,3,opt,def=64" json:"F_Int64,omitempty"` - F_Fixed32 *uint32 `protobuf:"fixed32,4,opt,def=320" json:"F_Fixed32,omitempty"` - F_Fixed64 *uint64 `protobuf:"fixed64,5,opt,def=640" json:"F_Fixed64,omitempty"` - F_Uint32 *uint32 `protobuf:"varint,6,opt,def=3200" json:"F_Uint32,omitempty"` - F_Uint64 *uint64 `protobuf:"varint,7,opt,def=6400" json:"F_Uint64,omitempty"` - F_Float *float32 `protobuf:"fixed32,8,opt,def=314159" json:"F_Float,omitempty"` - F_Double *float64 `protobuf:"fixed64,9,opt,def=271828" json:"F_Double,omitempty"` - F_String *string `protobuf:"bytes,10,opt,def=hello, \"world!\"\n" json:"F_String,omitempty"` - F_Bytes []byte `protobuf:"bytes,11,opt,def=Bignose" json:"F_Bytes,omitempty"` - F_Sint32 *int32 `protobuf:"zigzag32,12,opt,def=-32" json:"F_Sint32,omitempty"` - F_Sint64 *int64 `protobuf:"zigzag64,13,opt,def=-64" json:"F_Sint64,omitempty"` - F_Enum *Defaults_Color `protobuf:"varint,14,opt,enum=testdata.Defaults_Color,def=1" json:"F_Enum,omitempty"` - // More fields with crazy defaults. - F_Pinf *float32 `protobuf:"fixed32,15,opt,def=inf" json:"F_Pinf,omitempty"` - F_Ninf *float32 `protobuf:"fixed32,16,opt,def=-inf" json:"F_Ninf,omitempty"` - F_Nan *float32 `protobuf:"fixed32,17,opt,def=nan" json:"F_Nan,omitempty"` - // Sub-message. - Sub *SubDefaults `protobuf:"bytes,18,opt,name=sub" json:"sub,omitempty"` - // Redundant but explicit defaults. - StrZero *string `protobuf:"bytes,19,opt,name=str_zero,def=" json:"str_zero,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Defaults) Reset() { *m = Defaults{} } -func (m *Defaults) String() string { return proto.CompactTextString(m) } -func (*Defaults) ProtoMessage() {} - -const Default_Defaults_F_Bool bool = true -const Default_Defaults_F_Int32 int32 = 32 -const Default_Defaults_F_Int64 int64 = 64 -const Default_Defaults_F_Fixed32 uint32 = 320 -const Default_Defaults_F_Fixed64 uint64 = 640 -const Default_Defaults_F_Uint32 uint32 = 3200 -const Default_Defaults_F_Uint64 uint64 = 6400 -const Default_Defaults_F_Float float32 = 314159 -const Default_Defaults_F_Double float64 = 271828 -const Default_Defaults_F_String string = "hello, \"world!\"\n" - -var Default_Defaults_F_Bytes []byte = []byte("Bignose") - -const Default_Defaults_F_Sint32 int32 = -32 -const Default_Defaults_F_Sint64 int64 = -64 -const Default_Defaults_F_Enum Defaults_Color = Defaults_GREEN - -var Default_Defaults_F_Pinf float32 = float32(math.Inf(1)) -var Default_Defaults_F_Ninf float32 = float32(math.Inf(-1)) -var Default_Defaults_F_Nan float32 = float32(math.NaN()) - -func (m *Defaults) GetF_Bool() bool { - if m != nil && m.F_Bool != nil { - return *m.F_Bool - } - return Default_Defaults_F_Bool -} - -func (m *Defaults) GetF_Int32() int32 { - if m != nil && m.F_Int32 != nil { - return *m.F_Int32 - } - return Default_Defaults_F_Int32 -} - -func (m *Defaults) GetF_Int64() int64 { - if m != nil && m.F_Int64 != nil { - return *m.F_Int64 - } - return Default_Defaults_F_Int64 -} - -func (m *Defaults) GetF_Fixed32() uint32 { - if m != nil && m.F_Fixed32 != nil { - return *m.F_Fixed32 - } - return Default_Defaults_F_Fixed32 -} - -func (m *Defaults) GetF_Fixed64() uint64 { - if m != nil && m.F_Fixed64 != nil 
{ - return *m.F_Fixed64 - } - return Default_Defaults_F_Fixed64 -} - -func (m *Defaults) GetF_Uint32() uint32 { - if m != nil && m.F_Uint32 != nil { - return *m.F_Uint32 - } - return Default_Defaults_F_Uint32 -} - -func (m *Defaults) GetF_Uint64() uint64 { - if m != nil && m.F_Uint64 != nil { - return *m.F_Uint64 - } - return Default_Defaults_F_Uint64 -} - -func (m *Defaults) GetF_Float() float32 { - if m != nil && m.F_Float != nil { - return *m.F_Float - } - return Default_Defaults_F_Float -} - -func (m *Defaults) GetF_Double() float64 { - if m != nil && m.F_Double != nil { - return *m.F_Double - } - return Default_Defaults_F_Double -} - -func (m *Defaults) GetF_String() string { - if m != nil && m.F_String != nil { - return *m.F_String - } - return Default_Defaults_F_String -} - -func (m *Defaults) GetF_Bytes() []byte { - if m != nil && m.F_Bytes != nil { - return m.F_Bytes - } - return append([]byte(nil), Default_Defaults_F_Bytes...) -} - -func (m *Defaults) GetF_Sint32() int32 { - if m != nil && m.F_Sint32 != nil { - return *m.F_Sint32 - } - return Default_Defaults_F_Sint32 -} - -func (m *Defaults) GetF_Sint64() int64 { - if m != nil && m.F_Sint64 != nil { - return *m.F_Sint64 - } - return Default_Defaults_F_Sint64 -} - -func (m *Defaults) GetF_Enum() Defaults_Color { - if m != nil && m.F_Enum != nil { - return *m.F_Enum - } - return Default_Defaults_F_Enum -} - -func (m *Defaults) GetF_Pinf() float32 { - if m != nil && m.F_Pinf != nil { - return *m.F_Pinf - } - return Default_Defaults_F_Pinf -} - -func (m *Defaults) GetF_Ninf() float32 { - if m != nil && m.F_Ninf != nil { - return *m.F_Ninf - } - return Default_Defaults_F_Ninf -} - -func (m *Defaults) GetF_Nan() float32 { - if m != nil && m.F_Nan != nil { - return *m.F_Nan - } - return Default_Defaults_F_Nan -} - -func (m *Defaults) GetSub() *SubDefaults { - if m != nil { - return m.Sub - } - return nil -} - -func (m *Defaults) GetStrZero() string { - if m != nil && m.StrZero != nil { - return *m.StrZero - } - return "" -} - -type SubDefaults struct { - N *int64 `protobuf:"varint,1,opt,name=n,def=7" json:"n,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SubDefaults) Reset() { *m = SubDefaults{} } -func (m *SubDefaults) String() string { return proto.CompactTextString(m) } -func (*SubDefaults) ProtoMessage() {} - -const Default_SubDefaults_N int64 = 7 - -func (m *SubDefaults) GetN() int64 { - if m != nil && m.N != nil { - return *m.N - } - return Default_SubDefaults_N -} - -type RepeatedEnum struct { - Color []RepeatedEnum_Color `protobuf:"varint,1,rep,name=color,enum=testdata.RepeatedEnum_Color" json:"color,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RepeatedEnum) Reset() { *m = RepeatedEnum{} } -func (m *RepeatedEnum) String() string { return proto.CompactTextString(m) } -func (*RepeatedEnum) ProtoMessage() {} - -func (m *RepeatedEnum) GetColor() []RepeatedEnum_Color { - if m != nil { - return m.Color - } - return nil -} - -type MoreRepeated struct { - Bools []bool `protobuf:"varint,1,rep,name=bools" json:"bools,omitempty"` - BoolsPacked []bool `protobuf:"varint,2,rep,packed,name=bools_packed" json:"bools_packed,omitempty"` - Ints []int32 `protobuf:"varint,3,rep,name=ints" json:"ints,omitempty"` - IntsPacked []int32 `protobuf:"varint,4,rep,packed,name=ints_packed" json:"ints_packed,omitempty"` - Int64SPacked []int64 `protobuf:"varint,7,rep,packed,name=int64s_packed" json:"int64s_packed,omitempty"` - Strings []string `protobuf:"bytes,5,rep,name=strings" json:"strings,omitempty"` - Fixeds []uint32 
`protobuf:"fixed32,6,rep,name=fixeds" json:"fixeds,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MoreRepeated) Reset() { *m = MoreRepeated{} } -func (m *MoreRepeated) String() string { return proto.CompactTextString(m) } -func (*MoreRepeated) ProtoMessage() {} - -func (m *MoreRepeated) GetBools() []bool { - if m != nil { - return m.Bools - } - return nil -} - -func (m *MoreRepeated) GetBoolsPacked() []bool { - if m != nil { - return m.BoolsPacked - } - return nil -} - -func (m *MoreRepeated) GetInts() []int32 { - if m != nil { - return m.Ints - } - return nil -} - -func (m *MoreRepeated) GetIntsPacked() []int32 { - if m != nil { - return m.IntsPacked - } - return nil -} - -func (m *MoreRepeated) GetInt64SPacked() []int64 { - if m != nil { - return m.Int64SPacked - } - return nil -} - -func (m *MoreRepeated) GetStrings() []string { - if m != nil { - return m.Strings - } - return nil -} - -func (m *MoreRepeated) GetFixeds() []uint32 { - if m != nil { - return m.Fixeds - } - return nil -} - -type GroupOld struct { - G *GroupOld_G `protobuf:"group,101,opt" json:"g,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupOld) Reset() { *m = GroupOld{} } -func (m *GroupOld) String() string { return proto.CompactTextString(m) } -func (*GroupOld) ProtoMessage() {} - -func (m *GroupOld) GetG() *GroupOld_G { - if m != nil { - return m.G - } - return nil -} - -type GroupOld_G struct { - X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupOld_G) Reset() { *m = GroupOld_G{} } -func (m *GroupOld_G) String() string { return proto.CompactTextString(m) } -func (*GroupOld_G) ProtoMessage() {} - -func (m *GroupOld_G) GetX() int32 { - if m != nil && m.X != nil { - return *m.X - } - return 0 -} - -type GroupNew struct { - G *GroupNew_G `protobuf:"group,101,opt" json:"g,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupNew) Reset() { *m = GroupNew{} } -func (m *GroupNew) String() string { return proto.CompactTextString(m) } -func (*GroupNew) ProtoMessage() {} - -func (m *GroupNew) GetG() *GroupNew_G { - if m != nil { - return m.G - } - return nil -} - -type GroupNew_G struct { - X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` - Y *int32 `protobuf:"varint,3,opt,name=y" json:"y,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupNew_G) Reset() { *m = GroupNew_G{} } -func (m *GroupNew_G) String() string { return proto.CompactTextString(m) } -func (*GroupNew_G) ProtoMessage() {} - -func (m *GroupNew_G) GetX() int32 { - if m != nil && m.X != nil { - return *m.X - } - return 0 -} - -func (m *GroupNew_G) GetY() int32 { - if m != nil && m.Y != nil { - return *m.Y - } - return 0 -} - -type FloatingPoint struct { - F *float64 `protobuf:"fixed64,1,req,name=f" json:"f,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *FloatingPoint) Reset() { *m = FloatingPoint{} } -func (m *FloatingPoint) String() string { return proto.CompactTextString(m) } -func (*FloatingPoint) ProtoMessage() {} - -func (m *FloatingPoint) GetF() float64 { - if m != nil && m.F != nil { - return *m.F - } - return 0 -} - -type MessageWithMap struct { - NameMapping map[int32]string `protobuf:"bytes,1,rep,name=name_mapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - MsgMapping map[int64]*FloatingPoint `protobuf:"bytes,2,rep,name=msg_mapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" 
protobuf_val:"bytes,2,opt,name=value"` - ByteMapping map[bool][]byte `protobuf:"bytes,3,rep,name=byte_mapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } -func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } -func (*MessageWithMap) ProtoMessage() {} - -func (m *MessageWithMap) GetNameMapping() map[int32]string { - if m != nil { - return m.NameMapping - } - return nil -} - -func (m *MessageWithMap) GetMsgMapping() map[int64]*FloatingPoint { - if m != nil { - return m.MsgMapping - } - return nil -} - -func (m *MessageWithMap) GetByteMapping() map[bool][]byte { - if m != nil { - return m.ByteMapping - } - return nil -} - -var E_Greeting = &proto.ExtensionDesc{ - ExtendedType: (*MyMessage)(nil), - ExtensionType: ([]string)(nil), - Field: 106, - Name: "testdata.greeting", - Tag: "bytes,106,rep,name=greeting", -} - -var E_X201 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 201, - Name: "testdata.x201", - Tag: "bytes,201,opt,name=x201", -} - -var E_X202 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 202, - Name: "testdata.x202", - Tag: "bytes,202,opt,name=x202", -} - -var E_X203 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 203, - Name: "testdata.x203", - Tag: "bytes,203,opt,name=x203", -} - -var E_X204 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 204, - Name: "testdata.x204", - Tag: "bytes,204,opt,name=x204", -} - -var E_X205 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 205, - Name: "testdata.x205", - Tag: "bytes,205,opt,name=x205", -} - -var E_X206 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 206, - Name: "testdata.x206", - Tag: "bytes,206,opt,name=x206", -} - -var E_X207 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 207, - Name: "testdata.x207", - Tag: "bytes,207,opt,name=x207", -} - -var E_X208 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 208, - Name: "testdata.x208", - Tag: "bytes,208,opt,name=x208", -} - -var E_X209 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 209, - Name: "testdata.x209", - Tag: "bytes,209,opt,name=x209", -} - -var E_X210 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 210, - Name: "testdata.x210", - Tag: "bytes,210,opt,name=x210", -} - -var E_X211 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 211, - Name: "testdata.x211", - Tag: "bytes,211,opt,name=x211", -} - -var E_X212 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 212, - Name: "testdata.x212", - Tag: "bytes,212,opt,name=x212", -} - -var E_X213 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 213, - Name: "testdata.x213", - Tag: "bytes,213,opt,name=x213", -} - -var E_X214 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 214, - Name: "testdata.x214", - 
Tag: "bytes,214,opt,name=x214", -} - -var E_X215 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 215, - Name: "testdata.x215", - Tag: "bytes,215,opt,name=x215", -} - -var E_X216 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 216, - Name: "testdata.x216", - Tag: "bytes,216,opt,name=x216", -} - -var E_X217 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 217, - Name: "testdata.x217", - Tag: "bytes,217,opt,name=x217", -} - -var E_X218 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 218, - Name: "testdata.x218", - Tag: "bytes,218,opt,name=x218", -} - -var E_X219 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 219, - Name: "testdata.x219", - Tag: "bytes,219,opt,name=x219", -} - -var E_X220 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 220, - Name: "testdata.x220", - Tag: "bytes,220,opt,name=x220", -} - -var E_X221 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 221, - Name: "testdata.x221", - Tag: "bytes,221,opt,name=x221", -} - -var E_X222 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 222, - Name: "testdata.x222", - Tag: "bytes,222,opt,name=x222", -} - -var E_X223 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 223, - Name: "testdata.x223", - Tag: "bytes,223,opt,name=x223", -} - -var E_X224 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 224, - Name: "testdata.x224", - Tag: "bytes,224,opt,name=x224", -} - -var E_X225 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 225, - Name: "testdata.x225", - Tag: "bytes,225,opt,name=x225", -} - -var E_X226 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 226, - Name: "testdata.x226", - Tag: "bytes,226,opt,name=x226", -} - -var E_X227 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 227, - Name: "testdata.x227", - Tag: "bytes,227,opt,name=x227", -} - -var E_X228 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 228, - Name: "testdata.x228", - Tag: "bytes,228,opt,name=x228", -} - -var E_X229 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 229, - Name: "testdata.x229", - Tag: "bytes,229,opt,name=x229", -} - -var E_X230 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 230, - Name: "testdata.x230", - Tag: "bytes,230,opt,name=x230", -} - -var E_X231 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 231, - Name: "testdata.x231", - Tag: "bytes,231,opt,name=x231", -} - -var E_X232 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 232, - Name: "testdata.x232", - Tag: "bytes,232,opt,name=x232", -} - -var E_X233 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 233, - Name: "testdata.x233", - Tag: "bytes,233,opt,name=x233", -} - -var E_X234 = 
&proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 234, - Name: "testdata.x234", - Tag: "bytes,234,opt,name=x234", -} - -var E_X235 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 235, - Name: "testdata.x235", - Tag: "bytes,235,opt,name=x235", -} - -var E_X236 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 236, - Name: "testdata.x236", - Tag: "bytes,236,opt,name=x236", -} - -var E_X237 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 237, - Name: "testdata.x237", - Tag: "bytes,237,opt,name=x237", -} - -var E_X238 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 238, - Name: "testdata.x238", - Tag: "bytes,238,opt,name=x238", -} - -var E_X239 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 239, - Name: "testdata.x239", - Tag: "bytes,239,opt,name=x239", -} - -var E_X240 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 240, - Name: "testdata.x240", - Tag: "bytes,240,opt,name=x240", -} - -var E_X241 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 241, - Name: "testdata.x241", - Tag: "bytes,241,opt,name=x241", -} - -var E_X242 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 242, - Name: "testdata.x242", - Tag: "bytes,242,opt,name=x242", -} - -var E_X243 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 243, - Name: "testdata.x243", - Tag: "bytes,243,opt,name=x243", -} - -var E_X244 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 244, - Name: "testdata.x244", - Tag: "bytes,244,opt,name=x244", -} - -var E_X245 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 245, - Name: "testdata.x245", - Tag: "bytes,245,opt,name=x245", -} - -var E_X246 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 246, - Name: "testdata.x246", - Tag: "bytes,246,opt,name=x246", -} - -var E_X247 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 247, - Name: "testdata.x247", - Tag: "bytes,247,opt,name=x247", -} - -var E_X248 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 248, - Name: "testdata.x248", - Tag: "bytes,248,opt,name=x248", -} - -var E_X249 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 249, - Name: "testdata.x249", - Tag: "bytes,249,opt,name=x249", -} - -var E_X250 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 250, - Name: "testdata.x250", - Tag: "bytes,250,opt,name=x250", -} - -func init() { - proto.RegisterEnum("testdata.FOO", FOO_name, FOO_value) - proto.RegisterEnum("testdata.GoTest_KIND", GoTest_KIND_name, GoTest_KIND_value) - proto.RegisterEnum("testdata.MyMessage_Color", MyMessage_Color_name, MyMessage_Color_value) - proto.RegisterEnum("testdata.Defaults_Color", Defaults_Color_name, Defaults_Color_value) - proto.RegisterEnum("testdata.RepeatedEnum_Color", RepeatedEnum_Color_name, 
RepeatedEnum_Color_value) - proto.RegisterExtension(E_Ext_More) - proto.RegisterExtension(E_Ext_Text) - proto.RegisterExtension(E_Ext_Number) - proto.RegisterExtension(E_Greeting) - proto.RegisterExtension(E_X201) - proto.RegisterExtension(E_X202) - proto.RegisterExtension(E_X203) - proto.RegisterExtension(E_X204) - proto.RegisterExtension(E_X205) - proto.RegisterExtension(E_X206) - proto.RegisterExtension(E_X207) - proto.RegisterExtension(E_X208) - proto.RegisterExtension(E_X209) - proto.RegisterExtension(E_X210) - proto.RegisterExtension(E_X211) - proto.RegisterExtension(E_X212) - proto.RegisterExtension(E_X213) - proto.RegisterExtension(E_X214) - proto.RegisterExtension(E_X215) - proto.RegisterExtension(E_X216) - proto.RegisterExtension(E_X217) - proto.RegisterExtension(E_X218) - proto.RegisterExtension(E_X219) - proto.RegisterExtension(E_X220) - proto.RegisterExtension(E_X221) - proto.RegisterExtension(E_X222) - proto.RegisterExtension(E_X223) - proto.RegisterExtension(E_X224) - proto.RegisterExtension(E_X225) - proto.RegisterExtension(E_X226) - proto.RegisterExtension(E_X227) - proto.RegisterExtension(E_X228) - proto.RegisterExtension(E_X229) - proto.RegisterExtension(E_X230) - proto.RegisterExtension(E_X231) - proto.RegisterExtension(E_X232) - proto.RegisterExtension(E_X233) - proto.RegisterExtension(E_X234) - proto.RegisterExtension(E_X235) - proto.RegisterExtension(E_X236) - proto.RegisterExtension(E_X237) - proto.RegisterExtension(E_X238) - proto.RegisterExtension(E_X239) - proto.RegisterExtension(E_X240) - proto.RegisterExtension(E_X241) - proto.RegisterExtension(E_X242) - proto.RegisterExtension(E_X243) - proto.RegisterExtension(E_X244) - proto.RegisterExtension(E_X245) - proto.RegisterExtension(E_X246) - proto.RegisterExtension(E_X247) - proto.RegisterExtension(E_X248) - proto.RegisterExtension(E_X249) - proto.RegisterExtension(E_X250) -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.proto b/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.proto deleted file mode 100644 index f413ad7f23..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.proto +++ /dev/null @@ -1,434 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// A feature-rich test file for the protocol compiler and libraries. - -syntax = "proto2"; - -package test_proto; - -enum FOO { FOO1 = 1; }; - -message GoEnum { - required FOO foo = 1; -} - -message GoTestField { - required string Label = 1; - required string Type = 2; -} - -message GoTest { - // An enum, for completeness. - enum KIND { - VOID = 0; - - // Basic types - BOOL = 1; - BYTES = 2; - FINGERPRINT = 3; - FLOAT = 4; - INT = 5; - STRING = 6; - TIME = 7; - - // Groupings - TUPLE = 8; - ARRAY = 9; - MAP = 10; - - // Table types - TABLE = 11; - - // Functions - FUNCTION = 12; // last tag - }; - - // Some typical parameters - required KIND Kind = 1; - optional string Table = 2; - optional int32 Param = 3; - - // Required, repeated and optional foreign fields. - required GoTestField RequiredField = 4; - repeated GoTestField RepeatedField = 5; - optional GoTestField OptionalField = 6; - - // Required fields of all basic types - required bool F_Bool_required = 10; - required int32 F_Int32_required = 11; - required int64 F_Int64_required = 12; - required fixed32 F_Fixed32_required = 13; - required fixed64 F_Fixed64_required = 14; - required uint32 F_Uint32_required = 15; - required uint64 F_Uint64_required = 16; - required float F_Float_required = 17; - required double F_Double_required = 18; - required string F_String_required = 19; - required bytes F_Bytes_required = 101; - required sint32 F_Sint32_required = 102; - required sint64 F_Sint64_required = 103; - - // Repeated fields of all basic types - repeated bool F_Bool_repeated = 20; - repeated int32 F_Int32_repeated = 21; - repeated int64 F_Int64_repeated = 22; - repeated fixed32 F_Fixed32_repeated = 23; - repeated fixed64 F_Fixed64_repeated = 24; - repeated uint32 F_Uint32_repeated = 25; - repeated uint64 F_Uint64_repeated = 26; - repeated float F_Float_repeated = 27; - repeated double F_Double_repeated = 28; - repeated string F_String_repeated = 29; - repeated bytes F_Bytes_repeated = 201; - repeated sint32 F_Sint32_repeated = 202; - repeated sint64 F_Sint64_repeated = 203; - - // Optional fields of all basic types - optional bool F_Bool_optional = 30; - optional int32 F_Int32_optional = 31; - optional int64 F_Int64_optional = 32; - optional fixed32 F_Fixed32_optional = 33; - optional fixed64 F_Fixed64_optional = 34; - optional uint32 F_Uint32_optional = 35; - optional uint64 F_Uint64_optional = 36; - optional float F_Float_optional = 37; - optional double F_Double_optional = 38; - optional string F_String_optional = 39; - optional bytes F_Bytes_optional = 301; - optional sint32 F_Sint32_optional = 302; - optional sint64 F_Sint64_optional = 303; - - // Default-valued fields of all basic types - optional bool F_Bool_defaulted = 40 [default=true]; - optional int32 F_Int32_defaulted = 41 [default=32]; - optional int64 F_Int64_defaulted = 42 [default=64]; - optional fixed32 F_Fixed32_defaulted = 43 [default=320]; - optional fixed64 F_Fixed64_defaulted = 44 [default=640]; - optional uint32 
F_Uint32_defaulted = 45 [default=3200]; - optional uint64 F_Uint64_defaulted = 46 [default=6400]; - optional float F_Float_defaulted = 47 [default=314159.]; - optional double F_Double_defaulted = 48 [default=271828.]; - optional string F_String_defaulted = 49 [default="hello, \"world!\"\n"]; - optional bytes F_Bytes_defaulted = 401 [default="Bignose"]; - optional sint32 F_Sint32_defaulted = 402 [default = -32]; - optional sint64 F_Sint64_defaulted = 403 [default = -64]; - - // Packed repeated fields (no string or bytes). - repeated bool F_Bool_repeated_packed = 50 [packed=true]; - repeated int32 F_Int32_repeated_packed = 51 [packed=true]; - repeated int64 F_Int64_repeated_packed = 52 [packed=true]; - repeated fixed32 F_Fixed32_repeated_packed = 53 [packed=true]; - repeated fixed64 F_Fixed64_repeated_packed = 54 [packed=true]; - repeated uint32 F_Uint32_repeated_packed = 55 [packed=true]; - repeated uint64 F_Uint64_repeated_packed = 56 [packed=true]; - repeated float F_Float_repeated_packed = 57 [packed=true]; - repeated double F_Double_repeated_packed = 58 [packed=true]; - repeated sint32 F_Sint32_repeated_packed = 502 [packed=true]; - repeated sint64 F_Sint64_repeated_packed = 503 [packed=true]; - - // Required, repeated, and optional groups. - required group RequiredGroup = 70 { - required string RequiredField = 71; - }; - - repeated group RepeatedGroup = 80 { - required string RequiredField = 81; - }; - - optional group OptionalGroup = 90 { - required string RequiredField = 91; - }; -} - -// For testing skipping of unrecognized fields. -// Numbers are all big, larger than tag numbers in GoTestField, -// the message used in the corresponding test. -message GoSkipTest { - required int32 skip_int32 = 11; - required fixed32 skip_fixed32 = 12; - required fixed64 skip_fixed64 = 13; - required string skip_string = 14; - required group SkipGroup = 15 { - required int32 group_int32 = 16; - required string group_string = 17; - } -} - -// For testing packed/non-packed decoder switching. -// A serialized instance of one should be deserializable as the other. -message NonPackedTest { - repeated int32 a = 1; -} - -message PackedTest { - repeated int32 b = 1 [packed=true]; -} - -message MaxTag { - // Maximum possible tag number. - optional string last_field = 536870911; -} - -message OldMessage { - message Nested { - optional string name = 1; - } - optional Nested nested = 1; - - optional int32 num = 2; -} - -// NewMessage is wire compatible with OldMessage; -// imagine it as a future version. -message NewMessage { - message Nested { - optional string name = 1; - optional string food_group = 2; - } - optional Nested nested = 1; - - // This is an int32 in OldMessage. - optional int64 num = 2; -} - -// Smaller tests for ASCII formatting. - -message InnerMessage { - required string host = 1; - optional int32 port = 2 [default=4000]; - optional bool connected = 3; -} - -message OtherMessage { - optional int64 key = 1; - optional bytes value = 2; - optional float weight = 3; - optional InnerMessage inner = 4; -} - -message MyMessage { - required int32 count = 1; - optional string name = 2; - optional string quote = 3; - repeated string pet = 4; - optional InnerMessage inner = 5; - repeated OtherMessage others = 6; - repeated InnerMessage rep_inner = 12; - - enum Color { - RED = 0; - GREEN = 1; - BLUE = 2; - }; - optional Color bikeshed = 7; - - optional group SomeGroup = 8 { - optional int32 group_field = 9; - } - - // This field becomes [][]byte in the generated code. 
- repeated bytes rep_bytes = 10; - - optional double bigfloat = 11; - - extensions 100 to max; -} - -message Ext { - extend MyMessage { - optional Ext more = 103; - optional string text = 104; - optional int32 number = 105; - } - - optional string data = 1; -} - -extend MyMessage { - repeated string greeting = 106; -} - -message MyMessageSet { - option message_set_wire_format = true; - extensions 100 to max; -} - -message Empty { -} - -extend MyMessageSet { - optional Empty x201 = 201; - optional Empty x202 = 202; - optional Empty x203 = 203; - optional Empty x204 = 204; - optional Empty x205 = 205; - optional Empty x206 = 206; - optional Empty x207 = 207; - optional Empty x208 = 208; - optional Empty x209 = 209; - optional Empty x210 = 210; - optional Empty x211 = 211; - optional Empty x212 = 212; - optional Empty x213 = 213; - optional Empty x214 = 214; - optional Empty x215 = 215; - optional Empty x216 = 216; - optional Empty x217 = 217; - optional Empty x218 = 218; - optional Empty x219 = 219; - optional Empty x220 = 220; - optional Empty x221 = 221; - optional Empty x222 = 222; - optional Empty x223 = 223; - optional Empty x224 = 224; - optional Empty x225 = 225; - optional Empty x226 = 226; - optional Empty x227 = 227; - optional Empty x228 = 228; - optional Empty x229 = 229; - optional Empty x230 = 230; - optional Empty x231 = 231; - optional Empty x232 = 232; - optional Empty x233 = 233; - optional Empty x234 = 234; - optional Empty x235 = 235; - optional Empty x236 = 236; - optional Empty x237 = 237; - optional Empty x238 = 238; - optional Empty x239 = 239; - optional Empty x240 = 240; - optional Empty x241 = 241; - optional Empty x242 = 242; - optional Empty x243 = 243; - optional Empty x244 = 244; - optional Empty x245 = 245; - optional Empty x246 = 246; - optional Empty x247 = 247; - optional Empty x248 = 248; - optional Empty x249 = 249; - optional Empty x250 = 250; -} - -message MessageList { - repeated group Message = 1 { - required string name = 2; - required int32 count = 3; - } -} - -message Strings { - optional string string_field = 1; - optional bytes bytes_field = 2; -} - -message Defaults { - enum Color { - RED = 0; - GREEN = 1; - BLUE = 2; - } - - // Default-valued fields of all basic types. - // Same as GoTest, but copied here to make testing easier. - optional bool F_Bool = 1 [default=true]; - optional int32 F_Int32 = 2 [default=32]; - optional int64 F_Int64 = 3 [default=64]; - optional fixed32 F_Fixed32 = 4 [default=320]; - optional fixed64 F_Fixed64 = 5 [default=640]; - optional uint32 F_Uint32 = 6 [default=3200]; - optional uint64 F_Uint64 = 7 [default=6400]; - optional float F_Float = 8 [default=314159.]; - optional double F_Double = 9 [default=271828.]; - optional string F_String = 10 [default="hello, \"world!\"\n"]; - optional bytes F_Bytes = 11 [default="Bignose"]; - optional sint32 F_Sint32 = 12 [default=-32]; - optional sint64 F_Sint64 = 13 [default=-64]; - optional Color F_Enum = 14 [default=GREEN]; - - // More fields with crazy defaults. - optional float F_Pinf = 15 [default=inf]; - optional float F_Ninf = 16 [default=-inf]; - optional float F_Nan = 17 [default=nan]; - - // Sub-message. - optional SubDefaults sub = 18; - - // Redundant but explicit defaults. 
- optional string str_zero = 19 [default=""]; -} - -message SubDefaults { - optional int64 n = 1 [default=7]; -} - -message RepeatedEnum { - enum Color { - RED = 1; - } - repeated Color color = 1; -} - -message MoreRepeated { - repeated bool bools = 1; - repeated bool bools_packed = 2 [packed=true]; - repeated int32 ints = 3; - repeated int32 ints_packed = 4 [packed=true]; - repeated int64 int64s_packed = 7 [packed=true]; - repeated string strings = 5; - repeated fixed32 fixeds = 6; -} - -// GroupOld and GroupNew have the same wire format. -// GroupNew has a new field inside a group. - -message GroupOld { - optional group G = 101 { - optional int32 x = 2; - } -} - -message GroupNew { - optional group G = 101 { - optional int32 x = 2; - optional int32 y = 3; - } -} - -message FloatingPoint { - required double f = 1; -} - -message MessageWithMap { - map<int32, string> name_mapping = 1; - map<int64, FloatingPoint> msg_mapping = 2; - map<bool, bytes> byte_mapping = 3; -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser_test.go b/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser_test.go deleted file mode 100644 index 1360e8e8b7..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser_test.go +++ /dev/null @@ -1,511 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "math" - "reflect" - "testing" - - proto3pb "./proto3_proto" - . "./testdata" - . 
"github.com/golang/protobuf/proto" -) - -type UnmarshalTextTest struct { - in string - err string // if "", no error expected - out *MyMessage -} - -func buildExtStructTest(text string) UnmarshalTextTest { - msg := &MyMessage{ - Count: Int32(42), - } - SetExtension(msg, E_Ext_More, &Ext{ - Data: String("Hello, world!"), - }) - return UnmarshalTextTest{in: text, out: msg} -} - -func buildExtDataTest(text string) UnmarshalTextTest { - msg := &MyMessage{ - Count: Int32(42), - } - SetExtension(msg, E_Ext_Text, String("Hello, world!")) - SetExtension(msg, E_Ext_Number, Int32(1729)) - return UnmarshalTextTest{in: text, out: msg} -} - -func buildExtRepStringTest(text string) UnmarshalTextTest { - msg := &MyMessage{ - Count: Int32(42), - } - if err := SetExtension(msg, E_Greeting, []string{"bula", "hola"}); err != nil { - panic(err) - } - return UnmarshalTextTest{in: text, out: msg} -} - -var unMarshalTextTests = []UnmarshalTextTest{ - // Basic - { - in: " count:42\n name:\"Dave\" ", - out: &MyMessage{ - Count: Int32(42), - Name: String("Dave"), - }, - }, - - // Empty quoted string - { - in: `count:42 name:""`, - out: &MyMessage{ - Count: Int32(42), - Name: String(""), - }, - }, - - // Quoted string concatenation - { - in: `count:42 name: "My name is "` + "\n" + `"elsewhere"`, - out: &MyMessage{ - Count: Int32(42), - Name: String("My name is elsewhere"), - }, - }, - - // Quoted string with escaped apostrophe - { - in: `count:42 name: "HOLIDAY - New Year\'s Day"`, - out: &MyMessage{ - Count: Int32(42), - Name: String("HOLIDAY - New Year's Day"), - }, - }, - - // Quoted string with single quote - { - in: `count:42 name: 'Roger "The Ramster" Ramjet'`, - out: &MyMessage{ - Count: Int32(42), - Name: String(`Roger "The Ramster" Ramjet`), - }, - }, - - // Quoted string with all the accepted special characters from the C++ test - { - in: `count:42 name: ` + "\"\\\"A string with \\' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"", - out: &MyMessage{ - Count: Int32(42), - Name: String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces"), - }, - }, - - // Quoted string with quoted backslash - { - in: `count:42 name: "\\'xyz"`, - out: &MyMessage{ - Count: Int32(42), - Name: String(`\'xyz`), - }, - }, - - // Quoted string with UTF-8 bytes. 
- { - in: "count:42 name: '\303\277\302\201\xAB'", - out: &MyMessage{ - Count: Int32(42), - Name: String("\303\277\302\201\xAB"), - }, - }, - - // Bad quoted string - { - in: `inner: < host: "\0" >` + "\n", - err: `line 1.15: invalid quoted string "\0"`, - }, - - // Number too large for int64 - { - in: "count: 1 others { key: 123456789012345678901 }", - err: "line 1.23: invalid int64: 123456789012345678901", - }, - - // Number too large for int32 - { - in: "count: 1234567890123", - err: "line 1.7: invalid int32: 1234567890123", - }, - - // Number in hexadecimal - { - in: "count: 0x2beef", - out: &MyMessage{ - Count: Int32(0x2beef), - }, - }, - - // Number in octal - { - in: "count: 024601", - out: &MyMessage{ - Count: Int32(024601), - }, - }, - - // Floating point number with "f" suffix - { - in: "count: 4 others:< weight: 17.0f >", - out: &MyMessage{ - Count: Int32(4), - Others: []*OtherMessage{ - { - Weight: Float32(17), - }, - }, - }, - }, - - // Floating point positive infinity - { - in: "count: 4 bigfloat: inf", - out: &MyMessage{ - Count: Int32(4), - Bigfloat: Float64(math.Inf(1)), - }, - }, - - // Floating point negative infinity - { - in: "count: 4 bigfloat: -inf", - out: &MyMessage{ - Count: Int32(4), - Bigfloat: Float64(math.Inf(-1)), - }, - }, - - // Number too large for float32 - { - in: "others:< weight: 12345678901234567890123456789012345678901234567890 >", - err: "line 1.17: invalid float32: 12345678901234567890123456789012345678901234567890", - }, - - // Number posing as a quoted string - { - in: `inner: < host: 12 >` + "\n", - err: `line 1.15: invalid string: 12`, - }, - - // Quoted string posing as int32 - { - in: `count: "12"`, - err: `line 1.7: invalid int32: "12"`, - }, - - // Quoted string posing a float32 - { - in: `others:< weight: "17.4" >`, - err: `line 1.17: invalid float32: "17.4"`, - }, - - // Enum - { - in: `count:42 bikeshed: BLUE`, - out: &MyMessage{ - Count: Int32(42), - Bikeshed: MyMessage_BLUE.Enum(), - }, - }, - - // Repeated field - { - in: `count:42 pet: "horsey" pet:"bunny"`, - out: &MyMessage{ - Count: Int32(42), - Pet: []string{"horsey", "bunny"}, - }, - }, - - // Repeated message with/without colon and <>/{} - { - in: `count:42 others:{} others{} others:<> others:{}`, - out: &MyMessage{ - Count: Int32(42), - Others: []*OtherMessage{ - {}, - {}, - {}, - {}, - }, - }, - }, - - // Missing colon for inner message - { - in: `count:42 inner < host: "cauchy.syd" >`, - out: &MyMessage{ - Count: Int32(42), - Inner: &InnerMessage{ - Host: String("cauchy.syd"), - }, - }, - }, - - // Missing colon for string field - { - in: `name "Dave"`, - err: `line 1.5: expected ':', found "\"Dave\""`, - }, - - // Missing colon for int32 field - { - in: `count 42`, - err: `line 1.6: expected ':', found "42"`, - }, - - // Missing required field - { - in: `name: "Pawel"`, - err: `proto: required field "testdata.MyMessage.count" not set`, - out: &MyMessage{ - Name: String("Pawel"), - }, - }, - - // Repeated non-repeated field - { - in: `name: "Rob" name: "Russ"`, - err: `line 1.12: non-repeated field "name" was repeated`, - }, - - // Group - { - in: `count: 17 SomeGroup { group_field: 12 }`, - out: &MyMessage{ - Count: Int32(17), - Somegroup: &MyMessage_SomeGroup{ - GroupField: Int32(12), - }, - }, - }, - - // Semicolon between fields - { - in: `count:3;name:"Calvin"`, - out: &MyMessage{ - Count: Int32(3), - Name: String("Calvin"), - }, - }, - // Comma between fields - { - in: `count:4,name:"Ezekiel"`, - out: &MyMessage{ - Count: Int32(4), - Name: String("Ezekiel"), - }, - }, 
- - // Extension - buildExtStructTest(`count: 42 [testdata.Ext.more]:`), - buildExtStructTest(`count: 42 [testdata.Ext.more] {data:"Hello, world!"}`), - buildExtDataTest(`count: 42 [testdata.Ext.text]:"Hello, world!" [testdata.Ext.number]:1729`), - buildExtRepStringTest(`count: 42 [testdata.greeting]:"bula" [testdata.greeting]:"hola"`), - - // Big all-in-one - { - in: "count:42 # Meaning\n" + - `name:"Dave" ` + - `quote:"\"I didn't want to go.\"" ` + - `pet:"bunny" ` + - `pet:"kitty" ` + - `pet:"horsey" ` + - `inner:<` + - ` host:"footrest.syd" ` + - ` port:7001 ` + - ` connected:true ` + - `> ` + - `others:<` + - ` key:3735928559 ` + - ` value:"\x01A\a\f" ` + - `> ` + - `others:<` + - " weight:58.9 # Atomic weight of Co\n" + - ` inner:<` + - ` host:"lesha.mtv" ` + - ` port:8002 ` + - ` >` + - `>`, - out: &MyMessage{ - Count: Int32(42), - Name: String("Dave"), - Quote: String(`"I didn't want to go."`), - Pet: []string{"bunny", "kitty", "horsey"}, - Inner: &InnerMessage{ - Host: String("footrest.syd"), - Port: Int32(7001), - Connected: Bool(true), - }, - Others: []*OtherMessage{ - { - Key: Int64(3735928559), - Value: []byte{0x1, 'A', '\a', '\f'}, - }, - { - Weight: Float32(58.9), - Inner: &InnerMessage{ - Host: String("lesha.mtv"), - Port: Int32(8002), - }, - }, - }, - }, - }, -} - -func TestUnmarshalText(t *testing.T) { - for i, test := range unMarshalTextTests { - pb := new(MyMessage) - err := UnmarshalText(test.in, pb) - if test.err == "" { - // We don't expect failure. - if err != nil { - t.Errorf("Test %d: Unexpected error: %v", i, err) - } else if !reflect.DeepEqual(pb, test.out) { - t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v", - i, pb, test.out) - } - } else { - // We do expect failure. - if err == nil { - t.Errorf("Test %d: Didn't get expected error: %v", i, test.err) - } else if err.Error() != test.err { - t.Errorf("Test %d: Incorrect error.\nHave: %v\nWant: %v", - i, err.Error(), test.err) - } else if _, ok := err.(*RequiredNotSetError); ok && test.out != nil && !reflect.DeepEqual(pb, test.out) { - t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v", - i, pb, test.out) - } - } - } -} - -func TestUnmarshalTextCustomMessage(t *testing.T) { - msg := &textMessage{} - if err := UnmarshalText("custom", msg); err != nil { - t.Errorf("Unexpected error from custom unmarshal: %v", err) - } - if UnmarshalText("not custom", msg) == nil { - t.Errorf("Didn't get expected error from custom unmarshal") - } -} - -// Regression test; this caused a panic. 
-func TestRepeatedEnum(t *testing.T) { - pb := new(RepeatedEnum) - if err := UnmarshalText("color: RED", pb); err != nil { - t.Fatal(err) - } - exp := &RepeatedEnum{ - Color: []RepeatedEnum_Color{RepeatedEnum_RED}, - } - if !Equal(pb, exp) { - t.Errorf("Incorrect populated \nHave: %v\nWant: %v", pb, exp) - } -} - -func TestProto3TextParsing(t *testing.T) { - m := new(proto3pb.Message) - const in = `name: "Wallace" true_scotsman: true` - want := &proto3pb.Message{ - Name: "Wallace", - TrueScotsman: true, - } - if err := UnmarshalText(in, m); err != nil { - t.Fatal(err) - } - if !Equal(m, want) { - t.Errorf("\n got %v\nwant %v", m, want) - } -} - -func TestMapParsing(t *testing.T) { - m := new(MessageWithMap) - const in = `name_mapping: name_mapping:` + - `msg_mapping:>` + - `msg_mapping>` + // no colon after "value" - `byte_mapping:` - want := &MessageWithMap{ - NameMapping: map[int32]string{ - 1: "Beatles", - 1234: "Feist", - }, - MsgMapping: map[int64]*FloatingPoint{ - -4: {F: Float64(2.0)}, - -2: {F: Float64(4.0)}, - }, - ByteMapping: map[bool][]byte{ - true: []byte("so be it"), - }, - } - if err := UnmarshalText(in, m); err != nil { - t.Fatal(err) - } - if !Equal(m, want) { - t.Errorf("\n got %v\nwant %v", m, want) - } -} - -var benchInput string - -func init() { - benchInput = "count: 4\n" - for i := 0; i < 1000; i++ { - benchInput += "pet: \"fido\"\n" - } - - // Check it is valid input. - pb := new(MyMessage) - err := UnmarshalText(benchInput, pb) - if err != nil { - panic("Bad benchmark input: " + err.Error()) - } -} - -func BenchmarkUnmarshalText(b *testing.B) { - pb := new(MyMessage) - for i := 0; i < b.N; i++ { - UnmarshalText(benchInput, pb) - } - b.SetBytes(int64(len(benchInput))) -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_test.go b/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_test.go deleted file mode 100644 index 707bedd000..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_test.go +++ /dev/null @@ -1,436 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "bytes" - "errors" - "io/ioutil" - "math" - "strings" - "testing" - - "github.com/golang/protobuf/proto" - - proto3pb "./proto3_proto" - pb "./testdata" -) - -// textMessage implements the methods that allow it to marshal and unmarshal -// itself as text. -type textMessage struct { -} - -func (*textMessage) MarshalText() ([]byte, error) { - return []byte("custom"), nil -} - -func (*textMessage) UnmarshalText(bytes []byte) error { - if string(bytes) != "custom" { - return errors.New("expected 'custom'") - } - return nil -} - -func (*textMessage) Reset() {} -func (*textMessage) String() string { return "" } -func (*textMessage) ProtoMessage() {} - -func newTestMessage() *pb.MyMessage { - msg := &pb.MyMessage{ - Count: proto.Int32(42), - Name: proto.String("Dave"), - Quote: proto.String(`"I didn't want to go."`), - Pet: []string{"bunny", "kitty", "horsey"}, - Inner: &pb.InnerMessage{ - Host: proto.String("footrest.syd"), - Port: proto.Int32(7001), - Connected: proto.Bool(true), - }, - Others: []*pb.OtherMessage{ - { - Key: proto.Int64(0xdeadbeef), - Value: []byte{1, 65, 7, 12}, - }, - { - Weight: proto.Float32(6.022), - Inner: &pb.InnerMessage{ - Host: proto.String("lesha.mtv"), - Port: proto.Int32(8002), - }, - }, - }, - Bikeshed: pb.MyMessage_BLUE.Enum(), - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: proto.Int32(8), - }, - // One normally wouldn't do this. - // This is an undeclared tag 13, as a varint (wire type 0) with value 4. - XXX_unrecognized: []byte{13<<3 | 0, 4}, - } - ext := &pb.Ext{ - Data: proto.String("Big gobs for big rats"), - } - if err := proto.SetExtension(msg, pb.E_Ext_More, ext); err != nil { - panic(err) - } - greetings := []string{"adg", "easy", "cow"} - if err := proto.SetExtension(msg, pb.E_Greeting, greetings); err != nil { - panic(err) - } - - // Add an unknown extension. We marshal a pb.Ext, and fake the ID. - b, err := proto.Marshal(&pb.Ext{Data: proto.String("3G skiing")}) - if err != nil { - panic(err) - } - b = append(proto.EncodeVarint(201<<3|proto.WireBytes), b...) - proto.SetRawExtension(msg, 201, b) - - // Extensions can be plain fields, too, so let's test that. 
- b = append(proto.EncodeVarint(202<<3|proto.WireVarint), 19) - proto.SetRawExtension(msg, 202, b) - - return msg -} - -const text = `count: 42 -name: "Dave" -quote: "\"I didn't want to go.\"" -pet: "bunny" -pet: "kitty" -pet: "horsey" -inner: < - host: "footrest.syd" - port: 7001 - connected: true -> -others: < - key: 3735928559 - value: "\001A\007\014" -> -others: < - weight: 6.022 - inner: < - host: "lesha.mtv" - port: 8002 - > -> -bikeshed: BLUE -SomeGroup { - group_field: 8 -} -/* 2 unknown bytes */ -13: 4 -[testdata.Ext.more]: < - data: "Big gobs for big rats" -> -[testdata.greeting]: "adg" -[testdata.greeting]: "easy" -[testdata.greeting]: "cow" -/* 13 unknown bytes */ -201: "\t3G skiing" -/* 3 unknown bytes */ -202: 19 -` - -func TestMarshalText(t *testing.T) { - buf := new(bytes.Buffer) - if err := proto.MarshalText(buf, newTestMessage()); err != nil { - t.Fatalf("proto.MarshalText: %v", err) - } - s := buf.String() - if s != text { - t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, text) - } -} - -func TestMarshalTextCustomMessage(t *testing.T) { - buf := new(bytes.Buffer) - if err := proto.MarshalText(buf, &textMessage{}); err != nil { - t.Fatalf("proto.MarshalText: %v", err) - } - s := buf.String() - if s != "custom" { - t.Errorf("Got %q, expected %q", s, "custom") - } -} -func TestMarshalTextNil(t *testing.T) { - want := "" - tests := []proto.Message{nil, (*pb.MyMessage)(nil)} - for i, test := range tests { - buf := new(bytes.Buffer) - if err := proto.MarshalText(buf, test); err != nil { - t.Fatal(err) - } - if got := buf.String(); got != want { - t.Errorf("%d: got %q want %q", i, got, want) - } - } -} - -func TestMarshalTextUnknownEnum(t *testing.T) { - // The Color enum only specifies values 0-2. - m := &pb.MyMessage{Bikeshed: pb.MyMessage_Color(3).Enum()} - got := m.String() - const want = `bikeshed:3 ` - if got != want { - t.Errorf("\n got %q\nwant %q", got, want) - } -} - -func BenchmarkMarshalTextBuffered(b *testing.B) { - buf := new(bytes.Buffer) - m := newTestMessage() - for i := 0; i < b.N; i++ { - buf.Reset() - proto.MarshalText(buf, m) - } -} - -func BenchmarkMarshalTextUnbuffered(b *testing.B) { - w := ioutil.Discard - m := newTestMessage() - for i := 0; i < b.N; i++ { - proto.MarshalText(w, m) - } -} - -func compact(src string) string { - // s/[ \n]+/ /g; s/ $//; - dst := make([]byte, len(src)) - space, comment := false, false - j := 0 - for i := 0; i < len(src); i++ { - if strings.HasPrefix(src[i:], "/*") { - comment = true - i++ - continue - } - if comment && strings.HasPrefix(src[i:], "*/") { - comment = false - i++ - continue - } - if comment { - continue - } - c := src[i] - if c == ' ' || c == '\n' { - space = true - continue - } - if j > 0 && (dst[j-1] == ':' || dst[j-1] == '<' || dst[j-1] == '{') { - space = false - } - if c == '{' { - space = false - } - if space { - dst[j] = ' ' - j++ - space = false - } - dst[j] = c - j++ - } - if space { - dst[j] = ' ' - j++ - } - return string(dst[0:j]) -} - -var compactText = compact(text) - -func TestCompactText(t *testing.T) { - s := proto.CompactTextString(newTestMessage()) - if s != compactText { - t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v\n===\n", s, compactText) - } -} - -func TestStringEscaping(t *testing.T) { - testCases := []struct { - in *pb.Strings - out string - }{ - { - // Test data from C++ test (TextFormatTest.StringEscape). - // Single divergence: we don't escape apostrophes. 
- &pb.Strings{StringField: proto.String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces")}, - "string_field: \"\\\"A string with ' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"\n", - }, - { - // Test data from the same C++ test. - &pb.Strings{StringField: proto.String("\350\260\267\346\255\214")}, - "string_field: \"\\350\\260\\267\\346\\255\\214\"\n", - }, - { - // Some UTF-8. - &pb.Strings{StringField: proto.String("\x00\x01\xff\x81")}, - `string_field: "\000\001\377\201"` + "\n", - }, - } - - for i, tc := range testCases { - var buf bytes.Buffer - if err := proto.MarshalText(&buf, tc.in); err != nil { - t.Errorf("proto.MarsalText: %v", err) - continue - } - s := buf.String() - if s != tc.out { - t.Errorf("#%d: Got:\n%s\nExpected:\n%s\n", i, s, tc.out) - continue - } - - // Check round-trip. - pb := new(pb.Strings) - if err := proto.UnmarshalText(s, pb); err != nil { - t.Errorf("#%d: UnmarshalText: %v", i, err) - continue - } - if !proto.Equal(pb, tc.in) { - t.Errorf("#%d: Round-trip failed:\nstart: %v\n end: %v", i, tc.in, pb) - } - } -} - -// A limitedWriter accepts some output before it fails. -// This is a proxy for something like a nearly-full or imminently-failing disk, -// or a network connection that is about to die. -type limitedWriter struct { - b bytes.Buffer - limit int -} - -var outOfSpace = errors.New("proto: insufficient space") - -func (w *limitedWriter) Write(p []byte) (n int, err error) { - var avail = w.limit - w.b.Len() - if avail <= 0 { - return 0, outOfSpace - } - if len(p) <= avail { - return w.b.Write(p) - } - n, _ = w.b.Write(p[:avail]) - return n, outOfSpace -} - -func TestMarshalTextFailing(t *testing.T) { - // Try lots of different sizes to exercise more error code-paths. - for lim := 0; lim < len(text); lim++ { - buf := new(limitedWriter) - buf.limit = lim - err := proto.MarshalText(buf, newTestMessage()) - // We expect a certain error, but also some partial results in the buffer. 
- if err != outOfSpace { - t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", err, outOfSpace) - } - s := buf.b.String() - x := text[:buf.limit] - if s != x { - t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, x) - } - } -} - -func TestFloats(t *testing.T) { - tests := []struct { - f float64 - want string - }{ - {0, "0"}, - {4.7, "4.7"}, - {math.Inf(1), "inf"}, - {math.Inf(-1), "-inf"}, - {math.NaN(), "nan"}, - } - for _, test := range tests { - msg := &pb.FloatingPoint{F: &test.f} - got := strings.TrimSpace(msg.String()) - want := `f:` + test.want - if got != want { - t.Errorf("f=%f: got %q, want %q", test.f, got, want) - } - } -} - -func TestRepeatedNilText(t *testing.T) { - m := &pb.MessageList{ - Message: []*pb.MessageList_Message{ - nil, - &pb.MessageList_Message{ - Name: proto.String("Horse"), - }, - nil, - }, - } - want := `Message -Message { - name: "Horse" -} -Message -` - if s := proto.MarshalTextString(m); s != want { - t.Errorf(" got: %s\nwant: %s", s, want) - } -} - -func TestProto3Text(t *testing.T) { - tests := []struct { - m proto.Message - want string - }{ - // zero message - {&proto3pb.Message{}, ``}, - // zero message except for an empty byte slice - {&proto3pb.Message{Data: []byte{}}, ``}, - // trivial case - {&proto3pb.Message{Name: "Rob", HeightInCm: 175}, `name:"Rob" height_in_cm:175`}, - // empty map - {&pb.MessageWithMap{}, ``}, - // non-empty map; current map format is the same as a repeated struct - { - &pb.MessageWithMap{NameMapping: map[int32]string{1234: "Feist"}}, - `name_mapping:`, - }, - } - for _, test := range tests { - got := strings.TrimSpace(test.m.String()) - if got != test.want { - t.Errorf("\n got %s\nwant %s", got, test.want) - } - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/gorilla/context/context_test.go b/libnetwork/Godeps/_workspace/src/github.com/gorilla/context/context_test.go deleted file mode 100644 index 9814c501e8..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/gorilla/context/context_test.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package context - -import ( - "net/http" - "testing" -) - -type keyType int - -const ( - key1 keyType = iota - key2 -) - -func TestContext(t *testing.T) { - assertEqual := func(val interface{}, exp interface{}) { - if val != exp { - t.Errorf("Expected %v, got %v.", exp, val) - } - } - - r, _ := http.NewRequest("GET", "http://localhost:8080/", nil) - emptyR, _ := http.NewRequest("GET", "http://localhost:8080/", nil) - - // Get() - assertEqual(Get(r, key1), nil) - - // Set() - Set(r, key1, "1") - assertEqual(Get(r, key1), "1") - assertEqual(len(data[r]), 1) - - Set(r, key2, "2") - assertEqual(Get(r, key2), "2") - assertEqual(len(data[r]), 2) - - //GetOk - value, ok := GetOk(r, key1) - assertEqual(value, "1") - assertEqual(ok, true) - - value, ok = GetOk(r, "not exists") - assertEqual(value, nil) - assertEqual(ok, false) - - Set(r, "nil value", nil) - value, ok = GetOk(r, "nil value") - assertEqual(value, nil) - assertEqual(ok, true) - - // GetAll() - values := GetAll(r) - assertEqual(len(values), 3) - - // GetAll() for empty request - values = GetAll(emptyR) - if values != nil { - t.Error("GetAll didn't return nil value for invalid request") - } - - // GetAllOk() - values, ok = GetAllOk(r) - assertEqual(len(values), 3) - assertEqual(ok, true) - - // GetAllOk() for empty request - values, ok = GetAllOk(emptyR) - assertEqual(value, nil) - assertEqual(ok, false) - - // Delete() - Delete(r, key1) - assertEqual(Get(r, key1), nil) - assertEqual(len(data[r]), 2) - - Delete(r, key2) - assertEqual(Get(r, key2), nil) - assertEqual(len(data[r]), 1) - - // Clear() - Clear(r) - assertEqual(len(data), 0) -} - -func parallelReader(r *http.Request, key string, iterations int, wait, done chan struct{}) { - <-wait - for i := 0; i < iterations; i++ { - Get(r, key) - } - done <- struct{}{} - -} - -func parallelWriter(r *http.Request, key, value string, iterations int, wait, done chan struct{}) { - <-wait - for i := 0; i < iterations; i++ { - Set(r, key, value) - } - done <- struct{}{} - -} - -func benchmarkMutex(b *testing.B, numReaders, numWriters, iterations int) { - - b.StopTimer() - r, _ := http.NewRequest("GET", "http://localhost:8080/", nil) - done := make(chan struct{}) - b.StartTimer() - - for i := 0; i < b.N; i++ { - wait := make(chan struct{}) - - for i := 0; i < numReaders; i++ { - go parallelReader(r, "test", iterations, wait, done) - } - - for i := 0; i < numWriters; i++ { - go parallelWriter(r, "test", "123", iterations, wait, done) - } - - close(wait) - - for i := 0; i < numReaders+numWriters; i++ { - <-done - } - - } - -} - -func BenchmarkMutexSameReadWrite1(b *testing.B) { - benchmarkMutex(b, 1, 1, 32) -} -func BenchmarkMutexSameReadWrite2(b *testing.B) { - benchmarkMutex(b, 2, 2, 32) -} -func BenchmarkMutexSameReadWrite4(b *testing.B) { - benchmarkMutex(b, 4, 4, 32) -} -func BenchmarkMutex1(b *testing.B) { - benchmarkMutex(b, 2, 8, 32) -} -func BenchmarkMutex2(b *testing.B) { - benchmarkMutex(b, 16, 4, 64) -} -func BenchmarkMutex3(b *testing.B) { - benchmarkMutex(b, 1, 2, 128) -} -func BenchmarkMutex4(b *testing.B) { - benchmarkMutex(b, 128, 32, 256) -} -func BenchmarkMutex5(b *testing.B) { - benchmarkMutex(b, 1024, 2048, 64) -} -func BenchmarkMutex6(b *testing.B) { - benchmarkMutex(b, 2048, 1024, 512) -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/gorilla/mux/bench_test.go b/libnetwork/Godeps/_workspace/src/github.com/gorilla/mux/bench_test.go deleted file mode 100644 index c5f97b2b2a..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/gorilla/mux/bench_test.go +++ 
/dev/null @@ -1,21 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "net/http" - "testing" -) - -func BenchmarkMux(b *testing.B) { - router := new(Router) - handler := func(w http.ResponseWriter, r *http.Request) {} - router.HandleFunc("/v1/{v1}", handler) - - request, _ := http.NewRequest("GET", "/v1/anything", nil) - for i := 0; i < b.N; i++ { - router.ServeHTTP(nil, request) - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/gorilla/mux/mux_test.go b/libnetwork/Godeps/_workspace/src/github.com/gorilla/mux/mux_test.go deleted file mode 100644 index 075dedba48..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/gorilla/mux/mux_test.go +++ /dev/null @@ -1,1003 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "fmt" - "net/http" - "testing" - - "github.com/gorilla/context" -) - -type routeTest struct { - title string // title of the test - route *Route // the route being tested - request *http.Request // a request to test the route - vars map[string]string // the expected vars of the match - host string // the expected host of the match - path string // the expected path of the match - shouldMatch bool // whether the request is expected to match the route at all - shouldRedirect bool // whether the request should result in a redirect -} - -func TestHost(t *testing.T) { - // newRequestHost a new request with a method, url, and host header - newRequestHost := func(method, url, host string) *http.Request { - req, err := http.NewRequest(method, url, nil) - if err != nil { - panic(err) - } - req.Host = host - return req - } - - tests := []routeTest{ - { - title: "Host route match", - route: new(Route).Host("aaa.bbb.ccc"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: true, - }, - { - title: "Host route, wrong host in request URL", - route: new(Route).Host("aaa.bbb.ccc"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: false, - }, - { - title: "Host route with port, match", - route: new(Route).Host("aaa.bbb.ccc:1234"), - request: newRequest("GET", "http://aaa.bbb.ccc:1234/111/222/333"), - vars: map[string]string{}, - host: "aaa.bbb.ccc:1234", - path: "", - shouldMatch: true, - }, - { - title: "Host route with port, wrong port in request URL", - route: new(Route).Host("aaa.bbb.ccc:1234"), - request: newRequest("GET", "http://aaa.bbb.ccc:9999/111/222/333"), - vars: map[string]string{}, - host: "aaa.bbb.ccc:1234", - path: "", - shouldMatch: false, - }, - { - title: "Host route, match with host in request header", - route: new(Route).Host("aaa.bbb.ccc"), - request: newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc"), - vars: map[string]string{}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: true, - }, - { - title: "Host route, wrong host in request header", - route: new(Route).Host("aaa.bbb.ccc"), - request: newRequestHost("GET", "/111/222/333", "aaa.222.ccc"), - vars: map[string]string{}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: false, - }, - // BUG {new(Route).Host("aaa.bbb.ccc:1234"), newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc:1234"), map[string]string{}, 
"aaa.bbb.ccc:1234", "", true}, - { - title: "Host route with port, wrong host in request header", - route: new(Route).Host("aaa.bbb.ccc:1234"), - request: newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc:9999"), - vars: map[string]string{}, - host: "aaa.bbb.ccc:1234", - path: "", - shouldMatch: false, - }, - { - title: "Host route with pattern, match", - route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb"}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: true, - }, - { - title: "Host route with pattern, wrong host in request URL", - route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb"}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: false, - }, - { - title: "Host route with multiple patterns, match", - route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc"}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: true, - }, - { - title: "Host route with multiple patterns, wrong host in request URL", - route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc"}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: false, - }, - { - title: "Path route with single pattern with pipe, match", - route: new(Route).Path("/{category:a|b/c}"), - request: newRequest("GET", "http://localhost/a"), - vars: map[string]string{"category": "a"}, - host: "", - path: "/a", - shouldMatch: true, - }, - { - title: "Path route with single pattern with pipe, match", - route: new(Route).Path("/{category:a|b/c}"), - request: newRequest("GET", "http://localhost/b/c"), - vars: map[string]string{"category": "b/c"}, - host: "", - path: "/b/c", - shouldMatch: true, - }, - { - title: "Path route with multiple patterns with pipe, match", - route: new(Route).Path("/{category:a|b/c}/{product}/{id:[0-9]+}"), - request: newRequest("GET", "http://localhost/a/product_name/1"), - vars: map[string]string{"category": "a", "product": "product_name", "id": "1"}, - host: "", - path: "/a/product_name/1", - shouldMatch: true, - }, - } - for _, test := range tests { - testRoute(t, test) - } -} - -func TestPath(t *testing.T) { - tests := []routeTest{ - { - title: "Path route, match", - route: new(Route).Path("/111/222/333"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{}, - host: "", - path: "/111/222/333", - shouldMatch: true, - }, - { - title: "Path route, match with trailing slash in request and path", - route: new(Route).Path("/111/"), - request: newRequest("GET", "http://localhost/111/"), - vars: map[string]string{}, - host: "", - path: "/111/", - shouldMatch: true, - }, - { - title: "Path route, do not match with trailing slash in path", - route: new(Route).Path("/111/"), - request: newRequest("GET", "http://localhost/111"), - vars: map[string]string{}, - host: "", - path: "/111", - shouldMatch: false, - }, - { - title: "Path route, do not match with trailing slash in request", - route: new(Route).Path("/111"), - request: newRequest("GET", "http://localhost/111/"), - vars: map[string]string{}, - host: "", - path: "/111/", - shouldMatch: false, - }, - { - title: "Path route, wrong path in request in request URL", - route: 
new(Route).Path("/111/222/333"), - request: newRequest("GET", "http://localhost/1/2/3"), - vars: map[string]string{}, - host: "", - path: "/111/222/333", - shouldMatch: false, - }, - { - title: "Path route with pattern, match", - route: new(Route).Path("/111/{v1:[0-9]{3}}/333"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v1": "222"}, - host: "", - path: "/111/222/333", - shouldMatch: true, - }, - { - title: "Path route with pattern, URL in request does not match", - route: new(Route).Path("/111/{v1:[0-9]{3}}/333"), - request: newRequest("GET", "http://localhost/111/aaa/333"), - vars: map[string]string{"v1": "222"}, - host: "", - path: "/111/222/333", - shouldMatch: false, - }, - { - title: "Path route with multiple patterns, match", - route: new(Route).Path("/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v1": "111", "v2": "222", "v3": "333"}, - host: "", - path: "/111/222/333", - shouldMatch: true, - }, - { - title: "Path route with multiple patterns, URL in request does not match", - route: new(Route).Path("/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/aaa/333"), - vars: map[string]string{"v1": "111", "v2": "222", "v3": "333"}, - host: "", - path: "/111/222/333", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestPathPrefix(t *testing.T) { - tests := []routeTest{ - { - title: "PathPrefix route, match", - route: new(Route).PathPrefix("/111"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{}, - host: "", - path: "/111", - shouldMatch: true, - }, - { - title: "PathPrefix route, match substring", - route: new(Route).PathPrefix("/1"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{}, - host: "", - path: "/1", - shouldMatch: true, - }, - { - title: "PathPrefix route, URL prefix in request does not match", - route: new(Route).PathPrefix("/111"), - request: newRequest("GET", "http://localhost/1/2/3"), - vars: map[string]string{}, - host: "", - path: "/111", - shouldMatch: false, - }, - { - title: "PathPrefix route with pattern, match", - route: new(Route).PathPrefix("/111/{v1:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v1": "222"}, - host: "", - path: "/111/222", - shouldMatch: true, - }, - { - title: "PathPrefix route with pattern, URL prefix in request does not match", - route: new(Route).PathPrefix("/111/{v1:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/aaa/333"), - vars: map[string]string{"v1": "222"}, - host: "", - path: "/111/222", - shouldMatch: false, - }, - { - title: "PathPrefix route with multiple patterns, match", - route: new(Route).PathPrefix("/{v1:[0-9]{3}}/{v2:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v1": "111", "v2": "222"}, - host: "", - path: "/111/222", - shouldMatch: true, - }, - { - title: "PathPrefix route with multiple patterns, URL prefix in request does not match", - route: new(Route).PathPrefix("/{v1:[0-9]{3}}/{v2:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/aaa/333"), - vars: map[string]string{"v1": "111", "v2": "222"}, - host: "", - path: "/111/222", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestHostPath(t *testing.T) { - tests := []routeTest{ 
- { - title: "Host and Path route, match", - route: new(Route).Host("aaa.bbb.ccc").Path("/111/222/333"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Host and Path route, wrong host in request URL", - route: new(Route).Host("aaa.bbb.ccc").Path("/111/222/333"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - { - title: "Host and Path route with pattern, match", - route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc").Path("/111/{v2:[0-9]{3}}/333"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb", "v2": "222"}, - host: "aaa.bbb.ccc", - path: "/111/222/333", - shouldMatch: true, - }, - { - title: "Host and Path route with pattern, URL in request does not match", - route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc").Path("/111/{v2:[0-9]{3}}/333"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb", "v2": "222"}, - host: "aaa.bbb.ccc", - path: "/111/222/333", - shouldMatch: false, - }, - { - title: "Host and Path route with multiple patterns, match", - route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}").Path("/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc", "v4": "111", "v5": "222", "v6": "333"}, - host: "aaa.bbb.ccc", - path: "/111/222/333", - shouldMatch: true, - }, - { - title: "Host and Path route with multiple patterns, URL in request does not match", - route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}").Path("/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc", "v4": "111", "v5": "222", "v6": "333"}, - host: "aaa.bbb.ccc", - path: "/111/222/333", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestHeaders(t *testing.T) { - // newRequestHeaders creates a new request with a method, url, and headers - newRequestHeaders := func(method, url string, headers map[string]string) *http.Request { - req, err := http.NewRequest(method, url, nil) - if err != nil { - panic(err) - } - for k, v := range headers { - req.Header.Add(k, v) - } - return req - } - - tests := []routeTest{ - { - title: "Headers route, match", - route: new(Route).Headers("foo", "bar", "baz", "ding"), - request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar", "baz": "ding"}), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Headers route, bad header values", - route: new(Route).Headers("foo", "bar", "baz", "ding"), - request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar", "baz": "dong"}), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } - -} - -func TestMethods(t *testing.T) { - tests := []routeTest{ - { - title: "Methods route, match GET", - route: new(Route).Methods("GET", "POST"), - request: newRequest("GET", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Methods route, match POST", - route: new(Route).Methods("GET", "POST"), - request: 
newRequest("POST", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Methods route, bad method", - route: new(Route).Methods("GET", "POST"), - request: newRequest("PUT", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestQueries(t *testing.T) { - tests := []routeTest{ - { - title: "Queries route, match", - route: new(Route).Queries("foo", "bar", "baz", "ding"), - request: newRequest("GET", "http://localhost?foo=bar&baz=ding"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route, match with a query string", - route: new(Route).Host("www.example.com").Path("/api").Queries("foo", "bar", "baz", "ding"), - request: newRequest("GET", "http://www.example.com/api?foo=bar&baz=ding"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route, match with a query string out of order", - route: new(Route).Host("www.example.com").Path("/api").Queries("foo", "bar", "baz", "ding"), - request: newRequest("GET", "http://www.example.com/api?baz=ding&foo=bar"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route, bad query", - route: new(Route).Queries("foo", "bar", "baz", "ding"), - request: newRequest("GET", "http://localhost?foo=bar&baz=dong"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - { - title: "Queries route with pattern, match", - route: new(Route).Queries("foo", "{v1}"), - request: newRequest("GET", "http://localhost?foo=bar"), - vars: map[string]string{"v1": "bar"}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route with multiple patterns, match", - route: new(Route).Queries("foo", "{v1}", "baz", "{v2}"), - request: newRequest("GET", "http://localhost?foo=bar&baz=ding"), - vars: map[string]string{"v1": "bar", "v2": "ding"}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route with regexp pattern, match", - route: new(Route).Queries("foo", "{v1:[0-9]+}"), - request: newRequest("GET", "http://localhost?foo=10"), - vars: map[string]string{"v1": "10"}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route with regexp pattern, regexp does not match", - route: new(Route).Queries("foo", "{v1:[0-9]+}"), - request: newRequest("GET", "http://localhost?foo=a"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestSchemes(t *testing.T) { - tests := []routeTest{ - // Schemes - { - title: "Schemes route, match https", - route: new(Route).Schemes("https", "ftp"), - request: newRequest("GET", "https://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Schemes route, match ftp", - route: new(Route).Schemes("https", "ftp"), - request: newRequest("GET", "ftp://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Schemes route, bad scheme", - route: new(Route).Schemes("https", "ftp"), - request: newRequest("GET", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - } - for _, test := range tests { - testRoute(t, test) - } -} - -func TestMatcherFunc(t *testing.T) { - m := func(r *http.Request, m 
*RouteMatch) bool { - if r.URL.Host == "aaa.bbb.ccc" { - return true - } - return false - } - - tests := []routeTest{ - { - title: "MatchFunc route, match", - route: new(Route).MatcherFunc(m), - request: newRequest("GET", "http://aaa.bbb.ccc"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "MatchFunc route, non-match", - route: new(Route).MatcherFunc(m), - request: newRequest("GET", "http://aaa.222.ccc"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestBuildVarsFunc(t *testing.T) { - tests := []routeTest{ - { - title: "BuildVarsFunc set on route", - route: new(Route).Path(`/111/{v1:\d}{v2:.*}`).BuildVarsFunc(func(vars map[string]string) map[string]string { - vars["v1"] = "3" - vars["v2"] = "a" - return vars - }), - request: newRequest("GET", "http://localhost/111/2"), - path: "/111/3a", - shouldMatch: true, - }, - { - title: "BuildVarsFunc set on route and parent route", - route: new(Route).PathPrefix(`/{v1:\d}`).BuildVarsFunc(func(vars map[string]string) map[string]string { - vars["v1"] = "2" - return vars - }).Subrouter().Path(`/{v2:\w}`).BuildVarsFunc(func(vars map[string]string) map[string]string { - vars["v2"] = "b" - return vars - }), - request: newRequest("GET", "http://localhost/1/a"), - path: "/2/b", - shouldMatch: true, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestSubRouter(t *testing.T) { - subrouter1 := new(Route).Host("{v1:[a-z]+}.google.com").Subrouter() - subrouter2 := new(Route).PathPrefix("/foo/{v1}").Subrouter() - - tests := []routeTest{ - { - route: subrouter1.Path("/{v2:[a-z]+}"), - request: newRequest("GET", "http://aaa.google.com/bbb"), - vars: map[string]string{"v1": "aaa", "v2": "bbb"}, - host: "aaa.google.com", - path: "/bbb", - shouldMatch: true, - }, - { - route: subrouter1.Path("/{v2:[a-z]+}"), - request: newRequest("GET", "http://111.google.com/111"), - vars: map[string]string{"v1": "aaa", "v2": "bbb"}, - host: "aaa.google.com", - path: "/bbb", - shouldMatch: false, - }, - { - route: subrouter2.Path("/baz/{v2}"), - request: newRequest("GET", "http://localhost/foo/bar/baz/ding"), - vars: map[string]string{"v1": "bar", "v2": "ding"}, - host: "", - path: "/foo/bar/baz/ding", - shouldMatch: true, - }, - { - route: subrouter2.Path("/baz/{v2}"), - request: newRequest("GET", "http://localhost/foo/bar"), - vars: map[string]string{"v1": "bar", "v2": "ding"}, - host: "", - path: "/foo/bar/baz/ding", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestNamedRoutes(t *testing.T) { - r1 := NewRouter() - r1.NewRoute().Name("a") - r1.NewRoute().Name("b") - r1.NewRoute().Name("c") - - r2 := r1.NewRoute().Subrouter() - r2.NewRoute().Name("d") - r2.NewRoute().Name("e") - r2.NewRoute().Name("f") - - r3 := r2.NewRoute().Subrouter() - r3.NewRoute().Name("g") - r3.NewRoute().Name("h") - r3.NewRoute().Name("i") - - if r1.namedRoutes == nil || len(r1.namedRoutes) != 9 { - t.Errorf("Expected 9 named routes, got %v", r1.namedRoutes) - } else if r1.Get("i") == nil { - t.Errorf("Subroute name not registered") - } -} - -func TestStrictSlash(t *testing.T) { - r := NewRouter() - r.StrictSlash(true) - - tests := []routeTest{ - { - title: "Redirect path without slash", - route: r.NewRoute().Path("/111/"), - request: newRequest("GET", "http://localhost/111"), - vars: map[string]string{}, - host: "", - path: "/111/", - shouldMatch: true, - shouldRedirect: 
true, - }, - { - title: "Do not redirect path with slash", - route: r.NewRoute().Path("/111/"), - request: newRequest("GET", "http://localhost/111/"), - vars: map[string]string{}, - host: "", - path: "/111/", - shouldMatch: true, - shouldRedirect: false, - }, - { - title: "Redirect path with slash", - route: r.NewRoute().Path("/111"), - request: newRequest("GET", "http://localhost/111/"), - vars: map[string]string{}, - host: "", - path: "/111", - shouldMatch: true, - shouldRedirect: true, - }, - { - title: "Do not redirect path without slash", - route: r.NewRoute().Path("/111"), - request: newRequest("GET", "http://localhost/111"), - vars: map[string]string{}, - host: "", - path: "/111", - shouldMatch: true, - shouldRedirect: false, - }, - { - title: "Propagate StrictSlash to subrouters", - route: r.NewRoute().PathPrefix("/static/").Subrouter().Path("/images/"), - request: newRequest("GET", "http://localhost/static/images"), - vars: map[string]string{}, - host: "", - path: "/static/images/", - shouldMatch: true, - shouldRedirect: true, - }, - { - title: "Ignore StrictSlash for path prefix", - route: r.NewRoute().PathPrefix("/static/"), - request: newRequest("GET", "http://localhost/static/logo.png"), - vars: map[string]string{}, - host: "", - path: "/static/", - shouldMatch: true, - shouldRedirect: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -// ---------------------------------------------------------------------------- -// Helpers -// ---------------------------------------------------------------------------- - -func getRouteTemplate(route *Route) string { - host, path := "none", "none" - if route.regexp != nil { - if route.regexp.host != nil { - host = route.regexp.host.template - } - if route.regexp.path != nil { - path = route.regexp.path.template - } - } - return fmt.Sprintf("Host: %v, Path: %v", host, path) -} - -func testRoute(t *testing.T, test routeTest) { - request := test.request - route := test.route - vars := test.vars - shouldMatch := test.shouldMatch - host := test.host - path := test.path - url := test.host + test.path - shouldRedirect := test.shouldRedirect - - var match RouteMatch - ok := route.Match(request, &match) - if ok != shouldMatch { - msg := "Should match" - if !shouldMatch { - msg = "Should not match" - } - t.Errorf("(%v) %v:\nRoute: %#v\nRequest: %#v\nVars: %v\n", test.title, msg, route, request, vars) - return - } - if shouldMatch { - if test.vars != nil && !stringMapEqual(test.vars, match.Vars) { - t.Errorf("(%v) Vars not equal: expected %v, got %v", test.title, vars, match.Vars) - return - } - if host != "" { - u, _ := test.route.URLHost(mapToPairs(match.Vars)...) - if host != u.Host { - t.Errorf("(%v) URLHost not equal: expected %v, got %v -- %v", test.title, host, u.Host, getRouteTemplate(route)) - return - } - } - if path != "" { - u, _ := route.URLPath(mapToPairs(match.Vars)...) - if path != u.Path { - t.Errorf("(%v) URLPath not equal: expected %v, got %v -- %v", test.title, path, u.Path, getRouteTemplate(route)) - return - } - } - if url != "" { - u, _ := route.URL(mapToPairs(match.Vars)...) 
- if url != u.Host+u.Path { - t.Errorf("(%v) URL not equal: expected %v, got %v -- %v", test.title, url, u.Host+u.Path, getRouteTemplate(route)) - return - } - } - if shouldRedirect && match.Handler == nil { - t.Errorf("(%v) Did not redirect", test.title) - return - } - if !shouldRedirect && match.Handler != nil { - t.Errorf("(%v) Unexpected redirect", test.title) - return - } - } -} - -// Tests that the context is cleared or not cleared properly depending on -// the configuration of the router -func TestKeepContext(t *testing.T) { - func1 := func(w http.ResponseWriter, r *http.Request) {} - - r := NewRouter() - r.HandleFunc("/", func1).Name("func1") - - req, _ := http.NewRequest("GET", "http://localhost/", nil) - context.Set(req, "t", 1) - - res := new(http.ResponseWriter) - r.ServeHTTP(*res, req) - - if _, ok := context.GetOk(req, "t"); ok { - t.Error("Context should have been cleared at end of request") - } - - r.KeepContext = true - - req, _ = http.NewRequest("GET", "http://localhost/", nil) - context.Set(req, "t", 1) - - r.ServeHTTP(*res, req) - if _, ok := context.GetOk(req, "t"); !ok { - t.Error("Context should NOT have been cleared at end of request") - } - -} - -type TestA301ResponseWriter struct { - hh http.Header - status int -} - -func (ho TestA301ResponseWriter) Header() http.Header { - return http.Header(ho.hh) -} - -func (ho TestA301ResponseWriter) Write(b []byte) (int, error) { - return 0, nil -} - -func (ho TestA301ResponseWriter) WriteHeader(code int) { - ho.status = code -} - -func Test301Redirect(t *testing.T) { - m := make(http.Header) - - func1 := func(w http.ResponseWriter, r *http.Request) {} - func2 := func(w http.ResponseWriter, r *http.Request) {} - - r := NewRouter() - r.HandleFunc("/api/", func2).Name("func2") - r.HandleFunc("/", func1).Name("func1") - - req, _ := http.NewRequest("GET", "http://localhost//api/?abc=def", nil) - - res := TestA301ResponseWriter{ - hh: m, - status: 0, - } - r.ServeHTTP(&res, req) - - if "http://localhost/api/?abc=def" != res.hh["Location"][0] { - t.Errorf("Should have complete URL with query string") - } -} - -// https://plus.google.com/101022900381697718949/posts/eWy6DjFJ6uW -func TestSubrouterHeader(t *testing.T) { - expected := "func1 response" - func1 := func(w http.ResponseWriter, r *http.Request) { - fmt.Fprint(w, expected) - } - func2 := func(http.ResponseWriter, *http.Request) {} - - r := NewRouter() - s := r.Headers("SomeSpecialHeader", "").Subrouter() - s.HandleFunc("/", func1).Name("func1") - r.HandleFunc("/", func2).Name("func2") - - req, _ := http.NewRequest("GET", "http://localhost/", nil) - req.Header.Add("SomeSpecialHeader", "foo") - match := new(RouteMatch) - matched := r.Match(req, match) - if !matched { - t.Errorf("Should match request") - } - if match.Route.GetName() != "func1" { - t.Errorf("Expecting func1 handler, got %s", match.Route.GetName()) - } - resp := NewRecorder() - match.Handler.ServeHTTP(resp, req) - if resp.Body.String() != expected { - t.Errorf("Expecting %q", expected) - } -} - -// mapToPairs converts a string map to a slice of string pairs -func mapToPairs(m map[string]string) []string { - var i int - p := make([]string, len(m)*2) - for k, v := range m { - p[i] = k - p[i+1] = v - i += 2 - } - return p -} - -// stringMapEqual checks the equality of two string maps -func stringMapEqual(m1, m2 map[string]string) bool { - nil1 := m1 == nil - nil2 := m2 == nil - if nil1 != nil2 || len(m1) != len(m2) { - return false - } - for k, v := range m1 { - if v != m2[k] { - return false - } - } - return true 
-} - -// newRequest is a helper function to create a new request with a method and url -func newRequest(method, url string) *http.Request { - req, err := http.NewRequest(method, url, nil) - if err != nil { - panic(err) - } - return req -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/gorilla/mux/old_test.go b/libnetwork/Godeps/_workspace/src/github.com/gorilla/mux/old_test.go deleted file mode 100644 index 1f7c190c0f..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/gorilla/mux/old_test.go +++ /dev/null @@ -1,714 +0,0 @@ -// Old tests ported to Go1. This is a mess. Want to drop it one day. - -// Copyright 2011 Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "bytes" - "net/http" - "testing" -) - -// ---------------------------------------------------------------------------- -// ResponseRecorder -// ---------------------------------------------------------------------------- -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// ResponseRecorder is an implementation of http.ResponseWriter that -// records its mutations for later inspection in tests. -type ResponseRecorder struct { - Code int // the HTTP response code from WriteHeader - HeaderMap http.Header // the HTTP response headers - Body *bytes.Buffer // if non-nil, the bytes.Buffer to append written data to - Flushed bool -} - -// NewRecorder returns an initialized ResponseRecorder. -func NewRecorder() *ResponseRecorder { - return &ResponseRecorder{ - HeaderMap: make(http.Header), - Body: new(bytes.Buffer), - } -} - -// DefaultRemoteAddr is the default remote address to return in RemoteAddr if -// an explicit DefaultRemoteAddr isn't set on ResponseRecorder. -const DefaultRemoteAddr = "1.2.3.4" - -// Header returns the response headers. -func (rw *ResponseRecorder) Header() http.Header { - return rw.HeaderMap -} - -// Write always succeeds and writes to rw.Body, if not nil. -func (rw *ResponseRecorder) Write(buf []byte) (int, error) { - if rw.Body != nil { - rw.Body.Write(buf) - } - if rw.Code == 0 { - rw.Code = http.StatusOK - } - return len(buf), nil -} - -// WriteHeader sets rw.Code. -func (rw *ResponseRecorder) WriteHeader(code int) { - rw.Code = code -} - -// Flush sets rw.Flushed to true. -func (rw *ResponseRecorder) Flush() { - rw.Flushed = true -} - -// ---------------------------------------------------------------------------- - -func TestRouteMatchers(t *testing.T) { - var scheme, host, path, query, method string - var headers map[string]string - var resultVars map[bool]map[string]string - - router := NewRouter() - router.NewRoute().Host("{var1}.google.com"). - Path("/{var2:[a-z]+}/{var3:[0-9]+}"). - Queries("foo", "bar"). - Methods("GET"). - Schemes("https"). - Headers("x-requested-with", "XMLHttpRequest") - router.NewRoute().Host("www.{var4}.com"). - PathPrefix("/foo/{var5:[a-z]+}/{var6:[0-9]+}"). - Queries("baz", "ding"). - Methods("POST"). - Schemes("http"). - Headers("Content-Type", "application/json") - - reset := func() { - // Everything match. 
- scheme = "https" - host = "www.google.com" - path = "/product/42" - query = "?foo=bar" - method = "GET" - headers = map[string]string{"X-Requested-With": "XMLHttpRequest"} - resultVars = map[bool]map[string]string{ - true: {"var1": "www", "var2": "product", "var3": "42"}, - false: {}, - } - } - - reset2 := func() { - // Everything match. - scheme = "http" - host = "www.google.com" - path = "/foo/product/42/path/that/is/ignored" - query = "?baz=ding" - method = "POST" - headers = map[string]string{"Content-Type": "application/json"} - resultVars = map[bool]map[string]string{ - true: {"var4": "google", "var5": "product", "var6": "42"}, - false: {}, - } - } - - match := func(shouldMatch bool) { - url := scheme + "://" + host + path + query - request, _ := http.NewRequest(method, url, nil) - for key, value := range headers { - request.Header.Add(key, value) - } - - var routeMatch RouteMatch - matched := router.Match(request, &routeMatch) - if matched != shouldMatch { - // Need better messages. :) - if matched { - t.Errorf("Should match.") - } else { - t.Errorf("Should not match.") - } - } - - if matched { - currentRoute := routeMatch.Route - if currentRoute == nil { - t.Errorf("Expected a current route.") - } - vars := routeMatch.Vars - expectedVars := resultVars[shouldMatch] - if len(vars) != len(expectedVars) { - t.Errorf("Expected vars: %v Got: %v.", expectedVars, vars) - } - for name, value := range vars { - if expectedVars[name] != value { - t.Errorf("Expected vars: %v Got: %v.", expectedVars, vars) - } - } - } - } - - // 1st route -------------------------------------------------------------- - - // Everything match. - reset() - match(true) - - // Scheme doesn't match. - reset() - scheme = "http" - match(false) - - // Host doesn't match. - reset() - host = "www.mygoogle.com" - match(false) - - // Path doesn't match. - reset() - path = "/product/notdigits" - match(false) - - // Query doesn't match. - reset() - query = "?foo=baz" - match(false) - - // Method doesn't match. - reset() - method = "POST" - match(false) - - // Header doesn't match. - reset() - headers = map[string]string{} - match(false) - - // Everything match, again. - reset() - match(true) - - // 2nd route -------------------------------------------------------------- - - // Everything match. - reset2() - match(true) - - // Scheme doesn't match. - reset2() - scheme = "https" - match(false) - - // Host doesn't match. - reset2() - host = "sub.google.com" - match(false) - - // Path doesn't match. - reset2() - path = "/bar/product/42" - match(false) - - // Query doesn't match. - reset2() - query = "?foo=baz" - match(false) - - // Method doesn't match. - reset2() - method = "GET" - match(false) - - // Header doesn't match. - reset2() - headers = map[string]string{} - match(false) - - // Everything match, again. 
- reset2() - match(true) -} - -type headerMatcherTest struct { - matcher headerMatcher - headers map[string]string - result bool -} - -var headerMatcherTests = []headerMatcherTest{ - { - matcher: headerMatcher(map[string]string{"x-requested-with": "XMLHttpRequest"}), - headers: map[string]string{"X-Requested-With": "XMLHttpRequest"}, - result: true, - }, - { - matcher: headerMatcher(map[string]string{"x-requested-with": ""}), - headers: map[string]string{"X-Requested-With": "anything"}, - result: true, - }, - { - matcher: headerMatcher(map[string]string{"x-requested-with": "XMLHttpRequest"}), - headers: map[string]string{}, - result: false, - }, -} - -type hostMatcherTest struct { - matcher *Route - url string - vars map[string]string - result bool -} - -var hostMatcherTests = []hostMatcherTest{ - { - matcher: NewRouter().NewRoute().Host("{foo:[a-z][a-z][a-z]}.{bar:[a-z][a-z][a-z]}.{baz:[a-z][a-z][a-z]}"), - url: "http://abc.def.ghi/", - vars: map[string]string{"foo": "abc", "bar": "def", "baz": "ghi"}, - result: true, - }, - { - matcher: NewRouter().NewRoute().Host("{foo:[a-z][a-z][a-z]}.{bar:[a-z][a-z][a-z]}.{baz:[a-z][a-z][a-z]}"), - url: "http://a.b.c/", - vars: map[string]string{"foo": "abc", "bar": "def", "baz": "ghi"}, - result: false, - }, -} - -type methodMatcherTest struct { - matcher methodMatcher - method string - result bool -} - -var methodMatcherTests = []methodMatcherTest{ - { - matcher: methodMatcher([]string{"GET", "POST", "PUT"}), - method: "GET", - result: true, - }, - { - matcher: methodMatcher([]string{"GET", "POST", "PUT"}), - method: "POST", - result: true, - }, - { - matcher: methodMatcher([]string{"GET", "POST", "PUT"}), - method: "PUT", - result: true, - }, - { - matcher: methodMatcher([]string{"GET", "POST", "PUT"}), - method: "DELETE", - result: false, - }, -} - -type pathMatcherTest struct { - matcher *Route - url string - vars map[string]string - result bool -} - -var pathMatcherTests = []pathMatcherTest{ - { - matcher: NewRouter().NewRoute().Path("/{foo:[0-9][0-9][0-9]}/{bar:[0-9][0-9][0-9]}/{baz:[0-9][0-9][0-9]}"), - url: "http://localhost:8080/123/456/789", - vars: map[string]string{"foo": "123", "bar": "456", "baz": "789"}, - result: true, - }, - { - matcher: NewRouter().NewRoute().Path("/{foo:[0-9][0-9][0-9]}/{bar:[0-9][0-9][0-9]}/{baz:[0-9][0-9][0-9]}"), - url: "http://localhost:8080/1/2/3", - vars: map[string]string{"foo": "123", "bar": "456", "baz": "789"}, - result: false, - }, -} - -type schemeMatcherTest struct { - matcher schemeMatcher - url string - result bool -} - -var schemeMatcherTests = []schemeMatcherTest{ - { - matcher: schemeMatcher([]string{"http", "https"}), - url: "http://localhost:8080/", - result: true, - }, - { - matcher: schemeMatcher([]string{"http", "https"}), - url: "https://localhost:8080/", - result: true, - }, - { - matcher: schemeMatcher([]string{"https"}), - url: "http://localhost:8080/", - result: false, - }, - { - matcher: schemeMatcher([]string{"http"}), - url: "https://localhost:8080/", - result: false, - }, -} - -type urlBuildingTest struct { - route *Route - vars []string - url string -} - -var urlBuildingTests = []urlBuildingTest{ - { - route: new(Route).Host("foo.domain.com"), - vars: []string{}, - url: "http://foo.domain.com", - }, - { - route: new(Route).Host("{subdomain}.domain.com"), - vars: []string{"subdomain", "bar"}, - url: "http://bar.domain.com", - }, - { - route: new(Route).Host("foo.domain.com").Path("/articles"), - vars: []string{}, - url: "http://foo.domain.com/articles", - }, - { - route: 
new(Route).Path("/articles"), - vars: []string{}, - url: "/articles", - }, - { - route: new(Route).Path("/articles/{category}/{id:[0-9]+}"), - vars: []string{"category", "technology", "id", "42"}, - url: "/articles/technology/42", - }, - { - route: new(Route).Host("{subdomain}.domain.com").Path("/articles/{category}/{id:[0-9]+}"), - vars: []string{"subdomain", "foo", "category", "technology", "id", "42"}, - url: "http://foo.domain.com/articles/technology/42", - }, -} - -func TestHeaderMatcher(t *testing.T) { - for _, v := range headerMatcherTests { - request, _ := http.NewRequest("GET", "http://localhost:8080/", nil) - for key, value := range v.headers { - request.Header.Add(key, value) - } - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, request.Header) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, request.Header) - } - } - } -} - -func TestHostMatcher(t *testing.T) { - for _, v := range hostMatcherTests { - request, _ := http.NewRequest("GET", v.url, nil) - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - vars := routeMatch.Vars - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, v.url) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, v.url) - } - } - if result { - if len(vars) != len(v.vars) { - t.Errorf("%#v: vars length should be %v, got %v.", v.matcher, len(v.vars), len(vars)) - } - for name, value := range vars { - if v.vars[name] != value { - t.Errorf("%#v: expected value %v for key %v, got %v.", v.matcher, v.vars[name], name, value) - } - } - } else { - if len(vars) != 0 { - t.Errorf("%#v: vars length should be 0, got %v.", v.matcher, len(vars)) - } - } - } -} - -func TestMethodMatcher(t *testing.T) { - for _, v := range methodMatcherTests { - request, _ := http.NewRequest(v.method, "http://localhost:8080/", nil) - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, v.method) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, v.method) - } - } - } -} - -func TestPathMatcher(t *testing.T) { - for _, v := range pathMatcherTests { - request, _ := http.NewRequest("GET", v.url, nil) - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - vars := routeMatch.Vars - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, v.url) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, v.url) - } - } - if result { - if len(vars) != len(v.vars) { - t.Errorf("%#v: vars length should be %v, got %v.", v.matcher, len(v.vars), len(vars)) - } - for name, value := range vars { - if v.vars[name] != value { - t.Errorf("%#v: expected value %v for key %v, got %v.", v.matcher, v.vars[name], name, value) - } - } - } else { - if len(vars) != 0 { - t.Errorf("%#v: vars length should be 0, got %v.", v.matcher, len(vars)) - } - } - } -} - -func TestSchemeMatcher(t *testing.T) { - for _, v := range schemeMatcherTests { - request, _ := http.NewRequest("GET", v.url, nil) - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, v.url) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, v.url) - } - } - } -} - -func TestUrlBuilding(t *testing.T) { - - for _, v := range urlBuildingTests { - u, _ := 
v.route.URL(v.vars...) - url := u.String() - if url != v.url { - t.Errorf("expected %v, got %v", v.url, url) - /* - reversePath := "" - reverseHost := "" - if v.route.pathTemplate != nil { - reversePath = v.route.pathTemplate.Reverse - } - if v.route.hostTemplate != nil { - reverseHost = v.route.hostTemplate.Reverse - } - - t.Errorf("%#v:\nexpected: %q\ngot: %q\nreverse path: %q\nreverse host: %q", v.route, v.url, url, reversePath, reverseHost) - */ - } - } - - ArticleHandler := func(w http.ResponseWriter, r *http.Request) { - } - - router := NewRouter() - router.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).Name("article") - - url, _ := router.Get("article").URL("category", "technology", "id", "42") - expected := "/articles/technology/42" - if url.String() != expected { - t.Errorf("Expected %v, got %v", expected, url.String()) - } -} - -func TestMatchedRouteName(t *testing.T) { - routeName := "stock" - router := NewRouter() - route := router.NewRoute().Path("/products/").Name(routeName) - - url := "http://www.domain.com/products/" - request, _ := http.NewRequest("GET", url, nil) - var rv RouteMatch - ok := router.Match(request, &rv) - - if !ok || rv.Route != route { - t.Errorf("Expected same route, got %+v.", rv.Route) - } - - retName := rv.Route.GetName() - if retName != routeName { - t.Errorf("Expected %q, got %q.", routeName, retName) - } -} - -func TestSubRouting(t *testing.T) { - // Example from docs. - router := NewRouter() - subrouter := router.NewRoute().Host("www.domain.com").Subrouter() - route := subrouter.NewRoute().Path("/products/").Name("products") - - url := "http://www.domain.com/products/" - request, _ := http.NewRequest("GET", url, nil) - var rv RouteMatch - ok := router.Match(request, &rv) - - if !ok || rv.Route != route { - t.Errorf("Expected same route, got %+v.", rv.Route) - } - - u, _ := router.Get("products").URL() - builtUrl := u.String() - // Yay, subroute aware of the domain when building! 
- if builtUrl != url { - t.Errorf("Expected %q, got %q.", url, builtUrl) - } -} - -func TestVariableNames(t *testing.T) { - route := new(Route).Host("{arg1}.domain.com").Path("/{arg1}/{arg2:[0-9]+}") - if route.err == nil { - t.Errorf("Expected error for duplicated variable names") - } -} - -func TestRedirectSlash(t *testing.T) { - var route *Route - var routeMatch RouteMatch - r := NewRouter() - - r.StrictSlash(false) - route = r.NewRoute() - if route.strictSlash != false { - t.Errorf("Expected false redirectSlash.") - } - - r.StrictSlash(true) - route = r.NewRoute() - if route.strictSlash != true { - t.Errorf("Expected true redirectSlash.") - } - - route = new(Route) - route.strictSlash = true - route.Path("/{arg1}/{arg2:[0-9]+}/") - request, _ := http.NewRequest("GET", "http://localhost/foo/123", nil) - routeMatch = RouteMatch{} - _ = route.Match(request, &routeMatch) - vars := routeMatch.Vars - if vars["arg1"] != "foo" { - t.Errorf("Expected foo.") - } - if vars["arg2"] != "123" { - t.Errorf("Expected 123.") - } - rsp := NewRecorder() - routeMatch.Handler.ServeHTTP(rsp, request) - if rsp.HeaderMap.Get("Location") != "http://localhost/foo/123/" { - t.Errorf("Expected redirect header.") - } - - route = new(Route) - route.strictSlash = true - route.Path("/{arg1}/{arg2:[0-9]+}") - request, _ = http.NewRequest("GET", "http://localhost/foo/123/", nil) - routeMatch = RouteMatch{} - _ = route.Match(request, &routeMatch) - vars = routeMatch.Vars - if vars["arg1"] != "foo" { - t.Errorf("Expected foo.") - } - if vars["arg2"] != "123" { - t.Errorf("Expected 123.") - } - rsp = NewRecorder() - routeMatch.Handler.ServeHTTP(rsp, request) - if rsp.HeaderMap.Get("Location") != "http://localhost/foo/123" { - t.Errorf("Expected redirect header.") - } -} - -// Test for the new regexp library, still not available in stable Go. -func TestNewRegexp(t *testing.T) { - var p *routeRegexp - var matches []string - - tests := map[string]map[string][]string{ - "/{foo:a{2}}": { - "/a": nil, - "/aa": {"aa"}, - "/aaa": nil, - "/aaaa": nil, - }, - "/{foo:a{2,}}": { - "/a": nil, - "/aa": {"aa"}, - "/aaa": {"aaa"}, - "/aaaa": {"aaaa"}, - }, - "/{foo:a{2,3}}": { - "/a": nil, - "/aa": {"aa"}, - "/aaa": {"aaa"}, - "/aaaa": nil, - }, - "/{foo:[a-z]{3}}/{bar:[a-z]{2}}": { - "/a": nil, - "/ab": nil, - "/abc": nil, - "/abcd": nil, - "/abc/ab": {"abc", "ab"}, - "/abc/abc": nil, - "/abcd/ab": nil, - }, - `/{foo:\w{3,}}/{bar:\d{2,}}`: { - "/a": nil, - "/ab": nil, - "/abc": nil, - "/abc/1": nil, - "/abc/12": {"abc", "12"}, - "/abcd/12": {"abcd", "12"}, - "/abcd/123": {"abcd", "123"}, - }, - } - - for pattern, paths := range tests { - p, _ = newRouteRegexp(pattern, false, false, false, false) - for path, result := range paths { - matches = p.regexp.FindStringSubmatch(path) - if result == nil { - if matches != nil { - t.Errorf("%v should not match %v.", pattern, path) - } - } else { - if len(matches) != len(result)+1 { - t.Errorf("Expected %v matches, got %v.", len(result)+1, len(matches)) - } else { - for k, v := range result { - if matches[k+1] != v { - t.Errorf("Expected %v, got %v.", v, matches[k+1]) - } - } - } - } - } - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/LICENSE b/libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/LICENSE new file mode 100644 index 0000000000..c33dcc7c92 --- /dev/null +++ b/libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. 
“Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. 
under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. 
If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/acl_test.go b/libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/acl_test.go deleted file mode 100644 index 166b892dbe..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/acl_test.go +++ /dev/null @@ -1,148 +0,0 @@ -package api - -import ( - "os" - "testing" -) - -// ROOT is a management token for the tests -var CONSUL_ROOT string - -func init() { - CONSUL_ROOT = os.Getenv("CONSUL_ROOT") -} - -func TestACL_CreateDestroy(t *testing.T) { - if CONSUL_ROOT == "" { - t.SkipNow() - } - c, s := makeClient(t) - defer s.stop() - - c.config.Token = CONSUL_ROOT - acl := c.ACL() - - ae := ACLEntry{ - Name: "API test", - Type: ACLClientType, - Rules: `key "" { policy = "deny" }`, - } - - id, wm, err := acl.Create(&ae, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - if wm.RequestTime == 0 { - t.Fatalf("bad: %v", wm) - } - - if id == "" { - t.Fatalf("invalid: %v", id) - } - - ae2, _, err := acl.Info(id, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - if ae2.Name != ae.Name || ae2.Type != ae.Type || ae2.Rules != ae.Rules { - t.Fatalf("Bad: %#v", ae2) - } - - wm, err = acl.Destroy(id, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - if wm.RequestTime == 0 { - t.Fatalf("bad: %v", wm) - } -} - -func TestACL_CloneDestroy(t *testing.T) { - if CONSUL_ROOT == "" { - t.SkipNow() - } - c, s := makeClient(t) - defer s.stop() - - c.config.Token = CONSUL_ROOT - acl := c.ACL() - - id, wm, err := acl.Clone(CONSUL_ROOT, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - if wm.RequestTime == 0 { - t.Fatalf("bad: %v", wm) - } - - if id == "" { - t.Fatalf("invalid: %v", id) - } - - wm, err = acl.Destroy(id, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - if wm.RequestTime == 0 { - t.Fatalf("bad: %v", wm) - } -} - -func TestACL_Info(t *testing.T) { - if CONSUL_ROOT == "" { - t.SkipNow() - } - c, s := makeClient(t) - defer s.stop() - - c.config.Token = CONSUL_ROOT - acl := c.ACL() - - ae, qm, err := acl.Info(CONSUL_ROOT, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - if 
qm.LastIndex == 0 { - t.Fatalf("bad: %v", qm) - } - if !qm.KnownLeader { - t.Fatalf("bad: %v", qm) - } - - if ae == nil || ae.ID != CONSUL_ROOT || ae.Type != ACLManagementType { - t.Fatalf("bad: %#v", ae) - } -} - -func TestACL_List(t *testing.T) { - if CONSUL_ROOT == "" { - t.SkipNow() - } - c, s := makeClient(t) - defer s.stop() - - c.config.Token = CONSUL_ROOT - acl := c.ACL() - - acls, qm, err := acl.List(nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(acls) < 2 { - t.Fatalf("bad: %v", acls) - } - - if qm.LastIndex == 0 { - t.Fatalf("bad: %v", qm) - } - if !qm.KnownLeader { - t.Fatalf("bad: %v", qm) - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/agent_test.go b/libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/agent_test.go deleted file mode 100644 index 60cc4ae1e6..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/agent_test.go +++ /dev/null @@ -1,404 +0,0 @@ -package api - -import ( - "strings" - "testing" -) - -func TestAgent_Self(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - agent := c.Agent() - - info, err := agent.Self() - if err != nil { - t.Fatalf("err: %v", err) - } - - name := info["Config"]["NodeName"] - if name == "" { - t.Fatalf("bad: %v", info) - } -} - -func TestAgent_Members(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - agent := c.Agent() - - members, err := agent.Members(false) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(members) != 1 { - t.Fatalf("bad: %v", members) - } -} - -func TestAgent_Services(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - agent := c.Agent() - - reg := &AgentServiceRegistration{ - Name: "foo", - Tags: []string{"bar", "baz"}, - Port: 8000, - Check: &AgentServiceCheck{ - TTL: "15s", - }, - } - if err := agent.ServiceRegister(reg); err != nil { - t.Fatalf("err: %v", err) - } - - services, err := agent.Services() - if err != nil { - t.Fatalf("err: %v", err) - } - if _, ok := services["foo"]; !ok { - t.Fatalf("missing service: %v", services) - } - - checks, err := agent.Checks() - if err != nil { - t.Fatalf("err: %v", err) - } - if _, ok := checks["service:foo"]; !ok { - t.Fatalf("missing check: %v", checks) - } - - if err := agent.ServiceDeregister("foo"); err != nil { - t.Fatalf("err: %v", err) - } -} - -func TestAgent_ServiceAddress(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - agent := c.Agent() - - reg1 := &AgentServiceRegistration{ - Name: "foo1", - Port: 8000, - Address: "192.168.0.42", - } - reg2 := &AgentServiceRegistration{ - Name: "foo2", - Port: 8000, - } - if err := agent.ServiceRegister(reg1); err != nil { - t.Fatalf("err: %v", err) - } - if err := agent.ServiceRegister(reg2); err != nil { - t.Fatalf("err: %v", err) - } - - services, err := agent.Services() - if err != nil { - t.Fatalf("err: %v", err) - } - - if _, ok := services["foo1"]; !ok { - t.Fatalf("missing service: %v", services) - } - if _, ok := services["foo2"]; !ok { - t.Fatalf("missing service: %v", services) - } - - if services["foo1"].Address != "192.168.0.42" { - t.Fatalf("missing Address field in service foo1: %v", services) - } - if services["foo2"].Address != "" { - t.Fatalf("missing Address field in service foo2: %v", services) - } - - if err := agent.ServiceDeregister("foo"); err != nil { - t.Fatalf("err: %v", err) - } -} - -func TestAgent_Services_MultipleChecks(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - agent := c.Agent() - - reg := &AgentServiceRegistration{ - Name: 
"foo", - Tags: []string{"bar", "baz"}, - Port: 8000, - Checks: AgentServiceChecks{ - &AgentServiceCheck{ - TTL: "15s", - }, - &AgentServiceCheck{ - TTL: "30s", - }, - }, - } - if err := agent.ServiceRegister(reg); err != nil { - t.Fatalf("err: %v", err) - } - - services, err := agent.Services() - if err != nil { - t.Fatalf("err: %v", err) - } - if _, ok := services["foo"]; !ok { - t.Fatalf("missing service: %v", services) - } - - checks, err := agent.Checks() - if err != nil { - t.Fatalf("err: %v", err) - } - if _, ok := checks["service:foo:1"]; !ok { - t.Fatalf("missing check: %v", checks) - } - if _, ok := checks["service:foo:2"]; !ok { - t.Fatalf("missing check: %v", checks) - } -} - -func TestAgent_SetTTLStatus(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - agent := c.Agent() - - reg := &AgentServiceRegistration{ - Name: "foo", - Check: &AgentServiceCheck{ - TTL: "15s", - }, - } - if err := agent.ServiceRegister(reg); err != nil { - t.Fatalf("err: %v", err) - } - - if err := agent.WarnTTL("service:foo", "test"); err != nil { - t.Fatalf("err: %v", err) - } - - checks, err := agent.Checks() - if err != nil { - t.Fatalf("err: %v", err) - } - chk, ok := checks["service:foo"] - if !ok { - t.Fatalf("missing check: %v", checks) - } - if chk.Status != "warning" { - t.Fatalf("Bad: %#v", chk) - } - if chk.Output != "test" { - t.Fatalf("Bad: %#v", chk) - } - - if err := agent.ServiceDeregister("foo"); err != nil { - t.Fatalf("err: %v", err) - } -} - -func TestAgent_Checks(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - agent := c.Agent() - - reg := &AgentCheckRegistration{ - Name: "foo", - } - reg.TTL = "15s" - if err := agent.CheckRegister(reg); err != nil { - t.Fatalf("err: %v", err) - } - - checks, err := agent.Checks() - if err != nil { - t.Fatalf("err: %v", err) - } - if _, ok := checks["foo"]; !ok { - t.Fatalf("missing check: %v", checks) - } - - if err := agent.CheckDeregister("foo"); err != nil { - t.Fatalf("err: %v", err) - } -} - -func TestAgent_Checks_serviceBound(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - agent := c.Agent() - - // First register a service - serviceReg := &AgentServiceRegistration{ - Name: "redis", - } - if err := agent.ServiceRegister(serviceReg); err != nil { - t.Fatalf("err: %v", err) - } - - // Register a check bound to the service - reg := &AgentCheckRegistration{ - Name: "redischeck", - ServiceID: "redis", - } - reg.TTL = "15s" - if err := agent.CheckRegister(reg); err != nil { - t.Fatalf("err: %v", err) - } - - checks, err := agent.Checks() - if err != nil { - t.Fatalf("err: %v", err) - } - - check, ok := checks["redischeck"] - if !ok { - t.Fatalf("missing check: %v", checks) - } - if check.ServiceID != "redis" { - t.Fatalf("missing service association for check: %v", check) - } -} - -func TestAgent_Join(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - agent := c.Agent() - - info, err := agent.Self() - if err != nil { - t.Fatalf("err: %v", err) - } - - // Join ourself - addr := info["Config"]["AdvertiseAddr"].(string) - err = agent.Join(addr, false) - if err != nil { - t.Fatalf("err: %v", err) - } -} - -func TestAgent_ForceLeave(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - agent := c.Agent() - - // Eject somebody - err := agent.ForceLeave("foo") - if err != nil { - t.Fatalf("err: %v", err) - } -} - -func TestServiceMaintenance(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - agent := c.Agent() - - // First register a service - serviceReg := &AgentServiceRegistration{ - Name: 
"redis", - } - if err := agent.ServiceRegister(serviceReg); err != nil { - t.Fatalf("err: %v", err) - } - - // Enable maintenance mode - if err := agent.EnableServiceMaintenance("redis", "broken"); err != nil { - t.Fatalf("err: %s", err) - } - - // Ensure a critical check was added - checks, err := agent.Checks() - if err != nil { - t.Fatalf("err: %v", err) - } - found := false - for _, check := range checks { - if strings.Contains(check.CheckID, "maintenance") { - found = true - if check.Status != "critical" || check.Notes != "broken" { - t.Fatalf("bad: %#v", checks) - } - } - } - if !found { - t.Fatalf("bad: %#v", checks) - } - - // Disable maintenance mode - if err := agent.DisableServiceMaintenance("redis"); err != nil { - t.Fatalf("err: %s", err) - } - - // Ensure the critical health check was removed - checks, err = agent.Checks() - if err != nil { - t.Fatalf("err: %s", err) - } - for _, check := range checks { - if strings.Contains(check.CheckID, "maintenance") { - t.Fatalf("should have removed health check") - } - } -} - -func TestNodeMaintenance(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - agent := c.Agent() - - // Enable maintenance mode - if err := agent.EnableNodeMaintenance("broken"); err != nil { - t.Fatalf("err: %s", err) - } - - // Check that a critical check was added - checks, err := agent.Checks() - if err != nil { - t.Fatalf("err: %s", err) - } - found := false - for _, check := range checks { - if strings.Contains(check.CheckID, "maintenance") { - found = true - if check.Status != "critical" || check.Notes != "broken" { - t.Fatalf("bad: %#v", checks) - } - } - } - if !found { - t.Fatalf("bad: %#v", checks) - } - - // Disable maintenance mode - if err := agent.DisableNodeMaintenance(); err != nil { - t.Fatalf("err: %s", err) - } - - // Ensure the check was removed - checks, err = agent.Checks() - if err != nil { - t.Fatalf("err: %s", err) - } - for _, check := range checks { - if strings.Contains(check.CheckID, "maintenance") { - t.Fatalf("should have removed health check") - } - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/api_test.go b/libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/api_test.go deleted file mode 100644 index cbf6ccefeb..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/api_test.go +++ /dev/null @@ -1,339 +0,0 @@ -package api - -import ( - crand "crypto/rand" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "os" - "os/exec" - "path/filepath" - "runtime" - "testing" - "time" - - "github.com/hashicorp/consul/testutil" -) - -var consulConfig = `{ - "ports": { - "dns": 19000, - "http": 18800, - "rpc": 18600, - "serf_lan": 18200, - "serf_wan": 18400, - "server": 18000 - }, - "bind_addr": "127.0.0.1", - "data_dir": "%s", - "bootstrap": true, - "log_level": "debug", - "server": true -}` - -type testServer struct { - pid int - dataDir string - configFile string -} - -type testPortConfig struct { - DNS int `json:"dns,omitempty"` - HTTP int `json:"http,omitempty"` - RPC int `json:"rpc,omitempty"` - SerfLan int `json:"serf_lan,omitempty"` - SerfWan int `json:"serf_wan,omitempty"` - Server int `json:"server,omitempty"` -} - -type testAddressConfig struct { - HTTP string `json:"http,omitempty"` -} - -type testServerConfig struct { - Bootstrap bool `json:"bootstrap,omitempty"` - Server bool `json:"server,omitempty"` - DataDir string `json:"data_dir,omitempty"` - LogLevel string `json:"log_level,omitempty"` - Addresses *testAddressConfig 
`json:"addresses,omitempty"` - Ports testPortConfig `json:"ports,omitempty"` -} - -// Callback functions for modifying config -type configCallback func(c *Config) -type serverConfigCallback func(c *testServerConfig) - -func defaultConfig() *testServerConfig { - return &testServerConfig{ - Bootstrap: true, - Server: true, - LogLevel: "debug", - Ports: testPortConfig{ - DNS: 19000, - HTTP: 18800, - RPC: 18600, - SerfLan: 18200, - SerfWan: 18400, - Server: 18000, - }, - } -} - -func (s *testServer) stop() { - defer os.RemoveAll(s.dataDir) - defer os.RemoveAll(s.configFile) - - cmd := exec.Command("kill", "-9", fmt.Sprintf("%d", s.pid)) - if err := cmd.Run(); err != nil { - panic(err) - } -} - -func newTestServer(t *testing.T) *testServer { - return newTestServerWithConfig(t, func(c *testServerConfig) {}) -} - -func newTestServerWithConfig(t *testing.T, cb serverConfigCallback) *testServer { - if path, err := exec.LookPath("consul"); err != nil || path == "" { - t.Log("consul not found on $PATH, skipping") - t.SkipNow() - } - - pidFile, err := ioutil.TempFile("", "consul") - if err != nil { - t.Fatalf("err: %s", err) - } - pidFile.Close() - os.Remove(pidFile.Name()) - - dataDir, err := ioutil.TempDir("", "consul") - if err != nil { - t.Fatalf("err: %s", err) - } - - configFile, err := ioutil.TempFile("", "consul") - if err != nil { - t.Fatalf("err: %s", err) - } - - consulConfig := defaultConfig() - consulConfig.DataDir = dataDir - - cb(consulConfig) - - configContent, err := json.Marshal(consulConfig) - if err != nil { - t.Fatalf("err: %s", err) - } - - if _, err := configFile.Write(configContent); err != nil { - t.Fatalf("err: %s", err) - } - configFile.Close() - - // Start the server - cmd := exec.Command("consul", "agent", "-config-file", configFile.Name()) - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - if err := cmd.Start(); err != nil { - t.Fatalf("err: %s", err) - } - - return &testServer{ - pid: cmd.Process.Pid, - dataDir: dataDir, - configFile: configFile.Name(), - } -} - -func makeClient(t *testing.T) (*Client, *testServer) { - return makeClientWithConfig(t, func(c *Config) { - c.Address = "127.0.0.1:18800" - }, func(c *testServerConfig) {}) -} - -func makeClientWithConfig(t *testing.T, cb1 configCallback, cb2 serverConfigCallback) (*Client, *testServer) { - // Make client config - conf := DefaultConfig() - cb1(conf) - - // Create client - client, err := NewClient(conf) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Create server - server := newTestServerWithConfig(t, cb2) - - // Allow the server some time to start, and verify we have a leader. 
- testutil.WaitForResult(func() (bool, error) { - req := client.newRequest("GET", "/v1/catalog/nodes") - _, resp, err := client.doRequest(req) - if err != nil { - return false, err - } - resp.Body.Close() - - // Ensure we have a leader and a node registeration - if leader := resp.Header.Get("X-Consul-KnownLeader"); leader != "true" { - return false, fmt.Errorf("Consul leader status: %#v", leader) - } - if resp.Header.Get("X-Consul-Index") == "0" { - return false, fmt.Errorf("Consul index is 0") - } - - return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) - - return client, server -} - -func testKey() string { - buf := make([]byte, 16) - if _, err := crand.Read(buf); err != nil { - panic(fmt.Errorf("Failed to read random bytes: %v", err)) - } - - return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x", - buf[0:4], - buf[4:6], - buf[6:8], - buf[8:10], - buf[10:16]) -} - -func TestSetQueryOptions(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - r := c.newRequest("GET", "/v1/kv/foo") - q := &QueryOptions{ - Datacenter: "foo", - AllowStale: true, - RequireConsistent: true, - WaitIndex: 1000, - WaitTime: 100 * time.Second, - Token: "12345", - } - r.setQueryOptions(q) - - if r.params.Get("dc") != "foo" { - t.Fatalf("bad: %v", r.params) - } - if _, ok := r.params["stale"]; !ok { - t.Fatalf("bad: %v", r.params) - } - if _, ok := r.params["consistent"]; !ok { - t.Fatalf("bad: %v", r.params) - } - if r.params.Get("index") != "1000" { - t.Fatalf("bad: %v", r.params) - } - if r.params.Get("wait") != "100000ms" { - t.Fatalf("bad: %v", r.params) - } - if r.params.Get("token") != "12345" { - t.Fatalf("bad: %v", r.params) - } -} - -func TestSetWriteOptions(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - r := c.newRequest("GET", "/v1/kv/foo") - q := &WriteOptions{ - Datacenter: "foo", - Token: "23456", - } - r.setWriteOptions(q) - - if r.params.Get("dc") != "foo" { - t.Fatalf("bad: %v", r.params) - } - if r.params.Get("token") != "23456" { - t.Fatalf("bad: %v", r.params) - } -} - -func TestRequestToHTTP(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - r := c.newRequest("DELETE", "/v1/kv/foo") - q := &QueryOptions{ - Datacenter: "foo", - } - r.setQueryOptions(q) - req, err := r.toHTTP() - if err != nil { - t.Fatalf("err: %v", err) - } - - if req.Method != "DELETE" { - t.Fatalf("bad: %v", req) - } - if req.URL.RequestURI() != "/v1/kv/foo?dc=foo" { - t.Fatalf("bad: %v", req) - } -} - -func TestParseQueryMeta(t *testing.T) { - resp := &http.Response{ - Header: make(map[string][]string), - } - resp.Header.Set("X-Consul-Index", "12345") - resp.Header.Set("X-Consul-LastContact", "80") - resp.Header.Set("X-Consul-KnownLeader", "true") - - qm := &QueryMeta{} - if err := parseQueryMeta(resp, qm); err != nil { - t.Fatalf("err: %v", err) - } - - if qm.LastIndex != 12345 { - t.Fatalf("Bad: %v", qm) - } - if qm.LastContact != 80*time.Millisecond { - t.Fatalf("Bad: %v", qm) - } - if !qm.KnownLeader { - t.Fatalf("Bad: %v", qm) - } -} - -func TestAPI_UnixSocket(t *testing.T) { - if runtime.GOOS == "windows" { - t.SkipNow() - } - - tempDir, err := ioutil.TempDir("", "consul") - if err != nil { - t.Fatalf("err: %s", err) - } - defer os.RemoveAll(tempDir) - socket := filepath.Join(tempDir, "test.sock") - - c, s := makeClientWithConfig(t, func(c *Config) { - c.Address = "unix://" + socket - }, func(c *testServerConfig) { - c.Addresses = &testAddressConfig{ - HTTP: "unix://" + socket, - } - }) - defer s.stop() - - agent := c.Agent() - - info, err := agent.Self() - if err != nil { - 
t.Fatalf("err: %s", err) - } - if info["Config"]["NodeName"] == "" { - t.Fatalf("bad: %v", info) - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/catalog_test.go b/libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/catalog_test.go deleted file mode 100644 index 61980fcc24..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/catalog_test.go +++ /dev/null @@ -1,273 +0,0 @@ -package api - -import ( - "fmt" - "testing" - - "github.com/hashicorp/consul/testutil" -) - -func TestCatalog_Datacenters(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - catalog := c.Catalog() - - testutil.WaitForResult(func() (bool, error) { - datacenters, err := catalog.Datacenters() - if err != nil { - return false, err - } - - if len(datacenters) == 0 { - return false, fmt.Errorf("Bad: %v", datacenters) - } - - return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) -} - -func TestCatalog_Nodes(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - catalog := c.Catalog() - - testutil.WaitForResult(func() (bool, error) { - nodes, meta, err := catalog.Nodes(nil) - if err != nil { - return false, err - } - - if meta.LastIndex == 0 { - return false, fmt.Errorf("Bad: %v", meta) - } - - if len(nodes) == 0 { - return false, fmt.Errorf("Bad: %v", nodes) - } - - return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) -} - -func TestCatalog_Services(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - catalog := c.Catalog() - - testutil.WaitForResult(func() (bool, error) { - services, meta, err := catalog.Services(nil) - if err != nil { - return false, err - } - - if meta.LastIndex == 0 { - return false, fmt.Errorf("Bad: %v", meta) - } - - if len(services) == 0 { - return false, fmt.Errorf("Bad: %v", services) - } - - return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) -} - -func TestCatalog_Service(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - catalog := c.Catalog() - - testutil.WaitForResult(func() (bool, error) { - services, meta, err := catalog.Service("consul", "", nil) - if err != nil { - return false, err - } - - if meta.LastIndex == 0 { - return false, fmt.Errorf("Bad: %v", meta) - } - - if len(services) == 0 { - return false, fmt.Errorf("Bad: %v", services) - } - - return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) -} - -func TestCatalog_Node(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - catalog := c.Catalog() - name, _ := c.Agent().NodeName() - - testutil.WaitForResult(func() (bool, error) { - info, meta, err := catalog.Node(name, nil) - if err != nil { - return false, err - } - - if meta.LastIndex == 0 { - return false, fmt.Errorf("Bad: %v", meta) - } - if len(info.Services) == 0 { - return false, fmt.Errorf("Bad: %v", info) - } - - return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) -} - -func TestCatalog_Registration(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - catalog := c.Catalog() - - service := &AgentService{ - ID: "redis1", - Service: "redis", - Tags: []string{"master", "v1"}, - Port: 8000, - } - - check := &AgentCheck{ - Node: "foobar", - CheckID: "service:redis1", - Name: "Redis health check", - Notes: "Script based health check", - Status: "passing", - ServiceID: "redis1", - } - - reg := &CatalogRegistration{ - Datacenter: "dc1", - Node: "foobar", - Address: "192.168.10.10", - Service: service, - Check: check, - } - - testutil.WaitForResult(func() (bool, error) { - 
if _, err := catalog.Register(reg, nil); err != nil { - return false, err - } - - node, _, err := catalog.Node("foobar", nil) - if err != nil { - return false, err - } - - if _, ok := node.Services["redis1"]; !ok { - return false, fmt.Errorf("missing service: redis1") - } - - health, _, err := c.Health().Node("foobar", nil) - if err != nil { - return false, err - } - - if health[0].CheckID != "service:redis1" { - return false, fmt.Errorf("missing checkid service:redis1") - } - - return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) - - // Test catalog deregistration of the previously registered service - dereg := &CatalogDeregistration{ - Datacenter: "dc1", - Node: "foobar", - Address: "192.168.10.10", - ServiceID: "redis1", - } - - if _, err := catalog.Deregister(dereg, nil); err != nil { - t.Fatalf("err: %v", err) - } - - testutil.WaitForResult(func() (bool, error) { - node, _, err := catalog.Node("foobar", nil) - if err != nil { - return false, err - } - - if _, ok := node.Services["redis1"]; ok { - return false, fmt.Errorf("ServiceID:redis1 is not deregistered") - } - - return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) - - // Test deregistration of the previously registered check - dereg = &CatalogDeregistration{ - Datacenter: "dc1", - Node: "foobar", - Address: "192.168.10.10", - CheckID: "service:redis1", - } - - if _, err := catalog.Deregister(dereg, nil); err != nil { - t.Fatalf("err: %v", err) - } - - testutil.WaitForResult(func() (bool, error) { - health, _, err := c.Health().Node("foobar", nil) - if err != nil { - return false, err - } - - if len(health) != 0 { - return false, fmt.Errorf("CheckID:service:redis1 is not deregistered") - } - - return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) - - // Test node deregistration of the previously registered node - dereg = &CatalogDeregistration{ - Datacenter: "dc1", - Node: "foobar", - Address: "192.168.10.10", - } - - if _, err := catalog.Deregister(dereg, nil); err != nil { - t.Fatalf("err: %v", err) - } - - testutil.WaitForResult(func() (bool, error) { - node, _, err := catalog.Node("foobar", nil) - if err != nil { - return false, err - } - - if node != nil { - return false, fmt.Errorf("node is not deregistered: %v", node) - } - - return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/event_test.go b/libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/event_test.go deleted file mode 100644 index 974c40e42d..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/event_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package api - -import ( - "testing" -) - -func TestEvent_FireList(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - event := c.Event() - - params := &UserEvent{Name: "foo"} - id, meta, err := event.Fire(params, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - if meta.RequestTime == 0 { - t.Fatalf("bad: %v", meta) - } - - if id == "" { - t.Fatalf("invalid: %v", id) - } - - events, qm, err := event.List("", nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - if qm.LastIndex != event.IDToIndex(id) { - t.Fatalf("Bad: %#v", qm) - } - - if events[len(events)-1].ID != id { - t.Fatalf("bad: %#v", events) - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/health_test.go b/libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/health_test.go deleted file mode 100644 index 
e445910240..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/health_test.go +++ /dev/null @@ -1,121 +0,0 @@ -package api - -import ( - "fmt" - "testing" - - "github.com/hashicorp/consul/testutil" -) - -func TestHealth_Node(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - agent := c.Agent() - health := c.Health() - - info, err := agent.Self() - if err != nil { - t.Fatalf("err: %v", err) - } - name := info["Config"]["NodeName"].(string) - - testutil.WaitForResult(func() (bool, error) { - checks, meta, err := health.Node(name, nil) - if err != nil { - return false, err - } - if meta.LastIndex == 0 { - return false, fmt.Errorf("bad: %v", meta) - } - if len(checks) == 0 { - return false, fmt.Errorf("bad: %v", checks) - } - return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) -} - -func TestHealth_Checks(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - agent := c.Agent() - health := c.Health() - - // Make a service with a check - reg := &AgentServiceRegistration{ - Name: "foo", - Check: &AgentServiceCheck{ - TTL: "15s", - }, - } - if err := agent.ServiceRegister(reg); err != nil { - t.Fatalf("err: %v", err) - } - defer agent.ServiceDeregister("foo") - - testutil.WaitForResult(func() (bool, error) { - checks, meta, err := health.Checks("foo", nil) - if err != nil { - return false, err - } - if meta.LastIndex == 0 { - return false, fmt.Errorf("bad: %v", meta) - } - if len(checks) == 0 { - return false, fmt.Errorf("Bad: %v", checks) - } - return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) -} - -func TestHealth_Service(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - health := c.Health() - - testutil.WaitForResult(func() (bool, error) { - // consul service should always exist... 
- checks, meta, err := health.Service("consul", "", true, nil) - if err != nil { - return false, err - } - if meta.LastIndex == 0 { - return false, fmt.Errorf("bad: %v", meta) - } - if len(checks) == 0 { - return false, fmt.Errorf("Bad: %v", checks) - } - return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) -} - -func TestHealth_State(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - health := c.Health() - - testutil.WaitForResult(func() (bool, error) { - checks, meta, err := health.State("any", nil) - if err != nil { - return false, err - } - if meta.LastIndex == 0 { - return false, fmt.Errorf("bad: %v", meta) - } - if len(checks) == 0 { - return false, fmt.Errorf("Bad: %v", checks) - } - return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/kv_test.go b/libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/kv_test.go deleted file mode 100644 index 8f2b54945d..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/kv_test.go +++ /dev/null @@ -1,431 +0,0 @@ -package api - -import ( - "bytes" - "path" - "testing" - "time" -) - -func TestClientPutGetDelete(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - kv := c.KV() - - // Get a get without a key - key := testKey() - pair, _, err := kv.Get(key, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if pair != nil { - t.Fatalf("unexpected value: %#v", pair) - } - - // Put the key - value := []byte("test") - p := &KVPair{Key: key, Flags: 42, Value: value} - if _, err := kv.Put(p, nil); err != nil { - t.Fatalf("err: %v", err) - } - - // Get should work - pair, meta, err := kv.Get(key, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if pair == nil { - t.Fatalf("expected value: %#v", pair) - } - if !bytes.Equal(pair.Value, value) { - t.Fatalf("unexpected value: %#v", pair) - } - if pair.Flags != 42 { - t.Fatalf("unexpected value: %#v", pair) - } - if meta.LastIndex == 0 { - t.Fatalf("unexpected value: %#v", meta) - } - - // Delete - if _, err := kv.Delete(key, nil); err != nil { - t.Fatalf("err: %v", err) - } - - // Get should fail - pair, _, err = kv.Get(key, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if pair != nil { - t.Fatalf("unexpected value: %#v", pair) - } -} - -func TestClient_List_DeleteRecurse(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - kv := c.KV() - - // Generate some test keys - prefix := testKey() - var keys []string - for i := 0; i < 100; i++ { - keys = append(keys, path.Join(prefix, testKey())) - } - - // Set values - value := []byte("test") - for _, key := range keys { - p := &KVPair{Key: key, Value: value} - if _, err := kv.Put(p, nil); err != nil { - t.Fatalf("err: %v", err) - } - } - - // List the values - pairs, meta, err := kv.List(prefix, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if len(pairs) != len(keys) { - t.Fatalf("got %d keys", len(pairs)) - } - for _, pair := range pairs { - if !bytes.Equal(pair.Value, value) { - t.Fatalf("unexpected value: %#v", pair) - } - } - if meta.LastIndex == 0 { - t.Fatalf("unexpected value: %#v", meta) - } - - // Delete all - if _, err := kv.DeleteTree(prefix, nil); err != nil { - t.Fatalf("err: %v", err) - } - - // List the values - pairs, _, err = kv.List(prefix, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if len(pairs) != 0 { - t.Fatalf("got %d keys", len(pairs)) - } -} - -func TestClient_DeleteCAS(t *testing.T) { - c, s := makeClient(t) - defer 
s.stop() - - kv := c.KV() - - // Put the key - key := testKey() - value := []byte("test") - p := &KVPair{Key: key, Value: value} - if work, _, err := kv.CAS(p, nil); err != nil { - t.Fatalf("err: %v", err) - } else if !work { - t.Fatalf("CAS failure") - } - - // Get should work - pair, meta, err := kv.Get(key, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if pair == nil { - t.Fatalf("expected value: %#v", pair) - } - if meta.LastIndex == 0 { - t.Fatalf("unexpected value: %#v", meta) - } - - // CAS update with bad index - p.ModifyIndex = 1 - if work, _, err := kv.DeleteCAS(p, nil); err != nil { - t.Fatalf("err: %v", err) - } else if work { - t.Fatalf("unexpected CAS") - } - - // CAS update with valid index - p.ModifyIndex = meta.LastIndex - if work, _, err := kv.DeleteCAS(p, nil); err != nil { - t.Fatalf("err: %v", err) - } else if !work { - t.Fatalf("unexpected CAS failure") - } -} - -func TestClient_CAS(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - kv := c.KV() - - // Put the key - key := testKey() - value := []byte("test") - p := &KVPair{Key: key, Value: value} - if work, _, err := kv.CAS(p, nil); err != nil { - t.Fatalf("err: %v", err) - } else if !work { - t.Fatalf("CAS failure") - } - - // Get should work - pair, meta, err := kv.Get(key, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if pair == nil { - t.Fatalf("expected value: %#v", pair) - } - if meta.LastIndex == 0 { - t.Fatalf("unexpected value: %#v", meta) - } - - // CAS update with bad index - newVal := []byte("foo") - p.Value = newVal - p.ModifyIndex = 1 - if work, _, err := kv.CAS(p, nil); err != nil { - t.Fatalf("err: %v", err) - } else if work { - t.Fatalf("unexpected CAS") - } - - // CAS update with valid index - p.ModifyIndex = meta.LastIndex - if work, _, err := kv.CAS(p, nil); err != nil { - t.Fatalf("err: %v", err) - } else if !work { - t.Fatalf("unexpected CAS failure") - } -} - -func TestClient_WatchGet(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - kv := c.KV() - - // Get a get without a key - key := testKey() - pair, meta, err := kv.Get(key, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if pair != nil { - t.Fatalf("unexpected value: %#v", pair) - } - if meta.LastIndex == 0 { - t.Fatalf("unexpected value: %#v", meta) - } - - // Put the key - value := []byte("test") - go func() { - kv := c.KV() - - time.Sleep(100 * time.Millisecond) - p := &KVPair{Key: key, Flags: 42, Value: value} - if _, err := kv.Put(p, nil); err != nil { - t.Fatalf("err: %v", err) - } - }() - - // Get should work - options := &QueryOptions{WaitIndex: meta.LastIndex} - pair, meta2, err := kv.Get(key, options) - if err != nil { - t.Fatalf("err: %v", err) - } - if pair == nil { - t.Fatalf("expected value: %#v", pair) - } - if !bytes.Equal(pair.Value, value) { - t.Fatalf("unexpected value: %#v", pair) - } - if pair.Flags != 42 { - t.Fatalf("unexpected value: %#v", pair) - } - if meta2.LastIndex <= meta.LastIndex { - t.Fatalf("unexpected value: %#v", meta2) - } -} - -func TestClient_WatchList(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - kv := c.KV() - - // Get a get without a key - prefix := testKey() - key := path.Join(prefix, testKey()) - pairs, meta, err := kv.List(prefix, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if len(pairs) != 0 { - t.Fatalf("unexpected value: %#v", pairs) - } - if meta.LastIndex == 0 { - t.Fatalf("unexpected value: %#v", meta) - } - - // Put the key - value := []byte("test") - go func() { - kv := c.KV() - - time.Sleep(100 * time.Millisecond) 
- p := &KVPair{Key: key, Flags: 42, Value: value} - if _, err := kv.Put(p, nil); err != nil { - t.Fatalf("err: %v", err) - } - }() - - // Get should work - options := &QueryOptions{WaitIndex: meta.LastIndex} - pairs, meta2, err := kv.List(prefix, options) - if err != nil { - t.Fatalf("err: %v", err) - } - if len(pairs) != 1 { - t.Fatalf("expected value: %#v", pairs) - } - if !bytes.Equal(pairs[0].Value, value) { - t.Fatalf("unexpected value: %#v", pairs) - } - if pairs[0].Flags != 42 { - t.Fatalf("unexpected value: %#v", pairs) - } - if meta2.LastIndex <= meta.LastIndex { - t.Fatalf("unexpected value: %#v", meta2) - } - -} - -func TestClient_Keys_DeleteRecurse(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - kv := c.KV() - - // Generate some test keys - prefix := testKey() - var keys []string - for i := 0; i < 100; i++ { - keys = append(keys, path.Join(prefix, testKey())) - } - - // Set values - value := []byte("test") - for _, key := range keys { - p := &KVPair{Key: key, Value: value} - if _, err := kv.Put(p, nil); err != nil { - t.Fatalf("err: %v", err) - } - } - - // List the values - out, meta, err := kv.Keys(prefix, "", nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if len(out) != len(keys) { - t.Fatalf("got %d keys", len(out)) - } - if meta.LastIndex == 0 { - t.Fatalf("unexpected value: %#v", meta) - } - - // Delete all - if _, err := kv.DeleteTree(prefix, nil); err != nil { - t.Fatalf("err: %v", err) - } - - // List the values - out, _, err = kv.Keys(prefix, "", nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if len(out) != 0 { - t.Fatalf("got %d keys", len(out)) - } -} - -func TestClient_AcquireRelease(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - session := c.Session() - kv := c.KV() - - // Make a session - id, _, err := session.CreateNoChecks(nil, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - defer session.Destroy(id, nil) - - // Acquire the key - key := testKey() - value := []byte("test") - p := &KVPair{Key: key, Value: value, Session: id} - if work, _, err := kv.Acquire(p, nil); err != nil { - t.Fatalf("err: %v", err) - } else if !work { - t.Fatalf("Lock failure") - } - - // Get should work - pair, meta, err := kv.Get(key, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if pair == nil { - t.Fatalf("expected value: %#v", pair) - } - if pair.LockIndex != 1 { - t.Fatalf("Expected lock: %v", pair) - } - if pair.Session != id { - t.Fatalf("Expected lock: %v", pair) - } - if meta.LastIndex == 0 { - t.Fatalf("unexpected value: %#v", meta) - } - - // Release - if work, _, err := kv.Release(p, nil); err != nil { - t.Fatalf("err: %v", err) - } else if !work { - t.Fatalf("Release fail") - } - - // Get should work - pair, meta, err = kv.Get(key, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if pair == nil { - t.Fatalf("expected value: %#v", pair) - } - if pair.LockIndex != 1 { - t.Fatalf("Expected lock: %v", pair) - } - if pair.Session != "" { - t.Fatalf("Expected unlock: %v", pair) - } - if meta.LastIndex == 0 { - t.Fatalf("unexpected value: %#v", meta) - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/lock_test.go b/libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/lock_test.go deleted file mode 100644 index a4aea7349c..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/lock_test.go +++ /dev/null @@ -1,289 +0,0 @@ -package api - -import ( - "log" - "sync" - "testing" - "time" -) - -func TestLock_LockUnlock(t *testing.T) { - c, s := 
makeClient(t) - defer s.stop() - - lock, err := c.LockKey("test/lock") - if err != nil { - t.Fatalf("err: %v", err) - } - - // Initial unlock should fail - err = lock.Unlock() - if err != ErrLockNotHeld { - t.Fatalf("err: %v", err) - } - - // Should work - leaderCh, err := lock.Lock(nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if leaderCh == nil { - t.Fatalf("not leader") - } - - // Double lock should fail - _, err = lock.Lock(nil) - if err != ErrLockHeld { - t.Fatalf("err: %v", err) - } - - // Should be leader - select { - case <-leaderCh: - t.Fatalf("should be leader") - default: - } - - // Initial unlock should work - err = lock.Unlock() - if err != nil { - t.Fatalf("err: %v", err) - } - - // Double unlock should fail - err = lock.Unlock() - if err != ErrLockNotHeld { - t.Fatalf("err: %v", err) - } - - // Should loose leadership - select { - case <-leaderCh: - case <-time.After(time.Second): - t.Fatalf("should not be leader") - } -} - -func TestLock_ForceInvalidate(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - lock, err := c.LockKey("test/lock") - if err != nil { - t.Fatalf("err: %v", err) - } - - // Should work - leaderCh, err := lock.Lock(nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if leaderCh == nil { - t.Fatalf("not leader") - } - defer lock.Unlock() - - go func() { - // Nuke the session, simulator an operator invalidation - // or a health check failure - session := c.Session() - session.Destroy(lock.lockSession, nil) - }() - - // Should loose leadership - select { - case <-leaderCh: - case <-time.After(time.Second): - t.Fatalf("should not be leader") - } -} - -func TestLock_DeleteKey(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - lock, err := c.LockKey("test/lock") - if err != nil { - t.Fatalf("err: %v", err) - } - - // Should work - leaderCh, err := lock.Lock(nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if leaderCh == nil { - t.Fatalf("not leader") - } - defer lock.Unlock() - - go func() { - // Nuke the key, simulate an operator intervention - kv := c.KV() - kv.Delete("test/lock", nil) - }() - - // Should loose leadership - select { - case <-leaderCh: - case <-time.After(time.Second): - t.Fatalf("should not be leader") - } -} - -func TestLock_Contend(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - wg := &sync.WaitGroup{} - acquired := make([]bool, 3) - for idx := range acquired { - wg.Add(1) - go func(idx int) { - defer wg.Done() - lock, err := c.LockKey("test/lock") - if err != nil { - t.Fatalf("err: %v", err) - } - - // Should work eventually, will contend - leaderCh, err := lock.Lock(nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if leaderCh == nil { - t.Fatalf("not leader") - } - defer lock.Unlock() - log.Printf("Contender %d acquired", idx) - - // Set acquired and then leave - acquired[idx] = true - }(idx) - } - - // Wait for termination - doneCh := make(chan struct{}) - go func() { - wg.Wait() - close(doneCh) - }() - - // Wait for everybody to get a turn - select { - case <-doneCh: - case <-time.After(3 * DefaultLockRetryTime): - t.Fatalf("timeout") - } - - for idx, did := range acquired { - if !did { - t.Fatalf("contender %d never acquired", idx) - } - } -} - -func TestLock_Destroy(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - lock, err := c.LockKey("test/lock") - if err != nil { - t.Fatalf("err: %v", err) - } - - // Should work - leaderCh, err := lock.Lock(nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if leaderCh == nil { - t.Fatalf("not leader") - } - - // Destroy 
should fail - if err := lock.Destroy(); err != ErrLockHeld { - t.Fatalf("err: %v", err) - } - - // Should be able to release - err = lock.Unlock() - if err != nil { - t.Fatalf("err: %v", err) - } - - // Acquire with a different lock - l2, err := c.LockKey("test/lock") - if err != nil { - t.Fatalf("err: %v", err) - } - - // Should work - leaderCh, err = l2.Lock(nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if leaderCh == nil { - t.Fatalf("not leader") - } - - // Destroy should still fail - if err := lock.Destroy(); err != ErrLockInUse { - t.Fatalf("err: %v", err) - } - - // Should relese - err = l2.Unlock() - if err != nil { - t.Fatalf("err: %v", err) - } - - // Destroy should work - err = lock.Destroy() - if err != nil { - t.Fatalf("err: %v", err) - } - - // Double destroy should work - err = l2.Destroy() - if err != nil { - t.Fatalf("err: %v", err) - } -} - -func TestLock_Conflict(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - sema, err := c.SemaphorePrefix("test/lock/", 2) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Should work - lockCh, err := sema.Acquire(nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if lockCh == nil { - t.Fatalf("not hold") - } - defer sema.Release() - - lock, err := c.LockKey("test/lock/.lock") - if err != nil { - t.Fatalf("err: %v", err) - } - - // Should conflict with semaphore - _, err = lock.Lock(nil) - if err != ErrLockConflict { - t.Fatalf("err: %v", err) - } - - // Should conflict with semaphore - err = lock.Destroy() - if err != ErrLockConflict { - t.Fatalf("err: %v", err) - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/semaphore_test.go b/libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/semaphore_test.go deleted file mode 100644 index b931d25938..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/semaphore_test.go +++ /dev/null @@ -1,306 +0,0 @@ -package api - -import ( - "log" - "sync" - "testing" - "time" -) - -func TestSemaphore_AcquireRelease(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - sema, err := c.SemaphorePrefix("test/semaphore", 2) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Initial release should fail - err = sema.Release() - if err != ErrSemaphoreNotHeld { - t.Fatalf("err: %v", err) - } - - // Should work - lockCh, err := sema.Acquire(nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if lockCh == nil { - t.Fatalf("not hold") - } - - // Double lock should fail - _, err = sema.Acquire(nil) - if err != ErrSemaphoreHeld { - t.Fatalf("err: %v", err) - } - - // Should be held - select { - case <-lockCh: - t.Fatalf("should be held") - default: - } - - // Initial release should work - err = sema.Release() - if err != nil { - t.Fatalf("err: %v", err) - } - - // Double unlock should fail - err = sema.Release() - if err != ErrSemaphoreNotHeld { - t.Fatalf("err: %v", err) - } - - // Should lose resource - select { - case <-lockCh: - case <-time.After(time.Second): - t.Fatalf("should not be held") - } -} - -func TestSemaphore_ForceInvalidate(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - sema, err := c.SemaphorePrefix("test/semaphore", 2) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Should work - lockCh, err := sema.Acquire(nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if lockCh == nil { - t.Fatalf("not acquired") - } - defer sema.Release() - - go func() { - // Nuke the session, simulator an operator invalidation - // or a health check failure - session := 
c.Session() - session.Destroy(sema.lockSession, nil) - }() - - // Should loose slot - select { - case <-lockCh: - case <-time.After(time.Second): - t.Fatalf("should not be locked") - } -} - -func TestSemaphore_DeleteKey(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - sema, err := c.SemaphorePrefix("test/semaphore", 2) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Should work - lockCh, err := sema.Acquire(nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if lockCh == nil { - t.Fatalf("not locked") - } - defer sema.Release() - - go func() { - // Nuke the key, simulate an operator intervention - kv := c.KV() - kv.DeleteTree("test/semaphore", nil) - }() - - // Should loose leadership - select { - case <-lockCh: - case <-time.After(time.Second): - t.Fatalf("should not be locked") - } -} - -func TestSemaphore_Contend(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - wg := &sync.WaitGroup{} - acquired := make([]bool, 4) - for idx := range acquired { - wg.Add(1) - go func(idx int) { - defer wg.Done() - sema, err := c.SemaphorePrefix("test/semaphore", 2) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Should work eventually, will contend - lockCh, err := sema.Acquire(nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if lockCh == nil { - t.Fatalf("not locked") - } - defer sema.Release() - log.Printf("Contender %d acquired", idx) - - // Set acquired and then leave - acquired[idx] = true - }(idx) - } - - // Wait for termination - doneCh := make(chan struct{}) - go func() { - wg.Wait() - close(doneCh) - }() - - // Wait for everybody to get a turn - select { - case <-doneCh: - case <-time.After(3 * DefaultLockRetryTime): - t.Fatalf("timeout") - } - - for idx, did := range acquired { - if !did { - t.Fatalf("contender %d never acquired", idx) - } - } -} - -func TestSemaphore_BadLimit(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - sema, err := c.SemaphorePrefix("test/semaphore", 0) - if err == nil { - t.Fatalf("should error") - } - - sema, err = c.SemaphorePrefix("test/semaphore", 1) - if err != nil { - t.Fatalf("err: %v", err) - } - - _, err = sema.Acquire(nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - sema2, err := c.SemaphorePrefix("test/semaphore", 2) - if err != nil { - t.Fatalf("err: %v", err) - } - - _, err = sema2.Acquire(nil) - if err.Error() != "semaphore limit conflict (lock: 1, local: 2)" { - t.Fatalf("err: %v", err) - } -} - -func TestSemaphore_Destroy(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - sema, err := c.SemaphorePrefix("test/semaphore", 2) - if err != nil { - t.Fatalf("err: %v", err) - } - - sema2, err := c.SemaphorePrefix("test/semaphore", 2) - if err != nil { - t.Fatalf("err: %v", err) - } - - _, err = sema.Acquire(nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - _, err = sema2.Acquire(nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Destroy should fail, still held - if err := sema.Destroy(); err != ErrSemaphoreHeld { - t.Fatalf("err: %v", err) - } - - err = sema.Release() - if err != nil { - t.Fatalf("err: %v", err) - } - - // Destroy should fail, still in use - if err := sema.Destroy(); err != ErrSemaphoreInUse { - t.Fatalf("err: %v", err) - } - - err = sema2.Release() - if err != nil { - t.Fatalf("err: %v", err) - } - - // Destroy should work - if err := sema.Destroy(); err != nil { - t.Fatalf("err: %v", err) - } - - // Destroy should work - if err := sema2.Destroy(); err != nil { - t.Fatalf("err: %v", err) - } -} - -func TestSemaphore_Conflict(t *testing.T) { - c, s 
:= makeClient(t) - defer s.stop() - - lock, err := c.LockKey("test/sema/.lock") - if err != nil { - t.Fatalf("err: %v", err) - } - - // Should work - leaderCh, err := lock.Lock(nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if leaderCh == nil { - t.Fatalf("not leader") - } - defer lock.Unlock() - - sema, err := c.SemaphorePrefix("test/sema/", 2) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Should conflict with lock - _, err = sema.Acquire(nil) - if err != ErrSemaphoreConflict { - t.Fatalf("err: %v", err) - } - - // Should conflict with lock - err = sema.Destroy() - if err != ErrSemaphoreConflict { - t.Fatalf("err: %v", err) - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/session_test.go b/libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/session_test.go deleted file mode 100644 index 3579e48b1b..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/session_test.go +++ /dev/null @@ -1,200 +0,0 @@ -package api - -import ( - "testing" -) - -func TestSession_CreateDestroy(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - session := c.Session() - - id, meta, err := session.Create(nil, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - if meta.RequestTime == 0 { - t.Fatalf("bad: %v", meta) - } - - if id == "" { - t.Fatalf("invalid: %v", id) - } - - meta, err = session.Destroy(id, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - if meta.RequestTime == 0 { - t.Fatalf("bad: %v", meta) - } -} - -func TestSession_CreateRenewDestroy(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - session := c.Session() - - se := &SessionEntry{ - TTL: "10s", - } - - id, meta, err := session.Create(se, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - defer session.Destroy(id, nil) - - if meta.RequestTime == 0 { - t.Fatalf("bad: %v", meta) - } - - if id == "" { - t.Fatalf("invalid: %v", id) - } - - if meta.RequestTime == 0 { - t.Fatalf("bad: %v", meta) - } - - renew, meta, err := session.Renew(id, nil) - - if err != nil { - t.Fatalf("err: %v", err) - } - if meta.RequestTime == 0 { - t.Fatalf("bad: %v", meta) - } - - if renew == nil { - t.Fatalf("should get session") - } - - if renew.ID != id { - t.Fatalf("should have matching id") - } - - if renew.TTL != "10s" { - t.Fatalf("should get session with TTL") - } -} - -func TestSession_Info(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - session := c.Session() - - id, _, err := session.Create(nil, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - defer session.Destroy(id, nil) - - info, qm, err := session.Info(id, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - if qm.LastIndex == 0 { - t.Fatalf("bad: %v", qm) - } - if !qm.KnownLeader { - t.Fatalf("bad: %v", qm) - } - - if info == nil { - t.Fatalf("should get session") - } - if info.CreateIndex == 0 { - t.Fatalf("bad: %v", info) - } - if info.ID != id { - t.Fatalf("bad: %v", info) - } - if info.Name != "" { - t.Fatalf("bad: %v", info) - } - if info.Node == "" { - t.Fatalf("bad: %v", info) - } - if len(info.Checks) == 0 { - t.Fatalf("bad: %v", info) - } - if info.LockDelay == 0 { - t.Fatalf("bad: %v", info) - } - if info.Behavior != "release" { - t.Fatalf("bad: %v", info) - } - if info.TTL != "" { - t.Fatalf("bad: %v", info) - } -} - -func TestSession_Node(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - session := c.Session() - - id, _, err := session.Create(nil, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - defer session.Destroy(id, 
nil) - - info, qm, err := session.Info(id, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - sessions, qm, err := session.Node(info.Node, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(sessions) != 1 { - t.Fatalf("bad: %v", sessions) - } - - if qm.LastIndex == 0 { - t.Fatalf("bad: %v", qm) - } - if !qm.KnownLeader { - t.Fatalf("bad: %v", qm) - } -} - -func TestSession_List(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - session := c.Session() - - id, _, err := session.Create(nil, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - defer session.Destroy(id, nil) - - sessions, qm, err := session.List(nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(sessions) != 1 { - t.Fatalf("bad: %v", sessions) - } - - if qm.LastIndex == 0 { - t.Fatalf("bad: %v", qm) - } - if !qm.KnownLeader { - t.Fatalf("bad: %v", qm) - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/status_test.go b/libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/status_test.go deleted file mode 100644 index 096b13da09..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/status_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package api - -import ( - "testing" -) - -func TestStatusLeader(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - status := c.Status() - - leader, err := status.Leader() - if err != nil { - t.Fatalf("err: %v", err) - } - if leader == "" { - t.Fatalf("Expected leader") - } -} - -func TestStatusPeers(t *testing.T) { - c, s := makeClient(t) - defer s.stop() - - status := c.Status() - - peers, err := status.Peers() - if err != nil { - t.Fatalf("err: %v", err) - } - if len(peers) == 0 { - t.Fatalf("Expected peers ") - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/LICENSE b/libnetwork/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/LICENSE new file mode 100644 index 0000000000..ccae99f6a9 --- /dev/null +++ b/libnetwork/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2012, 2013 Ugorji Nwoke. +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +* Neither the name of the author nor the names of its contributors may be used + to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/libnetwork/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/bench_test.go b/libnetwork/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/bench_test.go deleted file mode 100644 index 4d437035e0..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/bench_test.go +++ /dev/null @@ -1,319 +0,0 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -package codec - -import ( - "bytes" - "encoding/gob" - "encoding/json" - "flag" - "fmt" - "reflect" - "runtime" - "testing" - "time" -) - -// Sample way to run: -// go test -bi -bv -bd=1 -benchmem -bench=. - -var ( - _ = fmt.Printf - benchTs *TestStruc - - approxSize int - - benchDoInitBench bool - benchVerify bool - benchUnscientificRes bool = false - //depth of 0 maps to ~400bytes json-encoded string, 1 maps to ~1400 bytes, etc - //For depth>1, we likely trigger stack growth for encoders, making benchmarking unreliable. - benchDepth int - benchInitDebug bool - benchCheckers []benchChecker -) - -type benchEncFn func(interface{}) ([]byte, error) -type benchDecFn func([]byte, interface{}) error -type benchIntfFn func() interface{} - -type benchChecker struct { - name string - encodefn benchEncFn - decodefn benchDecFn -} - -func benchInitFlags() { - flag.BoolVar(&benchInitDebug, "bg", false, "Bench Debug") - flag.IntVar(&benchDepth, "bd", 1, "Bench Depth: If >1, potential unreliable results due to stack growth") - flag.BoolVar(&benchDoInitBench, "bi", false, "Run Bench Init") - flag.BoolVar(&benchVerify, "bv", false, "Verify Decoded Value during Benchmark") - flag.BoolVar(&benchUnscientificRes, "bu", false, "Show Unscientific Results during Benchmark") -} - -func benchInit() { - benchTs = newTestStruc(benchDepth, true) - approxSize = approxDataSize(reflect.ValueOf(benchTs)) - bytesLen := 1024 * 4 * (benchDepth + 1) * (benchDepth + 1) - if bytesLen < approxSize { - bytesLen = approxSize - } - - benchCheckers = append(benchCheckers, - benchChecker{"msgpack", fnMsgpackEncodeFn, fnMsgpackDecodeFn}, - benchChecker{"binc-nosym", fnBincNoSymEncodeFn, fnBincNoSymDecodeFn}, - benchChecker{"binc-sym", fnBincSymEncodeFn, fnBincSymDecodeFn}, - benchChecker{"simple", fnSimpleEncodeFn, fnSimpleDecodeFn}, - benchChecker{"gob", fnGobEncodeFn, fnGobDecodeFn}, - benchChecker{"json", fnJsonEncodeFn, fnJsonDecodeFn}, - ) - if benchDoInitBench { - runBenchInit() - } -} - -func runBenchInit() { - logT(nil, "..............................................") - logT(nil, "BENCHMARK INIT: %v", time.Now()) - logT(nil, "To run full benchmark comparing encodings (MsgPack, Binc, Simple, JSON, GOB, etc), "+ - "use: \"go test -bench=.\"") - logT(nil, "Benchmark: ") - logT(nil, "\tStruct recursive Depth: %d", benchDepth) - if approxSize > 0 { - logT(nil, "\tApproxDeepSize Of benchmark Struct: %d bytes", approxSize) - } - if benchUnscientificRes { - logT(nil, "Benchmark One-Pass Run (with Unscientific 
Encode/Decode times): ") - } else { - logT(nil, "Benchmark One-Pass Run:") - } - for _, bc := range benchCheckers { - doBenchCheck(bc.name, bc.encodefn, bc.decodefn) - } - logT(nil, "..............................................") - if benchInitDebug { - logT(nil, "<<<<====>>>> depth: %v, ts: %#v\n", benchDepth, benchTs) - } -} - -func fnBenchNewTs() interface{} { - return new(TestStruc) -} - -func doBenchCheck(name string, encfn benchEncFn, decfn benchDecFn) { - runtime.GC() - tnow := time.Now() - buf, err := encfn(benchTs) - if err != nil { - logT(nil, "\t%10s: **** Error encoding benchTs: %v", name, err) - } - encDur := time.Now().Sub(tnow) - encLen := len(buf) - runtime.GC() - if !benchUnscientificRes { - logT(nil, "\t%10s: len: %d bytes\n", name, encLen) - return - } - tnow = time.Now() - if err = decfn(buf, new(TestStruc)); err != nil { - logT(nil, "\t%10s: **** Error decoding into new TestStruc: %v", name, err) - } - decDur := time.Now().Sub(tnow) - logT(nil, "\t%10s: len: %d bytes, encode: %v, decode: %v\n", name, encLen, encDur, decDur) -} - -func fnBenchmarkEncode(b *testing.B, encName string, ts interface{}, encfn benchEncFn) { - runtime.GC() - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := encfn(ts) - if err != nil { - logT(b, "Error encoding benchTs: %s: %v", encName, err) - b.FailNow() - } - } -} - -func fnBenchmarkDecode(b *testing.B, encName string, ts interface{}, - encfn benchEncFn, decfn benchDecFn, newfn benchIntfFn, -) { - buf, err := encfn(ts) - if err != nil { - logT(b, "Error encoding benchTs: %s: %v", encName, err) - b.FailNow() - } - runtime.GC() - b.ResetTimer() - for i := 0; i < b.N; i++ { - ts = newfn() - if err = decfn(buf, ts); err != nil { - logT(b, "Error decoding into new TestStruc: %s: %v", encName, err) - b.FailNow() - } - if benchVerify { - if vts, vok := ts.(*TestStruc); vok { - verifyTsTree(b, vts) - } - } - } -} - -func verifyTsTree(b *testing.B, ts *TestStruc) { - var ts0, ts1m, ts2m, ts1s, ts2s *TestStruc - ts0 = ts - - if benchDepth > 0 { - ts1m, ts1s = verifyCheckAndGet(b, ts0) - } - - if benchDepth > 1 { - ts2m, ts2s = verifyCheckAndGet(b, ts1m) - } - for _, tsx := range []*TestStruc{ts0, ts1m, ts2m, ts1s, ts2s} { - if tsx != nil { - verifyOneOne(b, tsx) - } - } -} - -func verifyCheckAndGet(b *testing.B, ts0 *TestStruc) (ts1m *TestStruc, ts1s *TestStruc) { - // if len(ts1m.Ms) <= 2 { - // logT(b, "Error: ts1m.Ms len should be > 2. Got: %v", len(ts1m.Ms)) - // b.FailNow() - // } - if len(ts0.Its) == 0 { - logT(b, "Error: ts0.Islice len should be > 0. 
Got: %v", len(ts0.Its)) - b.FailNow() - } - ts1m = ts0.Mtsptr["0"] - ts1s = ts0.Its[0] - if ts1m == nil || ts1s == nil { - logT(b, "Error: At benchDepth 1, No *TestStruc found") - b.FailNow() - } - return -} - -func verifyOneOne(b *testing.B, ts *TestStruc) { - if ts.I64slice[2] != int64(3) { - logT(b, "Error: Decode failed by checking values") - b.FailNow() - } -} - -func fnMsgpackEncodeFn(ts interface{}) (bs []byte, err error) { - err = NewEncoderBytes(&bs, testMsgpackH).Encode(ts) - return -} - -func fnMsgpackDecodeFn(buf []byte, ts interface{}) error { - return NewDecoderBytes(buf, testMsgpackH).Decode(ts) -} - -func fnBincEncodeFn(ts interface{}, sym AsSymbolFlag) (bs []byte, err error) { - tSym := testBincH.AsSymbols - testBincH.AsSymbols = sym - err = NewEncoderBytes(&bs, testBincH).Encode(ts) - testBincH.AsSymbols = tSym - return -} - -func fnBincDecodeFn(buf []byte, ts interface{}, sym AsSymbolFlag) (err error) { - tSym := testBincH.AsSymbols - testBincH.AsSymbols = sym - err = NewDecoderBytes(buf, testBincH).Decode(ts) - testBincH.AsSymbols = tSym - return -} - -func fnBincNoSymEncodeFn(ts interface{}) (bs []byte, err error) { - return fnBincEncodeFn(ts, AsSymbolNone) -} - -func fnBincNoSymDecodeFn(buf []byte, ts interface{}) error { - return fnBincDecodeFn(buf, ts, AsSymbolNone) -} - -func fnBincSymEncodeFn(ts interface{}) (bs []byte, err error) { - return fnBincEncodeFn(ts, AsSymbolAll) -} - -func fnBincSymDecodeFn(buf []byte, ts interface{}) error { - return fnBincDecodeFn(buf, ts, AsSymbolAll) -} - -func fnSimpleEncodeFn(ts interface{}) (bs []byte, err error) { - err = NewEncoderBytes(&bs, testSimpleH).Encode(ts) - return -} - -func fnSimpleDecodeFn(buf []byte, ts interface{}) error { - return NewDecoderBytes(buf, testSimpleH).Decode(ts) -} - -func fnGobEncodeFn(ts interface{}) ([]byte, error) { - bbuf := new(bytes.Buffer) - err := gob.NewEncoder(bbuf).Encode(ts) - return bbuf.Bytes(), err -} - -func fnGobDecodeFn(buf []byte, ts interface{}) error { - return gob.NewDecoder(bytes.NewBuffer(buf)).Decode(ts) -} - -func fnJsonEncodeFn(ts interface{}) ([]byte, error) { - return json.Marshal(ts) -} - -func fnJsonDecodeFn(buf []byte, ts interface{}) error { - return json.Unmarshal(buf, ts) -} - -func Benchmark__Msgpack____Encode(b *testing.B) { - fnBenchmarkEncode(b, "msgpack", benchTs, fnMsgpackEncodeFn) -} - -func Benchmark__Msgpack____Decode(b *testing.B) { - fnBenchmarkDecode(b, "msgpack", benchTs, fnMsgpackEncodeFn, fnMsgpackDecodeFn, fnBenchNewTs) -} - -func Benchmark__Binc_NoSym_Encode(b *testing.B) { - fnBenchmarkEncode(b, "binc", benchTs, fnBincNoSymEncodeFn) -} - -func Benchmark__Binc_NoSym_Decode(b *testing.B) { - fnBenchmarkDecode(b, "binc", benchTs, fnBincNoSymEncodeFn, fnBincNoSymDecodeFn, fnBenchNewTs) -} - -func Benchmark__Binc_Sym___Encode(b *testing.B) { - fnBenchmarkEncode(b, "binc", benchTs, fnBincSymEncodeFn) -} - -func Benchmark__Binc_Sym___Decode(b *testing.B) { - fnBenchmarkDecode(b, "binc", benchTs, fnBincSymEncodeFn, fnBincSymDecodeFn, fnBenchNewTs) -} - -func Benchmark__Simple____Encode(b *testing.B) { - fnBenchmarkEncode(b, "simple", benchTs, fnSimpleEncodeFn) -} - -func Benchmark__Simple____Decode(b *testing.B) { - fnBenchmarkDecode(b, "simple", benchTs, fnSimpleEncodeFn, fnSimpleDecodeFn, fnBenchNewTs) -} - -func Benchmark__Gob________Encode(b *testing.B) { - fnBenchmarkEncode(b, "gob", benchTs, fnGobEncodeFn) -} - -func Benchmark__Gob________Decode(b *testing.B) { - fnBenchmarkDecode(b, "gob", benchTs, fnGobEncodeFn, fnGobDecodeFn, fnBenchNewTs) -} - 
-func Benchmark__Json_______Encode(b *testing.B) { - fnBenchmarkEncode(b, "json", benchTs, fnJsonEncodeFn) -} - -func Benchmark__Json_______Decode(b *testing.B) { - fnBenchmarkDecode(b, "json", benchTs, fnJsonEncodeFn, fnJsonDecodeFn, fnBenchNewTs) -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/codecs_test.go b/libnetwork/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/codecs_test.go deleted file mode 100644 index cb184491f1..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/codecs_test.go +++ /dev/null @@ -1,1002 +0,0 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -package codec - -// Test works by using a slice of interfaces. -// It can test for encoding/decoding into/from a nil interface{} -// or passing the object to encode/decode into. -// -// There are basically 2 main tests here. -// First test internally encodes and decodes things and verifies that -// the artifact was as expected. -// Second test will use python msgpack to create a bunch of golden files, -// read those files, and compare them to what it should be. It then -// writes those files back out and compares the byte streams. -// -// Taken together, the tests are pretty extensive. - -import ( - "bytes" - "encoding/gob" - "flag" - "fmt" - "io/ioutil" - "math" - "net" - "net/rpc" - "os" - "os/exec" - "path/filepath" - "reflect" - "runtime" - "strconv" - "sync/atomic" - "testing" - "time" -) - -type testVerifyArg int - -const ( - testVerifyMapTypeSame testVerifyArg = iota - testVerifyMapTypeStrIntf - testVerifyMapTypeIntfIntf - // testVerifySliceIntf - testVerifyForPython -) - -var ( - testInitDebug bool - testUseIoEncDec bool - testStructToArray bool - testWriteNoSymbols bool - - _ = fmt.Printf - skipVerifyVal interface{} = &(struct{}{}) - - // For Go Time, do not use a descriptive timezone. - // It's unnecessary, and makes it harder to do a reflect.DeepEqual. - // The Offset already tells what the offset should be, if not on UTC and unknown zone name. 
- timeLoc = time.FixedZone("", -8*60*60) // UTC-08:00 //time.UTC-8 - timeToCompare1 = time.Date(2012, 2, 2, 2, 2, 2, 2000, timeLoc) - timeToCompare2 = time.Date(1900, 2, 2, 2, 2, 2, 2000, timeLoc) - timeToCompare3 = time.Unix(0, 0).UTC() - timeToCompare4 = time.Time{}.UTC() - - table []interface{} // main items we encode - tableVerify []interface{} // we verify encoded things against this after decode - tableTestNilVerify []interface{} // for nil interface, use this to verify (rules are different) - tablePythonVerify []interface{} // for verifying for python, since Python sometimes - // will encode a float32 as float64, or large int as uint - testRpcInt = new(TestRpcInt) - testMsgpackH = &MsgpackHandle{} - testBincH = &BincHandle{} - testSimpleH = &SimpleHandle{} -) - -func testInitFlags() { - // delete(testDecOpts.ExtFuncs, timeTyp) - flag.BoolVar(&testInitDebug, "tg", false, "Test Debug") - flag.BoolVar(&testUseIoEncDec, "ti", false, "Use IO Reader/Writer for Marshal/Unmarshal") - flag.BoolVar(&testStructToArray, "ts", false, "Set StructToArray option") - flag.BoolVar(&testWriteNoSymbols, "tn", false, "Set NoSymbols option") -} - -type AnonInTestStruc struct { - AS string - AI64 int64 - AI16 int16 - AUi64 uint64 - ASslice []string - AI64slice []int64 -} - -type TestStruc struct { - S string - I64 int64 - I16 int16 - Ui64 uint64 - Ui8 uint8 - B bool - By byte - - Sslice []string - I64slice []int64 - I16slice []int16 - Ui64slice []uint64 - Ui8slice []uint8 - Bslice []bool - Byslice []byte - - Islice []interface{} - Iptrslice []*int64 - - AnonInTestStruc - - //M map[interface{}]interface{} `json:"-",bson:"-"` - Ms map[string]interface{} - Msi64 map[string]int64 - - Nintf interface{} //don't set this, so we can test for nil - T time.Time - Nmap map[string]bool //don't set this, so we can test for nil - Nslice []byte //don't set this, so we can test for nil - Nint64 *int64 //don't set this, so we can test for nil - Mtsptr map[string]*TestStruc - Mts map[string]TestStruc - Its []*TestStruc - Nteststruc *TestStruc -} - -type TestABC struct { - A, B, C string -} - -type TestRpcInt struct { - i int -} - -func (r *TestRpcInt) Update(n int, res *int) error { r.i = n; *res = r.i; return nil } -func (r *TestRpcInt) Square(ignore int, res *int) error { *res = r.i * r.i; return nil } -func (r *TestRpcInt) Mult(n int, res *int) error { *res = r.i * n; return nil } -func (r *TestRpcInt) EchoStruct(arg TestABC, res *string) error { - *res = fmt.Sprintf("%#v", arg) - return nil -} -func (r *TestRpcInt) Echo123(args []string, res *string) error { - *res = fmt.Sprintf("%#v", args) - return nil -} - -func testVerifyVal(v interface{}, arg testVerifyArg) (v2 interface{}) { - //for python msgpack, - // - all positive integers are unsigned 64-bit ints - // - all floats are float64 - switch iv := v.(type) { - case int8: - if iv > 0 { - v2 = uint64(iv) - } else { - v2 = int64(iv) - } - case int16: - if iv > 0 { - v2 = uint64(iv) - } else { - v2 = int64(iv) - } - case int32: - if iv > 0 { - v2 = uint64(iv) - } else { - v2 = int64(iv) - } - case int64: - if iv > 0 { - v2 = uint64(iv) - } else { - v2 = int64(iv) - } - case uint8: - v2 = uint64(iv) - case uint16: - v2 = uint64(iv) - case uint32: - v2 = uint64(iv) - case uint64: - v2 = uint64(iv) - case float32: - v2 = float64(iv) - case float64: - v2 = float64(iv) - case []interface{}: - m2 := make([]interface{}, len(iv)) - for j, vj := range iv { - m2[j] = testVerifyVal(vj, arg) - } - v2 = m2 - case map[string]bool: - switch arg { - case testVerifyMapTypeSame: - m2 := 
make(map[string]bool) - for kj, kv := range iv { - m2[kj] = kv - } - v2 = m2 - case testVerifyMapTypeStrIntf, testVerifyForPython: - m2 := make(map[string]interface{}) - for kj, kv := range iv { - m2[kj] = kv - } - v2 = m2 - case testVerifyMapTypeIntfIntf: - m2 := make(map[interface{}]interface{}) - for kj, kv := range iv { - m2[kj] = kv - } - v2 = m2 - } - case map[string]interface{}: - switch arg { - case testVerifyMapTypeSame: - m2 := make(map[string]interface{}) - for kj, kv := range iv { - m2[kj] = testVerifyVal(kv, arg) - } - v2 = m2 - case testVerifyMapTypeStrIntf, testVerifyForPython: - m2 := make(map[string]interface{}) - for kj, kv := range iv { - m2[kj] = testVerifyVal(kv, arg) - } - v2 = m2 - case testVerifyMapTypeIntfIntf: - m2 := make(map[interface{}]interface{}) - for kj, kv := range iv { - m2[kj] = testVerifyVal(kv, arg) - } - v2 = m2 - } - case map[interface{}]interface{}: - m2 := make(map[interface{}]interface{}) - for kj, kv := range iv { - m2[testVerifyVal(kj, arg)] = testVerifyVal(kv, arg) - } - v2 = m2 - case time.Time: - switch arg { - case testVerifyForPython: - if iv2 := iv.UnixNano(); iv2 > 0 { - v2 = uint64(iv2) - } else { - v2 = int64(iv2) - } - default: - v2 = v - } - default: - v2 = v - } - return -} - -func testInit() { - gob.Register(new(TestStruc)) - if testInitDebug { - ts0 := newTestStruc(2, false) - fmt.Printf("====> depth: %v, ts: %#v\n", 2, ts0) - } - - testBincH.StructToArray = testStructToArray - if testWriteNoSymbols { - testBincH.AsSymbols = AsSymbolNone - } else { - testBincH.AsSymbols = AsSymbolAll - } - testMsgpackH.StructToArray = testStructToArray - testMsgpackH.RawToString = true - // testMsgpackH.AddExt(byteSliceTyp, 0, testMsgpackH.BinaryEncodeExt, testMsgpackH.BinaryDecodeExt) - // testMsgpackH.AddExt(timeTyp, 1, testMsgpackH.TimeEncodeExt, testMsgpackH.TimeDecodeExt) - timeEncExt := func(rv reflect.Value) ([]byte, error) { - return encodeTime(rv.Interface().(time.Time)), nil - } - timeDecExt := func(rv reflect.Value, bs []byte) error { - tt, err := decodeTime(bs) - if err == nil { - rv.Set(reflect.ValueOf(tt)) - } - return err - } - - // add extensions for msgpack, simple for time.Time, so we can encode/decode same way. - testMsgpackH.AddExt(timeTyp, 1, timeEncExt, timeDecExt) - testSimpleH.AddExt(timeTyp, 1, timeEncExt, timeDecExt) - - primitives := []interface{}{ - int8(-8), - int16(-1616), - int32(-32323232), - int64(-6464646464646464), - uint8(192), - uint16(1616), - uint32(32323232), - uint64(6464646464646464), - byte(192), - float32(-3232.0), - float64(-6464646464.0), - float32(3232.0), - float64(6464646464.0), - false, - true, - nil, - "someday", - "", - "bytestring", - timeToCompare1, - timeToCompare2, - timeToCompare3, - timeToCompare4, - } - mapsAndStrucs := []interface{}{ - map[string]bool{ - "true": true, - "false": false, - }, - map[string]interface{}{ - "true": "True", - "false": false, - "uint16(1616)": uint16(1616), - }, - //add a complex combo map in here. (map has list which has map) - //note that after the first thing, everything else should be generic. 
- map[string]interface{}{ - "list": []interface{}{ - int16(1616), - int32(32323232), - true, - float32(-3232.0), - map[string]interface{}{ - "TRUE": true, - "FALSE": false, - }, - []interface{}{true, false}, - }, - "int32": int32(32323232), - "bool": true, - "LONG STRING": "123456789012345678901234567890123456789012345678901234567890", - "SHORT STRING": "1234567890", - }, - map[interface{}]interface{}{ - true: "true", - uint8(138): false, - "false": uint8(200), - }, - newTestStruc(0, false), - } - - table = []interface{}{} - table = append(table, primitives...) //0-19 are primitives - table = append(table, primitives) //20 is a list of primitives - table = append(table, mapsAndStrucs...) //21-24 are maps. 25 is a *struct - - tableVerify = make([]interface{}, len(table)) - tableTestNilVerify = make([]interface{}, len(table)) - tablePythonVerify = make([]interface{}, len(table)) - - lp := len(primitives) - av := tableVerify - for i, v := range table { - if i == lp+3 { - av[i] = skipVerifyVal - continue - } - //av[i] = testVerifyVal(v, testVerifyMapTypeSame) - switch v.(type) { - case []interface{}: - av[i] = testVerifyVal(v, testVerifyMapTypeSame) - case map[string]interface{}: - av[i] = testVerifyVal(v, testVerifyMapTypeSame) - case map[interface{}]interface{}: - av[i] = testVerifyVal(v, testVerifyMapTypeSame) - default: - av[i] = v - } - } - - av = tableTestNilVerify - for i, v := range table { - if i > lp+3 { - av[i] = skipVerifyVal - continue - } - av[i] = testVerifyVal(v, testVerifyMapTypeStrIntf) - } - - av = tablePythonVerify - for i, v := range table { - if i > lp+3 { - av[i] = skipVerifyVal - continue - } - av[i] = testVerifyVal(v, testVerifyForPython) - } - - tablePythonVerify = tablePythonVerify[:24] -} - -func testUnmarshal(v interface{}, data []byte, h Handle) error { - if testUseIoEncDec { - return NewDecoder(bytes.NewBuffer(data), h).Decode(v) - } - return NewDecoderBytes(data, h).Decode(v) -} - -func testMarshal(v interface{}, h Handle) (bs []byte, err error) { - if testUseIoEncDec { - var buf bytes.Buffer - err = NewEncoder(&buf, h).Encode(v) - bs = buf.Bytes() - return - } - err = NewEncoderBytes(&bs, h).Encode(v) - return -} - -func testMarshalErr(v interface{}, h Handle, t *testing.T, name string) (bs []byte, err error) { - if bs, err = testMarshal(v, h); err != nil { - logT(t, "Error encoding %s: %v, Err: %v", name, v, err) - t.FailNow() - } - return -} - -func testUnmarshalErr(v interface{}, data []byte, h Handle, t *testing.T, name string) (err error) { - if err = testUnmarshal(v, data, h); err != nil { - logT(t, "Error Decoding into %s: %v, Err: %v", name, v, err) - t.FailNow() - } - return -} - -func newTestStruc(depth int, bench bool) (ts *TestStruc) { - var i64a, i64b, i64c, i64d int64 = 64, 6464, 646464, 64646464 - - ts = &TestStruc{ - S: "some string", - I64: math.MaxInt64 * 2 / 3, // 64, - I16: 16, - Ui64: uint64(int64(math.MaxInt64 * 2 / 3)), // 64, //don't use MaxUint64, as bson can't write it - Ui8: 160, - B: true, - By: 5, - - Sslice: []string{"one", "two", "three"}, - I64slice: []int64{1, 2, 3}, - I16slice: []int16{4, 5, 6}, - Ui64slice: []uint64{137, 138, 139}, - Ui8slice: []uint8{210, 211, 212}, - Bslice: []bool{true, false, true, false}, - Byslice: []byte{13, 14, 15}, - - Islice: []interface{}{"true", true, "no", false, uint64(288), float64(0.4)}, - - Ms: map[string]interface{}{ - "true": "true", - "int64(9)": false, - }, - Msi64: map[string]int64{ - "one": 1, - "two": 2, - }, - T: timeToCompare1, - AnonInTestStruc: AnonInTestStruc{ - AS: "A-String", - 
AI64: 64, - AI16: 16, - AUi64: 64, - ASslice: []string{"Aone", "Atwo", "Athree"}, - AI64slice: []int64{1, 2, 3}, - }, - } - //For benchmarks, some things will not work. - if !bench { - //json and bson require string keys in maps - //ts.M = map[interface{}]interface{}{ - // true: "true", - // int8(9): false, - //} - //gob cannot encode nil in element in array (encodeArray: nil element) - ts.Iptrslice = []*int64{nil, &i64a, nil, &i64b, nil, &i64c, nil, &i64d, nil} - // ts.Iptrslice = nil - } - if depth > 0 { - depth-- - if ts.Mtsptr == nil { - ts.Mtsptr = make(map[string]*TestStruc) - } - if ts.Mts == nil { - ts.Mts = make(map[string]TestStruc) - } - ts.Mtsptr["0"] = newTestStruc(depth, bench) - ts.Mts["0"] = *(ts.Mtsptr["0"]) - ts.Its = append(ts.Its, ts.Mtsptr["0"]) - } - return -} - -// doTestCodecTableOne allows us test for different variations based on arguments passed. -func doTestCodecTableOne(t *testing.T, testNil bool, h Handle, - vs []interface{}, vsVerify []interface{}) { - //if testNil, then just test for when a pointer to a nil interface{} is passed. It should work. - //Current setup allows us test (at least manually) the nil interface or typed interface. - logT(t, "================ TestNil: %v ================\n", testNil) - for i, v0 := range vs { - logT(t, "..............................................") - logT(t, " Testing: #%d:, %T, %#v\n", i, v0, v0) - b0, err := testMarshalErr(v0, h, t, "v0") - if err != nil { - continue - } - logT(t, " Encoded bytes: len: %v, %v\n", len(b0), b0) - - var v1 interface{} - - if testNil { - err = testUnmarshal(&v1, b0, h) - } else { - if v0 != nil { - v0rt := reflect.TypeOf(v0) // ptr - rv1 := reflect.New(v0rt) - err = testUnmarshal(rv1.Interface(), b0, h) - v1 = rv1.Elem().Interface() - // v1 = reflect.Indirect(reflect.ValueOf(v1)).Interface() - } - } - - logT(t, " v1 returned: %T, %#v", v1, v1) - // if v1 != nil { - // logT(t, " v1 returned: %T, %#v", v1, v1) - // //we always indirect, because ptr to typed value may be passed (if not testNil) - // v1 = reflect.Indirect(reflect.ValueOf(v1)).Interface() - // } - if err != nil { - logT(t, "-------- Error: %v. 
Partial return: %v", err, v1) - failT(t) - continue - } - v0check := vsVerify[i] - if v0check == skipVerifyVal { - logT(t, " Nil Check skipped: Decoded: %T, %#v\n", v1, v1) - continue - } - - if err = deepEqual(v0check, v1); err == nil { - logT(t, "++++++++ Before and After marshal matched\n") - } else { - logT(t, "-------- Before and After marshal do not match: Error: %v"+ - " ====> GOLDEN: (%T) %#v, DECODED: (%T) %#v\n", err, v0check, v0check, v1, v1) - failT(t) - } - } -} - -func testCodecTableOne(t *testing.T, h Handle) { - // func TestMsgpackAllExperimental(t *testing.T) { - // dopts := testDecOpts(nil, nil, false, true, true), - - switch v := h.(type) { - case *MsgpackHandle: - var oldWriteExt, oldRawToString bool - oldWriteExt, v.WriteExt = v.WriteExt, true - oldRawToString, v.RawToString = v.RawToString, true - doTestCodecTableOne(t, false, h, table, tableVerify) - v.WriteExt, v.RawToString = oldWriteExt, oldRawToString - default: - doTestCodecTableOne(t, false, h, table, tableVerify) - } - // func TestMsgpackAll(t *testing.T) { - idxTime, numPrim, numMap := 19, 23, 4 - - //skip []interface{} containing time.Time - doTestCodecTableOne(t, false, h, table[:numPrim], tableVerify[:numPrim]) - doTestCodecTableOne(t, false, h, table[numPrim+1:], tableVerify[numPrim+1:]) - // func TestMsgpackNilStringMap(t *testing.T) { - var oldMapType reflect.Type - v := h.getBasicHandle() - oldMapType, v.MapType = v.MapType, mapStrIntfTyp - - //skip time.Time, []interface{} containing time.Time, last map, and newStruc - doTestCodecTableOne(t, true, h, table[:idxTime], tableTestNilVerify[:idxTime]) - doTestCodecTableOne(t, true, h, table[numPrim+1:numPrim+numMap], tableTestNilVerify[numPrim+1:numPrim+numMap]) - - v.MapType = oldMapType - - // func TestMsgpackNilIntf(t *testing.T) { - - //do newTestStruc and last element of map - doTestCodecTableOne(t, true, h, table[numPrim+numMap:], tableTestNilVerify[numPrim+numMap:]) - //TODO? What is this one? - //doTestCodecTableOne(t, true, h, table[17:18], tableTestNilVerify[17:18]) -} - -func testCodecMiscOne(t *testing.T, h Handle) { - b, err := testMarshalErr(32, h, t, "32") - // Cannot do this nil one, because faster type assertion decoding will panic - // var i *int32 - // if err = testUnmarshal(b, i, nil); err == nil { - // logT(t, "------- Expecting error because we cannot unmarshal to int32 nil ptr") - // t.FailNow() - // } - var i2 int32 = 0 - err = testUnmarshalErr(&i2, b, h, t, "int32-ptr") - if i2 != int32(32) { - logT(t, "------- didn't unmarshal to 32: Received: %d", i2) - t.FailNow() - } - - // func TestMsgpackDecodePtr(t *testing.T) { - ts := newTestStruc(0, false) - b, err = testMarshalErr(ts, h, t, "pointer-to-struct") - if len(b) < 40 { - logT(t, "------- Size must be > 40. Size: %d", len(b)) - t.FailNow() - } - logT(t, "------- b: %v", b) - ts2 := new(TestStruc) - err = testUnmarshalErr(ts2, b, h, t, "pointer-to-struct") - if ts2.I64 != math.MaxInt64*2/3 { - logT(t, "------- Unmarshal wrong. Expect I64 = 64. 
Got: %v", ts2.I64) - t.FailNow() - } - - // func TestMsgpackIntfDecode(t *testing.T) { - m := map[string]int{"A": 2, "B": 3} - p := []interface{}{m} - bs, err := testMarshalErr(p, h, t, "p") - - m2 := map[string]int{} - p2 := []interface{}{m2} - err = testUnmarshalErr(&p2, bs, h, t, "&p2") - - if m2["A"] != 2 || m2["B"] != 3 { - logT(t, "m2 not as expected: expecting: %v, got: %v", m, m2) - t.FailNow() - } - // log("m: %v, m2: %v, p: %v, p2: %v", m, m2, p, p2) - checkEqualT(t, p, p2, "p=p2") - checkEqualT(t, m, m2, "m=m2") - if err = deepEqual(p, p2); err == nil { - logT(t, "p and p2 match") - } else { - logT(t, "Not Equal: %v. p: %v, p2: %v", err, p, p2) - t.FailNow() - } - if err = deepEqual(m, m2); err == nil { - logT(t, "m and m2 match") - } else { - logT(t, "Not Equal: %v. m: %v, m2: %v", err, m, m2) - t.FailNow() - } - - // func TestMsgpackDecodeStructSubset(t *testing.T) { - // test that we can decode a subset of the stream - mm := map[string]interface{}{"A": 5, "B": 99, "C": 333} - bs, err = testMarshalErr(mm, h, t, "mm") - type ttt struct { - A uint8 - C int32 - } - var t2 ttt - testUnmarshalErr(&t2, bs, h, t, "t2") - t3 := ttt{5, 333} - checkEqualT(t, t2, t3, "t2=t3") - - // println(">>>>>") - // test simple arrays, non-addressable arrays, slices - type tarr struct { - A int64 - B [3]int64 - C []byte - D [3]byte - } - var tarr0 = tarr{1, [3]int64{2, 3, 4}, []byte{4, 5, 6}, [3]byte{7, 8, 9}} - // test both pointer and non-pointer (value) - for _, tarr1 := range []interface{}{tarr0, &tarr0} { - bs, err = testMarshalErr(tarr1, h, t, "tarr1") - var tarr2 tarr - testUnmarshalErr(&tarr2, bs, h, t, "tarr2") - checkEqualT(t, tarr0, tarr2, "tarr0=tarr2") - // fmt.Printf(">>>> err: %v. tarr1: %v, tarr2: %v\n", err, tarr0, tarr2) - } - - // test byte array, even if empty (msgpack only) - if h == testMsgpackH { - type ystruct struct { - Anarray []byte - } - var ya = ystruct{} - testUnmarshalErr(&ya, []byte{0x91, 0x90}, h, t, "ya") - } -} - -func testCodecEmbeddedPointer(t *testing.T, h Handle) { - type Z int - type A struct { - AnInt int - } - type B struct { - *Z - *A - MoreInt int - } - var z Z = 4 - x1 := &B{&z, &A{5}, 6} - bs, err := testMarshalErr(x1, h, t, "x1") - // fmt.Printf("buf: len(%v): %x\n", buf.Len(), buf.Bytes()) - var x2 = new(B) - err = testUnmarshalErr(x2, bs, h, t, "x2") - err = checkEqualT(t, x1, x2, "x1=x2") - _ = err -} - -func doTestRpcOne(t *testing.T, rr Rpc, h Handle, doRequest bool, exitSleepMs time.Duration, -) (port int) { - // rpc needs EOF, which is sent via a panic, and so must be recovered. - if !recoverPanicToErr { - logT(t, "EXPECTED. 
set recoverPanicToErr=true, since rpc needs EOF") - t.FailNow() - } - srv := rpc.NewServer() - srv.Register(testRpcInt) - ln, err := net.Listen("tcp", "127.0.0.1:0") - // log("listener: %v", ln.Addr()) - checkErrT(t, err) - port = (ln.Addr().(*net.TCPAddr)).Port - // var opts *DecoderOptions - // opts := testDecOpts - // opts.MapType = mapStrIntfTyp - // opts.RawToString = false - serverExitChan := make(chan bool, 1) - var serverExitFlag uint64 = 0 - serverFn := func() { - for { - conn1, err1 := ln.Accept() - // if err1 != nil { - // //fmt.Printf("accept err1: %v\n", err1) - // continue - // } - if atomic.LoadUint64(&serverExitFlag) == 1 { - serverExitChan <- true - conn1.Close() - return // exit serverFn goroutine - } - if err1 == nil { - var sc rpc.ServerCodec = rr.ServerCodec(conn1, h) - srv.ServeCodec(sc) - } - } - } - - clientFn := func(cc rpc.ClientCodec) { - cl := rpc.NewClientWithCodec(cc) - defer cl.Close() - var up, sq, mult int - var rstr string - // log("Calling client") - checkErrT(t, cl.Call("TestRpcInt.Update", 5, &up)) - // log("Called TestRpcInt.Update") - checkEqualT(t, testRpcInt.i, 5, "testRpcInt.i=5") - checkEqualT(t, up, 5, "up=5") - checkErrT(t, cl.Call("TestRpcInt.Square", 1, &sq)) - checkEqualT(t, sq, 25, "sq=25") - checkErrT(t, cl.Call("TestRpcInt.Mult", 20, &mult)) - checkEqualT(t, mult, 100, "mult=100") - checkErrT(t, cl.Call("TestRpcInt.EchoStruct", TestABC{"Aa", "Bb", "Cc"}, &rstr)) - checkEqualT(t, rstr, fmt.Sprintf("%#v", TestABC{"Aa", "Bb", "Cc"}), "rstr=") - checkErrT(t, cl.Call("TestRpcInt.Echo123", []string{"A1", "B2", "C3"}, &rstr)) - checkEqualT(t, rstr, fmt.Sprintf("%#v", []string{"A1", "B2", "C3"}), "rstr=") - } - - connFn := func() (bs net.Conn) { - // log("calling f1") - bs, err2 := net.Dial(ln.Addr().Network(), ln.Addr().String()) - //fmt.Printf("f1. bs: %v, err2: %v\n", bs, err2) - checkErrT(t, err2) - return - } - - exitFn := func() { - atomic.StoreUint64(&serverExitFlag, 1) - bs := connFn() - <-serverExitChan - bs.Close() - // serverExitChan <- true - } - - go serverFn() - runtime.Gosched() - //time.Sleep(100 * time.Millisecond) - if exitSleepMs == 0 { - defer ln.Close() - defer exitFn() - } - if doRequest { - bs := connFn() - cc := rr.ClientCodec(bs, h) - clientFn(cc) - } - if exitSleepMs != 0 { - go func() { - defer ln.Close() - time.Sleep(exitSleepMs) - exitFn() - }() - } - return -} - -// Comprehensive testing that generates data encoded from python msgpack, -// and validates that our code can read and write it out accordingly. -// We keep this unexported here, and put actual test in ext_dep_test.go. -// This way, it can be excluded by excluding file completely. -func doTestMsgpackPythonGenStreams(t *testing.T) { - logT(t, "TestPythonGenStreams") - tmpdir, err := ioutil.TempDir("", "golang-msgpack-test") - if err != nil { - logT(t, "-------- Unable to create temp directory\n") - t.FailNow() - } - defer os.RemoveAll(tmpdir) - logT(t, "tmpdir: %v", tmpdir) - cmd := exec.Command("python", "msgpack_test.py", "testdata", tmpdir) - //cmd.Stdin = strings.NewReader("some input") - //cmd.Stdout = &out - var cmdout []byte - if cmdout, err = cmd.CombinedOutput(); err != nil { - logT(t, "-------- Error running msgpack_test.py testdata. 
Err: %v", err) - logT(t, " %v", string(cmdout)) - t.FailNow() - } - - oldMapType := testMsgpackH.MapType - for i, v := range tablePythonVerify { - testMsgpackH.MapType = oldMapType - //load up the golden file based on number - //decode it - //compare to in-mem object - //encode it again - //compare to output stream - logT(t, "..............................................") - logT(t, " Testing: #%d: %T, %#v\n", i, v, v) - var bss []byte - bss, err = ioutil.ReadFile(filepath.Join(tmpdir, strconv.Itoa(i)+".golden")) - if err != nil { - logT(t, "-------- Error reading golden file: %d. Err: %v", i, err) - failT(t) - continue - } - testMsgpackH.MapType = mapStrIntfTyp - - var v1 interface{} - if err = testUnmarshal(&v1, bss, testMsgpackH); err != nil { - logT(t, "-------- Error decoding stream: %d: Err: %v", i, err) - failT(t) - continue - } - if v == skipVerifyVal { - continue - } - //no need to indirect, because we pass a nil ptr, so we already have the value - //if v1 != nil { v1 = reflect.Indirect(reflect.ValueOf(v1)).Interface() } - if err = deepEqual(v, v1); err == nil { - logT(t, "++++++++ Objects match") - } else { - logT(t, "-------- Objects do not match: %v. Source: %T. Decoded: %T", err, v, v1) - logT(t, "-------- AGAINST: %#v", v) - logT(t, "-------- DECODED: %#v <====> %#v", v1, reflect.Indirect(reflect.ValueOf(v1)).Interface()) - failT(t) - } - bsb, err := testMarshal(v1, testMsgpackH) - if err != nil { - logT(t, "Error encoding to stream: %d: Err: %v", i, err) - failT(t) - continue - } - if err = deepEqual(bsb, bss); err == nil { - logT(t, "++++++++ Bytes match") - } else { - logT(t, "???????? Bytes do not match. %v.", err) - xs := "--------" - if reflect.ValueOf(v).Kind() == reflect.Map { - xs = " " - logT(t, "%s It's a map. Ok that they don't match (dependent on ordering).", xs) - } else { - logT(t, "%s It's not a map. 
They should match.", xs) - failT(t) - } - logT(t, "%s FROM_FILE: %4d] %v", xs, len(bss), bss) - logT(t, "%s ENCODED: %4d] %v", xs, len(bsb), bsb) - } - } - testMsgpackH.MapType = oldMapType -} - -// To test MsgpackSpecRpc, we test 3 scenarios: -// - Go Client to Go RPC Service (contained within TestMsgpackRpcSpec) -// - Go client to Python RPC Service (contained within doTestMsgpackRpcSpecGoClientToPythonSvc) -// - Python Client to Go RPC Service (contained within doTestMsgpackRpcSpecPythonClientToGoSvc) -// -// This allows us test the different calling conventions -// - Go Service requires only one argument -// - Python Service allows multiple arguments - -func doTestMsgpackRpcSpecGoClientToPythonSvc(t *testing.T) { - openPort := "6789" - cmd := exec.Command("python", "msgpack_test.py", "rpc-server", openPort, "2") - checkErrT(t, cmd.Start()) - time.Sleep(100 * time.Millisecond) // time for python rpc server to start - bs, err2 := net.Dial("tcp", ":"+openPort) - checkErrT(t, err2) - cc := MsgpackSpecRpc.ClientCodec(bs, testMsgpackH) - cl := rpc.NewClientWithCodec(cc) - defer cl.Close() - var rstr string - checkErrT(t, cl.Call("EchoStruct", TestABC{"Aa", "Bb", "Cc"}, &rstr)) - //checkEqualT(t, rstr, "{'A': 'Aa', 'B': 'Bb', 'C': 'Cc'}") - var mArgs MsgpackSpecRpcMultiArgs = []interface{}{"A1", "B2", "C3"} - checkErrT(t, cl.Call("Echo123", mArgs, &rstr)) - checkEqualT(t, rstr, "1:A1 2:B2 3:C3", "rstr=") -} - -func doTestMsgpackRpcSpecPythonClientToGoSvc(t *testing.T) { - port := doTestRpcOne(t, MsgpackSpecRpc, testMsgpackH, false, 1*time.Second) - //time.Sleep(1000 * time.Millisecond) - cmd := exec.Command("python", "msgpack_test.py", "rpc-client-go-service", strconv.Itoa(port)) - var cmdout []byte - var err error - if cmdout, err = cmd.CombinedOutput(); err != nil { - logT(t, "-------- Error running msgpack_test.py rpc-client-go-service. 
Err: %v", err) - logT(t, " %v", string(cmdout)) - t.FailNow() - } - checkEqualT(t, string(cmdout), - fmt.Sprintf("%#v\n%#v\n", []string{"A1", "B2", "C3"}, TestABC{"Aa", "Bb", "Cc"}), "cmdout=") -} - -func TestBincCodecsTable(t *testing.T) { - testCodecTableOne(t, testBincH) -} - -func TestBincCodecsMisc(t *testing.T) { - testCodecMiscOne(t, testBincH) -} - -func TestBincCodecsEmbeddedPointer(t *testing.T) { - testCodecEmbeddedPointer(t, testBincH) -} - -func TestSimpleCodecsTable(t *testing.T) { - testCodecTableOne(t, testSimpleH) -} - -func TestSimpleCodecsMisc(t *testing.T) { - testCodecMiscOne(t, testSimpleH) -} - -func TestSimpleCodecsEmbeddedPointer(t *testing.T) { - testCodecEmbeddedPointer(t, testSimpleH) -} - -func TestMsgpackCodecsTable(t *testing.T) { - testCodecTableOne(t, testMsgpackH) -} - -func TestMsgpackCodecsMisc(t *testing.T) { - testCodecMiscOne(t, testMsgpackH) -} - -func TestMsgpackCodecsEmbeddedPointer(t *testing.T) { - testCodecEmbeddedPointer(t, testMsgpackH) -} - -func TestBincRpcGo(t *testing.T) { - doTestRpcOne(t, GoRpc, testBincH, true, 0) -} - -func _TestSimpleRpcGo(t *testing.T) { - doTestRpcOne(t, GoRpc, testSimpleH, true, 0) -} - -func TestMsgpackRpcGo(t *testing.T) { - doTestRpcOne(t, GoRpc, testMsgpackH, true, 0) -} - -func TestMsgpackRpcSpec(t *testing.T) { - doTestRpcOne(t, MsgpackSpecRpc, testMsgpackH, true, 0) -} - -// TODO: -// Add Tests for: -// - decoding empty list/map in stream into a nil slice/map -// - binary(M|Unm)arsher support for time.Time diff --git a/libnetwork/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/ext_dep_test.go b/libnetwork/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/ext_dep_test.go deleted file mode 100644 index eb28459f8f..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/ext_dep_test.go +++ /dev/null @@ -1,75 +0,0 @@ -// //+build ignore - -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -package codec - -// This file includes benchmarks which have dependencies on 3rdparty -// packages (bson and vmihailenco/msgpack) which must be installed locally. -// -// To run the benchmarks including these 3rdparty packages, first -// - Uncomment first line in this file (put // // in front of it) -// - Get those packages: -// go get github.com/vmihailenco/msgpack -// go get labix.org/v2/mgo/bson -// - Run: -// go test -bi -bench=. 
- -import ( - "testing" - - vmsgpack "github.com/vmihailenco/msgpack" - "labix.org/v2/mgo/bson" -) - -func init() { - benchCheckers = append(benchCheckers, - benchChecker{"v-msgpack", fnVMsgpackEncodeFn, fnVMsgpackDecodeFn}, - benchChecker{"bson", fnBsonEncodeFn, fnBsonDecodeFn}, - ) -} - -func fnVMsgpackEncodeFn(ts interface{}) ([]byte, error) { - return vmsgpack.Marshal(ts) -} - -func fnVMsgpackDecodeFn(buf []byte, ts interface{}) error { - return vmsgpack.Unmarshal(buf, ts) -} - -func fnBsonEncodeFn(ts interface{}) ([]byte, error) { - return bson.Marshal(ts) -} - -func fnBsonDecodeFn(buf []byte, ts interface{}) error { - return bson.Unmarshal(buf, ts) -} - -func Benchmark__Bson_______Encode(b *testing.B) { - fnBenchmarkEncode(b, "bson", benchTs, fnBsonEncodeFn) -} - -func Benchmark__Bson_______Decode(b *testing.B) { - fnBenchmarkDecode(b, "bson", benchTs, fnBsonEncodeFn, fnBsonDecodeFn, fnBenchNewTs) -} - -func Benchmark__VMsgpack___Encode(b *testing.B) { - fnBenchmarkEncode(b, "v-msgpack", benchTs, fnVMsgpackEncodeFn) -} - -func Benchmark__VMsgpack___Decode(b *testing.B) { - fnBenchmarkDecode(b, "v-msgpack", benchTs, fnVMsgpackEncodeFn, fnVMsgpackDecodeFn, fnBenchNewTs) -} - -func TestMsgpackPythonGenStreams(t *testing.T) { - doTestMsgpackPythonGenStreams(t) -} - -func TestMsgpackRpcSpecGoClientToPythonSvc(t *testing.T) { - doTestMsgpackRpcSpecGoClientToPythonSvc(t) -} - -func TestMsgpackRpcSpecPythonClientToGoSvc(t *testing.T) { - doTestMsgpackRpcSpecPythonClientToGoSvc(t) -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/z_helper_test.go b/libnetwork/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/z_helper_test.go deleted file mode 100644 index 2e9b3a0f05..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/z_helper_test.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -package codec - -// All non-std package dependencies related to testing live in this file, -// so porting to different environment is easy (just update functions). -// -// Also, this file is called z_helper_test, to give a "hint" to compiler -// that its init() function should be called last. (not guaranteed by spec) - -import ( - "errors" - "reflect" - "flag" - "testing" -) - -var ( - testLogToT = true - failNowOnFail = true -) - -func init() { - testInitFlags() - benchInitFlags() - flag.Parse() - testInit() - benchInit() -} - -func checkErrT(t *testing.T, err error) { - if err != nil { - logT(t, err.Error()) - failT(t) - } -} - -func checkEqualT(t *testing.T, v1 interface{}, v2 interface{}, desc string) (err error) { - if err = deepEqual(v1, v2); err != nil { - logT(t, "Not Equal: %s: %v. v1: %v, v2: %v", desc, err, v1, v2) - failT(t) - } - return -} - -func logT(x interface{}, format string, args ...interface{}) { - if t, ok := x.(*testing.T); ok && t != nil && testLogToT { - t.Logf(format, args...) - } else if b, ok := x.(*testing.B); ok && b != nil && testLogToT { - b.Logf(format, args...) - } else { - debugf(format, args...) 
- } -} - -func failT(t *testing.T) { - if failNowOnFail { - t.FailNow() - } else { - t.Fail() - } -} - -func deepEqual(v1, v2 interface{}) (err error) { - if !reflect.DeepEqual(v1, v2) { - err = errors.New("Not Match") - } - return -} - -func approxDataSize(rv reflect.Value) (sum int) { - switch rk := rv.Kind(); rk { - case reflect.Invalid: - case reflect.Ptr, reflect.Interface: - sum += int(rv.Type().Size()) - sum += approxDataSize(rv.Elem()) - case reflect.Slice: - sum += int(rv.Type().Size()) - for j := 0; j < rv.Len(); j++ { - sum += approxDataSize(rv.Index(j)) - } - case reflect.String: - sum += int(rv.Type().Size()) - sum += rv.Len() - case reflect.Map: - sum += int(rv.Type().Size()) - for _, mk := range rv.MapKeys() { - sum += approxDataSize(mk) - sum += approxDataSize(rv.MapIndex(mk)) - } - case reflect.Struct: - //struct size already includes the full data size. - //sum += int(rv.Type().Size()) - for j := 0; j < rv.NumField(); j++ { - sum += approxDataSize(rv.Field(j)) - } - default: - //pure value types - sum += int(rv.Type().Size()) - } - return -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/miekg/dns/idn/code_points.go b/libnetwork/Godeps/_workspace/src/github.com/miekg/dns/idn/code_points.go deleted file mode 100644 index 129c3742f5..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/miekg/dns/idn/code_points.go +++ /dev/null @@ -1,2346 +0,0 @@ -package idn - -const ( - propertyUnknown property = iota // unknown character property - propertyPVALID // allowed to be used in IDNs - propertyCONTEXTJ // invisible or problematic characters (join controls) - propertyCONTEXTO // invisible or problematic characters (others) - propertyDISALLOWED // should not be included in IDNs - propertyUNASSIGNED // code points that are not designated in the Unicode Standard -) - -// property stores the property of a code point, as described in RFC 5892, -// section 1 -type property int - -// codePoints list all code points in Unicode Character Database (UCD) Format -// according to RFC 5892, appendix B.1. 
Thanks to libidn2 (GNU) - -// http://www.gnu.org/software/libidn/libidn2/ -var codePoints = []struct { - start rune - end rune - state property -}{ - {0x0000, 0x002C, propertyDISALLOWED}, // ..COMMA - {0x002D, 0x0, propertyPVALID}, // HYPHEN-MINUS - {0x002E, 0x002F, propertyDISALLOWED}, // FULL STOP..SOLIDUS - {0x0030, 0x0039, propertyPVALID}, // DIGIT ZERO..DIGIT NINE - {0x003A, 0x0060, propertyDISALLOWED}, // COLON..GRAVE ACCENT - {0x0041, 0x005A, propertyPVALID}, // LATIN CAPITAL LETTER A..LATIN CAPITAL LETTER Z - {0x0061, 0x007A, propertyPVALID}, // LATIN SMALL LETTER A..LATIN SMALL LETTER Z - {0x007B, 0x00B6, propertyDISALLOWED}, // LEFT CURLY BRACKET..PILCROW SIGN - {0x00B7, 0x0, propertyCONTEXTO}, // MIDDLE DOT - {0x00B8, 0x00DE, propertyDISALLOWED}, // CEDILLA..LATIN CAPITAL LETTER THORN - {0x00DF, 0x00F6, propertyPVALID}, // LATIN SMALL LETTER SHARP S..LATIN SMALL LETT - {0x00F7, 0x0, propertyDISALLOWED}, // DIVISION SIGN - {0x00F8, 0x00FF, propertyPVALID}, // LATIN SMALL LETTER O WITH STROKE..LATIN SMAL - {0x0100, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH MACRON - {0x0101, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH MACRON - {0x0102, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH BREVE - {0x0103, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH BREVE - {0x0104, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH OGONEK - {0x0105, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH OGONEK - {0x0106, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER C WITH ACUTE - {0x0107, 0x0, propertyPVALID}, // LATIN SMALL LETTER C WITH ACUTE - {0x0108, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER C WITH CIRCUMFLEX - {0x0109, 0x0, propertyPVALID}, // LATIN SMALL LETTER C WITH CIRCUMFLEX - {0x010A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER C WITH DOT ABOVE - {0x010B, 0x0, propertyPVALID}, // LATIN SMALL LETTER C WITH DOT ABOVE - {0x010C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER C WITH CARON - {0x010D, 0x0, propertyPVALID}, // LATIN SMALL LETTER C WITH CARON - {0x010E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER D WITH CARON - {0x010F, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH CARON - {0x0110, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER D WITH STROKE - {0x0111, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH STROKE - {0x0112, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH MACRON - {0x0113, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH MACRON - {0x0114, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH BREVE - {0x0115, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH BREVE - {0x0116, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH DOT ABOVE - {0x0117, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH DOT ABOVE - {0x0118, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH OGONEK - {0x0119, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH OGONEK - {0x011A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CARON - {0x011B, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CARON - {0x011C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH CIRCUMFLEX - {0x011D, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH CIRCUMFLEX - {0x011E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH BREVE - {0x011F, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH BREVE - {0x0120, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH DOT ABOVE - {0x0121, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH DOT ABOVE - {0x0122, 0x0, propertyDISALLOWED}, // LATIN CAPITAL 
LETTER G WITH CEDILLA - {0x0123, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH CEDILLA - {0x0124, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH CIRCUMFLEX - {0x0125, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH CIRCUMFLEX - {0x0126, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH STROKE - {0x0127, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH STROKE - {0x0128, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH TILDE - {0x0129, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH TILDE - {0x012A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH MACRON - {0x012B, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH MACRON - {0x012C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH BREVE - {0x012D, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH BREVE - {0x012E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH OGONEK - {0x012F, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH OGONEK - {0x0130, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH DOT ABOVE - {0x0131, 0x0, propertyPVALID}, // LATIN SMALL LETTER DOTLESS I - {0x0132, 0x0134, propertyDISALLOWED}, // LATIN CAPITAL LIGATURE IJ..LATIN CAPITAL LET - {0x0135, 0x0, propertyPVALID}, // LATIN SMALL LETTER J WITH CIRCUMFLEX - {0x0136, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH CEDILLA - {0x0137, 0x0138, propertyPVALID}, // LATIN SMALL LETTER K WITH CEDILLA..LATIN SMA - {0x0139, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH ACUTE - {0x013A, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH ACUTE - {0x013B, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH CEDILLA - {0x013C, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH CEDILLA - {0x013D, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH CARON - {0x013E, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH CARON - {0x013F, 0x0141, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH MIDDLE DOT..LATI - {0x0142, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH STROKE - {0x0143, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH ACUTE - {0x0144, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH ACUTE - {0x0145, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH CEDILLA - {0x0146, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH CEDILLA - {0x0147, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH CARON - {0x0148, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH CARON - {0x0149, 0x014A, propertyDISALLOWED}, // LATIN SMALL LETTER N PRECEDED BY APOSTROPHE. 
- {0x014B, 0x0, propertyPVALID}, // LATIN SMALL LETTER ENG - {0x014C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH MACRON - {0x014D, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH MACRON - {0x014E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH BREVE - {0x014F, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH BREVE - {0x0150, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH DOUBLE ACUTE - {0x0151, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH DOUBLE ACUTE - {0x0152, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LIGATURE OE - {0x0153, 0x0, propertyPVALID}, // LATIN SMALL LIGATURE OE - {0x0154, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH ACUTE - {0x0155, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH ACUTE - {0x0156, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH CEDILLA - {0x0157, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH CEDILLA - {0x0158, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH CARON - {0x0159, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH CARON - {0x015A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH ACUTE - {0x015B, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH ACUTE - {0x015C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH CIRCUMFLEX - {0x015D, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH CIRCUMFLEX - {0x015E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH CEDILLA - {0x015F, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH CEDILLA - {0x0160, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH CARON - {0x0161, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH CARON - {0x0162, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH CEDILLA - {0x0163, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH CEDILLA - {0x0164, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH CARON - {0x0165, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH CARON - {0x0166, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH STROKE - {0x0167, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH STROKE - {0x0168, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH TILDE - {0x0169, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH TILDE - {0x016A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH MACRON - {0x016B, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH MACRON - {0x016C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH BREVE - {0x016D, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH BREVE - {0x016E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH RING ABOVE - {0x016F, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH RING ABOVE - {0x0170, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DOUBLE ACUTE - {0x0171, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH DOUBLE ACUTE - {0x0172, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH OGONEK - {0x0173, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH OGONEK - {0x0174, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH CIRCUMFLEX - {0x0175, 0x0, propertyPVALID}, // LATIN SMALL LETTER W WITH CIRCUMFLEX - {0x0176, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH CIRCUMFLEX - {0x0177, 0x0, propertyPVALID}, // LATIN SMALL LETTER Y WITH CIRCUMFLEX - {0x0178, 0x0179, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH DIAERESIS..LATIN - {0x017A, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH ACUTE - {0x017B, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH DOT ABOVE - {0x017C, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH DOT 
ABOVE - {0x017D, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH CARON - {0x017E, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH CARON - {0x017F, 0x0, propertyDISALLOWED}, // LATIN SMALL LETTER LONG S - {0x0180, 0x0, propertyPVALID}, // LATIN SMALL LETTER B WITH STROKE - {0x0181, 0x0182, propertyDISALLOWED}, // LATIN CAPITAL LETTER B WITH HOOK..LATIN CAPI - {0x0183, 0x0, propertyPVALID}, // LATIN SMALL LETTER B WITH TOPBAR - {0x0184, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER TONE SIX - {0x0185, 0x0, propertyPVALID}, // LATIN SMALL LETTER TONE SIX - {0x0186, 0x0187, propertyDISALLOWED}, // LATIN CAPITAL LETTER OPEN O..LATIN CAPITAL L - {0x0188, 0x0, propertyPVALID}, // LATIN SMALL LETTER C WITH HOOK - {0x0189, 0x018B, propertyDISALLOWED}, // LATIN CAPITAL LETTER AFRICAN D..LATIN CAPITA - {0x018C, 0x018D, propertyPVALID}, // LATIN SMALL LETTER D WITH TOPBAR..LATIN SMAL - {0x018E, 0x0191, propertyDISALLOWED}, // LATIN CAPITAL LETTER REVERSED E..LATIN CAPIT - {0x0192, 0x0, propertyPVALID}, // LATIN SMALL LETTER F WITH HOOK - {0x0193, 0x0194, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH HOOK..LATIN CAPI - {0x0195, 0x0, propertyPVALID}, // LATIN SMALL LETTER HV - {0x0196, 0x0198, propertyDISALLOWED}, // LATIN CAPITAL LETTER IOTA..LATIN CAPITAL LET - {0x0199, 0x019B, propertyPVALID}, // LATIN SMALL LETTER K WITH HOOK..LATIN SMALL - {0x019C, 0x019D, propertyDISALLOWED}, // LATIN CAPITAL LETTER TURNED M..LATIN CAPITAL - {0x019E, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH LONG RIGHT LEG - {0x019F, 0x01A0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH MIDDLE TILDE..LA - {0x01A1, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HORN - {0x01A2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER OI - {0x01A3, 0x0, propertyPVALID}, // LATIN SMALL LETTER OI - {0x01A4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER P WITH HOOK - {0x01A5, 0x0, propertyPVALID}, // LATIN SMALL LETTER P WITH HOOK - {0x01A6, 0x01A7, propertyDISALLOWED}, // LATIN LETTER YR..LATIN CAPITAL LETTER TONE T - {0x01A8, 0x0, propertyPVALID}, // LATIN SMALL LETTER TONE TWO - {0x01A9, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER ESH - {0x01AA, 0x01AB, propertyPVALID}, // LATIN LETTER REVERSED ESH LOOP..LATIN SMALL - {0x01AC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH HOOK - {0x01AD, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH HOOK - {0x01AE, 0x01AF, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH RETROFLEX HOOK.. 
- {0x01B0, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HORN - {0x01B1, 0x01B3, propertyDISALLOWED}, // LATIN CAPITAL LETTER UPSILON..LATIN CAPITAL - {0x01B4, 0x0, propertyPVALID}, // LATIN SMALL LETTER Y WITH HOOK - {0x01B5, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH STROKE - {0x01B6, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH STROKE - {0x01B7, 0x01B8, propertyDISALLOWED}, // LATIN CAPITAL LETTER EZH..LATIN CAPITAL LETT - {0x01B9, 0x01BB, propertyPVALID}, // LATIN SMALL LETTER EZH REVERSED..LATIN LETTE - {0x01BC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER TONE FIVE - {0x01BD, 0x01C3, propertyPVALID}, // LATIN SMALL LETTER TONE FIVE..LATIN LETTER R - {0x01C4, 0x01CD, propertyDISALLOWED}, // LATIN CAPITAL LETTER DZ WITH CARON..LATIN CA - {0x01CE, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH CARON - {0x01CF, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH CARON - {0x01D0, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH CARON - {0x01D1, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH CARON - {0x01D2, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH CARON - {0x01D3, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH CARON - {0x01D4, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH CARON - {0x01D5, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DIAERESIS AND MA - {0x01D6, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH DIAERESIS AND MACR - {0x01D7, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DIAERESIS AND AC - {0x01D8, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH DIAERESIS AND ACUT - {0x01D9, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DIAERESIS AND CA - {0x01DA, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH DIAERESIS AND CARO - {0x01DB, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DIAERESIS AND GR - {0x01DC, 0x01DD, propertyPVALID}, // LATIN SMALL LETTER U WITH DIAERESIS AND GRAV - {0x01DE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH DIAERESIS AND MA - {0x01DF, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH DIAERESIS AND MACR - {0x01E0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH DOT ABOVE AND MA - {0x01E1, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH DOT ABOVE AND MACR - {0x01E2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AE WITH MACRON - {0x01E3, 0x0, propertyPVALID}, // LATIN SMALL LETTER AE WITH MACRON - {0x01E4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH STROKE - {0x01E5, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH STROKE - {0x01E6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH CARON - {0x01E7, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH CARON - {0x01E8, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH CARON - {0x01E9, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH CARON - {0x01EA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH OGONEK - {0x01EB, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH OGONEK - {0x01EC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH OGONEK AND MACRO - {0x01ED, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH OGONEK AND MACRON - {0x01EE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER EZH WITH CARON - {0x01EF, 0x01F0, propertyPVALID}, // LATIN SMALL LETTER EZH WITH CARON..LATIN SMA - {0x01F1, 0x01F4, propertyDISALLOWED}, // LATIN CAPITAL LETTER DZ..LATIN CAPITAL LETTE - {0x01F5, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH ACUTE - {0x01F6, 0x01F8, propertyDISALLOWED}, // LATIN CAPITAL LETTER HWAIR..LATIN 
CAPITAL LE - {0x01F9, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH GRAVE - {0x01FA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH RING ABOVE AND A - {0x01FB, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH RING ABOVE AND ACU - {0x01FC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AE WITH ACUTE - {0x01FD, 0x0, propertyPVALID}, // LATIN SMALL LETTER AE WITH ACUTE - {0x01FE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH STROKE AND ACUTE - {0x01FF, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH STROKE AND ACUTE - {0x0200, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH DOUBLE GRAVE - {0x0201, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH DOUBLE GRAVE - {0x0202, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH INVERTED BREVE - {0x0203, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH INVERTED BREVE - {0x0204, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH DOUBLE GRAVE - {0x0205, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH DOUBLE GRAVE - {0x0206, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH INVERTED BREVE - {0x0207, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH INVERTED BREVE - {0x0208, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH DOUBLE GRAVE - {0x0209, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH DOUBLE GRAVE - {0x020A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH INVERTED BREVE - {0x020B, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH INVERTED BREVE - {0x020C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH DOUBLE GRAVE - {0x020D, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH DOUBLE GRAVE - {0x020E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH INVERTED BREVE - {0x020F, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH INVERTED BREVE - {0x0210, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH DOUBLE GRAVE - {0x0211, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH DOUBLE GRAVE - {0x0212, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH INVERTED BREVE - {0x0213, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH INVERTED BREVE - {0x0214, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DOUBLE GRAVE - {0x0215, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH DOUBLE GRAVE - {0x0216, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH INVERTED BREVE - {0x0217, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH INVERTED BREVE - {0x0218, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH COMMA BELOW - {0x0219, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH COMMA BELOW - {0x021A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH COMMA BELOW - {0x021B, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH COMMA BELOW - {0x021C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER YOGH - {0x021D, 0x0, propertyPVALID}, // LATIN SMALL LETTER YOGH - {0x021E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH CARON - {0x021F, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH CARON - {0x0220, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH LONG RIGHT LEG - {0x0221, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH CURL - {0x0222, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER OU - {0x0223, 0x0, propertyPVALID}, // LATIN SMALL LETTER OU - {0x0224, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH HOOK - {0x0225, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH HOOK - {0x0226, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH DOT ABOVE - {0x0227, 0x0, propertyPVALID}, // 
LATIN SMALL LETTER A WITH DOT ABOVE - {0x0228, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CEDILLA - {0x0229, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CEDILLA - {0x022A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH DIAERESIS AND MA - {0x022B, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH DIAERESIS AND MACR - {0x022C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH TILDE AND MACRON - {0x022D, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH TILDE AND MACRON - {0x022E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH DOT ABOVE - {0x022F, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH DOT ABOVE - {0x0230, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH DOT ABOVE AND MA - {0x0231, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH DOT ABOVE AND MACR - {0x0232, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH MACRON - {0x0233, 0x0239, propertyPVALID}, // LATIN SMALL LETTER Y WITH MACRON..LATIN SMAL - {0x023A, 0x023B, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH STROKE..LATIN CA - {0x023C, 0x0, propertyPVALID}, // LATIN SMALL LETTER C WITH STROKE - {0x023D, 0x023E, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH BAR..LATIN CAPIT - {0x023F, 0x0240, propertyPVALID}, // LATIN SMALL LETTER S WITH SWASH TAIL..LATIN - {0x0241, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER GLOTTAL STOP - {0x0242, 0x0, propertyPVALID}, // LATIN SMALL LETTER GLOTTAL STOP - {0x0243, 0x0246, propertyDISALLOWED}, // LATIN CAPITAL LETTER B WITH STROKE..LATIN CA - {0x0247, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH STROKE - {0x0248, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER J WITH STROKE - {0x0249, 0x0, propertyPVALID}, // LATIN SMALL LETTER J WITH STROKE - {0x024A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER SMALL Q WITH HOOK TAIL - {0x024B, 0x0, propertyPVALID}, // LATIN SMALL LETTER Q WITH HOOK TAIL - {0x024C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH STROKE - {0x024D, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH STROKE - {0x024E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH STROKE - {0x024F, 0x02AF, propertyPVALID}, // LATIN SMALL LETTER Y WITH STROKE..LATIN SMAL - {0x02B0, 0x02B8, propertyDISALLOWED}, // MODIFIER LETTER SMALL H..MODIFIER LETTER SMA - {0x02B9, 0x02C1, propertyPVALID}, // MODIFIER LETTER PRIME..MODIFIER LETTER REVER - {0x02C2, 0x02C5, propertyDISALLOWED}, // MODIFIER LETTER LEFT ARROWHEAD..MODIFIER LET - {0x02C6, 0x02D1, propertyPVALID}, // MODIFIER LETTER CIRCUMFLEX ACCENT..MODIFIER - {0x02D2, 0x02EB, propertyDISALLOWED}, // MODIFIER LETTER CENTRED RIGHT HALF RING..MOD - {0x02EC, 0x0, propertyPVALID}, // MODIFIER LETTER VOICING - {0x02ED, 0x0, propertyDISALLOWED}, // MODIFIER LETTER UNASPIRATED - {0x02EE, 0x0, propertyPVALID}, // MODIFIER LETTER DOUBLE APOSTROPHE - {0x02EF, 0x02FF, propertyDISALLOWED}, // MODIFIER LETTER LOW DOWN ARROWHEAD..MODIFIER - {0x0300, 0x033F, propertyPVALID}, // COMBINING GRAVE ACCENT..COMBINING DOUBLE OVE - {0x0340, 0x0341, propertyDISALLOWED}, // COMBINING GRAVE TONE MARK..COMBINING ACUTE T - {0x0342, 0x0, propertyPVALID}, // COMBINING GREEK PERISPOMENI - {0x0343, 0x0345, propertyDISALLOWED}, // COMBINING GREEK KORONIS..COMBINING GREEK YPO - {0x0346, 0x034E, propertyPVALID}, // COMBINING BRIDGE ABOVE..COMBINING UPWARDS AR - {0x034F, 0x0, propertyDISALLOWED}, // COMBINING GRAPHEME JOINER - {0x0350, 0x036F, propertyPVALID}, // COMBINING RIGHT ARROWHEAD ABOVE..COMBINING L - {0x0370, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER 
HETA - {0x0371, 0x0, propertyPVALID}, // GREEK SMALL LETTER HETA - {0x0372, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER ARCHAIC SAMPI - {0x0373, 0x0, propertyPVALID}, // GREEK SMALL LETTER ARCHAIC SAMPI - {0x0374, 0x0, propertyDISALLOWED}, // GREEK NUMERAL SIGN - {0x0375, 0x0, propertyCONTEXTO}, // GREEK LOWER NUMERAL SIGN - {0x0376, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER PAMPHYLIAN DIGAMMA - {0x0377, 0x0, propertyPVALID}, // GREEK SMALL LETTER PAMPHYLIAN DIGAMMA - {0x0378, 0x0379, propertyUNASSIGNED}, // .. - {0x037A, 0x0, propertyDISALLOWED}, // GREEK YPOGEGRAMMENI - {0x037B, 0x037D, propertyPVALID}, // GREEK SMALL REVERSED LUNATE SIGMA SYMBOL..GR - {0x037E, 0x0, propertyDISALLOWED}, // GREEK QUESTION MARK - {0x037F, 0x0383, propertyUNASSIGNED}, // .. - {0x0384, 0x038A, propertyDISALLOWED}, // GREEK TONOS..GREEK CAPITAL LETTER IOTA WITH - {0x038B, 0x0, propertyUNASSIGNED}, // - {0x038C, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER OMICRON WITH TONOS - {0x038D, 0x0, propertyUNASSIGNED}, // - {0x038E, 0x038F, propertyDISALLOWED}, // GREEK CAPITAL LETTER UPSILON WITH TONOS..GRE - {0x0390, 0x0, propertyPVALID}, // GREEK SMALL LETTER IOTA WITH DIALYTIKA AND T - {0x0391, 0x03A1, propertyDISALLOWED}, // GREEK CAPITAL LETTER ALPHA..GREEK CAPITAL LE - {0x03A2, 0x0, propertyUNASSIGNED}, // - {0x03A3, 0x03AB, propertyDISALLOWED}, // GREEK CAPITAL LETTER SIGMA..GREEK CAPITAL LE - {0x03AC, 0x03CE, propertyPVALID}, // GREEK SMALL LETTER ALPHA WITH TONOS..GREEK S - {0x03CF, 0x03D6, propertyDISALLOWED}, // GREEK CAPITAL KAI SYMBOL..GREEK PI SYMBOL - {0x03D7, 0x0, propertyPVALID}, // GREEK KAI SYMBOL - {0x03D8, 0x0, propertyDISALLOWED}, // GREEK LETTER ARCHAIC KOPPA - {0x03D9, 0x0, propertyPVALID}, // GREEK SMALL LETTER ARCHAIC KOPPA - {0x03DA, 0x0, propertyDISALLOWED}, // GREEK LETTER STIGMA - {0x03DB, 0x0, propertyPVALID}, // GREEK SMALL LETTER STIGMA - {0x03DC, 0x0, propertyDISALLOWED}, // GREEK LETTER DIGAMMA - {0x03DD, 0x0, propertyPVALID}, // GREEK SMALL LETTER DIGAMMA - {0x03DE, 0x0, propertyDISALLOWED}, // GREEK LETTER KOPPA - {0x03DF, 0x0, propertyPVALID}, // GREEK SMALL LETTER KOPPA - {0x03E0, 0x0, propertyDISALLOWED}, // GREEK LETTER SAMPI - {0x03E1, 0x0, propertyPVALID}, // GREEK SMALL LETTER SAMPI - {0x03E2, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER SHEI - {0x03E3, 0x0, propertyPVALID}, // COPTIC SMALL LETTER SHEI - {0x03E4, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER FEI - {0x03E5, 0x0, propertyPVALID}, // COPTIC SMALL LETTER FEI - {0x03E6, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER KHEI - {0x03E7, 0x0, propertyPVALID}, // COPTIC SMALL LETTER KHEI - {0x03E8, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER HORI - {0x03E9, 0x0, propertyPVALID}, // COPTIC SMALL LETTER HORI - {0x03EA, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER GANGIA - {0x03EB, 0x0, propertyPVALID}, // COPTIC SMALL LETTER GANGIA - {0x03EC, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER SHIMA - {0x03ED, 0x0, propertyPVALID}, // COPTIC SMALL LETTER SHIMA - {0x03EE, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER DEI - {0x03EF, 0x0, propertyPVALID}, // COPTIC SMALL LETTER DEI - {0x03F0, 0x03F2, propertyDISALLOWED}, // GREEK KAPPA SYMBOL..GREEK LUNATE SIGMA SYMBO - {0x03F3, 0x0, propertyPVALID}, // GREEK LETTER YOT - {0x03F4, 0x03F7, propertyDISALLOWED}, // GREEK CAPITAL THETA SYMBOL..GREEK CAPITAL LE - {0x03F8, 0x0, propertyPVALID}, // GREEK SMALL LETTER SHO - {0x03F9, 0x03FA, propertyDISALLOWED}, // GREEK CAPITAL LUNATE SIGMA SYMBOL..GREEK CAP - {0x03FB, 0x03FC, 
propertyPVALID}, // GREEK SMALL LETTER SAN..GREEK RHO WITH STROK - {0x03FD, 0x042F, propertyDISALLOWED}, // GREEK CAPITAL REVERSED LUNATE SIGMA SYMBOL.. - {0x0430, 0x045F, propertyPVALID}, // CYRILLIC SMALL LETTER A..CYRILLIC SMALL LETT - {0x0460, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER OMEGA - {0x0461, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER OMEGA - {0x0462, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER YAT - {0x0463, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER YAT - {0x0464, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IOTIFIED E - {0x0465, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IOTIFIED E - {0x0466, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER LITTLE YUS - {0x0467, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER LITTLE YUS - {0x0468, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IOTIFIED LITTLE YUS - {0x0469, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IOTIFIED LITTLE YUS - {0x046A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER BIG YUS - {0x046B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER BIG YUS - {0x046C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IOTIFIED BIG YUS - {0x046D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IOTIFIED BIG YUS - {0x046E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KSI - {0x046F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KSI - {0x0470, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER PSI - {0x0471, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER PSI - {0x0472, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER FITA - {0x0473, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER FITA - {0x0474, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IZHITSA - {0x0475, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IZHITSA - {0x0476, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IZHITSA WITH DOUBLE - {0x0477, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IZHITSA WITH DOUBLE GR - {0x0478, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER UK - {0x0479, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER UK - {0x047A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ROUND OMEGA - {0x047B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ROUND OMEGA - {0x047C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER OMEGA WITH TITLO - {0x047D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER OMEGA WITH TITLO - {0x047E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER OT - {0x047F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER OT - {0x0480, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOPPA - {0x0481, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOPPA - {0x0482, 0x0, propertyDISALLOWED}, // CYRILLIC THOUSANDS SIGN - {0x0483, 0x0487, propertyPVALID}, // COMBINING CYRILLIC TITLO..COMBINING CYRILLIC - {0x0488, 0x048A, propertyDISALLOWED}, // COMBINING CYRILLIC HUNDRED THOUSANDS SIGN..C - {0x048B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SHORT I WITH TAIL - {0x048C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SEMISOFT SIGN - {0x048D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SEMISOFT SIGN - {0x048E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ER WITH TICK - {0x048F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ER WITH TICK - {0x0490, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER GHE WITH UPTURN - {0x0491, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER GHE WITH UPTURN - {0x0492, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER GHE WITH STROKE - {0x0493, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER GHE WITH STROKE - 
{0x0494, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER GHE WITH MIDDLE HOOK - {0x0495, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER GHE WITH MIDDLE HOOK - {0x0496, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ZHE WITH DESCENDER - {0x0497, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ZHE WITH DESCENDER - {0x0498, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ZE WITH DESCENDER - {0x0499, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ZE WITH DESCENDER - {0x049A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KA WITH DESCENDER - {0x049B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KA WITH DESCENDER - {0x049C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KA WITH VERTICAL STR - {0x049D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KA WITH VERTICAL STROK - {0x049E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KA WITH STROKE - {0x049F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KA WITH STROKE - {0x04A0, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER BASHKIR KA - {0x04A1, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER BASHKIR KA - {0x04A2, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER EN WITH DESCENDER - {0x04A3, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER EN WITH DESCENDER - {0x04A4, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LIGATURE EN GHE - {0x04A5, 0x0, propertyPVALID}, // CYRILLIC SMALL LIGATURE EN GHE - {0x04A6, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER PE WITH MIDDLE HOOK - {0x04A7, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER PE WITH MIDDLE HOOK - {0x04A8, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ABKHASIAN HA - {0x04A9, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ABKHASIAN HA - {0x04AA, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ES WITH DESCENDER - {0x04AB, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ES WITH DESCENDER - {0x04AC, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER TE WITH DESCENDER - {0x04AD, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER TE WITH DESCENDER - {0x04AE, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER STRAIGHT U - {0x04AF, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER STRAIGHT U - {0x04B0, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER STRAIGHT U WITH STRO - {0x04B1, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER STRAIGHT U WITH STROKE - {0x04B2, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER HA WITH DESCENDER - {0x04B3, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER HA WITH DESCENDER - {0x04B4, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LIGATURE TE TSE - {0x04B5, 0x0, propertyPVALID}, // CYRILLIC SMALL LIGATURE TE TSE - {0x04B6, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER CHE WITH DESCENDER - {0x04B7, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER CHE WITH DESCENDER - {0x04B8, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER CHE WITH VERTICAL ST - {0x04B9, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER CHE WITH VERTICAL STRO - {0x04BA, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SHHA - {0x04BB, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SHHA - {0x04BC, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ABKHASIAN CHE - {0x04BD, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ABKHASIAN CHE - {0x04BE, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ABKHASIAN CHE WITH D - {0x04BF, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ABKHASIAN CHE WITH DES - {0x04C0, 0x04C1, propertyDISALLOWED}, // CYRILLIC LETTER PALOCHKA..CYRILLIC CAPITAL L - {0x04C2, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER 
ZHE WITH BREVE - {0x04C3, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KA WITH HOOK - {0x04C4, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KA WITH HOOK - {0x04C5, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER EL WITH TAIL - {0x04C6, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER EL WITH TAIL - {0x04C7, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER EN WITH HOOK - {0x04C8, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER EN WITH HOOK - {0x04C9, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER EN WITH TAIL - {0x04CA, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER EN WITH TAIL - {0x04CB, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KHAKASSIAN CHE - {0x04CC, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KHAKASSIAN CHE - {0x04CD, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER EM WITH TAIL - {0x04CE, 0x04CF, propertyPVALID}, // CYRILLIC SMALL LETTER EM WITH TAIL..CYRILLIC - {0x04D0, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER A WITH BREVE - {0x04D1, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER A WITH BREVE - {0x04D2, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER A WITH DIAERESIS - {0x04D3, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER A WITH DIAERESIS - {0x04D4, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LIGATURE A IE - {0x04D5, 0x0, propertyPVALID}, // CYRILLIC SMALL LIGATURE A IE - {0x04D6, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IE WITH BREVE - {0x04D7, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IE WITH BREVE - {0x04D8, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SCHWA - {0x04D9, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SCHWA - {0x04DA, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SCHWA WITH DIAERESIS - {0x04DB, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SCHWA WITH DIAERESIS - {0x04DC, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ZHE WITH DIAERESIS - {0x04DD, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ZHE WITH DIAERESIS - {0x04DE, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ZE WITH DIAERESIS - {0x04DF, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ZE WITH DIAERESIS - {0x04E0, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ABKHASIAN DZE - {0x04E1, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ABKHASIAN DZE - {0x04E2, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER I WITH MACRON - {0x04E3, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER I WITH MACRON - {0x04E4, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER I WITH DIAERESIS - {0x04E5, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER I WITH DIAERESIS - {0x04E6, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER O WITH DIAERESIS - {0x04E7, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER O WITH DIAERESIS - {0x04E8, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER BARRED O - {0x04E9, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER BARRED O - {0x04EA, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER BARRED O WITH DIAERE - {0x04EB, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER BARRED O WITH DIAERESI - {0x04EC, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER E WITH DIAERESIS - {0x04ED, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER E WITH DIAERESIS - {0x04EE, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER U WITH MACRON - {0x04EF, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER U WITH MACRON - {0x04F0, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER U WITH DIAERESIS - {0x04F1, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER U WITH DIAERESIS - {0x04F2, 0x0, propertyDISALLOWED}, // CYRILLIC 
CAPITAL LETTER U WITH DOUBLE ACUTE - {0x04F3, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER U WITH DOUBLE ACUTE - {0x04F4, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER CHE WITH DIAERESIS - {0x04F5, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER CHE WITH DIAERESIS - {0x04F6, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER GHE WITH DESCENDER - {0x04F7, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER GHE WITH DESCENDER - {0x04F8, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER YERU WITH DIAERESIS - {0x04F9, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER YERU WITH DIAERESIS - {0x04FA, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER GHE WITH STROKE AND - {0x04FB, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER GHE WITH STROKE AND HO - {0x04FC, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER HA WITH HOOK - {0x04FD, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER HA WITH HOOK - {0x04FE, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER HA WITH STROKE - {0x04FF, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER HA WITH STROKE - {0x0500, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOMI DE - {0x0501, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOMI DE - {0x0502, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOMI DJE - {0x0503, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOMI DJE - {0x0504, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOMI ZJE - {0x0505, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOMI ZJE - {0x0506, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOMI DZJE - {0x0507, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOMI DZJE - {0x0508, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOMI LJE - {0x0509, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOMI LJE - {0x050A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOMI NJE - {0x050B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOMI NJE - {0x050C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOMI SJE - {0x050D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOMI SJE - {0x050E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOMI TJE - {0x050F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOMI TJE - {0x0510, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER REVERSED ZE - {0x0511, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER REVERSED ZE - {0x0512, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER EL WITH HOOK - {0x0513, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER EL WITH HOOK - {0x0514, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER LHA - {0x0515, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER LHA - {0x0516, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER RHA - {0x0517, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER RHA - {0x0518, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER YAE - {0x0519, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER YAE - {0x051A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER QA - {0x051B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER QA - {0x051C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER WE - {0x051D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER WE - {0x051E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ALEUT KA - {0x051F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ALEUT KA - {0x0520, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER EL WITH MIDDLE HOOK - {0x0521, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER EL WITH MIDDLE HOOK - {0x0522, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER EN WITH MIDDLE HOOK - {0x0523, 0x0, propertyPVALID}, // CYRILLIC 
SMALL LETTER EN WITH MIDDLE HOOK - {0x0524, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER PE WITH DESCENDER - {0x0525, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER PE WITH DESCENDER - {0x0526, 0x0530, propertyUNASSIGNED}, // .. - {0x0531, 0x0556, propertyDISALLOWED}, // ARMENIAN CAPITAL LETTER AYB..ARMENIAN CAPITA - {0x0557, 0x0558, propertyUNASSIGNED}, // .. - {0x0559, 0x0, propertyPVALID}, // ARMENIAN MODIFIER LETTER LEFT HALF RING - {0x055A, 0x055F, propertyDISALLOWED}, // ARMENIAN APOSTROPHE..ARMENIAN ABBREVIATION M - {0x0560, 0x0, propertyUNASSIGNED}, // - {0x0561, 0x0586, propertyPVALID}, // ARMENIAN SMALL LETTER AYB..ARMENIAN SMALL LE - {0x0587, 0x0, propertyDISALLOWED}, // ARMENIAN SMALL LIGATURE ECH YIWN - {0x0588, 0x0, propertyUNASSIGNED}, // - {0x0589, 0x058A, propertyDISALLOWED}, // ARMENIAN FULL STOP..ARMENIAN HYPHEN - {0x058B, 0x0590, propertyUNASSIGNED}, // .. - {0x0591, 0x05BD, propertyPVALID}, // HEBREW ACCENT ETNAHTA..HEBREW POINT METEG - {0x05BE, 0x0, propertyDISALLOWED}, // HEBREW PUNCTUATION MAQAF - {0x05BF, 0x0, propertyPVALID}, // HEBREW POINT RAFE - {0x05C0, 0x0, propertyDISALLOWED}, // HEBREW PUNCTUATION PASEQ - {0x05C1, 0x05C2, propertyPVALID}, // HEBREW POINT SHIN DOT..HEBREW POINT SIN DOT - {0x05C3, 0x0, propertyDISALLOWED}, // HEBREW PUNCTUATION SOF PASUQ - {0x05C4, 0x05C5, propertyPVALID}, // HEBREW MARK UPPER DOT..HEBREW MARK LOWER DOT - {0x05C6, 0x0, propertyDISALLOWED}, // HEBREW PUNCTUATION NUN HAFUKHA - {0x05C7, 0x0, propertyPVALID}, // HEBREW POINT QAMATS QATAN - {0x05C8, 0x05CF, propertyUNASSIGNED}, // .. - {0x05D0, 0x05EA, propertyPVALID}, // HEBREW LETTER ALEF..HEBREW LETTER TAV - {0x05EB, 0x05EF, propertyUNASSIGNED}, // .. - {0x05F0, 0x05F2, propertyPVALID}, // HEBREW LIGATURE YIDDISH DOUBLE VAV..HEBREW L - {0x05F3, 0x05F4, propertyCONTEXTO}, // HEBREW PUNCTUATION GERESH..HEBREW PUNCTUATIO - {0x05F5, 0x05FF, propertyUNASSIGNED}, // .. - {0x0600, 0x0603, propertyDISALLOWED}, // ARABIC NUMBER SIGN..ARABIC SIGN SAFHA - {0x0604, 0x0605, propertyUNASSIGNED}, // .. - {0x0606, 0x060F, propertyDISALLOWED}, // ARABIC-INDIC CUBE ROOT..ARABIC SIGN MISRA - {0x0610, 0x061A, propertyPVALID}, // ARABIC SIGN SALLALLAHOU ALAYHE WASSALLAM..AR - {0x061B, 0x0, propertyDISALLOWED}, // ARABIC SEMICOLON - {0x061C, 0x061D, propertyUNASSIGNED}, // .. 
- {0x061E, 0x061F, propertyDISALLOWED}, // ARABIC TRIPLE DOT PUNCTUATION MARK..ARABIC Q - {0x0620, 0x0, propertyUNASSIGNED}, // - {0x0621, 0x063F, propertyPVALID}, // ARABIC LETTER HAMZA..ARABIC LETTER FARSI YEH - {0x0640, 0x0, propertyDISALLOWED}, // ARABIC TATWEEL - {0x0641, 0x065E, propertyPVALID}, // ARABIC LETTER FEH..ARABIC FATHA WITH TWO DOT - {0x065F, 0x0, propertyUNASSIGNED}, // - {0x0660, 0x0669, propertyCONTEXTO}, // ARABIC-INDIC DIGIT ZERO..ARABIC-INDIC DIGIT - {0x066A, 0x066D, propertyDISALLOWED}, // ARABIC PERCENT SIGN..ARABIC FIVE POINTED STA - {0x066E, 0x0674, propertyPVALID}, // ARABIC LETTER DOTLESS BEH..ARABIC LETTER HIG - {0x0675, 0x0678, propertyDISALLOWED}, // ARABIC LETTER HIGH HAMZA ALEF..ARABIC LETTER - {0x0679, 0x06D3, propertyPVALID}, // ARABIC LETTER TTEH..ARABIC LETTER YEH BARREE - {0x06D4, 0x0, propertyDISALLOWED}, // ARABIC FULL STOP - {0x06D5, 0x06DC, propertyPVALID}, // ARABIC LETTER AE..ARABIC SMALL HIGH SEEN - {0x06DD, 0x06DE, propertyDISALLOWED}, // ARABIC END OF AYAH..ARABIC START OF RUB EL H - {0x06DF, 0x06E8, propertyPVALID}, // ARABIC SMALL HIGH ROUNDED ZERO..ARABIC SMALL - {0x06E9, 0x0, propertyDISALLOWED}, // ARABIC PLACE OF SAJDAH - {0x06EA, 0x06EF, propertyPVALID}, // ARABIC EMPTY CENTRE LOW STOP..ARABIC LETTER - {0x06F0, 0x06F9, propertyCONTEXTO}, // EXTENDED ARABIC-INDIC DIGIT ZERO..EXTENDED A - {0x06FA, 0x06FF, propertyPVALID}, // ARABIC LETTER SHEEN WITH DOT BELOW..ARABIC L - {0x0700, 0x070D, propertyDISALLOWED}, // SYRIAC END OF PARAGRAPH..SYRIAC HARKLEAN AST - {0x070E, 0x0, propertyUNASSIGNED}, // - {0x070F, 0x0, propertyDISALLOWED}, // SYRIAC ABBREVIATION MARK - {0x0710, 0x074A, propertyPVALID}, // SYRIAC LETTER ALAPH..SYRIAC BARREKH - {0x074B, 0x074C, propertyUNASSIGNED}, // .. - {0x074D, 0x07B1, propertyPVALID}, // SYRIAC LETTER SOGDIAN ZHAIN..THAANA LETTER N - {0x07B2, 0x07BF, propertyUNASSIGNED}, // .. - {0x07C0, 0x07F5, propertyPVALID}, // NKO DIGIT ZERO..NKO LOW TONE APOSTROPHE - {0x07F6, 0x07FA, propertyDISALLOWED}, // NKO SYMBOL OO DENNEN..NKO LAJANYALAN - {0x07FB, 0x07FF, propertyUNASSIGNED}, // .. - {0x0800, 0x082D, propertyPVALID}, // SAMARITAN LETTER ALAF..SAMARITAN MARK NEQUDA - {0x082E, 0x082F, propertyUNASSIGNED}, // .. - {0x0830, 0x083E, propertyDISALLOWED}, // SAMARITAN PUNCTUATION NEQUDAA..SAMARITAN PUN - {0x083F, 0x08FF, propertyUNASSIGNED}, // .. - {0x0900, 0x0939, propertyPVALID}, // DEVANAGARI SIGN INVERTED CANDRABINDU..DEVANA - {0x093A, 0x093B, propertyUNASSIGNED}, // .. - {0x093C, 0x094E, propertyPVALID}, // DEVANAGARI SIGN NUKTA..DEVANAGARI VOWEL SIGN - {0x094F, 0x0, propertyUNASSIGNED}, // - {0x0950, 0x0955, propertyPVALID}, // DEVANAGARI OM..DEVANAGARI VOWEL SIGN CANDRA - {0x0956, 0x0957, propertyUNASSIGNED}, // .. - {0x0958, 0x095F, propertyDISALLOWED}, // DEVANAGARI LETTER QA..DEVANAGARI LETTER YYA - {0x0960, 0x0963, propertyPVALID}, // DEVANAGARI LETTER VOCALIC RR..DEVANAGARI VOW - {0x0964, 0x0965, propertyDISALLOWED}, // DEVANAGARI DANDA..DEVANAGARI DOUBLE DANDA - {0x0966, 0x096F, propertyPVALID}, // DEVANAGARI DIGIT ZERO..DEVANAGARI DIGIT NINE - {0x0970, 0x0, propertyDISALLOWED}, // DEVANAGARI ABBREVIATION SIGN - {0x0971, 0x0972, propertyPVALID}, // DEVANAGARI SIGN HIGH SPACING DOT..DEVANAGARI - {0x0973, 0x0978, propertyUNASSIGNED}, // .. 
- {0x0979, 0x097F, propertyPVALID}, // DEVANAGARI LETTER ZHA..DEVANAGARI LETTER BBA - {0x0980, 0x0, propertyUNASSIGNED}, // - {0x0981, 0x0983, propertyPVALID}, // BENGALI SIGN CANDRABINDU..BENGALI SIGN VISAR - {0x0984, 0x0, propertyUNASSIGNED}, // - {0x0985, 0x098C, propertyPVALID}, // BENGALI LETTER A..BENGALI LETTER VOCALIC L - {0x098D, 0x098E, propertyUNASSIGNED}, // .. - {0x098F, 0x0990, propertyPVALID}, // BENGALI LETTER E..BENGALI LETTER AI - {0x0991, 0x0992, propertyUNASSIGNED}, // .. - {0x0993, 0x09A8, propertyPVALID}, // BENGALI LETTER O..BENGALI LETTER NA - {0x09A9, 0x0, propertyUNASSIGNED}, // - {0x09AA, 0x09B0, propertyPVALID}, // BENGALI LETTER PA..BENGALI LETTER RA - {0x09B1, 0x0, propertyUNASSIGNED}, // - {0x09B2, 0x0, propertyPVALID}, // BENGALI LETTER LA - {0x09B3, 0x09B5, propertyUNASSIGNED}, // .. - {0x09B6, 0x09B9, propertyPVALID}, // BENGALI LETTER SHA..BENGALI LETTER HA - {0x09BA, 0x09BB, propertyUNASSIGNED}, // .. - {0x09BC, 0x09C4, propertyPVALID}, // BENGALI SIGN NUKTA..BENGALI VOWEL SIGN VOCAL - {0x09C5, 0x09C6, propertyUNASSIGNED}, // .. - {0x09C7, 0x09C8, propertyPVALID}, // BENGALI VOWEL SIGN E..BENGALI VOWEL SIGN AI - {0x09C9, 0x09CA, propertyUNASSIGNED}, // .. - {0x09CB, 0x09CE, propertyPVALID}, // BENGALI VOWEL SIGN O..BENGALI LETTER KHANDA - {0x09CF, 0x09D6, propertyUNASSIGNED}, // .. - {0x09D7, 0x0, propertyPVALID}, // BENGALI AU LENGTH MARK - {0x09D8, 0x09DB, propertyUNASSIGNED}, // .. - {0x09DC, 0x09DD, propertyDISALLOWED}, // BENGALI LETTER RRA..BENGALI LETTER RHA - {0x09DE, 0x0, propertyUNASSIGNED}, // - {0x09DF, 0x0, propertyDISALLOWED}, // BENGALI LETTER YYA - {0x09E0, 0x09E3, propertyPVALID}, // BENGALI LETTER VOCALIC RR..BENGALI VOWEL SIG - {0x09E4, 0x09E5, propertyUNASSIGNED}, // .. - {0x09E6, 0x09F1, propertyPVALID}, // BENGALI DIGIT ZERO..BENGALI LETTER RA WITH L - {0x09F2, 0x09FB, propertyDISALLOWED}, // BENGALI RUPEE MARK..BENGALI GANDA MARK - {0x09FC, 0x0A00, propertyUNASSIGNED}, // .. - {0x0A01, 0x0A03, propertyPVALID}, // GURMUKHI SIGN ADAK BINDI..GURMUKHI SIGN VISA - {0x0A04, 0x0, propertyUNASSIGNED}, // - {0x0A05, 0x0A0A, propertyPVALID}, // GURMUKHI LETTER A..GURMUKHI LETTER UU - {0x0A0B, 0x0A0E, propertyUNASSIGNED}, // .. - {0x0A0F, 0x0A10, propertyPVALID}, // GURMUKHI LETTER EE..GURMUKHI LETTER AI - {0x0A11, 0x0A12, propertyUNASSIGNED}, // .. - {0x0A13, 0x0A28, propertyPVALID}, // GURMUKHI LETTER OO..GURMUKHI LETTER NA - {0x0A29, 0x0, propertyUNASSIGNED}, // - {0x0A2A, 0x0A30, propertyPVALID}, // GURMUKHI LETTER PA..GURMUKHI LETTER RA - {0x0A31, 0x0, propertyUNASSIGNED}, // - {0x0A32, 0x0, propertyPVALID}, // GURMUKHI LETTER LA - {0x0A33, 0x0, propertyDISALLOWED}, // GURMUKHI LETTER LLA - {0x0A34, 0x0, propertyUNASSIGNED}, // - {0x0A35, 0x0, propertyPVALID}, // GURMUKHI LETTER VA - {0x0A36, 0x0, propertyDISALLOWED}, // GURMUKHI LETTER SHA - {0x0A37, 0x0, propertyUNASSIGNED}, // - {0x0A38, 0x0A39, propertyPVALID}, // GURMUKHI LETTER SA..GURMUKHI LETTER HA - {0x0A3A, 0x0A3B, propertyUNASSIGNED}, // .. - {0x0A3C, 0x0, propertyPVALID}, // GURMUKHI SIGN NUKTA - {0x0A3D, 0x0, propertyUNASSIGNED}, // - {0x0A3E, 0x0A42, propertyPVALID}, // GURMUKHI VOWEL SIGN AA..GURMUKHI VOWEL SIGN - {0x0A43, 0x0A46, propertyUNASSIGNED}, // .. - {0x0A47, 0x0A48, propertyPVALID}, // GURMUKHI VOWEL SIGN EE..GURMUKHI VOWEL SIGN - {0x0A49, 0x0A4A, propertyUNASSIGNED}, // .. - {0x0A4B, 0x0A4D, propertyPVALID}, // GURMUKHI VOWEL SIGN OO..GURMUKHI SIGN VIRAMA - {0x0A4E, 0x0A50, propertyUNASSIGNED}, // .. 
- {0x0A51, 0x0, propertyPVALID}, // GURMUKHI SIGN UDAAT - {0x0A52, 0x0A58, propertyUNASSIGNED}, // .. - {0x0A59, 0x0A5B, propertyDISALLOWED}, // GURMUKHI LETTER KHHA..GURMUKHI LETTER ZA - {0x0A5C, 0x0, propertyPVALID}, // GURMUKHI LETTER RRA - {0x0A5D, 0x0, propertyUNASSIGNED}, // - {0x0A5E, 0x0, propertyDISALLOWED}, // GURMUKHI LETTER FA - {0x0A5F, 0x0A65, propertyUNASSIGNED}, // .. - {0x0A66, 0x0A75, propertyPVALID}, // GURMUKHI DIGIT ZERO..GURMUKHI SIGN YAKASH - {0x0A76, 0x0A80, propertyUNASSIGNED}, // .. - {0x0A81, 0x0A83, propertyPVALID}, // GUJARATI SIGN CANDRABINDU..GUJARATI SIGN VIS - {0x0A84, 0x0, propertyUNASSIGNED}, // - {0x0A85, 0x0A8D, propertyPVALID}, // GUJARATI LETTER A..GUJARATI VOWEL CANDRA E - {0x0A8E, 0x0, propertyUNASSIGNED}, // - {0x0A8F, 0x0A91, propertyPVALID}, // GUJARATI LETTER E..GUJARATI VOWEL CANDRA O - {0x0A92, 0x0, propertyUNASSIGNED}, // - {0x0A93, 0x0AA8, propertyPVALID}, // GUJARATI LETTER O..GUJARATI LETTER NA - {0x0AA9, 0x0, propertyUNASSIGNED}, // - {0x0AAA, 0x0AB0, propertyPVALID}, // GUJARATI LETTER PA..GUJARATI LETTER RA - {0x0AB1, 0x0, propertyUNASSIGNED}, // - {0x0AB2, 0x0AB3, propertyPVALID}, // GUJARATI LETTER LA..GUJARATI LETTER LLA - {0x0AB4, 0x0, propertyUNASSIGNED}, // - {0x0AB5, 0x0AB9, propertyPVALID}, // GUJARATI LETTER VA..GUJARATI LETTER HA - {0x0ABA, 0x0ABB, propertyUNASSIGNED}, // .. - {0x0ABC, 0x0AC5, propertyPVALID}, // GUJARATI SIGN NUKTA..GUJARATI VOWEL SIGN CAN - {0x0AC6, 0x0, propertyUNASSIGNED}, // - {0x0AC7, 0x0AC9, propertyPVALID}, // GUJARATI VOWEL SIGN E..GUJARATI VOWEL SIGN C - {0x0ACA, 0x0, propertyUNASSIGNED}, // - {0x0ACB, 0x0ACD, propertyPVALID}, // GUJARATI VOWEL SIGN O..GUJARATI SIGN VIRAMA - {0x0ACE, 0x0ACF, propertyUNASSIGNED}, // .. - {0x0AD0, 0x0, propertyPVALID}, // GUJARATI OM - {0x0AD1, 0x0ADF, propertyUNASSIGNED}, // .. - {0x0AE0, 0x0AE3, propertyPVALID}, // GUJARATI LETTER VOCALIC RR..GUJARATI VOWEL S - {0x0AE4, 0x0AE5, propertyUNASSIGNED}, // .. - {0x0AE6, 0x0AEF, propertyPVALID}, // GUJARATI DIGIT ZERO..GUJARATI DIGIT NINE - {0x0AF0, 0x0, propertyUNASSIGNED}, // - {0x0AF1, 0x0, propertyDISALLOWED}, // GUJARATI RUPEE SIGN - {0x0AF2, 0x0B00, propertyUNASSIGNED}, // .. - {0x0B01, 0x0B03, propertyPVALID}, // ORIYA SIGN CANDRABINDU..ORIYA SIGN VISARGA - {0x0B04, 0x0, propertyUNASSIGNED}, // - {0x0B05, 0x0B0C, propertyPVALID}, // ORIYA LETTER A..ORIYA LETTER VOCALIC L - {0x0B0D, 0x0B0E, propertyUNASSIGNED}, // .. - {0x0B0F, 0x0B10, propertyPVALID}, // ORIYA LETTER E..ORIYA LETTER AI - {0x0B11, 0x0B12, propertyUNASSIGNED}, // .. - {0x0B13, 0x0B28, propertyPVALID}, // ORIYA LETTER O..ORIYA LETTER NA - {0x0B29, 0x0, propertyUNASSIGNED}, // - {0x0B2A, 0x0B30, propertyPVALID}, // ORIYA LETTER PA..ORIYA LETTER RA - {0x0B31, 0x0, propertyUNASSIGNED}, // - {0x0B32, 0x0B33, propertyPVALID}, // ORIYA LETTER LA..ORIYA LETTER LLA - {0x0B34, 0x0, propertyUNASSIGNED}, // - {0x0B35, 0x0B39, propertyPVALID}, // ORIYA LETTER VA..ORIYA LETTER HA - {0x0B3A, 0x0B3B, propertyUNASSIGNED}, // .. - {0x0B3C, 0x0B44, propertyPVALID}, // ORIYA SIGN NUKTA..ORIYA VOWEL SIGN VOCALIC R - {0x0B45, 0x0B46, propertyUNASSIGNED}, // .. - {0x0B47, 0x0B48, propertyPVALID}, // ORIYA VOWEL SIGN E..ORIYA VOWEL SIGN AI - {0x0B49, 0x0B4A, propertyUNASSIGNED}, // .. - {0x0B4B, 0x0B4D, propertyPVALID}, // ORIYA VOWEL SIGN O..ORIYA SIGN VIRAMA - {0x0B4E, 0x0B55, propertyUNASSIGNED}, // .. - {0x0B56, 0x0B57, propertyPVALID}, // ORIYA AI LENGTH MARK..ORIYA AU LENGTH MARK - {0x0B58, 0x0B5B, propertyUNASSIGNED}, // .. 
- {0x0B5C, 0x0B5D, propertyDISALLOWED}, // ORIYA LETTER RRA..ORIYA LETTER RHA - {0x0B5E, 0x0, propertyUNASSIGNED}, // - {0x0B5F, 0x0B63, propertyPVALID}, // ORIYA LETTER YYA..ORIYA VOWEL SIGN VOCALIC L - {0x0B64, 0x0B65, propertyUNASSIGNED}, // .. - {0x0B66, 0x0B6F, propertyPVALID}, // ORIYA DIGIT ZERO..ORIYA DIGIT NINE - {0x0B70, 0x0, propertyDISALLOWED}, // ORIYA ISSHAR - {0x0B71, 0x0, propertyPVALID}, // ORIYA LETTER WA - {0x0B72, 0x0B81, propertyUNASSIGNED}, // .. - {0x0B82, 0x0B83, propertyPVALID}, // TAMIL SIGN ANUSVARA..TAMIL SIGN VISARGA - {0x0B84, 0x0, propertyUNASSIGNED}, // - {0x0B85, 0x0B8A, propertyPVALID}, // TAMIL LETTER A..TAMIL LETTER UU - {0x0B8B, 0x0B8D, propertyUNASSIGNED}, // .. - {0x0B8E, 0x0B90, propertyPVALID}, // TAMIL LETTER E..TAMIL LETTER AI - {0x0B91, 0x0, propertyUNASSIGNED}, // - {0x0B92, 0x0B95, propertyPVALID}, // TAMIL LETTER O..TAMIL LETTER KA - {0x0B96, 0x0B98, propertyUNASSIGNED}, // .. - {0x0B99, 0x0B9A, propertyPVALID}, // TAMIL LETTER NGA..TAMIL LETTER CA - {0x0B9B, 0x0, propertyUNASSIGNED}, // - {0x0B9C, 0x0, propertyPVALID}, // TAMIL LETTER JA - {0x0B9D, 0x0, propertyUNASSIGNED}, // - {0x0B9E, 0x0B9F, propertyPVALID}, // TAMIL LETTER NYA..TAMIL LETTER TTA - {0x0BA0, 0x0BA2, propertyUNASSIGNED}, // .. - {0x0BA3, 0x0BA4, propertyPVALID}, // TAMIL LETTER NNA..TAMIL LETTER TA - {0x0BA5, 0x0BA7, propertyUNASSIGNED}, // .. - {0x0BA8, 0x0BAA, propertyPVALID}, // TAMIL LETTER NA..TAMIL LETTER PA - {0x0BAB, 0x0BAD, propertyUNASSIGNED}, // .. - {0x0BAE, 0x0BB9, propertyPVALID}, // TAMIL LETTER MA..TAMIL LETTER HA - {0x0BBA, 0x0BBD, propertyUNASSIGNED}, // .. - {0x0BBE, 0x0BC2, propertyPVALID}, // TAMIL VOWEL SIGN AA..TAMIL VOWEL SIGN UU - {0x0BC3, 0x0BC5, propertyUNASSIGNED}, // .. - {0x0BC6, 0x0BC8, propertyPVALID}, // TAMIL VOWEL SIGN E..TAMIL VOWEL SIGN AI - {0x0BC9, 0x0, propertyUNASSIGNED}, // - {0x0BCA, 0x0BCD, propertyPVALID}, // TAMIL VOWEL SIGN O..TAMIL SIGN VIRAMA - {0x0BCE, 0x0BCF, propertyUNASSIGNED}, // .. - {0x0BD0, 0x0, propertyPVALID}, // TAMIL OM - {0x0BD1, 0x0BD6, propertyUNASSIGNED}, // .. - {0x0BD7, 0x0, propertyPVALID}, // TAMIL AU LENGTH MARK - {0x0BD8, 0x0BE5, propertyUNASSIGNED}, // .. - {0x0BE6, 0x0BEF, propertyPVALID}, // TAMIL DIGIT ZERO..TAMIL DIGIT NINE - {0x0BF0, 0x0BFA, propertyDISALLOWED}, // TAMIL NUMBER TEN..TAMIL NUMBER SIGN - {0x0BFB, 0x0C00, propertyUNASSIGNED}, // .. - {0x0C01, 0x0C03, propertyPVALID}, // TELUGU SIGN CANDRABINDU..TELUGU SIGN VISARGA - {0x0C04, 0x0, propertyUNASSIGNED}, // - {0x0C05, 0x0C0C, propertyPVALID}, // TELUGU LETTER A..TELUGU LETTER VOCALIC L - {0x0C0D, 0x0, propertyUNASSIGNED}, // - {0x0C0E, 0x0C10, propertyPVALID}, // TELUGU LETTER E..TELUGU LETTER AI - {0x0C11, 0x0, propertyUNASSIGNED}, // - {0x0C12, 0x0C28, propertyPVALID}, // TELUGU LETTER O..TELUGU LETTER NA - {0x0C29, 0x0, propertyUNASSIGNED}, // - {0x0C2A, 0x0C33, propertyPVALID}, // TELUGU LETTER PA..TELUGU LETTER LLA - {0x0C34, 0x0, propertyUNASSIGNED}, // - {0x0C35, 0x0C39, propertyPVALID}, // TELUGU LETTER VA..TELUGU LETTER HA - {0x0C3A, 0x0C3C, propertyUNASSIGNED}, // .. - {0x0C3D, 0x0C44, propertyPVALID}, // TELUGU SIGN AVAGRAHA..TELUGU VOWEL SIGN VOCA - {0x0C45, 0x0, propertyUNASSIGNED}, // - {0x0C46, 0x0C48, propertyPVALID}, // TELUGU VOWEL SIGN E..TELUGU VOWEL SIGN AI - {0x0C49, 0x0, propertyUNASSIGNED}, // - {0x0C4A, 0x0C4D, propertyPVALID}, // TELUGU VOWEL SIGN O..TELUGU SIGN VIRAMA - {0x0C4E, 0x0C54, propertyUNASSIGNED}, // .. 
- {0x0C55, 0x0C56, propertyPVALID}, // TELUGU LENGTH MARK..TELUGU AI LENGTH MARK - {0x0C57, 0x0, propertyUNASSIGNED}, // - {0x0C58, 0x0C59, propertyPVALID}, // TELUGU LETTER TSA..TELUGU LETTER DZA - {0x0C5A, 0x0C5F, propertyUNASSIGNED}, // .. - {0x0C60, 0x0C63, propertyPVALID}, // TELUGU LETTER VOCALIC RR..TELUGU VOWEL SIGN - {0x0C64, 0x0C65, propertyUNASSIGNED}, // .. - {0x0C66, 0x0C6F, propertyPVALID}, // TELUGU DIGIT ZERO..TELUGU DIGIT NINE - {0x0C70, 0x0C77, propertyUNASSIGNED}, // .. - {0x0C78, 0x0C7F, propertyDISALLOWED}, // TELUGU FRACTION DIGIT ZERO FOR ODD POWERS OF - {0x0C80, 0x0C81, propertyUNASSIGNED}, // .. - {0x0C82, 0x0C83, propertyPVALID}, // KANNADA SIGN ANUSVARA..KANNADA SIGN VISARGA - {0x0C84, 0x0, propertyUNASSIGNED}, // - {0x0C85, 0x0C8C, propertyPVALID}, // KANNADA LETTER A..KANNADA LETTER VOCALIC L - {0x0C8D, 0x0, propertyUNASSIGNED}, // - {0x0C8E, 0x0C90, propertyPVALID}, // KANNADA LETTER E..KANNADA LETTER AI - {0x0C91, 0x0, propertyUNASSIGNED}, // - {0x0C92, 0x0CA8, propertyPVALID}, // KANNADA LETTER O..KANNADA LETTER NA - {0x0CA9, 0x0, propertyUNASSIGNED}, // - {0x0CAA, 0x0CB3, propertyPVALID}, // KANNADA LETTER PA..KANNADA LETTER LLA - {0x0CB4, 0x0, propertyUNASSIGNED}, // - {0x0CB5, 0x0CB9, propertyPVALID}, // KANNADA LETTER VA..KANNADA LETTER HA - {0x0CBA, 0x0CBB, propertyUNASSIGNED}, // .. - {0x0CBC, 0x0CC4, propertyPVALID}, // KANNADA SIGN NUKTA..KANNADA VOWEL SIGN VOCAL - {0x0CC5, 0x0, propertyUNASSIGNED}, // - {0x0CC6, 0x0CC8, propertyPVALID}, // KANNADA VOWEL SIGN E..KANNADA VOWEL SIGN AI - {0x0CC9, 0x0, propertyUNASSIGNED}, // - {0x0CCA, 0x0CCD, propertyPVALID}, // KANNADA VOWEL SIGN O..KANNADA SIGN VIRAMA - {0x0CCE, 0x0CD4, propertyUNASSIGNED}, // .. - {0x0CD5, 0x0CD6, propertyPVALID}, // KANNADA LENGTH MARK..KANNADA AI LENGTH MARK - {0x0CD7, 0x0CDD, propertyUNASSIGNED}, // .. - {0x0CDE, 0x0, propertyPVALID}, // KANNADA LETTER FA - {0x0CDF, 0x0, propertyUNASSIGNED}, // - {0x0CE0, 0x0CE3, propertyPVALID}, // KANNADA LETTER VOCALIC RR..KANNADA VOWEL SIG - {0x0CE4, 0x0CE5, propertyUNASSIGNED}, // .. - {0x0CE6, 0x0CEF, propertyPVALID}, // KANNADA DIGIT ZERO..KANNADA DIGIT NINE - {0x0CF0, 0x0, propertyUNASSIGNED}, // - {0x0CF1, 0x0CF2, propertyDISALLOWED}, // KANNADA SIGN JIHVAMULIYA..KANNADA SIGN UPADH - {0x0CF3, 0x0D01, propertyUNASSIGNED}, // .. - {0x0D02, 0x0D03, propertyPVALID}, // MALAYALAM SIGN ANUSVARA..MALAYALAM SIGN VISA - {0x0D04, 0x0, propertyUNASSIGNED}, // - {0x0D05, 0x0D0C, propertyPVALID}, // MALAYALAM LETTER A..MALAYALAM LETTER VOCALIC - {0x0D0D, 0x0, propertyUNASSIGNED}, // - {0x0D0E, 0x0D10, propertyPVALID}, // MALAYALAM LETTER E..MALAYALAM LETTER AI - {0x0D11, 0x0, propertyUNASSIGNED}, // - {0x0D12, 0x0D28, propertyPVALID}, // MALAYALAM LETTER O..MALAYALAM LETTER NA - {0x0D29, 0x0, propertyUNASSIGNED}, // - {0x0D2A, 0x0D39, propertyPVALID}, // MALAYALAM LETTER PA..MALAYALAM LETTER HA - {0x0D3A, 0x0D3C, propertyUNASSIGNED}, // .. - {0x0D3D, 0x0D44, propertyPVALID}, // MALAYALAM SIGN AVAGRAHA..MALAYALAM VOWEL SIG - {0x0D45, 0x0, propertyUNASSIGNED}, // - {0x0D46, 0x0D48, propertyPVALID}, // MALAYALAM VOWEL SIGN E..MALAYALAM VOWEL SIGN - {0x0D49, 0x0, propertyUNASSIGNED}, // - {0x0D4A, 0x0D4D, propertyPVALID}, // MALAYALAM VOWEL SIGN O..MALAYALAM SIGN VIRAM - {0x0D4E, 0x0D56, propertyUNASSIGNED}, // .. - {0x0D57, 0x0, propertyPVALID}, // MALAYALAM AU LENGTH MARK - {0x0D58, 0x0D5F, propertyUNASSIGNED}, // .. - {0x0D60, 0x0D63, propertyPVALID}, // MALAYALAM LETTER VOCALIC RR..MALAYALAM VOWEL - {0x0D64, 0x0D65, propertyUNASSIGNED}, // .. 
- {0x0D66, 0x0D6F, propertyPVALID}, // MALAYALAM DIGIT ZERO..MALAYALAM DIGIT NINE - {0x0D70, 0x0D75, propertyDISALLOWED}, // MALAYALAM NUMBER TEN..MALAYALAM FRACTION THR - {0x0D76, 0x0D78, propertyUNASSIGNED}, // .. - {0x0D79, 0x0, propertyDISALLOWED}, // MALAYALAM DATE MARK - {0x0D7A, 0x0D7F, propertyPVALID}, // MALAYALAM LETTER CHILLU NN..MALAYALAM LETTER - {0x0D80, 0x0D81, propertyUNASSIGNED}, // .. - {0x0D82, 0x0D83, propertyPVALID}, // SINHALA SIGN ANUSVARAYA..SINHALA SIGN VISARG - {0x0D84, 0x0, propertyUNASSIGNED}, // - {0x0D85, 0x0D96, propertyPVALID}, // SINHALA LETTER AYANNA..SINHALA LETTER AUYANN - {0x0D97, 0x0D99, propertyUNASSIGNED}, // .. - {0x0D9A, 0x0DB1, propertyPVALID}, // SINHALA LETTER ALPAPRAANA KAYANNA..SINHALA L - {0x0DB2, 0x0, propertyUNASSIGNED}, // - {0x0DB3, 0x0DBB, propertyPVALID}, // SINHALA LETTER SANYAKA DAYANNA..SINHALA LETT - {0x0DBC, 0x0, propertyUNASSIGNED}, // - {0x0DBD, 0x0, propertyPVALID}, // SINHALA LETTER DANTAJA LAYANNA - {0x0DBE, 0x0DBF, propertyUNASSIGNED}, // .. - {0x0DC0, 0x0DC6, propertyPVALID}, // SINHALA LETTER VAYANNA..SINHALA LETTER FAYAN - {0x0DC7, 0x0DC9, propertyUNASSIGNED}, // .. - {0x0DCA, 0x0, propertyPVALID}, // SINHALA SIGN AL-LAKUNA - {0x0DCB, 0x0DCE, propertyUNASSIGNED}, // .. - {0x0DCF, 0x0DD4, propertyPVALID}, // SINHALA VOWEL SIGN AELA-PILLA..SINHALA VOWEL - {0x0DD5, 0x0, propertyUNASSIGNED}, // - {0x0DD6, 0x0, propertyPVALID}, // SINHALA VOWEL SIGN DIGA PAA-PILLA - {0x0DD7, 0x0, propertyUNASSIGNED}, // - {0x0DD8, 0x0DDF, propertyPVALID}, // SINHALA VOWEL SIGN GAETTA-PILLA..SINHALA VOW - {0x0DE0, 0x0DF1, propertyUNASSIGNED}, // .. - {0x0DF2, 0x0DF3, propertyPVALID}, // SINHALA VOWEL SIGN DIGA GAETTA-PILLA..SINHAL - {0x0DF4, 0x0, propertyDISALLOWED}, // SINHALA PUNCTUATION KUNDDALIYA - {0x0DF5, 0x0E00, propertyUNASSIGNED}, // .. - {0x0E01, 0x0E32, propertyPVALID}, // THAI CHARACTER KO KAI..THAI CHARACTER SARA A - {0x0E33, 0x0, propertyDISALLOWED}, // THAI CHARACTER SARA AM - {0x0E34, 0x0E3A, propertyPVALID}, // THAI CHARACTER SARA I..THAI CHARACTER PHINTH - {0x0E3B, 0x0E3E, propertyUNASSIGNED}, // .. - {0x0E3F, 0x0, propertyDISALLOWED}, // THAI CURRENCY SYMBOL BAHT - {0x0E40, 0x0E4E, propertyPVALID}, // THAI CHARACTER SARA E..THAI CHARACTER YAMAKK - {0x0E4F, 0x0, propertyDISALLOWED}, // THAI CHARACTER FONGMAN - {0x0E50, 0x0E59, propertyPVALID}, // THAI DIGIT ZERO..THAI DIGIT NINE - {0x0E5A, 0x0E5B, propertyDISALLOWED}, // THAI CHARACTER ANGKHANKHU..THAI CHARACTER KH - {0x0E5C, 0x0E80, propertyUNASSIGNED}, // .. - {0x0E81, 0x0E82, propertyPVALID}, // LAO LETTER KO..LAO LETTER KHO SUNG - {0x0E83, 0x0, propertyUNASSIGNED}, // - {0x0E84, 0x0, propertyPVALID}, // LAO LETTER KHO TAM - {0x0E85, 0x0E86, propertyUNASSIGNED}, // .. - {0x0E87, 0x0E88, propertyPVALID}, // LAO LETTER NGO..LAO LETTER CO - {0x0E89, 0x0, propertyUNASSIGNED}, // - {0x0E8A, 0x0, propertyPVALID}, // LAO LETTER SO TAM - {0x0E8B, 0x0E8C, propertyUNASSIGNED}, // .. - {0x0E8D, 0x0, propertyPVALID}, // LAO LETTER NYO - {0x0E8E, 0x0E93, propertyUNASSIGNED}, // .. 
- {0x0E94, 0x0E97, propertyPVALID}, // LAO LETTER DO..LAO LETTER THO TAM - {0x0E98, 0x0, propertyUNASSIGNED}, // - {0x0E99, 0x0E9F, propertyPVALID}, // LAO LETTER NO..LAO LETTER FO SUNG - {0x0EA0, 0x0, propertyUNASSIGNED}, // - {0x0EA1, 0x0EA3, propertyPVALID}, // LAO LETTER MO..LAO LETTER LO LING - {0x0EA4, 0x0, propertyUNASSIGNED}, // - {0x0EA5, 0x0, propertyPVALID}, // LAO LETTER LO LOOT - {0x0EA6, 0x0, propertyUNASSIGNED}, // - {0x0EA7, 0x0, propertyPVALID}, // LAO LETTER WO - {0x0EA8, 0x0EA9, propertyUNASSIGNED}, // .. - {0x0EAA, 0x0EAB, propertyPVALID}, // LAO LETTER SO SUNG..LAO LETTER HO SUNG - {0x0EAC, 0x0, propertyUNASSIGNED}, // - {0x0EAD, 0x0EB2, propertyPVALID}, // LAO LETTER O..LAO VOWEL SIGN AA - {0x0EB3, 0x0, propertyDISALLOWED}, // LAO VOWEL SIGN AM - {0x0EB4, 0x0EB9, propertyPVALID}, // LAO VOWEL SIGN I..LAO VOWEL SIGN UU - {0x0EBA, 0x0, propertyUNASSIGNED}, // - {0x0EBB, 0x0EBD, propertyPVALID}, // LAO VOWEL SIGN MAI KON..LAO SEMIVOWEL SIGN N - {0x0EBE, 0x0EBF, propertyUNASSIGNED}, // .. - {0x0EC0, 0x0EC4, propertyPVALID}, // LAO VOWEL SIGN E..LAO VOWEL SIGN AI - {0x0EC5, 0x0, propertyUNASSIGNED}, // - {0x0EC6, 0x0, propertyPVALID}, // LAO KO LA - {0x0EC7, 0x0, propertyUNASSIGNED}, // - {0x0EC8, 0x0ECD, propertyPVALID}, // LAO TONE MAI EK..LAO NIGGAHITA - {0x0ECE, 0x0ECF, propertyUNASSIGNED}, // .. - {0x0ED0, 0x0ED9, propertyPVALID}, // LAO DIGIT ZERO..LAO DIGIT NINE - {0x0EDA, 0x0EDB, propertyUNASSIGNED}, // .. - {0x0EDC, 0x0EDD, propertyDISALLOWED}, // LAO HO NO..LAO HO MO - {0x0EDE, 0x0EFF, propertyUNASSIGNED}, // .. - {0x0F00, 0x0, propertyPVALID}, // TIBETAN SYLLABLE OM - {0x0F01, 0x0F0A, propertyDISALLOWED}, // TIBETAN MARK GTER YIG MGO TRUNCATED A..TIBET - {0x0F0B, 0x0, propertyPVALID}, // TIBETAN MARK INTERSYLLABIC TSHEG - {0x0F0C, 0x0F17, propertyDISALLOWED}, // TIBETAN MARK DELIMITER TSHEG BSTAR..TIBETAN - {0x0F18, 0x0F19, propertyPVALID}, // TIBETAN ASTROLOGICAL SIGN -KHYUD PA..TIBETAN - {0x0F1A, 0x0F1F, propertyDISALLOWED}, // TIBETAN SIGN RDEL DKAR GCIG..TIBETAN SIGN RD - {0x0F20, 0x0F29, propertyPVALID}, // TIBETAN DIGIT ZERO..TIBETAN DIGIT NINE - {0x0F2A, 0x0F34, propertyDISALLOWED}, // TIBETAN DIGIT HALF ONE..TIBETAN MARK BSDUS R - {0x0F35, 0x0, propertyPVALID}, // TIBETAN MARK NGAS BZUNG NYI ZLA - {0x0F36, 0x0, propertyDISALLOWED}, // TIBETAN MARK CARET -DZUD RTAGS BZHI MIG CAN - {0x0F37, 0x0, propertyPVALID}, // TIBETAN MARK NGAS BZUNG SGOR RTAGS - {0x0F38, 0x0, propertyDISALLOWED}, // TIBETAN MARK CHE MGO - {0x0F39, 0x0, propertyPVALID}, // TIBETAN MARK TSA -PHRU - {0x0F3A, 0x0F3D, propertyDISALLOWED}, // TIBETAN MARK GUG RTAGS GYON..TIBETAN MARK AN - {0x0F3E, 0x0F42, propertyPVALID}, // TIBETAN SIGN YAR TSHES..TIBETAN LETTER GA - {0x0F43, 0x0, propertyDISALLOWED}, // TIBETAN LETTER GHA - {0x0F44, 0x0F47, propertyPVALID}, // TIBETAN LETTER NGA..TIBETAN LETTER JA - {0x0F48, 0x0, propertyUNASSIGNED}, // - {0x0F49, 0x0F4C, propertyPVALID}, // TIBETAN LETTER NYA..TIBETAN LETTER DDA - {0x0F4D, 0x0, propertyDISALLOWED}, // TIBETAN LETTER DDHA - {0x0F4E, 0x0F51, propertyPVALID}, // TIBETAN LETTER NNA..TIBETAN LETTER DA - {0x0F52, 0x0, propertyDISALLOWED}, // TIBETAN LETTER DHA - {0x0F53, 0x0F56, propertyPVALID}, // TIBETAN LETTER NA..TIBETAN LETTER BA - {0x0F57, 0x0, propertyDISALLOWED}, // TIBETAN LETTER BHA - {0x0F58, 0x0F5B, propertyPVALID}, // TIBETAN LETTER MA..TIBETAN LETTER DZA - {0x0F5C, 0x0, propertyDISALLOWED}, // TIBETAN LETTER DZHA - {0x0F5D, 0x0F68, propertyPVALID}, // TIBETAN LETTER WA..TIBETAN LETTER A - {0x0F69, 0x0, propertyDISALLOWED}, // 
TIBETAN LETTER KSSA - {0x0F6A, 0x0F6C, propertyPVALID}, // TIBETAN LETTER FIXED-FORM RA..TIBETAN LETTER - {0x0F6D, 0x0F70, propertyUNASSIGNED}, // .. - {0x0F71, 0x0F72, propertyPVALID}, // TIBETAN VOWEL SIGN AA..TIBETAN VOWEL SIGN I - {0x0F73, 0x0, propertyDISALLOWED}, // TIBETAN VOWEL SIGN II - {0x0F74, 0x0, propertyPVALID}, // TIBETAN VOWEL SIGN U - {0x0F75, 0x0F79, propertyDISALLOWED}, // TIBETAN VOWEL SIGN UU..TIBETAN VOWEL SIGN VO - {0x0F7A, 0x0F80, propertyPVALID}, // TIBETAN VOWEL SIGN E..TIBETAN VOWEL SIGN REV - {0x0F81, 0x0, propertyDISALLOWED}, // TIBETAN VOWEL SIGN REVERSED II - {0x0F82, 0x0F84, propertyPVALID}, // TIBETAN SIGN NYI ZLA NAA DA..TIBETAN MARK HA - {0x0F85, 0x0, propertyDISALLOWED}, // TIBETAN MARK PALUTA - {0x0F86, 0x0F8B, propertyPVALID}, // TIBETAN SIGN LCI RTAGS..TIBETAN SIGN GRU MED - {0x0F8C, 0x0F8F, propertyUNASSIGNED}, // .. - {0x0F90, 0x0F92, propertyPVALID}, // TIBETAN SUBJOINED LETTER KA..TIBETAN SUBJOIN - {0x0F93, 0x0, propertyDISALLOWED}, // TIBETAN SUBJOINED LETTER GHA - {0x0F94, 0x0F97, propertyPVALID}, // TIBETAN SUBJOINED LETTER NGA..TIBETAN SUBJOI - {0x0F98, 0x0, propertyUNASSIGNED}, // - {0x0F99, 0x0F9C, propertyPVALID}, // TIBETAN SUBJOINED LETTER NYA..TIBETAN SUBJOI - {0x0F9D, 0x0, propertyDISALLOWED}, // TIBETAN SUBJOINED LETTER DDHA - {0x0F9E, 0x0FA1, propertyPVALID}, // TIBETAN SUBJOINED LETTER NNA..TIBETAN SUBJOI - {0x0FA2, 0x0, propertyDISALLOWED}, // TIBETAN SUBJOINED LETTER DHA - {0x0FA3, 0x0FA6, propertyPVALID}, // TIBETAN SUBJOINED LETTER NA..TIBETAN SUBJOIN - {0x0FA7, 0x0, propertyDISALLOWED}, // TIBETAN SUBJOINED LETTER BHA - {0x0FA8, 0x0FAB, propertyPVALID}, // TIBETAN SUBJOINED LETTER MA..TIBETAN SUBJOIN - {0x0FAC, 0x0, propertyDISALLOWED}, // TIBETAN SUBJOINED LETTER DZHA - {0x0FAD, 0x0FB8, propertyPVALID}, // TIBETAN SUBJOINED LETTER WA..TIBETAN SUBJOIN - {0x0FB9, 0x0, propertyDISALLOWED}, // TIBETAN SUBJOINED LETTER KSSA - {0x0FBA, 0x0FBC, propertyPVALID}, // TIBETAN SUBJOINED LETTER FIXED-FORM WA..TIBE - {0x0FBD, 0x0, propertyUNASSIGNED}, // - {0x0FBE, 0x0FC5, propertyDISALLOWED}, // TIBETAN KU RU KHA..TIBETAN SYMBOL RDO RJE - {0x0FC6, 0x0, propertyPVALID}, // TIBETAN SYMBOL PADMA GDAN - {0x0FC7, 0x0FCC, propertyDISALLOWED}, // TIBETAN SYMBOL RDO RJE RGYA GRAM..TIBETAN SY - {0x0FCD, 0x0, propertyUNASSIGNED}, // - {0x0FCE, 0x0FD8, propertyDISALLOWED}, // TIBETAN SIGN RDEL NAG RDEL DKAR..LEFT-FACING - {0x0FD9, 0x0FFF, propertyUNASSIGNED}, // .. - {0x1000, 0x1049, propertyPVALID}, // MYANMAR LETTER KA..MYANMAR DIGIT NINE - {0x104A, 0x104F, propertyDISALLOWED}, // MYANMAR SIGN LITTLE SECTION..MYANMAR SYMBOL - {0x1050, 0x109D, propertyPVALID}, // MYANMAR LETTER SHA..MYANMAR VOWEL SIGN AITON - {0x109E, 0x10C5, propertyDISALLOWED}, // MYANMAR SYMBOL SHAN ONE..GEORGIAN CAPITAL LE - {0x10C6, 0x10CF, propertyUNASSIGNED}, // .. - {0x10D0, 0x10FA, propertyPVALID}, // GEORGIAN LETTER AN..GEORGIAN LETTER AIN - {0x10FB, 0x10FC, propertyDISALLOWED}, // GEORGIAN PARAGRAPH SEPARATOR..MODIFIER LETTE - {0x10FD, 0x10FF, propertyUNASSIGNED}, // .. - {0x1100, 0x11FF, propertyDISALLOWED}, // HANGUL CHOSEONG KIYEOK..HANGUL JONGSEONG SSA - {0x1200, 0x1248, propertyPVALID}, // ETHIOPIC SYLLABLE HA..ETHIOPIC SYLLABLE QWA - {0x1249, 0x0, propertyUNASSIGNED}, // - {0x124A, 0x124D, propertyPVALID}, // ETHIOPIC SYLLABLE QWI..ETHIOPIC SYLLABLE QWE - {0x124E, 0x124F, propertyUNASSIGNED}, // .. 
- {0x1250, 0x1256, propertyPVALID}, // ETHIOPIC SYLLABLE QHA..ETHIOPIC SYLLABLE QHO - {0x1257, 0x0, propertyUNASSIGNED}, // - {0x1258, 0x0, propertyPVALID}, // ETHIOPIC SYLLABLE QHWA - {0x1259, 0x0, propertyUNASSIGNED}, // - {0x125A, 0x125D, propertyPVALID}, // ETHIOPIC SYLLABLE QHWI..ETHIOPIC SYLLABLE QH - {0x125E, 0x125F, propertyUNASSIGNED}, // .. - {0x1260, 0x1288, propertyPVALID}, // ETHIOPIC SYLLABLE BA..ETHIOPIC SYLLABLE XWA - {0x1289, 0x0, propertyUNASSIGNED}, // - {0x128A, 0x128D, propertyPVALID}, // ETHIOPIC SYLLABLE XWI..ETHIOPIC SYLLABLE XWE - {0x128E, 0x128F, propertyUNASSIGNED}, // .. - {0x1290, 0x12B0, propertyPVALID}, // ETHIOPIC SYLLABLE NA..ETHIOPIC SYLLABLE KWA - {0x12B1, 0x0, propertyUNASSIGNED}, // - {0x12B2, 0x12B5, propertyPVALID}, // ETHIOPIC SYLLABLE KWI..ETHIOPIC SYLLABLE KWE - {0x12B6, 0x12B7, propertyUNASSIGNED}, // .. - {0x12B8, 0x12BE, propertyPVALID}, // ETHIOPIC SYLLABLE KXA..ETHIOPIC SYLLABLE KXO - {0x12BF, 0x0, propertyUNASSIGNED}, // - {0x12C0, 0x0, propertyPVALID}, // ETHIOPIC SYLLABLE KXWA - {0x12C1, 0x0, propertyUNASSIGNED}, // - {0x12C2, 0x12C5, propertyPVALID}, // ETHIOPIC SYLLABLE KXWI..ETHIOPIC SYLLABLE KX - {0x12C6, 0x12C7, propertyUNASSIGNED}, // .. - {0x12C8, 0x12D6, propertyPVALID}, // ETHIOPIC SYLLABLE WA..ETHIOPIC SYLLABLE PHAR - {0x12D7, 0x0, propertyUNASSIGNED}, // - {0x12D8, 0x1310, propertyPVALID}, // ETHIOPIC SYLLABLE ZA..ETHIOPIC SYLLABLE GWA - {0x1311, 0x0, propertyUNASSIGNED}, // - {0x1312, 0x1315, propertyPVALID}, // ETHIOPIC SYLLABLE GWI..ETHIOPIC SYLLABLE GWE - {0x1316, 0x1317, propertyUNASSIGNED}, // .. - {0x1318, 0x135A, propertyPVALID}, // ETHIOPIC SYLLABLE GGA..ETHIOPIC SYLLABLE FYA - {0x135B, 0x135E, propertyUNASSIGNED}, // .. - {0x135F, 0x0, propertyPVALID}, // ETHIOPIC COMBINING GEMINATION MARK - {0x1360, 0x137C, propertyDISALLOWED}, // ETHIOPIC SECTION MARK..ETHIOPIC NUMBER TEN T - {0x137D, 0x137F, propertyUNASSIGNED}, // .. - {0x1380, 0x138F, propertyPVALID}, // ETHIOPIC SYLLABLE SEBATBEIT MWA..ETHIOPIC SY - {0x1390, 0x1399, propertyDISALLOWED}, // ETHIOPIC TONAL MARK YIZET..ETHIOPIC TONAL MA - {0x139A, 0x139F, propertyUNASSIGNED}, // .. - {0x13A0, 0x13F4, propertyPVALID}, // CHEROKEE LETTER A..CHEROKEE LETTER YV - {0x13F5, 0x13FF, propertyUNASSIGNED}, // .. - {0x1400, 0x0, propertyDISALLOWED}, // CANADIAN SYLLABICS HYPHEN - {0x1401, 0x166C, propertyPVALID}, // CANADIAN SYLLABICS E..CANADIAN SYLLABICS CAR - {0x166D, 0x166E, propertyDISALLOWED}, // CANADIAN SYLLABICS CHI SIGN..CANADIAN SYLLAB - {0x166F, 0x167F, propertyPVALID}, // CANADIAN SYLLABICS QAI..CANADIAN SYLLABICS B - {0x1680, 0x0, propertyDISALLOWED}, // OGHAM SPACE MARK - {0x1681, 0x169A, propertyPVALID}, // OGHAM LETTER BEITH..OGHAM LETTER PEITH - {0x169B, 0x169C, propertyDISALLOWED}, // OGHAM FEATHER MARK..OGHAM REVERSED FEATHER M - {0x169D, 0x169F, propertyUNASSIGNED}, // .. - {0x16A0, 0x16EA, propertyPVALID}, // RUNIC LETTER FEHU FEOH FE F..RUNIC LETTER X - {0x16EB, 0x16F0, propertyDISALLOWED}, // RUNIC SINGLE PUNCTUATION..RUNIC BELGTHOR SYM - {0x16F1, 0x16FF, propertyUNASSIGNED}, // .. - {0x1700, 0x170C, propertyPVALID}, // TAGALOG LETTER A..TAGALOG LETTER YA - {0x170D, 0x0, propertyUNASSIGNED}, // - {0x170E, 0x1714, propertyPVALID}, // TAGALOG LETTER LA..TAGALOG SIGN VIRAMA - {0x1715, 0x171F, propertyUNASSIGNED}, // .. - {0x1720, 0x1734, propertyPVALID}, // HANUNOO LETTER A..HANUNOO SIGN PAMUDPOD - {0x1735, 0x1736, propertyDISALLOWED}, // PHILIPPINE SINGLE PUNCTUATION..PHILIPPINE DO - {0x1737, 0x173F, propertyUNASSIGNED}, // .. 
- {0x1740, 0x1753, propertyPVALID}, // BUHID LETTER A..BUHID VOWEL SIGN U - {0x1754, 0x175F, propertyUNASSIGNED}, // .. - {0x1760, 0x176C, propertyPVALID}, // TAGBANWA LETTER A..TAGBANWA LETTER YA - {0x176D, 0x0, propertyUNASSIGNED}, // - {0x176E, 0x1770, propertyPVALID}, // TAGBANWA LETTER LA..TAGBANWA LETTER SA - {0x1771, 0x0, propertyUNASSIGNED}, // - {0x1772, 0x1773, propertyPVALID}, // TAGBANWA VOWEL SIGN I..TAGBANWA VOWEL SIGN U - {0x1774, 0x177F, propertyUNASSIGNED}, // .. - {0x1780, 0x17B3, propertyPVALID}, // KHMER LETTER KA..KHMER INDEPENDENT VOWEL QAU - {0x17B4, 0x17B5, propertyDISALLOWED}, // KHMER VOWEL INHERENT AQ..KHMER VOWEL INHEREN - {0x17B6, 0x17D3, propertyPVALID}, // KHMER VOWEL SIGN AA..KHMER SIGN BATHAMASAT - {0x17D4, 0x17D6, propertyDISALLOWED}, // KHMER SIGN KHAN..KHMER SIGN CAMNUC PII KUUH - {0x17D7, 0x0, propertyPVALID}, // KHMER SIGN LEK TOO - {0x17D8, 0x17DB, propertyDISALLOWED}, // KHMER SIGN BEYYAL..KHMER CURRENCY SYMBOL RIE - {0x17DC, 0x17DD, propertyPVALID}, // KHMER SIGN AVAKRAHASANYA..KHMER SIGN ATTHACA - {0x17DE, 0x17DF, propertyUNASSIGNED}, // .. - {0x17E0, 0x17E9, propertyPVALID}, // KHMER DIGIT ZERO..KHMER DIGIT NINE - {0x17EA, 0x17EF, propertyUNASSIGNED}, // .. - {0x17F0, 0x17F9, propertyDISALLOWED}, // KHMER SYMBOL LEK ATTAK SON..KHMER SYMBOL LEK - {0x17FA, 0x17FF, propertyUNASSIGNED}, // .. - {0x1800, 0x180E, propertyDISALLOWED}, // MONGOLIAN BIRGA..MONGOLIAN VOWEL SEPARATOR - {0x180F, 0x0, propertyUNASSIGNED}, // - {0x1810, 0x1819, propertyPVALID}, // MONGOLIAN DIGIT ZERO..MONGOLIAN DIGIT NINE - {0x181A, 0x181F, propertyUNASSIGNED}, // .. - {0x1820, 0x1877, propertyPVALID}, // MONGOLIAN LETTER A..MONGOLIAN LETTER MANCHU - {0x1878, 0x187F, propertyUNASSIGNED}, // .. - {0x1880, 0x18AA, propertyPVALID}, // MONGOLIAN LETTER ALI GALI ANUSVARA ONE..MONG - {0x18AB, 0x18AF, propertyUNASSIGNED}, // .. - {0x18B0, 0x18F5, propertyPVALID}, // CANADIAN SYLLABICS OY..CANADIAN SYLLABICS CA - {0x18F6, 0x18FF, propertyUNASSIGNED}, // .. - {0x1900, 0x191C, propertyPVALID}, // LIMBU VOWEL-CARRIER LETTER..LIMBU LETTER HA - {0x191D, 0x191F, propertyUNASSIGNED}, // .. - {0x1920, 0x192B, propertyPVALID}, // LIMBU VOWEL SIGN A..LIMBU SUBJOINED LETTER W - {0x192C, 0x192F, propertyUNASSIGNED}, // .. - {0x1930, 0x193B, propertyPVALID}, // LIMBU SMALL LETTER KA..LIMBU SIGN SA-I - {0x193C, 0x193F, propertyUNASSIGNED}, // .. - {0x1940, 0x0, propertyDISALLOWED}, // LIMBU SIGN LOO - {0x1941, 0x1943, propertyUNASSIGNED}, // .. - {0x1944, 0x1945, propertyDISALLOWED}, // LIMBU EXCLAMATION MARK..LIMBU QUESTION MARK - {0x1946, 0x196D, propertyPVALID}, // LIMBU DIGIT ZERO..TAI LE LETTER AI - {0x196E, 0x196F, propertyUNASSIGNED}, // .. - {0x1970, 0x1974, propertyPVALID}, // TAI LE LETTER TONE-2..TAI LE LETTER TONE-6 - {0x1975, 0x197F, propertyUNASSIGNED}, // .. - {0x1980, 0x19AB, propertyPVALID}, // NEW TAI LUE LETTER HIGH QA..NEW TAI LUE LETT - {0x19AC, 0x19AF, propertyUNASSIGNED}, // .. - {0x19B0, 0x19C9, propertyPVALID}, // NEW TAI LUE VOWEL SIGN VOWEL SHORTENER..NEW - {0x19CA, 0x19CF, propertyUNASSIGNED}, // .. - {0x19D0, 0x19DA, propertyPVALID}, // NEW TAI LUE DIGIT ZERO..NEW TAI LUE THAM DIG - {0x19DB, 0x19DD, propertyUNASSIGNED}, // .. - {0x19DE, 0x19FF, propertyDISALLOWED}, // NEW TAI LUE SIGN LAE..KHMER SYMBOL DAP-PRAM - {0x1A00, 0x1A1B, propertyPVALID}, // BUGINESE LETTER KA..BUGINESE VOWEL SIGN AE - {0x1A1C, 0x1A1D, propertyUNASSIGNED}, // .. 
- {0x1A1E, 0x1A1F, propertyDISALLOWED}, // BUGINESE PALLAWA..BUGINESE END OF SECTION - {0x1A20, 0x1A5E, propertyPVALID}, // TAI THAM LETTER HIGH KA..TAI THAM CONSONANT - {0x1A5F, 0x0, propertyUNASSIGNED}, // - {0x1A60, 0x1A7C, propertyPVALID}, // TAI THAM SIGN SAKOT..TAI THAM SIGN KHUEN-LUE - {0x1A7D, 0x1A7E, propertyUNASSIGNED}, // .. - {0x1A7F, 0x1A89, propertyPVALID}, // TAI THAM COMBINING CRYPTOGRAMMIC DOT..TAI TH - {0x1A8A, 0x1A8F, propertyUNASSIGNED}, // .. - {0x1A90, 0x1A99, propertyPVALID}, // TAI THAM THAM DIGIT ZERO..TAI THAM THAM DIGI - {0x1A9A, 0x1A9F, propertyUNASSIGNED}, // .. - {0x1AA0, 0x1AA6, propertyDISALLOWED}, // TAI THAM SIGN WIANG..TAI THAM SIGN REVERSED - {0x1AA7, 0x0, propertyPVALID}, // TAI THAM SIGN MAI YAMOK - {0x1AA8, 0x1AAD, propertyDISALLOWED}, // TAI THAM SIGN KAAN..TAI THAM SIGN CAANG - {0x1AAE, 0x1AFF, propertyUNASSIGNED}, // .. - {0x1B00, 0x1B4B, propertyPVALID}, // BALINESE SIGN ULU RICEM..BALINESE LETTER ASY - {0x1B4C, 0x1B4F, propertyUNASSIGNED}, // .. - {0x1B50, 0x1B59, propertyPVALID}, // BALINESE DIGIT ZERO..BALINESE DIGIT NINE - {0x1B5A, 0x1B6A, propertyDISALLOWED}, // BALINESE PANTI..BALINESE MUSICAL SYMBOL DANG - {0x1B6B, 0x1B73, propertyPVALID}, // BALINESE MUSICAL SYMBOL COMBINING TEGEH..BAL - {0x1B74, 0x1B7C, propertyDISALLOWED}, // BALINESE MUSICAL SYMBOL RIGHT-HAND OPEN DUG. - {0x1B7D, 0x1B7F, propertyUNASSIGNED}, // .. - {0x1B80, 0x1BAA, propertyPVALID}, // SUNDANESE SIGN PANYECEK..SUNDANESE SIGN PAMA - {0x1BAB, 0x1BAD, propertyUNASSIGNED}, // .. - {0x1BAE, 0x1BB9, propertyPVALID}, // SUNDANESE LETTER KHA..SUNDANESE DIGIT NINE - {0x1BBA, 0x1BFF, propertyUNASSIGNED}, // .. - {0x1C00, 0x1C37, propertyPVALID}, // LEPCHA LETTER KA..LEPCHA SIGN NUKTA - {0x1C38, 0x1C3A, propertyUNASSIGNED}, // .. - {0x1C3B, 0x1C3F, propertyDISALLOWED}, // LEPCHA PUNCTUATION TA-ROL..LEPCHA PUNCTUATIO - {0x1C40, 0x1C49, propertyPVALID}, // LEPCHA DIGIT ZERO..LEPCHA DIGIT NINE - {0x1C4A, 0x1C4C, propertyUNASSIGNED}, // .. - {0x1C4D, 0x1C7D, propertyPVALID}, // LEPCHA LETTER TTA..OL CHIKI AHAD - {0x1C7E, 0x1C7F, propertyDISALLOWED}, // OL CHIKI PUNCTUATION MUCAAD..OL CHIKI PUNCTU - {0x1C80, 0x1CCF, propertyUNASSIGNED}, // .. - {0x1CD0, 0x1CD2, propertyPVALID}, // VEDIC TONE KARSHANA..VEDIC TONE PRENKHA - {0x1CD3, 0x0, propertyDISALLOWED}, // VEDIC SIGN NIHSHVASA - {0x1CD4, 0x1CF2, propertyPVALID}, // VEDIC SIGN YAJURVEDIC MIDLINE SVARITA..VEDIC - {0x1CF3, 0x1CFF, propertyUNASSIGNED}, // .. - {0x1D00, 0x1D2B, propertyPVALID}, // LATIN LETTER SMALL CAPITAL A..CYRILLIC LETTE - {0x1D2C, 0x1D2E, propertyDISALLOWED}, // MODIFIER LETTER CAPITAL A..MODIFIER LETTER C - {0x1D2F, 0x0, propertyPVALID}, // MODIFIER LETTER CAPITAL BARRED B - {0x1D30, 0x1D3A, propertyDISALLOWED}, // MODIFIER LETTER CAPITAL D..MODIFIER LETTER C - {0x1D3B, 0x0, propertyPVALID}, // MODIFIER LETTER CAPITAL REVERSED N - {0x1D3C, 0x1D4D, propertyDISALLOWED}, // MODIFIER LETTER CAPITAL O..MODIFIER LETTER S - {0x1D4E, 0x0, propertyPVALID}, // MODIFIER LETTER SMALL TURNED I - {0x1D4F, 0x1D6A, propertyDISALLOWED}, // MODIFIER LETTER SMALL K..GREEK SUBSCRIPT SMA - {0x1D6B, 0x1D77, propertyPVALID}, // LATIN SMALL LETTER UE..LATIN SMALL LETTER TU - {0x1D78, 0x0, propertyDISALLOWED}, // MODIFIER LETTER CYRILLIC EN - {0x1D79, 0x1D9A, propertyPVALID}, // LATIN SMALL LETTER INSULAR G..LATIN SMALL LE - {0x1D9B, 0x1DBF, propertyDISALLOWED}, // MODIFIER LETTER SMALL TURNED ALPHA..MODIFIER - {0x1DC0, 0x1DE6, propertyPVALID}, // COMBINING DOTTED GRAVE ACCENT..COMBINING LAT - {0x1DE7, 0x1DFC, propertyUNASSIGNED}, // .. 
- {0x1DFD, 0x1DFF, propertyPVALID}, // COMBINING ALMOST EQUAL TO BELOW..COMBINING R - {0x1E00, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH RING BELOW - {0x1E01, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH RING BELOW - {0x1E02, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER B WITH DOT ABOVE - {0x1E03, 0x0, propertyPVALID}, // LATIN SMALL LETTER B WITH DOT ABOVE - {0x1E04, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER B WITH DOT BELOW - {0x1E05, 0x0, propertyPVALID}, // LATIN SMALL LETTER B WITH DOT BELOW - {0x1E06, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER B WITH LINE BELOW - {0x1E07, 0x0, propertyPVALID}, // LATIN SMALL LETTER B WITH LINE BELOW - {0x1E08, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER C WITH CEDILLA AND ACUT - {0x1E09, 0x0, propertyPVALID}, // LATIN SMALL LETTER C WITH CEDILLA AND ACUTE - {0x1E0A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER D WITH DOT ABOVE - {0x1E0B, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH DOT ABOVE - {0x1E0C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER D WITH DOT BELOW - {0x1E0D, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH DOT BELOW - {0x1E0E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER D WITH LINE BELOW - {0x1E0F, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH LINE BELOW - {0x1E10, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER D WITH CEDILLA - {0x1E11, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH CEDILLA - {0x1E12, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER D WITH CIRCUMFLEX BELOW - {0x1E13, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH CIRCUMFLEX BELOW - {0x1E14, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH MACRON AND GRAVE - {0x1E15, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH MACRON AND GRAVE - {0x1E16, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH MACRON AND ACUTE - {0x1E17, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH MACRON AND ACUTE - {0x1E18, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CIRCUMFLEX BELOW - {0x1E19, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CIRCUMFLEX BELOW - {0x1E1A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH TILDE BELOW - {0x1E1B, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH TILDE BELOW - {0x1E1C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CEDILLA AND BREV - {0x1E1D, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CEDILLA AND BREVE - {0x1E1E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER F WITH DOT ABOVE - {0x1E1F, 0x0, propertyPVALID}, // LATIN SMALL LETTER F WITH DOT ABOVE - {0x1E20, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH MACRON - {0x1E21, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH MACRON - {0x1E22, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH DOT ABOVE - {0x1E23, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH DOT ABOVE - {0x1E24, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH DOT BELOW - {0x1E25, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH DOT BELOW - {0x1E26, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH DIAERESIS - {0x1E27, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH DIAERESIS - {0x1E28, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH CEDILLA - {0x1E29, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH CEDILLA - {0x1E2A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH BREVE BELOW - {0x1E2B, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH BREVE BELOW - {0x1E2C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH TILDE BELOW - 
{0x1E2D, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH TILDE BELOW - {0x1E2E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH DIAERESIS AND AC - {0x1E2F, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH DIAERESIS AND ACUT - {0x1E30, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH ACUTE - {0x1E31, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH ACUTE - {0x1E32, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH DOT BELOW - {0x1E33, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH DOT BELOW - {0x1E34, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH LINE BELOW - {0x1E35, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH LINE BELOW - {0x1E36, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH DOT BELOW - {0x1E37, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH DOT BELOW - {0x1E38, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH DOT BELOW AND MA - {0x1E39, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH DOT BELOW AND MACR - {0x1E3A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH LINE BELOW - {0x1E3B, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH LINE BELOW - {0x1E3C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH CIRCUMFLEX BELOW - {0x1E3D, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH CIRCUMFLEX BELOW - {0x1E3E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER M WITH ACUTE - {0x1E3F, 0x0, propertyPVALID}, // LATIN SMALL LETTER M WITH ACUTE - {0x1E40, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER M WITH DOT ABOVE - {0x1E41, 0x0, propertyPVALID}, // LATIN SMALL LETTER M WITH DOT ABOVE - {0x1E42, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER M WITH DOT BELOW - {0x1E43, 0x0, propertyPVALID}, // LATIN SMALL LETTER M WITH DOT BELOW - {0x1E44, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH DOT ABOVE - {0x1E45, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH DOT ABOVE - {0x1E46, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH DOT BELOW - {0x1E47, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH DOT BELOW - {0x1E48, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH LINE BELOW - {0x1E49, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH LINE BELOW - {0x1E4A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH CIRCUMFLEX BELOW - {0x1E4B, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH CIRCUMFLEX BELOW - {0x1E4C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH TILDE AND ACUTE - {0x1E4D, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH TILDE AND ACUTE - {0x1E4E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH TILDE AND DIAERE - {0x1E4F, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH TILDE AND DIAERESI - {0x1E50, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH MACRON AND GRAVE - {0x1E51, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH MACRON AND GRAVE - {0x1E52, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH MACRON AND ACUTE - {0x1E53, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH MACRON AND ACUTE - {0x1E54, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER P WITH ACUTE - {0x1E55, 0x0, propertyPVALID}, // LATIN SMALL LETTER P WITH ACUTE - {0x1E56, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER P WITH DOT ABOVE - {0x1E57, 0x0, propertyPVALID}, // LATIN SMALL LETTER P WITH DOT ABOVE - {0x1E58, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH DOT ABOVE - {0x1E59, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH DOT ABOVE - {0x1E5A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH DOT BELOW - 
{0x1E5B, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH DOT BELOW - {0x1E5C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH DOT BELOW AND MA - {0x1E5D, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH DOT BELOW AND MACR - {0x1E5E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH LINE BELOW - {0x1E5F, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH LINE BELOW - {0x1E60, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH DOT ABOVE - {0x1E61, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH DOT ABOVE - {0x1E62, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH DOT BELOW - {0x1E63, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH DOT BELOW - {0x1E64, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH ACUTE AND DOT AB - {0x1E65, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH ACUTE AND DOT ABOV - {0x1E66, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH CARON AND DOT AB - {0x1E67, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH CARON AND DOT ABOV - {0x1E68, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH DOT BELOW AND DO - {0x1E69, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH DOT BELOW AND DOT - {0x1E6A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH DOT ABOVE - {0x1E6B, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH DOT ABOVE - {0x1E6C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH DOT BELOW - {0x1E6D, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH DOT BELOW - {0x1E6E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH LINE BELOW - {0x1E6F, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH LINE BELOW - {0x1E70, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH CIRCUMFLEX BELOW - {0x1E71, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH CIRCUMFLEX BELOW - {0x1E72, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DIAERESIS BELOW - {0x1E73, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH DIAERESIS BELOW - {0x1E74, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH TILDE BELOW - {0x1E75, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH TILDE BELOW - {0x1E76, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH CIRCUMFLEX BELOW - {0x1E77, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH CIRCUMFLEX BELOW - {0x1E78, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH TILDE AND ACUTE - {0x1E79, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH TILDE AND ACUTE - {0x1E7A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH MACRON AND DIAER - {0x1E7B, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH MACRON AND DIAERES - {0x1E7C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER V WITH TILDE - {0x1E7D, 0x0, propertyPVALID}, // LATIN SMALL LETTER V WITH TILDE - {0x1E7E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER V WITH DOT BELOW - {0x1E7F, 0x0, propertyPVALID}, // LATIN SMALL LETTER V WITH DOT BELOW - {0x1E80, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH GRAVE - {0x1E81, 0x0, propertyPVALID}, // LATIN SMALL LETTER W WITH GRAVE - {0x1E82, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH ACUTE - {0x1E83, 0x0, propertyPVALID}, // LATIN SMALL LETTER W WITH ACUTE - {0x1E84, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH DIAERESIS - {0x1E85, 0x0, propertyPVALID}, // LATIN SMALL LETTER W WITH DIAERESIS - {0x1E86, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH DOT ABOVE - {0x1E87, 0x0, propertyPVALID}, // LATIN SMALL LETTER W WITH DOT ABOVE - {0x1E88, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH 
DOT BELOW - {0x1E89, 0x0, propertyPVALID}, // LATIN SMALL LETTER W WITH DOT BELOW - {0x1E8A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER X WITH DOT ABOVE - {0x1E8B, 0x0, propertyPVALID}, // LATIN SMALL LETTER X WITH DOT ABOVE - {0x1E8C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER X WITH DIAERESIS - {0x1E8D, 0x0, propertyPVALID}, // LATIN SMALL LETTER X WITH DIAERESIS - {0x1E8E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH DOT ABOVE - {0x1E8F, 0x0, propertyPVALID}, // LATIN SMALL LETTER Y WITH DOT ABOVE - {0x1E90, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH CIRCUMFLEX - {0x1E91, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH CIRCUMFLEX - {0x1E92, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH DOT BELOW - {0x1E93, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH DOT BELOW - {0x1E94, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH LINE BELOW - {0x1E95, 0x1E99, propertyPVALID}, // LATIN SMALL LETTER Z WITH LINE BELOW..LATIN - {0x1E9A, 0x1E9B, propertyDISALLOWED}, // LATIN SMALL LETTER A WITH RIGHT HALF RING..L - {0x1E9C, 0x1E9D, propertyPVALID}, // LATIN SMALL LETTER LONG S WITH DIAGONAL STRO - {0x1E9E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER SHARP S - {0x1E9F, 0x0, propertyPVALID}, // LATIN SMALL LETTER DELTA - {0x1EA0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH DOT BELOW - {0x1EA1, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH DOT BELOW - {0x1EA2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH HOOK ABOVE - {0x1EA3, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH HOOK ABOVE - {0x1EA4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND A - {0x1EA5, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH CIRCUMFLEX AND ACU - {0x1EA6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND G - {0x1EA7, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH CIRCUMFLEX AND GRA - {0x1EA8, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND H - {0x1EA9, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH CIRCUMFLEX AND HOO - {0x1EAA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND T - {0x1EAB, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH CIRCUMFLEX AND TIL - {0x1EAC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND D - {0x1EAD, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH CIRCUMFLEX AND DOT - {0x1EAE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH BREVE AND ACUTE - {0x1EAF, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH BREVE AND ACUTE - {0x1EB0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH BREVE AND GRAVE - {0x1EB1, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH BREVE AND GRAVE - {0x1EB2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH BREVE AND HOOK A - {0x1EB3, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH BREVE AND HOOK ABO - {0x1EB4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH BREVE AND TILDE - {0x1EB5, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH BREVE AND TILDE - {0x1EB6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH BREVE AND DOT BE - {0x1EB7, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH BREVE AND DOT BELO - {0x1EB8, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH DOT BELOW - {0x1EB9, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH DOT BELOW - {0x1EBA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH HOOK ABOVE - {0x1EBB, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH HOOK ABOVE - 
{0x1EBC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH TILDE - {0x1EBD, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH TILDE - {0x1EBE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND A - {0x1EBF, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CIRCUMFLEX AND ACU - {0x1EC0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND G - {0x1EC1, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CIRCUMFLEX AND GRA - {0x1EC2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND H - {0x1EC3, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CIRCUMFLEX AND HOO - {0x1EC4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND T - {0x1EC5, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CIRCUMFLEX AND TIL - {0x1EC6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND D - {0x1EC7, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CIRCUMFLEX AND DOT - {0x1EC8, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH HOOK ABOVE - {0x1EC9, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH HOOK ABOVE - {0x1ECA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH DOT BELOW - {0x1ECB, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH DOT BELOW - {0x1ECC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH DOT BELOW - {0x1ECD, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH DOT BELOW - {0x1ECE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH HOOK ABOVE - {0x1ECF, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HOOK ABOVE - {0x1ED0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND A - {0x1ED1, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH CIRCUMFLEX AND ACU - {0x1ED2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND G - {0x1ED3, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH CIRCUMFLEX AND GRA - {0x1ED4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND H - {0x1ED5, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH CIRCUMFLEX AND HOO - {0x1ED6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND T - {0x1ED7, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH CIRCUMFLEX AND TIL - {0x1ED8, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND D - {0x1ED9, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH CIRCUMFLEX AND DOT - {0x1EDA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH HORN AND ACUTE - {0x1EDB, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HORN AND ACUTE - {0x1EDC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH HORN AND GRAVE - {0x1EDD, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HORN AND GRAVE - {0x1EDE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH HORN AND HOOK AB - {0x1EDF, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HORN AND HOOK ABOV - {0x1EE0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH HORN AND TILDE - {0x1EE1, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HORN AND TILDE - {0x1EE2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH HORN AND DOT BEL - {0x1EE3, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HORN AND DOT BELOW - {0x1EE4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DOT BELOW - {0x1EE5, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH DOT BELOW - {0x1EE6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH HOOK ABOVE - {0x1EE7, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HOOK ABOVE - {0x1EE8, 0x0, propertyDISALLOWED}, 
// LATIN CAPITAL LETTER U WITH HORN AND ACUTE - {0x1EE9, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HORN AND ACUTE - {0x1EEA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH HORN AND GRAVE - {0x1EEB, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HORN AND GRAVE - {0x1EEC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH HORN AND HOOK AB - {0x1EED, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HORN AND HOOK ABOV - {0x1EEE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH HORN AND TILDE - {0x1EEF, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HORN AND TILDE - {0x1EF0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH HORN AND DOT BEL - {0x1EF1, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HORN AND DOT BELOW - {0x1EF2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH GRAVE - {0x1EF3, 0x0, propertyPVALID}, // LATIN SMALL LETTER Y WITH GRAVE - {0x1EF4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH DOT BELOW - {0x1EF5, 0x0, propertyPVALID}, // LATIN SMALL LETTER Y WITH DOT BELOW - {0x1EF6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH HOOK ABOVE - {0x1EF7, 0x0, propertyPVALID}, // LATIN SMALL LETTER Y WITH HOOK ABOVE - {0x1EF8, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH TILDE - {0x1EF9, 0x0, propertyPVALID}, // LATIN SMALL LETTER Y WITH TILDE - {0x1EFA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER MIDDLE-WELSH LL - {0x1EFB, 0x0, propertyPVALID}, // LATIN SMALL LETTER MIDDLE-WELSH LL - {0x1EFC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER MIDDLE-WELSH V - {0x1EFD, 0x0, propertyPVALID}, // LATIN SMALL LETTER MIDDLE-WELSH V - {0x1EFE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH LOOP - {0x1EFF, 0x1F07, propertyPVALID}, // LATIN SMALL LETTER Y WITH LOOP..GREEK SMALL - {0x1F08, 0x1F0F, propertyDISALLOWED}, // GREEK CAPITAL LETTER ALPHA WITH PSILI..GREEK - {0x1F10, 0x1F15, propertyPVALID}, // GREEK SMALL LETTER EPSILON WITH PSILI..GREEK - {0x1F16, 0x1F17, propertyUNASSIGNED}, // .. - {0x1F18, 0x1F1D, propertyDISALLOWED}, // GREEK CAPITAL LETTER EPSILON WITH PSILI..GRE - {0x1F1E, 0x1F1F, propertyUNASSIGNED}, // .. - {0x1F20, 0x1F27, propertyPVALID}, // GREEK SMALL LETTER ETA WITH PSILI..GREEK SMA - {0x1F28, 0x1F2F, propertyDISALLOWED}, // GREEK CAPITAL LETTER ETA WITH PSILI..GREEK C - {0x1F30, 0x1F37, propertyPVALID}, // GREEK SMALL LETTER IOTA WITH PSILI..GREEK SM - {0x1F38, 0x1F3F, propertyDISALLOWED}, // GREEK CAPITAL LETTER IOTA WITH PSILI..GREEK - {0x1F40, 0x1F45, propertyPVALID}, // GREEK SMALL LETTER OMICRON WITH PSILI..GREEK - {0x1F46, 0x1F47, propertyUNASSIGNED}, // .. - {0x1F48, 0x1F4D, propertyDISALLOWED}, // GREEK CAPITAL LETTER OMICRON WITH PSILI..GRE - {0x1F4E, 0x1F4F, propertyUNASSIGNED}, // .. 
- {0x1F50, 0x1F57, propertyPVALID}, // GREEK SMALL LETTER UPSILON WITH PSILI..GREEK - {0x1F58, 0x0, propertyUNASSIGNED}, // - {0x1F59, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER UPSILON WITH DASIA - {0x1F5A, 0x0, propertyUNASSIGNED}, // - {0x1F5B, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER UPSILON WITH DASIA AND - {0x1F5C, 0x0, propertyUNASSIGNED}, // - {0x1F5D, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER UPSILON WITH DASIA AND - {0x1F5E, 0x0, propertyUNASSIGNED}, // - {0x1F5F, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER UPSILON WITH DASIA AND - {0x1F60, 0x1F67, propertyPVALID}, // GREEK SMALL LETTER OMEGA WITH PSILI..GREEK S - {0x1F68, 0x1F6F, propertyDISALLOWED}, // GREEK CAPITAL LETTER OMEGA WITH PSILI..GREEK - {0x1F70, 0x0, propertyPVALID}, // GREEK SMALL LETTER ALPHA WITH VARIA - {0x1F71, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER ALPHA WITH OXIA - {0x1F72, 0x0, propertyPVALID}, // GREEK SMALL LETTER EPSILON WITH VARIA - {0x1F73, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER EPSILON WITH OXIA - {0x1F74, 0x0, propertyPVALID}, // GREEK SMALL LETTER ETA WITH VARIA - {0x1F75, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER ETA WITH OXIA - {0x1F76, 0x0, propertyPVALID}, // GREEK SMALL LETTER IOTA WITH VARIA - {0x1F77, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER IOTA WITH OXIA - {0x1F78, 0x0, propertyPVALID}, // GREEK SMALL LETTER OMICRON WITH VARIA - {0x1F79, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER OMICRON WITH OXIA - {0x1F7A, 0x0, propertyPVALID}, // GREEK SMALL LETTER UPSILON WITH VARIA - {0x1F7B, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER UPSILON WITH OXIA - {0x1F7C, 0x0, propertyPVALID}, // GREEK SMALL LETTER OMEGA WITH VARIA - {0x1F7D, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER OMEGA WITH OXIA - {0x1F7E, 0x1F7F, propertyUNASSIGNED}, // .. - {0x1F80, 0x1FAF, propertyDISALLOWED}, // GREEK SMALL LETTER ALPHA WITH PSILI AND YPOG - {0x1FB0, 0x1FB1, propertyPVALID}, // GREEK SMALL LETTER ALPHA WITH VRACHY..GREEK - {0x1FB2, 0x1FB4, propertyDISALLOWED}, // GREEK SMALL LETTER ALPHA WITH VARIA AND YPOG - {0x1FB5, 0x0, propertyUNASSIGNED}, // - {0x1FB6, 0x0, propertyPVALID}, // GREEK SMALL LETTER ALPHA WITH PERISPOMENI - {0x1FB7, 0x1FC4, propertyDISALLOWED}, // GREEK SMALL LETTER ALPHA WITH PERISPOMENI AN - {0x1FC5, 0x0, propertyUNASSIGNED}, // - {0x1FC6, 0x0, propertyPVALID}, // GREEK SMALL LETTER ETA WITH PERISPOMENI - {0x1FC7, 0x1FCF, propertyDISALLOWED}, // GREEK SMALL LETTER ETA WITH PERISPOMENI AND - {0x1FD0, 0x1FD2, propertyPVALID}, // GREEK SMALL LETTER IOTA WITH VRACHY..GREEK S - {0x1FD3, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER IOTA WITH DIALYTIKA AND O - {0x1FD4, 0x1FD5, propertyUNASSIGNED}, // .. - {0x1FD6, 0x1FD7, propertyPVALID}, // GREEK SMALL LETTER IOTA WITH PERISPOMENI..GR - {0x1FD8, 0x1FDB, propertyDISALLOWED}, // GREEK CAPITAL LETTER IOTA WITH VRACHY..GREEK - {0x1FDC, 0x0, propertyUNASSIGNED}, // - {0x1FDD, 0x1FDF, propertyDISALLOWED}, // GREEK DASIA AND VARIA..GREEK DASIA AND PERIS - {0x1FE0, 0x1FE2, propertyPVALID}, // GREEK SMALL LETTER UPSILON WITH VRACHY..GREE - {0x1FE3, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER UPSILON WITH DIALYTIKA AN - {0x1FE4, 0x1FE7, propertyPVALID}, // GREEK SMALL LETTER RHO WITH PSILI..GREEK SMA - {0x1FE8, 0x1FEF, propertyDISALLOWED}, // GREEK CAPITAL LETTER UPSILON WITH VRACHY..GR - {0x1FF0, 0x1FF1, propertyUNASSIGNED}, // .. 
- {0x1FF2, 0x1FF4, propertyDISALLOWED}, // GREEK SMALL LETTER OMEGA WITH VARIA AND YPOG - {0x1FF5, 0x0, propertyUNASSIGNED}, // - {0x1FF6, 0x0, propertyPVALID}, // GREEK SMALL LETTER OMEGA WITH PERISPOMENI - {0x1FF7, 0x1FFE, propertyDISALLOWED}, // GREEK SMALL LETTER OMEGA WITH PERISPOMENI AN - {0x1FFF, 0x0, propertyUNASSIGNED}, // - {0x2000, 0x200B, propertyDISALLOWED}, // EN QUAD..ZERO WIDTH SPACE - {0x200C, 0x200D, propertyCONTEXTJ}, // ZERO WIDTH NON-JOINER..ZERO WIDTH JOINER - {0x200E, 0x2064, propertyDISALLOWED}, // LEFT-TO-RIGHT MARK..INVISIBLE PLUS - {0x2065, 0x2069, propertyUNASSIGNED}, // .. - {0x206A, 0x2071, propertyDISALLOWED}, // INHIBIT SYMMETRIC SWAPPING..SUPERSCRIPT LATI - {0x2072, 0x2073, propertyUNASSIGNED}, // .. - {0x2074, 0x208E, propertyDISALLOWED}, // SUPERSCRIPT FOUR..SUBSCRIPT RIGHT PARENTHESI - {0x208F, 0x0, propertyUNASSIGNED}, // - {0x2090, 0x2094, propertyDISALLOWED}, // LATIN SUBSCRIPT SMALL LETTER A..LATIN SUBSCR - {0x2095, 0x209F, propertyUNASSIGNED}, // .. - {0x20A0, 0x20B8, propertyDISALLOWED}, // EURO-CURRENCY SIGN..TENGE SIGN - {0x20B9, 0x20CF, propertyUNASSIGNED}, // .. - {0x20D0, 0x20F0, propertyDISALLOWED}, // COMBINING LEFT HARPOON ABOVE..COMBINING ASTE - {0x20F1, 0x20FF, propertyUNASSIGNED}, // .. - {0x2100, 0x214D, propertyDISALLOWED}, // ACCOUNT OF..AKTIESELSKAB - {0x214E, 0x0, propertyPVALID}, // TURNED SMALL F - {0x214F, 0x2183, propertyDISALLOWED}, // SYMBOL FOR SAMARITAN SOURCE..ROMAN NUMERAL R - {0x2184, 0x0, propertyPVALID}, // LATIN SMALL LETTER REVERSED C - {0x2185, 0x2189, propertyDISALLOWED}, // ROMAN NUMERAL SIX LATE FORM..VULGAR FRACTION - {0x218A, 0x218F, propertyUNASSIGNED}, // .. - {0x2190, 0x23E8, propertyDISALLOWED}, // LEFTWARDS ARROW..DECIMAL EXPONENT SYMBOL - {0x23E9, 0x23FF, propertyUNASSIGNED}, // .. - {0x2400, 0x2426, propertyDISALLOWED}, // SYMBOL FOR NULL..SYMBOL FOR SUBSTITUTE FORM - {0x2427, 0x243F, propertyUNASSIGNED}, // .. - {0x2440, 0x244A, propertyDISALLOWED}, // OCR HOOK..OCR DOUBLE BACKSLASH - {0x244B, 0x245F, propertyUNASSIGNED}, // .. - {0x2460, 0x26CD, propertyDISALLOWED}, // CIRCLED DIGIT ONE..DISABLED CAR - {0x26CE, 0x0, propertyUNASSIGNED}, // - {0x26CF, 0x26E1, propertyDISALLOWED}, // PICK..RESTRICTED LEFT ENTRY-2 - {0x26E2, 0x0, propertyUNASSIGNED}, // - {0x26E3, 0x0, propertyDISALLOWED}, // HEAVY CIRCLE WITH STROKE AND TWO DOTS ABOVE - {0x26E4, 0x26E7, propertyUNASSIGNED}, // .. - {0x26E8, 0x26FF, propertyDISALLOWED}, // BLACK CROSS ON SHIELD..WHITE FLAG WITH HORIZ - {0x2700, 0x0, propertyUNASSIGNED}, // - {0x2701, 0x2704, propertyDISALLOWED}, // UPPER BLADE SCISSORS..WHITE SCISSORS - {0x2705, 0x0, propertyUNASSIGNED}, // - {0x2706, 0x2709, propertyDISALLOWED}, // TELEPHONE LOCATION SIGN..ENVELOPE - {0x270A, 0x270B, propertyUNASSIGNED}, // .. - {0x270C, 0x2727, propertyDISALLOWED}, // VICTORY HAND..WHITE FOUR POINTED STAR - {0x2728, 0x0, propertyUNASSIGNED}, // - {0x2729, 0x274B, propertyDISALLOWED}, // STRESS OUTLINED WHITE STAR..HEAVY EIGHT TEAR - {0x274C, 0x0, propertyUNASSIGNED}, // - {0x274D, 0x0, propertyDISALLOWED}, // SHADOWED WHITE CIRCLE - {0x274E, 0x0, propertyUNASSIGNED}, // - {0x274F, 0x2752, propertyDISALLOWED}, // LOWER RIGHT DROP-SHADOWED WHITE SQUARE..UPPE - {0x2753, 0x2755, propertyUNASSIGNED}, // .. - {0x2756, 0x275E, propertyDISALLOWED}, // BLACK DIAMOND MINUS WHITE X..HEAVY DOUBLE CO - {0x275F, 0x2760, propertyUNASSIGNED}, // .. - {0x2761, 0x2794, propertyDISALLOWED}, // CURVED STEM PARAGRAPH SIGN ORNAMENT..HEAVY W - {0x2795, 0x2797, propertyUNASSIGNED}, // .. 
- {0x2798, 0x27AF, propertyDISALLOWED}, // HEAVY SOUTH EAST ARROW..NOTCHED LOWER RIGHT- - {0x27B0, 0x0, propertyUNASSIGNED}, // - {0x27B1, 0x27BE, propertyDISALLOWED}, // NOTCHED UPPER RIGHT-SHADOWED WHITE RIGHTWARD - {0x27BF, 0x0, propertyUNASSIGNED}, // - {0x27C0, 0x27CA, propertyDISALLOWED}, // THREE DIMENSIONAL ANGLE..VERTICAL BAR WITH H - {0x27CB, 0x0, propertyUNASSIGNED}, // - {0x27CC, 0x0, propertyDISALLOWED}, // LONG DIVISION - {0x27CD, 0x27CF, propertyUNASSIGNED}, // .. - {0x27D0, 0x2B4C, propertyDISALLOWED}, // WHITE DIAMOND WITH CENTRED DOT..RIGHTWARDS A - {0x2B4D, 0x2B4F, propertyUNASSIGNED}, // .. - {0x2B50, 0x2B59, propertyDISALLOWED}, // WHITE MEDIUM STAR..HEAVY CIRCLED SALTIRE - {0x2B5A, 0x2BFF, propertyUNASSIGNED}, // .. - {0x2C00, 0x2C2E, propertyDISALLOWED}, // GLAGOLITIC CAPITAL LETTER AZU..GLAGOLITIC CA - {0x2C2F, 0x0, propertyUNASSIGNED}, // - {0x2C30, 0x2C5E, propertyPVALID}, // GLAGOLITIC SMALL LETTER AZU..GLAGOLITIC SMAL - {0x2C5F, 0x0, propertyUNASSIGNED}, // - {0x2C60, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH DOUBLE BAR - {0x2C61, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH DOUBLE BAR - {0x2C62, 0x2C64, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH MIDDLE TILDE..LA - {0x2C65, 0x2C66, propertyPVALID}, // LATIN SMALL LETTER A WITH STROKE..LATIN SMAL - {0x2C67, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH DESCENDER - {0x2C68, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH DESCENDER - {0x2C69, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH DESCENDER - {0x2C6A, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH DESCENDER - {0x2C6B, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH DESCENDER - {0x2C6C, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH DESCENDER - {0x2C6D, 0x2C70, propertyDISALLOWED}, // LATIN CAPITAL LETTER ALPHA..LATIN CAPITAL LE - {0x2C71, 0x0, propertyPVALID}, // LATIN SMALL LETTER V WITH RIGHT HOOK - {0x2C72, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH HOOK - {0x2C73, 0x2C74, propertyPVALID}, // LATIN SMALL LETTER W WITH HOOK..LATIN SMALL - {0x2C75, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER HALF H - {0x2C76, 0x2C7B, propertyPVALID}, // LATIN SMALL LETTER HALF H..LATIN LETTER SMAL - {0x2C7C, 0x2C80, propertyDISALLOWED}, // LATIN SUBSCRIPT SMALL LETTER J..COPTIC CAPIT - {0x2C81, 0x0, propertyPVALID}, // COPTIC SMALL LETTER ALFA - {0x2C82, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER VIDA - {0x2C83, 0x0, propertyPVALID}, // COPTIC SMALL LETTER VIDA - {0x2C84, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER GAMMA - {0x2C85, 0x0, propertyPVALID}, // COPTIC SMALL LETTER GAMMA - {0x2C86, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER DALDA - {0x2C87, 0x0, propertyPVALID}, // COPTIC SMALL LETTER DALDA - {0x2C88, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER EIE - {0x2C89, 0x0, propertyPVALID}, // COPTIC SMALL LETTER EIE - {0x2C8A, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER SOU - {0x2C8B, 0x0, propertyPVALID}, // COPTIC SMALL LETTER SOU - {0x2C8C, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER ZATA - {0x2C8D, 0x0, propertyPVALID}, // COPTIC SMALL LETTER ZATA - {0x2C8E, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER HATE - {0x2C8F, 0x0, propertyPVALID}, // COPTIC SMALL LETTER HATE - {0x2C90, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER THETHE - {0x2C91, 0x0, propertyPVALID}, // COPTIC SMALL LETTER THETHE - {0x2C92, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER IAUDA - {0x2C93, 0x0, propertyPVALID}, // COPTIC SMALL LETTER IAUDA - 
{0x2C94, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER KAPA - {0x2C95, 0x0, propertyPVALID}, // COPTIC SMALL LETTER KAPA - {0x2C96, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER LAULA - {0x2C97, 0x0, propertyPVALID}, // COPTIC SMALL LETTER LAULA - {0x2C98, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER MI - {0x2C99, 0x0, propertyPVALID}, // COPTIC SMALL LETTER MI - {0x2C9A, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER NI - {0x2C9B, 0x0, propertyPVALID}, // COPTIC SMALL LETTER NI - {0x2C9C, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER KSI - {0x2C9D, 0x0, propertyPVALID}, // COPTIC SMALL LETTER KSI - {0x2C9E, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER O - {0x2C9F, 0x0, propertyPVALID}, // COPTIC SMALL LETTER O - {0x2CA0, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER PI - {0x2CA1, 0x0, propertyPVALID}, // COPTIC SMALL LETTER PI - {0x2CA2, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER RO - {0x2CA3, 0x0, propertyPVALID}, // COPTIC SMALL LETTER RO - {0x2CA4, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER SIMA - {0x2CA5, 0x0, propertyPVALID}, // COPTIC SMALL LETTER SIMA - {0x2CA6, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER TAU - {0x2CA7, 0x0, propertyPVALID}, // COPTIC SMALL LETTER TAU - {0x2CA8, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER UA - {0x2CA9, 0x0, propertyPVALID}, // COPTIC SMALL LETTER UA - {0x2CAA, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER FI - {0x2CAB, 0x0, propertyPVALID}, // COPTIC SMALL LETTER FI - {0x2CAC, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER KHI - {0x2CAD, 0x0, propertyPVALID}, // COPTIC SMALL LETTER KHI - {0x2CAE, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER PSI - {0x2CAF, 0x0, propertyPVALID}, // COPTIC SMALL LETTER PSI - {0x2CB0, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OOU - {0x2CB1, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OOU - {0x2CB2, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER DIALECT-P ALEF - {0x2CB3, 0x0, propertyPVALID}, // COPTIC SMALL LETTER DIALECT-P ALEF - {0x2CB4, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC AIN - {0x2CB5, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC AIN - {0x2CB6, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER CRYPTOGRAMMIC EIE - {0x2CB7, 0x0, propertyPVALID}, // COPTIC SMALL LETTER CRYPTOGRAMMIC EIE - {0x2CB8, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER DIALECT-P KAPA - {0x2CB9, 0x0, propertyPVALID}, // COPTIC SMALL LETTER DIALECT-P KAPA - {0x2CBA, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER DIALECT-P NI - {0x2CBB, 0x0, propertyPVALID}, // COPTIC SMALL LETTER DIALECT-P NI - {0x2CBC, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER CRYPTOGRAMMIC NI - {0x2CBD, 0x0, propertyPVALID}, // COPTIC SMALL LETTER CRYPTOGRAMMIC NI - {0x2CBE, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC OOU - {0x2CBF, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC OOU - {0x2CC0, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER SAMPI - {0x2CC1, 0x0, propertyPVALID}, // COPTIC SMALL LETTER SAMPI - {0x2CC2, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER CROSSED SHEI - {0x2CC3, 0x0, propertyPVALID}, // COPTIC SMALL LETTER CROSSED SHEI - {0x2CC4, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC SHEI - {0x2CC5, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC SHEI - {0x2CC6, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC ESH - {0x2CC7, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC ESH - {0x2CC8, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL 
LETTER AKHMIMIC KHEI - {0x2CC9, 0x0, propertyPVALID}, // COPTIC SMALL LETTER AKHMIMIC KHEI - {0x2CCA, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER DIALECT-P HORI - {0x2CCB, 0x0, propertyPVALID}, // COPTIC SMALL LETTER DIALECT-P HORI - {0x2CCC, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC HORI - {0x2CCD, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC HORI - {0x2CCE, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC HA - {0x2CCF, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC HA - {0x2CD0, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER L-SHAPED HA - {0x2CD1, 0x0, propertyPVALID}, // COPTIC SMALL LETTER L-SHAPED HA - {0x2CD2, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC HEI - {0x2CD3, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC HEI - {0x2CD4, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC HAT - {0x2CD5, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC HAT - {0x2CD6, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC GANGIA - {0x2CD7, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC GANGIA - {0x2CD8, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC DJA - {0x2CD9, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC DJA - {0x2CDA, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC SHIMA - {0x2CDB, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC SHIMA - {0x2CDC, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD NUBIAN SHIMA - {0x2CDD, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD NUBIAN SHIMA - {0x2CDE, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD NUBIAN NGI - {0x2CDF, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD NUBIAN NGI - {0x2CE0, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD NUBIAN NYI - {0x2CE1, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD NUBIAN NYI - {0x2CE2, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD NUBIAN WAU - {0x2CE3, 0x2CE4, propertyPVALID}, // COPTIC SMALL LETTER OLD NUBIAN WAU..COPTIC S - {0x2CE5, 0x2CEB, propertyDISALLOWED}, // COPTIC SYMBOL MI RO..COPTIC CAPITAL LETTER C - {0x2CEC, 0x0, propertyPVALID}, // COPTIC SMALL LETTER CRYPTOGRAMMIC SHEI - {0x2CED, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER CRYPTOGRAMMIC GANGIA - {0x2CEE, 0x2CF1, propertyPVALID}, // COPTIC SMALL LETTER CRYPTOGRAMMIC GANGIA..CO - {0x2CF2, 0x2CF8, propertyUNASSIGNED}, // .. - {0x2CF9, 0x2CFF, propertyDISALLOWED}, // COPTIC OLD NUBIAN FULL STOP..COPTIC MORPHOLO - {0x2D00, 0x2D25, propertyPVALID}, // GEORGIAN SMALL LETTER AN..GEORGIAN SMALL LET - {0x2D26, 0x2D2F, propertyUNASSIGNED}, // .. - {0x2D30, 0x2D65, propertyPVALID}, // TIFINAGH LETTER YA..TIFINAGH LETTER YAZZ - {0x2D66, 0x2D6E, propertyUNASSIGNED}, // .. - {0x2D6F, 0x0, propertyDISALLOWED}, // TIFINAGH MODIFIER LETTER LABIALIZATION MARK - {0x2D70, 0x2D7F, propertyUNASSIGNED}, // .. - {0x2D80, 0x2D96, propertyPVALID}, // ETHIOPIC SYLLABLE LOA..ETHIOPIC SYLLABLE GGW - {0x2D97, 0x2D9F, propertyUNASSIGNED}, // .. 
- {0x2DA0, 0x2DA6, propertyPVALID}, // ETHIOPIC SYLLABLE SSA..ETHIOPIC SYLLABLE SSO - {0x2DA7, 0x0, propertyUNASSIGNED}, // - {0x2DA8, 0x2DAE, propertyPVALID}, // ETHIOPIC SYLLABLE CCA..ETHIOPIC SYLLABLE CCO - {0x2DAF, 0x0, propertyUNASSIGNED}, // - {0x2DB0, 0x2DB6, propertyPVALID}, // ETHIOPIC SYLLABLE ZZA..ETHIOPIC SYLLABLE ZZO - {0x2DB7, 0x0, propertyUNASSIGNED}, // - {0x2DB8, 0x2DBE, propertyPVALID}, // ETHIOPIC SYLLABLE CCHA..ETHIOPIC SYLLABLE CC - {0x2DBF, 0x0, propertyUNASSIGNED}, // - {0x2DC0, 0x2DC6, propertyPVALID}, // ETHIOPIC SYLLABLE QYA..ETHIOPIC SYLLABLE QYO - {0x2DC7, 0x0, propertyUNASSIGNED}, // - {0x2DC8, 0x2DCE, propertyPVALID}, // ETHIOPIC SYLLABLE KYA..ETHIOPIC SYLLABLE KYO - {0x2DCF, 0x0, propertyUNASSIGNED}, // - {0x2DD0, 0x2DD6, propertyPVALID}, // ETHIOPIC SYLLABLE XYA..ETHIOPIC SYLLABLE XYO - {0x2DD7, 0x0, propertyUNASSIGNED}, // - {0x2DD8, 0x2DDE, propertyPVALID}, // ETHIOPIC SYLLABLE GYA..ETHIOPIC SYLLABLE GYO - {0x2DDF, 0x0, propertyUNASSIGNED}, // - {0x2DE0, 0x2DFF, propertyPVALID}, // COMBINING CYRILLIC LETTER BE..COMBINING CYRI - {0x2E00, 0x2E2E, propertyDISALLOWED}, // RIGHT ANGLE SUBSTITUTION MARKER..REVERSED QU - {0x2E2F, 0x0, propertyPVALID}, // VERTICAL TILDE - {0x2E30, 0x2E31, propertyDISALLOWED}, // RING POINT..WORD SEPARATOR MIDDLE DOT - {0x2E32, 0x2E7F, propertyUNASSIGNED}, // .. - {0x2E80, 0x2E99, propertyDISALLOWED}, // CJK RADICAL REPEAT..CJK RADICAL RAP - {0x2E9A, 0x0, propertyUNASSIGNED}, // - {0x2E9B, 0x2EF3, propertyDISALLOWED}, // CJK RADICAL CHOKE..CJK RADICAL C-SIMPLIFIED - {0x2EF4, 0x2EFF, propertyUNASSIGNED}, // .. - {0x2F00, 0x2FD5, propertyDISALLOWED}, // KANGXI RADICAL ONE..KANGXI RADICAL FLUTE - {0x2FD6, 0x2FEF, propertyUNASSIGNED}, // .. - {0x2FF0, 0x2FFB, propertyDISALLOWED}, // IDEOGRAPHIC DESCRIPTION CHARACTER LEFT TO RI - {0x2FFC, 0x2FFF, propertyUNASSIGNED}, // .. - {0x3000, 0x3004, propertyDISALLOWED}, // IDEOGRAPHIC SPACE..JAPANESE INDUSTRIAL STAND - {0x3005, 0x3007, propertyPVALID}, // IDEOGRAPHIC ITERATION MARK..IDEOGRAPHIC NUMB - {0x3008, 0x3029, propertyDISALLOWED}, // LEFT ANGLE BRACKET..HANGZHOU NUMERAL NINE - {0x302A, 0x302D, propertyPVALID}, // IDEOGRAPHIC LEVEL TONE MARK..IDEOGRAPHIC ENT - {0x302E, 0x303B, propertyDISALLOWED}, // HANGUL SINGLE DOT TONE MARK..VERTICAL IDEOGR - {0x303C, 0x0, propertyPVALID}, // MASU MARK - {0x303D, 0x303F, propertyDISALLOWED}, // PART ALTERNATION MARK..IDEOGRAPHIC HALF FILL - {0x3040, 0x0, propertyUNASSIGNED}, // - {0x3041, 0x3096, propertyPVALID}, // HIRAGANA LETTER SMALL A..HIRAGANA LETTER SMA - {0x3097, 0x3098, propertyUNASSIGNED}, // .. - {0x3099, 0x309A, propertyPVALID}, // COMBINING KATAKANA-HIRAGANA VOICED SOUND MAR - {0x309B, 0x309C, propertyDISALLOWED}, // KATAKANA-HIRAGANA VOICED SOUND MARK..KATAKAN - {0x309D, 0x309E, propertyPVALID}, // HIRAGANA ITERATION MARK..HIRAGANA VOICED ITE - {0x309F, 0x30A0, propertyDISALLOWED}, // HIRAGANA DIGRAPH YORI..KATAKANA-HIRAGANA DOU - {0x30A1, 0x30FA, propertyPVALID}, // KATAKANA LETTER SMALL A..KATAKANA LETTER VO - {0x30FB, 0x0, propertyCONTEXTO}, // KATAKANA MIDDLE DOT - {0x30FC, 0x30FE, propertyPVALID}, // KATAKANA-HIRAGANA PROLONGED SOUND MARK..KATA - {0x30FF, 0x0, propertyDISALLOWED}, // KATAKANA DIGRAPH KOTO - {0x3100, 0x3104, propertyUNASSIGNED}, // .. - {0x3105, 0x312D, propertyPVALID}, // BOPOMOFO LETTER B..BOPOMOFO LETTER IH - {0x312E, 0x3130, propertyUNASSIGNED}, // .. 
- {0x3131, 0x318E, propertyDISALLOWED}, // HANGUL LETTER KIYEOK..HANGUL LETTER ARAEAE - {0x318F, 0x0, propertyUNASSIGNED}, // - {0x3190, 0x319F, propertyDISALLOWED}, // IDEOGRAPHIC ANNOTATION LINKING MARK..IDEOGRA - {0x31A0, 0x31B7, propertyPVALID}, // BOPOMOFO LETTER BU..BOPOMOFO FINAL LETTER H - {0x31B8, 0x31BF, propertyUNASSIGNED}, // .. - {0x31C0, 0x31E3, propertyDISALLOWED}, // CJK STROKE T..CJK STROKE Q - {0x31E4, 0x31EF, propertyUNASSIGNED}, // .. - {0x31F0, 0x31FF, propertyPVALID}, // KATAKANA LETTER SMALL KU..KATAKANA LETTER SM - {0x3200, 0x321E, propertyDISALLOWED}, // PARENTHESIZED HANGUL KIYEOK..PARENTHESIZED K - {0x321F, 0x0, propertyUNASSIGNED}, // - {0x3220, 0x32FE, propertyDISALLOWED}, // PARENTHESIZED IDEOGRAPH ONE..CIRCLED KATAKAN - {0x32FF, 0x0, propertyUNASSIGNED}, // - {0x3300, 0x33FF, propertyDISALLOWED}, // SQUARE APAATO..SQUARE GAL - {0x3400, 0x4DB5, propertyPVALID}, // .... - {0x4DC0, 0x4DFF, propertyDISALLOWED}, // HEXAGRAM FOR THE CREATIVE HEAVEN..HEXAGRAM F - {0x4E00, 0x9FCB, propertyPVALID}, // .. - {0x9FCC, 0x9FFF, propertyUNASSIGNED}, // .. - {0xA000, 0xA48C, propertyPVALID}, // YI SYLLABLE IT..YI SYLLABLE YYR - {0xA48D, 0xA48F, propertyUNASSIGNED}, // .. - {0xA490, 0xA4C6, propertyDISALLOWED}, // YI RADICAL QOT..YI RADICAL KE - {0xA4C7, 0xA4CF, propertyUNASSIGNED}, // .. - {0xA4D0, 0xA4FD, propertyPVALID}, // LISU LETTER BA..LISU LETTER TONE MYA JEU - {0xA4FE, 0xA4FF, propertyDISALLOWED}, // LISU PUNCTUATION COMMA..LISU PUNCTUATION FUL - {0xA500, 0xA60C, propertyPVALID}, // VAI SYLLABLE EE..VAI SYLLABLE LENGTHENER - {0xA60D, 0xA60F, propertyDISALLOWED}, // VAI COMMA..VAI QUESTION MARK - {0xA610, 0xA62B, propertyPVALID}, // VAI SYLLABLE NDOLE FA..VAI SYLLABLE NDOLE DO - {0xA62C, 0xA63F, propertyUNASSIGNED}, // .. - {0xA640, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ZEMLYA - {0xA641, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ZEMLYA - {0xA642, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER DZELO - {0xA643, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER DZELO - {0xA644, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER REVERSED DZE - {0xA645, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER REVERSED DZE - {0xA646, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IOTA - {0xA647, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IOTA - {0xA648, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER DJERV - {0xA649, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER DJERV - {0xA64A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER MONOGRAPH UK - {0xA64B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER MONOGRAPH UK - {0xA64C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER BROAD OMEGA - {0xA64D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER BROAD OMEGA - {0xA64E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER NEUTRAL YER - {0xA64F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER NEUTRAL YER - {0xA650, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER YERU WITH BACK YER - {0xA651, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER YERU WITH BACK YER - {0xA652, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IOTIFIED YAT - {0xA653, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IOTIFIED YAT - {0xA654, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER REVERSED YU - {0xA655, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER REVERSED YU - {0xA656, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IOTIFIED A - {0xA657, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IOTIFIED A - {0xA658, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL 
LETTER CLOSED LITTLE YUS - {0xA659, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER CLOSED LITTLE YUS - {0xA65A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER BLENDED YUS - {0xA65B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER BLENDED YUS - {0xA65C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IOTIFIED CLOSED LITT - {0xA65D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IOTIFIED CLOSED LITTLE - {0xA65E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER YN - {0xA65F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER YN - {0xA660, 0xA661, propertyUNASSIGNED}, // .. - {0xA662, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SOFT DE - {0xA663, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SOFT DE - {0xA664, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SOFT EL - {0xA665, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SOFT EL - {0xA666, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SOFT EM - {0xA667, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SOFT EM - {0xA668, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER MONOCULAR O - {0xA669, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER MONOCULAR O - {0xA66A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER BINOCULAR O - {0xA66B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER BINOCULAR O - {0xA66C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER DOUBLE MONOCULAR O - {0xA66D, 0xA66F, propertyPVALID}, // CYRILLIC SMALL LETTER DOUBLE MONOCULAR O..CO - {0xA670, 0xA673, propertyDISALLOWED}, // COMBINING CYRILLIC TEN MILLIONS SIGN..SLAVON - {0xA674, 0xA67B, propertyUNASSIGNED}, // .. - {0xA67C, 0xA67D, propertyPVALID}, // COMBINING CYRILLIC KAVYKA..COMBINING CYRILLI - {0xA67E, 0x0, propertyDISALLOWED}, // CYRILLIC KAVYKA - {0xA67F, 0x0, propertyPVALID}, // CYRILLIC PAYEROK - {0xA680, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER DWE - {0xA681, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER DWE - {0xA682, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER DZWE - {0xA683, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER DZWE - {0xA684, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ZHWE - {0xA685, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ZHWE - {0xA686, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER CCHE - {0xA687, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER CCHE - {0xA688, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER DZZE - {0xA689, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER DZZE - {0xA68A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER TE WITH MIDDLE HOOK - {0xA68B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER TE WITH MIDDLE HOOK - {0xA68C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER TWE - {0xA68D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER TWE - {0xA68E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER TSWE - {0xA68F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER TSWE - {0xA690, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER TSSE - {0xA691, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER TSSE - {0xA692, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER TCHE - {0xA693, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER TCHE - {0xA694, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER HWE - {0xA695, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER HWE - {0xA696, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SHWE - {0xA697, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SHWE - {0xA698, 0xA69F, propertyUNASSIGNED}, // .. 
- {0xA6A0, 0xA6E5, propertyPVALID}, // BAMUM LETTER A..BAMUM LETTER KI - {0xA6E6, 0xA6EF, propertyDISALLOWED}, // BAMUM LETTER MO..BAMUM LETTER KOGHOM - {0xA6F0, 0xA6F1, propertyPVALID}, // BAMUM COMBINING MARK KOQNDON..BAMUM COMBININ - {0xA6F2, 0xA6F7, propertyDISALLOWED}, // BAMUM NJAEMLI..BAMUM QUESTION MARK - {0xA6F8, 0xA6FF, propertyUNASSIGNED}, // .. - {0xA700, 0xA716, propertyDISALLOWED}, // MODIFIER LETTER CHINESE TONE YIN PING..MODIF - {0xA717, 0xA71F, propertyPVALID}, // MODIFIER LETTER DOT VERTICAL BAR..MODIFIER L - {0xA720, 0xA722, propertyDISALLOWED}, // MODIFIER LETTER STRESS AND HIGH TONE..LATIN - {0xA723, 0x0, propertyPVALID}, // LATIN SMALL LETTER EGYPTOLOGICAL ALEF - {0xA724, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER EGYPTOLOGICAL AIN - {0xA725, 0x0, propertyPVALID}, // LATIN SMALL LETTER EGYPTOLOGICAL AIN - {0xA726, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER HENG - {0xA727, 0x0, propertyPVALID}, // LATIN SMALL LETTER HENG - {0xA728, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER TZ - {0xA729, 0x0, propertyPVALID}, // LATIN SMALL LETTER TZ - {0xA72A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER TRESILLO - {0xA72B, 0x0, propertyPVALID}, // LATIN SMALL LETTER TRESILLO - {0xA72C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER CUATRILLO - {0xA72D, 0x0, propertyPVALID}, // LATIN SMALL LETTER CUATRILLO - {0xA72E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER CUATRILLO WITH COMMA - {0xA72F, 0xA731, propertyPVALID}, // LATIN SMALL LETTER CUATRILLO WITH COMMA..LAT - {0xA732, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AA - {0xA733, 0x0, propertyPVALID}, // LATIN SMALL LETTER AA - {0xA734, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AO - {0xA735, 0x0, propertyPVALID}, // LATIN SMALL LETTER AO - {0xA736, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AU - {0xA737, 0x0, propertyPVALID}, // LATIN SMALL LETTER AU - {0xA738, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AV - {0xA739, 0x0, propertyPVALID}, // LATIN SMALL LETTER AV - {0xA73A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AV WITH HORIZONTAL BAR - {0xA73B, 0x0, propertyPVALID}, // LATIN SMALL LETTER AV WITH HORIZONTAL BAR - {0xA73C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AY - {0xA73D, 0x0, propertyPVALID}, // LATIN SMALL LETTER AY - {0xA73E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER REVERSED C WITH DOT - {0xA73F, 0x0, propertyPVALID}, // LATIN SMALL LETTER REVERSED C WITH DOT - {0xA740, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH STROKE - {0xA741, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH STROKE - {0xA742, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH DIAGONAL STROKE - {0xA743, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH DIAGONAL STROKE - {0xA744, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH STROKE AND DIAGO - {0xA745, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH STROKE AND DIAGONA - {0xA746, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER BROKEN L - {0xA747, 0x0, propertyPVALID}, // LATIN SMALL LETTER BROKEN L - {0xA748, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH HIGH STROKE - {0xA749, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH HIGH STROKE - {0xA74A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH LONG STROKE OVER - {0xA74B, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH LONG STROKE OVERLA - {0xA74C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH LOOP - {0xA74D, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH LOOP - {0xA74E, 0x0, 
propertyDISALLOWED}, // LATIN CAPITAL LETTER OO - {0xA74F, 0x0, propertyPVALID}, // LATIN SMALL LETTER OO - {0xA750, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER P WITH STROKE THROUGH D - {0xA751, 0x0, propertyPVALID}, // LATIN SMALL LETTER P WITH STROKE THROUGH DES - {0xA752, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER P WITH FLOURISH - {0xA753, 0x0, propertyPVALID}, // LATIN SMALL LETTER P WITH FLOURISH - {0xA754, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER P WITH SQUIRREL TAIL - {0xA755, 0x0, propertyPVALID}, // LATIN SMALL LETTER P WITH SQUIRREL TAIL - {0xA756, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Q WITH STROKE THROUGH D - {0xA757, 0x0, propertyPVALID}, // LATIN SMALL LETTER Q WITH STROKE THROUGH DES - {0xA758, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Q WITH DIAGONAL STROKE - {0xA759, 0x0, propertyPVALID}, // LATIN SMALL LETTER Q WITH DIAGONAL STROKE - {0xA75A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R ROTUNDA - {0xA75B, 0x0, propertyPVALID}, // LATIN SMALL LETTER R ROTUNDA - {0xA75C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER RUM ROTUNDA - {0xA75D, 0x0, propertyPVALID}, // LATIN SMALL LETTER RUM ROTUNDA - {0xA75E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER V WITH DIAGONAL STROKE - {0xA75F, 0x0, propertyPVALID}, // LATIN SMALL LETTER V WITH DIAGONAL STROKE - {0xA760, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER VY - {0xA761, 0x0, propertyPVALID}, // LATIN SMALL LETTER VY - {0xA762, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER VISIGOTHIC Z - {0xA763, 0x0, propertyPVALID}, // LATIN SMALL LETTER VISIGOTHIC Z - {0xA764, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER THORN WITH STROKE - {0xA765, 0x0, propertyPVALID}, // LATIN SMALL LETTER THORN WITH STROKE - {0xA766, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER THORN WITH STROKE THROU - {0xA767, 0x0, propertyPVALID}, // LATIN SMALL LETTER THORN WITH STROKE THROUGH - {0xA768, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER VEND - {0xA769, 0x0, propertyPVALID}, // LATIN SMALL LETTER VEND - {0xA76A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER ET - {0xA76B, 0x0, propertyPVALID}, // LATIN SMALL LETTER ET - {0xA76C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER IS - {0xA76D, 0x0, propertyPVALID}, // LATIN SMALL LETTER IS - {0xA76E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER CON - {0xA76F, 0x0, propertyPVALID}, // LATIN SMALL LETTER CON - {0xA770, 0x0, propertyDISALLOWED}, // MODIFIER LETTER US - {0xA771, 0xA778, propertyPVALID}, // LATIN SMALL LETTER DUM..LATIN SMALL LETTER U - {0xA779, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER INSULAR D - {0xA77A, 0x0, propertyPVALID}, // LATIN SMALL LETTER INSULAR D - {0xA77B, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER INSULAR F - {0xA77C, 0x0, propertyPVALID}, // LATIN SMALL LETTER INSULAR F - {0xA77D, 0xA77E, propertyDISALLOWED}, // LATIN CAPITAL LETTER INSULAR G..LATIN CAPITA - {0xA77F, 0x0, propertyPVALID}, // LATIN SMALL LETTER TURNED INSULAR G - {0xA780, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER TURNED L - {0xA781, 0x0, propertyPVALID}, // LATIN SMALL LETTER TURNED L - {0xA782, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER INSULAR R - {0xA783, 0x0, propertyPVALID}, // LATIN SMALL LETTER INSULAR R - {0xA784, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER INSULAR S - {0xA785, 0x0, propertyPVALID}, // LATIN SMALL LETTER INSULAR S - {0xA786, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER INSULAR T - {0xA787, 0xA788, propertyPVALID}, // LATIN SMALL LETTER INSULAR T..MODIFIER 
LETTE - {0xA789, 0xA78B, propertyDISALLOWED}, // MODIFIER LETTER COLON..LATIN CAPITAL LETTER - {0xA78C, 0x0, propertyPVALID}, // LATIN SMALL LETTER SALTILLO - {0xA78D, 0xA7FA, propertyUNASSIGNED}, // .. - {0xA7FB, 0xA827, propertyPVALID}, // LATIN EPIGRAPHIC LETTER REVERSED F..SYLOTI N - {0xA828, 0xA82B, propertyDISALLOWED}, // SYLOTI NAGRI POETRY MARK-1..SYLOTI NAGRI POE - {0xA82C, 0xA82F, propertyUNASSIGNED}, // .. - {0xA830, 0xA839, propertyDISALLOWED}, // NORTH INDIC FRACTION ONE QUARTER..NORTH INDI - {0xA83A, 0xA83F, propertyUNASSIGNED}, // .. - {0xA840, 0xA873, propertyPVALID}, // PHAGS-PA LETTER KA..PHAGS-PA LETTER CANDRABI - {0xA874, 0xA877, propertyDISALLOWED}, // PHAGS-PA SINGLE HEAD MARK..PHAGS-PA MARK DOU - {0xA878, 0xA87F, propertyUNASSIGNED}, // .. - {0xA880, 0xA8C4, propertyPVALID}, // SAURASHTRA SIGN ANUSVARA..SAURASHTRA SIGN VI - {0xA8C5, 0xA8CD, propertyUNASSIGNED}, // .. - {0xA8CE, 0xA8CF, propertyDISALLOWED}, // SAURASHTRA DANDA..SAURASHTRA DOUBLE DANDA - {0xA8D0, 0xA8D9, propertyPVALID}, // SAURASHTRA DIGIT ZERO..SAURASHTRA DIGIT NINE - {0xA8DA, 0xA8DF, propertyUNASSIGNED}, // .. - {0xA8E0, 0xA8F7, propertyPVALID}, // COMBINING DEVANAGARI DIGIT ZERO..DEVANAGARI - {0xA8F8, 0xA8FA, propertyDISALLOWED}, // DEVANAGARI SIGN PUSHPIKA..DEVANAGARI CARET - {0xA8FB, 0x0, propertyPVALID}, // DEVANAGARI HEADSTROKE - {0xA8FC, 0xA8FF, propertyUNASSIGNED}, // .. - {0xA900, 0xA92D, propertyPVALID}, // KAYAH LI DIGIT ZERO..KAYAH LI TONE CALYA PLO - {0xA92E, 0xA92F, propertyDISALLOWED}, // KAYAH LI SIGN CWI..KAYAH LI SIGN SHYA - {0xA930, 0xA953, propertyPVALID}, // REJANG LETTER KA..REJANG VIRAMA - {0xA954, 0xA95E, propertyUNASSIGNED}, // .. - {0xA95F, 0xA97C, propertyDISALLOWED}, // REJANG SECTION MARK..HANGUL CHOSEONG SSANGYE - {0xA97D, 0xA97F, propertyUNASSIGNED}, // .. - {0xA980, 0xA9C0, propertyPVALID}, // JAVANESE SIGN PANYANGGA..JAVANESE PANGKON - {0xA9C1, 0xA9CD, propertyDISALLOWED}, // JAVANESE LEFT RERENGGAN..JAVANESE TURNED PAD - {0xA9CE, 0x0, propertyUNASSIGNED}, // - {0xA9CF, 0xA9D9, propertyPVALID}, // JAVANESE PANGRANGKEP..JAVANESE DIGIT NINE - {0xA9DA, 0xA9DD, propertyUNASSIGNED}, // .. - {0xA9DE, 0xA9DF, propertyDISALLOWED}, // JAVANESE PADA TIRTA TUMETES..JAVANESE PADA I - {0xA9E0, 0xA9FF, propertyUNASSIGNED}, // .. - {0xAA00, 0xAA36, propertyPVALID}, // CHAM LETTER A..CHAM CONSONANT SIGN WA - {0xAA37, 0xAA3F, propertyUNASSIGNED}, // .. - {0xAA40, 0xAA4D, propertyPVALID}, // CHAM LETTER FINAL K..CHAM CONSONANT SIGN FIN - {0xAA4E, 0xAA4F, propertyUNASSIGNED}, // .. - {0xAA50, 0xAA59, propertyPVALID}, // CHAM DIGIT ZERO..CHAM DIGIT NINE - {0xAA5A, 0xAA5B, propertyUNASSIGNED}, // .. - {0xAA5C, 0xAA5F, propertyDISALLOWED}, // CHAM PUNCTUATION SPIRAL..CHAM PUNCTUATION TR - {0xAA60, 0xAA76, propertyPVALID}, // MYANMAR LETTER KHAMTI GA..MYANMAR LOGOGRAM K - {0xAA77, 0xAA79, propertyDISALLOWED}, // MYANMAR SYMBOL AITON EXCLAMATION..MYANMAR SY - {0xAA7A, 0xAA7B, propertyPVALID}, // MYANMAR LETTER AITON RA..MYANMAR SIGN PAO KA - {0xAA7C, 0xAA7F, propertyUNASSIGNED}, // .. - {0xAA80, 0xAAC2, propertyPVALID}, // TAI VIET LETTER LOW KO..TAI VIET TONE MAI SO - {0xAAC3, 0xAADA, propertyUNASSIGNED}, // .. - {0xAADB, 0xAADD, propertyPVALID}, // TAI VIET SYMBOL KON..TAI VIET SYMBOL SAM - {0xAADE, 0xAADF, propertyDISALLOWED}, // TAI VIET SYMBOL HO HOI..TAI VIET SYMBOL KOI - {0xAAE0, 0xABBF, propertyUNASSIGNED}, // .. 
- {0xABC0, 0xABEA, propertyPVALID}, // MEETEI MAYEK LETTER KOK..MEETEI MAYEK VOWEL - {0xABEB, 0x0, propertyDISALLOWED}, // MEETEI MAYEK CHEIKHEI - {0xABEC, 0xABED, propertyPVALID}, // MEETEI MAYEK LUM IYEK..MEETEI MAYEK APUN IYE - {0xABEE, 0xABEF, propertyUNASSIGNED}, // .. - {0xABF0, 0xABF9, propertyPVALID}, // MEETEI MAYEK DIGIT ZERO..MEETEI MAYEK DIGIT - {0xABFA, 0xABFF, propertyUNASSIGNED}, // .. - {0xAC00, 0xD7A3, propertyPVALID}, // .. - {0xD7A4, 0xD7AF, propertyUNASSIGNED}, // .. - {0xD7B0, 0xD7C6, propertyDISALLOWED}, // HANGUL JUNGSEONG O-YEO..HANGUL JUNGSEONG ARA - {0xD7C7, 0xD7CA, propertyUNASSIGNED}, // .. - {0xD7CB, 0xD7FB, propertyDISALLOWED}, // HANGUL JONGSEONG NIEUN-RIEUL..HANGUL JONGSEO - {0xD7FC, 0xD7FF, propertyUNASSIGNED}, // .. - {0xD800, 0xFA0D, propertyDISALLOWED}, // ..CJK COMPAT - {0xFA0E, 0xFA0F, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA0E..CJK COMPAT - {0xFA10, 0x0, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA10 - {0xFA11, 0x0, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA11 - {0xFA12, 0x0, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA12 - {0xFA13, 0xFA14, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA13..CJK COMPAT - {0xFA15, 0xFA1E, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA15..CJK COMPAT - {0xFA1F, 0x0, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA1F - {0xFA20, 0x0, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA20 - {0xFA21, 0x0, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA21 - {0xFA22, 0x0, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA22 - {0xFA23, 0xFA24, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA23..CJK COMPAT - {0xFA25, 0xFA26, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA25..CJK COMPAT - {0xFA27, 0xFA29, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA27..CJK COMPAT - {0xFA2A, 0xFA2D, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA2A..CJK COMPAT - {0xFA2E, 0xFA2F, propertyUNASSIGNED}, // .. - {0xFA30, 0xFA6D, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA30..CJK COMPAT - {0xFA6E, 0xFA6F, propertyUNASSIGNED}, // .. - {0xFA70, 0xFAD9, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA70..CJK COMPAT - {0xFADA, 0xFAFF, propertyUNASSIGNED}, // .. - {0xFB00, 0xFB06, propertyDISALLOWED}, // LATIN SMALL LIGATURE FF..LATIN SMALL LIGATUR - {0xFB07, 0xFB12, propertyUNASSIGNED}, // .. - {0xFB13, 0xFB17, propertyDISALLOWED}, // ARMENIAN SMALL LIGATURE MEN NOW..ARMENIAN SM - {0xFB18, 0xFB1C, propertyUNASSIGNED}, // .. - {0xFB1D, 0x0, propertyDISALLOWED}, // HEBREW LETTER YOD WITH HIRIQ - {0xFB1E, 0x0, propertyPVALID}, // HEBREW POINT JUDEO-SPANISH VARIKA - {0xFB1F, 0xFB36, propertyDISALLOWED}, // HEBREW LIGATURE YIDDISH YOD YOD PATAH..HEBRE - {0xFB37, 0x0, propertyUNASSIGNED}, // - {0xFB38, 0xFB3C, propertyDISALLOWED}, // HEBREW LETTER TET WITH DAGESH..HEBREW LETTER - {0xFB3D, 0x0, propertyUNASSIGNED}, // - {0xFB3E, 0x0, propertyDISALLOWED}, // HEBREW LETTER MEM WITH DAGESH - {0xFB3F, 0x0, propertyUNASSIGNED}, // - {0xFB40, 0xFB41, propertyDISALLOWED}, // HEBREW LETTER NUN WITH DAGESH..HEBREW LETTER - {0xFB42, 0x0, propertyUNASSIGNED}, // - {0xFB43, 0xFB44, propertyDISALLOWED}, // HEBREW LETTER FINAL PE WITH DAGESH..HEBREW L - {0xFB45, 0x0, propertyUNASSIGNED}, // - {0xFB46, 0xFBB1, propertyDISALLOWED}, // HEBREW LETTER TSADI WITH DAGESH..ARABIC LETT - {0xFBB2, 0xFBD2, propertyUNASSIGNED}, // .. - {0xFBD3, 0xFD3F, propertyDISALLOWED}, // ARABIC LETTER NG ISOLATED FORM..ORNATE RIGHT - {0xFD40, 0xFD4F, propertyUNASSIGNED}, // .. 
- {0xFD50, 0xFD8F, propertyDISALLOWED}, // ARABIC LIGATURE TEH WITH JEEM WITH MEEM INIT - {0xFD90, 0xFD91, propertyUNASSIGNED}, // .. - {0xFD92, 0xFDC7, propertyDISALLOWED}, // ARABIC LIGATURE MEEM WITH JEEM WITH KHAH INI - {0xFDC8, 0xFDCF, propertyUNASSIGNED}, // .. - {0xFDD0, 0xFDFD, propertyDISALLOWED}, // ..ARABIC LIGATURE BISMILLAH AR - {0xFDFE, 0xFDFF, propertyUNASSIGNED}, // .. - {0xFE00, 0xFE19, propertyDISALLOWED}, // VARIATION SELECTOR-1..PRESENTATION FORM FOR - {0xFE1A, 0xFE1F, propertyUNASSIGNED}, // .. - {0xFE20, 0xFE26, propertyPVALID}, // COMBINING LIGATURE LEFT HALF..COMBINING CONJ - {0xFE27, 0xFE2F, propertyUNASSIGNED}, // .. - {0xFE30, 0xFE52, propertyDISALLOWED}, // PRESENTATION FORM FOR VERTICAL TWO DOT LEADE - {0xFE53, 0x0, propertyUNASSIGNED}, // - {0xFE54, 0xFE66, propertyDISALLOWED}, // SMALL SEMICOLON..SMALL EQUALS SIGN - {0xFE67, 0x0, propertyUNASSIGNED}, // - {0xFE68, 0xFE6B, propertyDISALLOWED}, // SMALL REVERSE SOLIDUS..SMALL COMMERCIAL AT - {0xFE6C, 0xFE6F, propertyUNASSIGNED}, // .. - {0xFE70, 0xFE72, propertyDISALLOWED}, // ARABIC FATHATAN ISOLATED FORM..ARABIC DAMMAT - {0xFE73, 0x0, propertyPVALID}, // ARABIC TAIL FRAGMENT - {0xFE74, 0x0, propertyDISALLOWED}, // ARABIC KASRATAN ISOLATED FORM - {0xFE75, 0x0, propertyUNASSIGNED}, // - {0xFE76, 0xFEFC, propertyDISALLOWED}, // ARABIC FATHA ISOLATED FORM..ARABIC LIGATURE - {0xFEFD, 0xFEFE, propertyUNASSIGNED}, // .. - {0xFEFF, 0x0, propertyDISALLOWED}, // ZERO WIDTH NO-BREAK SPACE - {0xFF00, 0x0, propertyUNASSIGNED}, // - {0xFF01, 0xFFBE, propertyDISALLOWED}, // FULLWIDTH EXCLAMATION MARK..HALFWIDTH HANGUL - {0xFFBF, 0xFFC1, propertyUNASSIGNED}, // .. - {0xFFC2, 0xFFC7, propertyDISALLOWED}, // HALFWIDTH HANGUL LETTER A..HALFWIDTH HANGUL - {0xFFC8, 0xFFC9, propertyUNASSIGNED}, // .. - {0xFFCA, 0xFFCF, propertyDISALLOWED}, // HALFWIDTH HANGUL LETTER YEO..HALFWIDTH HANGU - {0xFFD0, 0xFFD1, propertyUNASSIGNED}, // .. - {0xFFD2, 0xFFD7, propertyDISALLOWED}, // HALFWIDTH HANGUL LETTER YO..HALFWIDTH HANGUL - {0xFFD8, 0xFFD9, propertyUNASSIGNED}, // .. - {0xFFDA, 0xFFDC, propertyDISALLOWED}, // HALFWIDTH HANGUL LETTER EU..HALFWIDTH HANGUL - {0xFFDD, 0xFFDF, propertyUNASSIGNED}, // .. - {0xFFE0, 0xFFE6, propertyDISALLOWED}, // FULLWIDTH CENT SIGN..FULLWIDTH WON SIGN - {0xFFE7, 0x0, propertyUNASSIGNED}, // - {0xFFE8, 0xFFEE, propertyDISALLOWED}, // HALFWIDTH FORMS LIGHT VERTICAL..HALFWIDTH WH - {0xFFEF, 0xFFF8, propertyUNASSIGNED}, // .. - {0xFFF9, 0xFFFF, propertyDISALLOWED}, // INTERLINEAR ANNOTATION ANCHOR.. - {0x1000D, 0x10026, propertyPVALID}, // LINEAR B SYLLABLE B036 JO..LINEAR B SYLLABLE - {0x10027, 0x0, propertyUNASSIGNED}, // - {0x10028, 0x1003A, propertyPVALID}, // LINEAR B SYLLABLE B060 RA..LINEAR B SYLLABLE - {0x1003B, 0x0, propertyUNASSIGNED}, // - {0x1003C, 0x1003D, propertyPVALID}, // LINEAR B SYLLABLE B017 ZA..LINEAR B SYLLABLE - {0x1003E, 0x0, propertyUNASSIGNED}, // - {0x1003F, 0x1004D, propertyPVALID}, // LINEAR B SYLLABLE B020 ZO..LINEAR B SYLLABLE - {0x1004E, 0x1004F, propertyUNASSIGNED}, // .. - {0x10050, 0x1005D, propertyPVALID}, // LINEAR B SYMBOL B018..LINEAR B SYMBOL B089 - {0x1005E, 0x1007F, propertyUNASSIGNED}, // .. - {0x10080, 0x100FA, propertyPVALID}, // LINEAR B IDEOGRAM B100 MAN..LINEAR B IDEOGRA - {0x100FB, 0x100FF, propertyUNASSIGNED}, // .. - {0x10100, 0x10102, propertyDISALLOWED}, // AEGEAN WORD SEPARATOR LINE..AEGEAN CHECK MAR - {0x10103, 0x10106, propertyUNASSIGNED}, // .. 
- {0x10107, 0x10133, propertyDISALLOWED}, // AEGEAN NUMBER ONE..AEGEAN NUMBER NINETY THOU - {0x10134, 0x10136, propertyUNASSIGNED}, // .. - {0x10137, 0x1018A, propertyDISALLOWED}, // AEGEAN WEIGHT BASE UNIT..GREEK ZERO SIGN - {0x1018B, 0x1018F, propertyUNASSIGNED}, // .. - {0x10190, 0x1019B, propertyDISALLOWED}, // ROMAN SEXTANS SIGN..ROMAN CENTURIAL SIGN - {0x1019C, 0x101CF, propertyUNASSIGNED}, // .. - {0x101D0, 0x101FC, propertyDISALLOWED}, // PHAISTOS DISC SIGN PEDESTRIAN..PHAISTOS DISC - {0x101FD, 0x0, propertyPVALID}, // PHAISTOS DISC SIGN COMBINING OBLIQUE STROKE - {0x101FE, 0x1027F, propertyUNASSIGNED}, // .. - {0x10280, 0x1029C, propertyPVALID}, // LYCIAN LETTER A..LYCIAN LETTER X - {0x1029D, 0x1029F, propertyUNASSIGNED}, // .. - {0x102A0, 0x102D0, propertyPVALID}, // CARIAN LETTER A..CARIAN LETTER UUU3 - {0x102D1, 0x102FF, propertyUNASSIGNED}, // .. - {0x10300, 0x1031E, propertyPVALID}, // OLD ITALIC LETTER A..OLD ITALIC LETTER UU - {0x1031F, 0x0, propertyUNASSIGNED}, // - {0x10320, 0x10323, propertyDISALLOWED}, // OLD ITALIC NUMERAL ONE..OLD ITALIC NUMERAL F - {0x10324, 0x1032F, propertyUNASSIGNED}, // .. - {0x10330, 0x10340, propertyPVALID}, // GOTHIC LETTER AHSA..GOTHIC LETTER PAIRTHRA - {0x10341, 0x0, propertyDISALLOWED}, // GOTHIC LETTER NINETY - {0x10342, 0x10349, propertyPVALID}, // GOTHIC LETTER RAIDA..GOTHIC LETTER OTHAL - {0x1034A, 0x0, propertyDISALLOWED}, // GOTHIC LETTER NINE HUNDRED - {0x1034B, 0x1037F, propertyUNASSIGNED}, // .. - {0x10380, 0x1039D, propertyPVALID}, // UGARITIC LETTER ALPA..UGARITIC LETTER SSU - {0x1039E, 0x0, propertyUNASSIGNED}, // - {0x1039F, 0x0, propertyDISALLOWED}, // UGARITIC WORD DIVIDER - {0x103A0, 0x103C3, propertyPVALID}, // OLD PERSIAN SIGN A..OLD PERSIAN SIGN HA - {0x103C4, 0x103C7, propertyUNASSIGNED}, // .. - {0x103C8, 0x103CF, propertyPVALID}, // OLD PERSIAN SIGN AURAMAZDAA..OLD PERSIAN SIG - {0x103D0, 0x103D5, propertyDISALLOWED}, // OLD PERSIAN WORD DIVIDER..OLD PERSIAN NUMBER - {0x103D6, 0x103FF, propertyUNASSIGNED}, // .. - {0x10400, 0x10427, propertyDISALLOWED}, // DESERET CAPITAL LETTER LONG I..DESERET CAPIT - {0x10428, 0x1049D, propertyPVALID}, // DESERET SMALL LETTER LONG I..OSMANYA LETTER - {0x1049E, 0x1049F, propertyUNASSIGNED}, // .. - {0x104A0, 0x104A9, propertyPVALID}, // OSMANYA DIGIT ZERO..OSMANYA DIGIT NINE - {0x104AA, 0x107FF, propertyUNASSIGNED}, // .. - {0x10800, 0x10805, propertyPVALID}, // CYPRIOT SYLLABLE A..CYPRIOT SYLLABLE JA - {0x10806, 0x10807, propertyUNASSIGNED}, // .. - {0x10808, 0x0, propertyPVALID}, // CYPRIOT SYLLABLE JO - {0x10809, 0x0, propertyUNASSIGNED}, // - {0x1080A, 0x10835, propertyPVALID}, // CYPRIOT SYLLABLE KA..CYPRIOT SYLLABLE WO - {0x10836, 0x0, propertyUNASSIGNED}, // - {0x10837, 0x10838, propertyPVALID}, // CYPRIOT SYLLABLE XA..CYPRIOT SYLLABLE XE - {0x10839, 0x1083B, propertyUNASSIGNED}, // .. - {0x1083C, 0x0, propertyPVALID}, // CYPRIOT SYLLABLE ZA - {0x1083D, 0x1083E, propertyUNASSIGNED}, // .. - {0x1083F, 0x10855, propertyPVALID}, // CYPRIOT SYLLABLE ZO..IMPERIAL ARAMAIC LETTER - {0x10856, 0x0, propertyUNASSIGNED}, // - {0x10857, 0x1085F, propertyDISALLOWED}, // IMPERIAL ARAMAIC SECTION SIGN..IMPERIAL ARAM - {0x10860, 0x108FF, propertyUNASSIGNED}, // .. - {0x10900, 0x10915, propertyPVALID}, // PHOENICIAN LETTER ALF..PHOENICIAN LETTER TAU - {0x10916, 0x1091B, propertyDISALLOWED}, // PHOENICIAN NUMBER ONE..PHOENICIAN NUMBER THR - {0x1091C, 0x1091E, propertyUNASSIGNED}, // .. 
- {0x1091F, 0x0, propertyDISALLOWED}, // PHOENICIAN WORD SEPARATOR - {0x10920, 0x10939, propertyPVALID}, // LYDIAN LETTER A..LYDIAN LETTER C - {0x1093A, 0x1093E, propertyUNASSIGNED}, // .. - {0x1093F, 0x0, propertyDISALLOWED}, // LYDIAN TRIANGULAR MARK - {0x10940, 0x109FF, propertyUNASSIGNED}, // .. - {0x10A00, 0x10A03, propertyPVALID}, // KHAROSHTHI LETTER A..KHAROSHTHI VOWEL SIGN V - {0x10A04, 0x0, propertyUNASSIGNED}, // - {0x10A05, 0x10A06, propertyPVALID}, // KHAROSHTHI VOWEL SIGN E..KHAROSHTHI VOWEL SI - {0x10A07, 0x10A0B, propertyUNASSIGNED}, // .. - {0x10A0C, 0x10A13, propertyPVALID}, // KHAROSHTHI VOWEL LENGTH MARK..KHAROSHTHI LET - {0x10A14, 0x0, propertyUNASSIGNED}, // - {0x10A15, 0x10A17, propertyPVALID}, // KHAROSHTHI LETTER CA..KHAROSHTHI LETTER JA - {0x10A18, 0x0, propertyUNASSIGNED}, // - {0x10A19, 0x10A33, propertyPVALID}, // KHAROSHTHI LETTER NYA..KHAROSHTHI LETTER TTT - {0x10A34, 0x10A37, propertyUNASSIGNED}, // .. - {0x10A38, 0x10A3A, propertyPVALID}, // KHAROSHTHI SIGN BAR ABOVE..KHAROSHTHI SIGN D - {0x10A3B, 0x10A3E, propertyUNASSIGNED}, // .. - {0x10A3F, 0x0, propertyPVALID}, // KHAROSHTHI VIRAMA - {0x10A40, 0x10A47, propertyDISALLOWED}, // KHAROSHTHI DIGIT ONE..KHAROSHTHI NUMBER ONE - {0x10A48, 0x10A4F, propertyUNASSIGNED}, // .. - {0x10A50, 0x10A58, propertyDISALLOWED}, // KHAROSHTHI PUNCTUATION DOT..KHAROSHTHI PUNCT - {0x10A59, 0x10A5F, propertyUNASSIGNED}, // .. - {0x10A60, 0x10A7C, propertyPVALID}, // OLD SOUTH ARABIAN LETTER HE..OLD SOUTH ARABI - {0x10A7D, 0x10A7F, propertyDISALLOWED}, // OLD SOUTH ARABIAN NUMBER ONE..OLD SOUTH ARAB - {0x10A80, 0x10AFF, propertyUNASSIGNED}, // .. - {0x10B00, 0x10B35, propertyPVALID}, // AVESTAN LETTER A..AVESTAN LETTER HE - {0x10B36, 0x10B38, propertyUNASSIGNED}, // .. - {0x10B39, 0x10B3F, propertyDISALLOWED}, // AVESTAN ABBREVIATION MARK..LARGE ONE RING OV - {0x10B40, 0x10B55, propertyPVALID}, // INSCRIPTIONAL PARTHIAN LETTER ALEPH..INSCRIP - {0x10B56, 0x10B57, propertyUNASSIGNED}, // .. - {0x10B58, 0x10B5F, propertyDISALLOWED}, // INSCRIPTIONAL PARTHIAN NUMBER ONE..INSCRIPTI - {0x10B60, 0x10B72, propertyPVALID}, // INSCRIPTIONAL PAHLAVI LETTER ALEPH..INSCRIPT - {0x10B73, 0x10B77, propertyUNASSIGNED}, // .. - {0x10B78, 0x10B7F, propertyDISALLOWED}, // INSCRIPTIONAL PAHLAVI NUMBER ONE..INSCRIPTIO - {0x10B80, 0x10BFF, propertyUNASSIGNED}, // .. - {0x10C00, 0x10C48, propertyPVALID}, // OLD TURKIC LETTER ORKHON A..OLD TURKIC LETTE - {0x10C49, 0x10E5F, propertyUNASSIGNED}, // .. - {0x10E60, 0x10E7E, propertyDISALLOWED}, // RUMI DIGIT ONE..RUMI FRACTION TWO THIRDS - {0x10E7F, 0x1107F, propertyUNASSIGNED}, // .. - {0x11080, 0x110BA, propertyPVALID}, // KAITHI SIGN CANDRABINDU..KAITHI SIGN NUKTA - {0x110BB, 0x110C1, propertyDISALLOWED}, // KAITHI ABBREVIATION SIGN..KAITHI DOUBLE DAND - {0x110C2, 0x11FFF, propertyUNASSIGNED}, // .. - {0x12000, 0x1236E, propertyPVALID}, // CUNEIFORM SIGN A..CUNEIFORM SIGN ZUM - {0x1236F, 0x123FF, propertyUNASSIGNED}, // .. - {0x12400, 0x12462, propertyDISALLOWED}, // CUNEIFORM NUMERIC SIGN TWO ASH..CUNEIFORM NU - {0x12463, 0x1246F, propertyUNASSIGNED}, // .. - {0x12470, 0x12473, propertyDISALLOWED}, // CUNEIFORM PUNCTUATION SIGN OLD ASSYRIAN WORD - {0x12474, 0x12FFF, propertyUNASSIGNED}, // .. - {0x13000, 0x1342E, propertyPVALID}, // EGYPTIAN HIEROGLYPH A001..EGYPTIAN HIEROGLYP - {0x1342F, 0x1CFFF, propertyUNASSIGNED}, // .. - {0x1D000, 0x1D0F5, propertyDISALLOWED}, // BYZANTINE MUSICAL SYMBOL PSILI..BYZANTINE MU - {0x1D0F6, 0x1D0FF, propertyUNASSIGNED}, // .. 
- {0x1D100, 0x1D126, propertyDISALLOWED}, // MUSICAL SYMBOL SINGLE BARLINE..MUSICAL SYMBO - {0x1D127, 0x1D128, propertyUNASSIGNED}, // .. - {0x1D129, 0x1D1DD, propertyDISALLOWED}, // MUSICAL SYMBOL MULTIPLE MEASURE REST..MUSICA - {0x1D1DE, 0x1D1FF, propertyUNASSIGNED}, // .. - {0x1D200, 0x1D245, propertyDISALLOWED}, // GREEK VOCAL NOTATION SYMBOL-1..GREEK MUSICAL - {0x1D246, 0x1D2FF, propertyUNASSIGNED}, // .. - {0x1D300, 0x1D356, propertyDISALLOWED}, // MONOGRAM FOR EARTH..TETRAGRAM FOR FOSTERING - {0x1D357, 0x1D35F, propertyUNASSIGNED}, // .. - {0x1D360, 0x1D371, propertyDISALLOWED}, // COUNTING ROD UNIT DIGIT ONE..COUNTING ROD TE - {0x1D372, 0x1D3FF, propertyUNASSIGNED}, // .. - {0x1D400, 0x1D454, propertyDISALLOWED}, // MATHEMATICAL BOLD CAPITAL A..MATHEMATICAL IT - {0x1D455, 0x0, propertyUNASSIGNED}, // - {0x1D456, 0x1D49C, propertyDISALLOWED}, // MATHEMATICAL ITALIC SMALL I..MATHEMATICAL SC - {0x1D49D, 0x0, propertyUNASSIGNED}, // - {0x1D49E, 0x1D49F, propertyDISALLOWED}, // MATHEMATICAL SCRIPT CAPITAL C..MATHEMATICAL - {0x1D4A0, 0x1D4A1, propertyUNASSIGNED}, // .. - {0x1D4A2, 0x0, propertyDISALLOWED}, // MATHEMATICAL SCRIPT CAPITAL G - {0x1D4A3, 0x1D4A4, propertyUNASSIGNED}, // .. - {0x1D4A5, 0x1D4A6, propertyDISALLOWED}, // MATHEMATICAL SCRIPT CAPITAL J..MATHEMATICAL - {0x1D4A7, 0x1D4A8, propertyUNASSIGNED}, // .. - {0x1D4A9, 0x1D4AC, propertyDISALLOWED}, // MATHEMATICAL SCRIPT CAPITAL N..MATHEMATICAL - {0x1D4AD, 0x0, propertyUNASSIGNED}, // - {0x1D4AE, 0x1D4B9, propertyDISALLOWED}, // MATHEMATICAL SCRIPT CAPITAL S..MATHEMATICAL - {0x1D4BA, 0x0, propertyUNASSIGNED}, // - {0x1D4BB, 0x0, propertyDISALLOWED}, // MATHEMATICAL SCRIPT SMALL F - {0x1D4BC, 0x0, propertyUNASSIGNED}, // - {0x1D4BD, 0x1D4C3, propertyDISALLOWED}, // MATHEMATICAL SCRIPT SMALL H..MATHEMATICAL SC - {0x1D4C4, 0x0, propertyUNASSIGNED}, // - {0x1D4C5, 0x1D505, propertyDISALLOWED}, // MATHEMATICAL SCRIPT SMALL P..MATHEMATICAL FR - {0x1D506, 0x0, propertyUNASSIGNED}, // - {0x1D507, 0x1D50A, propertyDISALLOWED}, // MATHEMATICAL FRAKTUR CAPITAL D..MATHEMATICAL - {0x1D50B, 0x1D50C, propertyUNASSIGNED}, // .. - {0x1D50D, 0x1D514, propertyDISALLOWED}, // MATHEMATICAL FRAKTUR CAPITAL J..MATHEMATICAL - {0x1D515, 0x0, propertyUNASSIGNED}, // - {0x1D516, 0x1D51C, propertyDISALLOWED}, // MATHEMATICAL FRAKTUR CAPITAL S..MATHEMATICAL - {0x1D51D, 0x0, propertyUNASSIGNED}, // - {0x1D51E, 0x1D539, propertyDISALLOWED}, // MATHEMATICAL FRAKTUR SMALL A..MATHEMATICAL D - {0x1D53A, 0x0, propertyUNASSIGNED}, // - {0x1D53B, 0x1D53E, propertyDISALLOWED}, // MATHEMATICAL DOUBLE-STRUCK CAPITAL D..MATHEM - {0x1D53F, 0x0, propertyUNASSIGNED}, // - {0x1D540, 0x1D544, propertyDISALLOWED}, // MATHEMATICAL DOUBLE-STRUCK CAPITAL I..MATHEM - {0x1D545, 0x0, propertyUNASSIGNED}, // - {0x1D546, 0x0, propertyDISALLOWED}, // MATHEMATICAL DOUBLE-STRUCK CAPITAL O - {0x1D547, 0x1D549, propertyUNASSIGNED}, // .. - {0x1D54A, 0x1D550, propertyDISALLOWED}, // MATHEMATICAL DOUBLE-STRUCK CAPITAL S..MATHEM - {0x1D551, 0x0, propertyUNASSIGNED}, // - {0x1D552, 0x1D6A5, propertyDISALLOWED}, // MATHEMATICAL DOUBLE-STRUCK SMALL A..MATHEMAT - {0x1D6A6, 0x1D6A7, propertyUNASSIGNED}, // .. - {0x1D6A8, 0x1D7CB, propertyDISALLOWED}, // MATHEMATICAL BOLD CAPITAL ALPHA..MATHEMATICA - {0x1D7CC, 0x1D7CD, propertyUNASSIGNED}, // .. - {0x1D7CE, 0x1D7FF, propertyDISALLOWED}, // MATHEMATICAL BOLD DIGIT ZERO..MATHEMATICAL M - {0x1D800, 0x1EFFF, propertyUNASSIGNED}, // .. 
- {0x1F000, 0x1F02B, propertyDISALLOWED}, // MAHJONG TILE EAST WIND..MAHJONG TILE BACK - {0x1F02C, 0x1F02F, propertyUNASSIGNED}, // .. - {0x1F030, 0x1F093, propertyDISALLOWED}, // DOMINO TILE HORIZONTAL BACK..DOMINO TILE VER - {0x1F094, 0x1F0FF, propertyUNASSIGNED}, // .. - {0x1F100, 0x1F10A, propertyDISALLOWED}, // DIGIT ZERO FULL STOP..DIGIT NINE COMMA - {0x1F10B, 0x1F10F, propertyUNASSIGNED}, // .. - {0x1F110, 0x1F12E, propertyDISALLOWED}, // PARENTHESIZED LATIN CAPITAL LETTER A..CIRCLE - {0x1F12F, 0x1F130, propertyUNASSIGNED}, // .. - {0x1F131, 0x0, propertyDISALLOWED}, // SQUARED LATIN CAPITAL LETTER B - {0x1F132, 0x1F13C, propertyUNASSIGNED}, // .. - {0x1F13D, 0x0, propertyDISALLOWED}, // SQUARED LATIN CAPITAL LETTER N - {0x1F13E, 0x0, propertyUNASSIGNED}, // - {0x1F13F, 0x0, propertyDISALLOWED}, // SQUARED LATIN CAPITAL LETTER P - {0x1F140, 0x1F141, propertyUNASSIGNED}, // .. - {0x1F142, 0x0, propertyDISALLOWED}, // SQUARED LATIN CAPITAL LETTER S - {0x1F143, 0x1F145, propertyUNASSIGNED}, // .. - {0x1F146, 0x0, propertyDISALLOWED}, // SQUARED LATIN CAPITAL LETTER W - {0x1F147, 0x1F149, propertyUNASSIGNED}, // .. - {0x1F14A, 0x1F14E, propertyDISALLOWED}, // SQUARED HV..SQUARED PPV - {0x1F14F, 0x1F156, propertyUNASSIGNED}, // .. - {0x1F157, 0x0, propertyDISALLOWED}, // NEGATIVE CIRCLED LATIN CAPITAL LETTER H - {0x1F158, 0x1F15E, propertyUNASSIGNED}, // .. - {0x1F15F, 0x0, propertyDISALLOWED}, // NEGATIVE CIRCLED LATIN CAPITAL LETTER P - {0x1F160, 0x1F178, propertyUNASSIGNED}, // .. - {0x1F179, 0x0, propertyDISALLOWED}, // NEGATIVE SQUARED LATIN CAPITAL LETTER J - {0x1F17A, 0x0, propertyUNASSIGNED}, // - {0x1F17B, 0x1F17C, propertyDISALLOWED}, // NEGATIVE SQUARED LATIN CAPITAL LETTER L..NEG - {0x1F17D, 0x1F17E, propertyUNASSIGNED}, // .. - {0x1F17F, 0x0, propertyDISALLOWED}, // NEGATIVE SQUARED LATIN CAPITAL LETTER P - {0x1F180, 0x1F189, propertyUNASSIGNED}, // .. - {0x1F18A, 0x1F18D, propertyDISALLOWED}, // CROSSED NEGATIVE SQUARED LATIN CAPITAL LETTE - {0x1F18E, 0x1F18F, propertyUNASSIGNED}, // .. - {0x1F190, 0x0, propertyDISALLOWED}, // SQUARE DJ - {0x1F191, 0x1F1FF, propertyUNASSIGNED}, // .. - {0x1F200, 0x0, propertyDISALLOWED}, // SQUARE HIRAGANA HOKA - {0x1F201, 0x1F20F, propertyUNASSIGNED}, // .. - {0x1F210, 0x1F231, propertyDISALLOWED}, // SQUARED CJK UNIFIED IDEOGRAPH-624B..SQUARED - {0x1F232, 0x1F23F, propertyUNASSIGNED}, // .. - {0x1F240, 0x1F248, propertyDISALLOWED}, // TORTOISE SHELL BRACKETED CJK UNIFIED IDEOGRA - {0x1F249, 0x1FFFD, propertyUNASSIGNED}, // .. - {0x1FFFE, 0x1FFFF, propertyDISALLOWED}, // .. - {0x20000, 0x2A6D6, propertyPVALID}, // .... - {0x2A700, 0x2B734, propertyPVALID}, // .... - {0x2F800, 0x2FA1D, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-2F800..CJK COMPA - {0x2FA1E, 0x2FFFD, propertyUNASSIGNED}, // .. - {0x2FFFE, 0x2FFFF, propertyDISALLOWED}, // .. - {0x30000, 0x3FFFD, propertyUNASSIGNED}, // .. - {0x3FFFE, 0x3FFFF, propertyDISALLOWED}, // .. - {0x40000, 0x4FFFD, propertyUNASSIGNED}, // .. - {0x4FFFE, 0x4FFFF, propertyDISALLOWED}, // .. - {0x50000, 0x5FFFD, propertyUNASSIGNED}, // .. - {0x5FFFE, 0x5FFFF, propertyDISALLOWED}, // .. - {0x60000, 0x6FFFD, propertyUNASSIGNED}, // .. - {0x6FFFE, 0x6FFFF, propertyDISALLOWED}, // .. - {0x70000, 0x7FFFD, propertyUNASSIGNED}, // .. - {0x7FFFE, 0x7FFFF, propertyDISALLOWED}, // .. - {0x80000, 0x8FFFD, propertyUNASSIGNED}, // .. - {0x8FFFE, 0x8FFFF, propertyDISALLOWED}, // .. - {0x90000, 0x9FFFD, propertyUNASSIGNED}, // .. - {0x9FFFE, 0x9FFFF, propertyDISALLOWED}, // .. 
- {0xA0000, 0xAFFFD, propertyUNASSIGNED}, // .. - {0xAFFFE, 0xAFFFF, propertyDISALLOWED}, // .. - {0xB0000, 0xBFFFD, propertyUNASSIGNED}, // .. - {0xBFFFE, 0xBFFFF, propertyDISALLOWED}, // .. - {0xC0000, 0xCFFFD, propertyUNASSIGNED}, // .. - {0xCFFFE, 0xCFFFF, propertyDISALLOWED}, // .. - {0xD0000, 0xDFFFD, propertyUNASSIGNED}, // .. - {0xDFFFE, 0xDFFFF, propertyDISALLOWED}, // .. - {0xE0000, 0x0, propertyUNASSIGNED}, // - {0xE0001, 0x0, propertyDISALLOWED}, // LANGUAGE TAG - {0xE0002, 0xE001F, propertyUNASSIGNED}, // .. - {0xE0020, 0xE007F, propertyDISALLOWED}, // TAG SPACE..CANCEL TAG - {0xE0080, 0xE00FF, propertyUNASSIGNED}, // .. - {0xE0100, 0xE01EF, propertyDISALLOWED}, // VARIATION SELECTOR-17..VARIATION SELECTOR-25 - {0xE01F0, 0xEFFFD, propertyUNASSIGNED}, // .. - {0xEFFFE, 0x10FFFF, propertyDISALLOWED}, // .. -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/miekg/dns/idn/punycode.go b/libnetwork/Godeps/_workspace/src/github.com/miekg/dns/idn/punycode.go deleted file mode 100644 index 7e5c263fc8..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/miekg/dns/idn/punycode.go +++ /dev/null @@ -1,373 +0,0 @@ -// Package idn implements encoding from and to punycode as speficied by RFC 3492. -package idn - -import ( - "bytes" - "strings" - "unicode" - "unicode/utf8" - - "github.com/miekg/dns" -) - -// Implementation idea from RFC itself and from from IDNA::Punycode created by -// Tatsuhiko Miyagawa and released under Perl Artistic -// License in 2002. - -const ( - _MIN rune = 1 - _MAX rune = 26 - _SKEW rune = 38 - _BASE rune = 36 - _BIAS rune = 72 - _N rune = 128 - _DAMP rune = 700 - - _DELIMITER = '-' - _PREFIX = "xn--" -) - -// ToPunycode converts unicode domain names to DNS-appropriate punycode names. -// This function will return an empty string result for domain names with -// invalid unicode strings. This function expects domain names in lowercase. -func ToPunycode(s string) string { - // Early check to see if encoding is needed. - // This will prevent making heap allocations when not needed. - if !needToPunycode(s) { - return s - } - - tokens := dns.SplitDomainName(s) - switch { - case s == "": - return "" - case tokens == nil: // s == . - return "." - case s[len(s)-1] == '.': - tokens = append(tokens, "") - } - - for i := range tokens { - t := encode([]byte(tokens[i])) - if t == nil { - return "" - } - tokens[i] = string(t) - } - return strings.Join(tokens, ".") -} - -// FromPunycode returns unicode domain name from provided punycode string. -// This function expects punycode strings in lowercase. -func FromPunycode(s string) string { - // Early check to see if decoding is needed. - // This will prevent making heap allocations when not needed. - if !needFromPunycode(s) { - return s - } - - tokens := dns.SplitDomainName(s) - switch { - case s == "": - return "" - case tokens == nil: // s == . - return "." - case s[len(s)-1] == '.': - tokens = append(tokens, "") - } - for i := range tokens { - tokens[i] = string(decode([]byte(tokens[i]))) - } - return strings.Join(tokens, ".") -} - -// digitval converts single byte into meaningful value that's used to calculate decoded unicode character. -const errdigit = 0xffff - -func digitval(code rune) rune { - switch { - case code >= 'A' && code <= 'Z': - return code - 'A' - case code >= 'a' && code <= 'z': - return code - 'a' - case code >= '0' && code <= '9': - return code - '0' + 26 - } - return errdigit -} - -// lettercode finds BASE36 byte (a-z0-9) based on calculated number. 
-func lettercode(digit rune) rune { - switch { - case digit >= 0 && digit <= 25: - return digit + 'a' - case digit >= 26 && digit <= 36: - return digit - 26 + '0' - } - panic("dns: not reached") -} - -// adapt calculates next bias to be used for next iteration delta. -func adapt(delta rune, numpoints int, firsttime bool) rune { - if firsttime { - delta /= _DAMP - } else { - delta /= 2 - } - - var k rune - for delta = delta + delta/rune(numpoints); delta > (_BASE-_MIN)*_MAX/2; k += _BASE { - delta /= _BASE - _MIN - } - - return k + ((_BASE-_MIN+1)*delta)/(delta+_SKEW) -} - -// next finds minimal rune (one with lowest codepoint value) that should be equal or above boundary. -func next(b []rune, boundary rune) rune { - if len(b) == 0 { - panic("dns: invalid set of runes to determine next one") - } - m := b[0] - for _, x := range b[1:] { - if x >= boundary && (m < boundary || x < m) { - m = x - } - } - return m -} - -// preprune converts unicode rune to lower case. At this time it's not -// supporting all things described in RFCs. -func preprune(r rune) rune { - if unicode.IsUpper(r) { - r = unicode.ToLower(r) - } - return r -} - -// tfunc is a function that helps calculate each character weight. -func tfunc(k, bias rune) rune { - switch { - case k <= bias: - return _MIN - case k >= bias+_MAX: - return _MAX - } - return k - bias -} - -// needToPunycode returns true for strings that require punycode encoding -// (contain unicode characters). -func needToPunycode(s string) bool { - // This function is very similar to bytes.Runes. We don't use bytes.Runes - // because it makes a heap allocation that's not needed here. - for i := 0; len(s) > 0; i++ { - r, l := utf8.DecodeRuneInString(s) - if r > 0x7f { - return true - } - s = s[l:] - } - return false -} - -// needFromPunycode returns true for strings that require punycode decoding. -func needFromPunycode(s string) bool { - if s == "." { - return false - } - - off := 0 - end := false - pl := len(_PREFIX) - sl := len(s) - - // If s starts with _PREFIX. - if sl > pl && s[off:off+pl] == _PREFIX { - return true - } - - for { - // Find the part after the next ".". - off, end = dns.NextLabel(s, off) - if end { - return false - } - // If this parts starts with _PREFIX. - if sl-off > pl && s[off:off+pl] == _PREFIX { - return true - } - } -} - -// encode transforms Unicode input bytes (that represent DNS label) into -// punycode bytestream. This function would return nil if there's an invalid -// character in the label. 
-func encode(input []byte) []byte { - n, bias := _N, _BIAS - - b := bytes.Runes(input) - for i := range b { - if !isValidRune(b[i]) { - return nil - } - - b[i] = preprune(b[i]) - } - - basic := make([]byte, 0, len(b)) - for _, ltr := range b { - if ltr <= 0x7f { - basic = append(basic, byte(ltr)) - } - } - basiclen := len(basic) - fulllen := len(b) - if basiclen == fulllen { - return basic - } - - var out bytes.Buffer - - out.WriteString(_PREFIX) - if basiclen > 0 { - out.Write(basic) - out.WriteByte(_DELIMITER) - } - - var ( - ltr, nextltr rune - delta, q rune // delta calculation (see rfc) - t, k, cp rune // weight and codepoint calculation - ) - - s := &bytes.Buffer{} - for h := basiclen; h < fulllen; n, delta = n+1, delta+1 { - nextltr = next(b, n) - s.Truncate(0) - s.WriteRune(nextltr) - delta, n = delta+(nextltr-n)*rune(h+1), nextltr - - for _, ltr = range b { - if ltr < n { - delta++ - } - if ltr == n { - q = delta - for k = _BASE; ; k += _BASE { - t = tfunc(k, bias) - if q < t { - break - } - cp = t + ((q - t) % (_BASE - t)) - out.WriteRune(lettercode(cp)) - q = (q - t) / (_BASE - t) - } - - out.WriteRune(lettercode(q)) - - bias = adapt(delta, h+1, h == basiclen) - h, delta = h+1, 0 - } - } - } - return out.Bytes() -} - -// decode transforms punycode input bytes (that represent DNS label) into Unicode bytestream. -func decode(b []byte) []byte { - src := b // b would move and we need to keep it - - n, bias := _N, _BIAS - if !bytes.HasPrefix(b, []byte(_PREFIX)) { - return b - } - out := make([]rune, 0, len(b)) - b = b[len(_PREFIX):] - for pos := len(b) - 1; pos >= 0; pos-- { - // only last delimiter is our interest - if b[pos] == _DELIMITER { - out = append(out, bytes.Runes(b[:pos])...) - b = b[pos+1:] // trim source string - break - } - } - if len(b) == 0 { - return src - } - var ( - i, oldi, w rune - ch byte - t, digit rune - ln int - ) - - for i = 0; len(b) > 0; i++ { - oldi, w = i, 1 - for k := _BASE; len(b) > 0; k += _BASE { - ch, b = b[0], b[1:] - digit = digitval(rune(ch)) - if digit == errdigit { - return src - } - i += digit * w - if i < 0 { - // safety check for rune overflow - return src - } - - t = tfunc(k, bias) - if digit < t { - break - } - - w *= _BASE - t - } - ln = len(out) + 1 - bias = adapt(i-oldi, ln, oldi == 0) - n += i / rune(ln) - i = i % rune(ln) - // insert - out = append(out, 0) - copy(out[i+1:], out[i:]) - out[i] = n - } - - var ret bytes.Buffer - for _, r := range out { - ret.WriteRune(r) - } - return ret.Bytes() -} - -// isValidRune checks if the character is valid. We will look for the -// character property in the code points list. For now we aren't checking special -// rules in case of contextual property -func isValidRune(r rune) bool { - return findProperty(r) == propertyPVALID -} - -// findProperty will try to check the code point property of the given -// character. 
It will use a binary search algorithm as we have a slice of -// ordered ranges (average case performance O(log n)) -func findProperty(r rune) property { - imin, imax := 0, len(codePoints) - - for imax >= imin { - imid := (imin + imax) / 2 - - codePoint := codePoints[imid] - if (codePoint.start == r && codePoint.end == 0) || (codePoint.start <= r && codePoint.end >= r) { - return codePoint.state - } - - if (codePoint.end > 0 && codePoint.end < r) || (codePoint.end == 0 && codePoint.start < r) { - imin = imid + 1 - } else { - imax = imid - 1 - } - } - - return propertyUnknown -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/LICENSE b/libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/LICENSE new file mode 100644 index 0000000000..bc00498c52 --- /dev/null +++ b/libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2013, Samuel Stauffer +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. +* Neither the name of the author nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/cluster_test.go b/libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/cluster_test.go deleted file mode 100644 index b8571734ec..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/cluster_test.go +++ /dev/null @@ -1,166 +0,0 @@ -package zk - -import ( - "fmt" - "strings" - "testing" - "time" -) - -type logWriter struct { - t *testing.T - p string -} - -func (lw logWriter) Write(b []byte) (int, error) { - lw.t.Logf("%s%s", lw.p, string(b)) - return len(b), nil -} - -func TestBasicCluster(t *testing.T) { - ts, err := StartTestCluster(3, nil, logWriter{t: t, p: "[ZKERR] "}) - if err != nil { - t.Fatal(err) - } - defer ts.Stop() - zk1, err := ts.Connect(0) - if err != nil { - t.Fatalf("Connect returned error: %+v", err) - } - defer zk1.Close() - zk2, err := ts.Connect(1) - if err != nil { - t.Fatalf("Connect returned error: %+v", err) - } - defer zk2.Close() - - time.Sleep(time.Second * 5) - - if _, err := zk1.Create("/gozk-test", []byte("foo-cluster"), 0, WorldACL(PermAll)); err != nil { - t.Fatalf("Create failed on node 1: %+v", err) - } - if by, _, err := zk2.Get("/gozk-test"); err != nil { - t.Fatalf("Get failed on node 2: %+v", err) - } else if string(by) != "foo-cluster" { - t.Fatal("Wrong data for node 2") - } -} - -func TestClientClusterFailover(t *testing.T) { - ts, err := StartTestCluster(3, nil, logWriter{t: t, p: "[ZKERR] "}) - if err != nil { - t.Fatal(err) - } - defer ts.Stop() - zk, evCh, err := ts.ConnectAll() - if err != nil { - t.Fatalf("Connect returned error: %+v", err) - } - defer zk.Close() - - hasSession := make(chan string, 1) - go func() { - for ev := range evCh { - if ev.Type == EventSession && ev.State == StateHasSession { - select { - case hasSession <- ev.Server: - default: - } - } - } - }() - - waitSession := func() string { - select { - case srv := <-hasSession: - return srv - case <-time.After(time.Second * 8): - t.Fatal("Failed to connect and get a session") - } - return "" - } - - srv := waitSession() - if _, err := zk.Create("/gozk-test", []byte("foo-cluster"), 0, WorldACL(PermAll)); err != nil { - t.Fatalf("Create failed on node 1: %+v", err) - } - - stopped := false - for _, s := range ts.Servers { - if strings.HasSuffix(srv, fmt.Sprintf(":%d", s.Port)) { - s.Srv.Stop() - stopped = true - break - } - } - if !stopped { - t.Fatal("Failed to stop server") - } - - waitSession() - if by, _, err := zk.Get("/gozk-test"); err != nil { - t.Fatalf("Get failed on node 2: %+v", err) - } else if string(by) != "foo-cluster" { - t.Fatal("Wrong data for node 2") - } -} - -func TestWaitForClose(t *testing.T) { - ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "}) - if err != nil { - t.Fatal(err) - } - defer ts.Stop() - zk, err := ts.Connect(0) - if err != nil { - t.Fatalf("Connect returned error: %+v", err) - } - timeout := time.After(30 * time.Second) -CONNECTED: - for { - select { - case ev := <-zk.eventChan: - if ev.State == StateConnected { - break CONNECTED - } - case <-timeout: - zk.Close() - t.Fatal("Timeout") - } - } - zk.Close() - for { - select { - case _, ok := <-zk.eventChan: - if !ok { - return - } - case <-timeout: - t.Fatal("Timeout") - } - } -} - -func TestBadSession(t *testing.T) { - ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "}) - if err != nil { - t.Fatal(err) - } - defer ts.Stop() - zk, _, err := ts.ConnectAll() - if err != nil { - t.Fatalf("Connect returned error: %+v", err) - } - defer 
zk.Close() - - if err := zk.Delete("/gozk-test", -1); err != nil && err != ErrNoNode { - t.Fatalf("Delete returned error: %+v", err) - } - - zk.conn.Close() - time.Sleep(time.Millisecond * 100) - - if err := zk.Delete("/gozk-test", -1); err != nil && err != ErrNoNode { - t.Fatalf("Delete returned error: %+v", err) - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/constants_test.go b/libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/constants_test.go deleted file mode 100644 index 9fe6b04ceb..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/constants_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package zk - -import ( - "fmt" - "testing" -) - -func TestModeString(t *testing.T) { - if fmt.Sprintf("%v", ModeUnknown) != "unknown" { - t.Errorf("unknown value should be 'unknown'") - } - - if fmt.Sprintf("%v", ModeLeader) != "leader" { - t.Errorf("leader value should be 'leader'") - } - - if fmt.Sprintf("%v", ModeFollower) != "follower" { - t.Errorf("follower value should be 'follower'") - } - - if fmt.Sprintf("%v", ModeStandalone) != "standalone" { - t.Errorf("standlone value should be 'standalone'") - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/flw_test.go b/libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/flw_test.go deleted file mode 100644 index 63907268d9..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/flw_test.go +++ /dev/null @@ -1,367 +0,0 @@ -package zk - -import ( - "net" - "testing" - "time" -) - -var ( - zkSrvrOut = `Zookeeper version: 3.4.6-1569965, built on 02/20/2014 09:09 GMT -Latency min/avg/max: 0/1/10 -Received: 4207 -Sent: 4220 -Connections: 81 -Outstanding: 1 -Zxid: 0x110a7a8f37 -Mode: leader -Node count: 306 -` - zkConsOut = ` /10.42.45.231:45361[1](queued=0,recved=9435,sent=9457,sid=0x94c2989e04716b5,lop=PING,est=1427238717217,to=20001,lcxid=0x55120915,lzxid=0xffffffffffffffff,lresp=1427259255908,llat=0,minlat=0,avglat=1,maxlat=17) - /10.55.33.98:34342[1](queued=0,recved=9338,sent=9350,sid=0x94c2989e0471731,lop=PING,est=1427238849319,to=20001,lcxid=0x55120944,lzxid=0xffffffffffffffff,lresp=1427259252294,llat=0,minlat=0,avglat=1,maxlat=18) - /10.44.145.114:46556[1](queued=0,recved=109253,sent=109617,sid=0x94c2989e0471709,lop=DELE,est=1427238791305,to=20001,lcxid=0x55139618,lzxid=0x110a7b187d,lresp=1427259257423,llat=2,minlat=0,avglat=1,maxlat=23) - -` -) - -func TestFLWRuok(t *testing.T) { - l, err := net.Listen("tcp", "127.0.0.1:2181") - - if err != nil { - t.Fatalf(err.Error()) - } - - go tcpServer(l, "") - - var oks []bool - var ok bool - - oks = FLWRuok([]string{"127.0.0.1"}, time.Second*10) - - // close the connection, and pause shortly - // to cheat around a race condition - l.Close() - time.Sleep(time.Millisecond * 1) - - if len(oks) == 0 { - t.Errorf("no values returned") - } - - ok = oks[0] - - if !ok { - t.Errorf("instance should be marked as OK") - } - - // - // Confirm that it also returns false for dead instances - // - l, err = net.Listen("tcp", "127.0.0.1:2181") - - if err != nil { - t.Fatalf(err.Error()) - } - - defer l.Close() - - go tcpServer(l, "dead") - - oks = FLWRuok([]string{"127.0.0.1"}, time.Second*10) - - if len(oks) == 0 { - t.Errorf("no values returned") - } - - ok = oks[0] - - if ok { - t.Errorf("instance should be marked as not OK") - } -} - -func TestFLWSrvr(t *testing.T) { - l, err := net.Listen("tcp", "127.0.0.1:2181") - - if err != nil { - t.Fatalf(err.Error()) - } 
- - defer l.Close() - - go tcpServer(l, "") - - var statsSlice []*ServerStats - var stats *ServerStats - var ok bool - - statsSlice, ok = FLWSrvr([]string{"127.0.0.1:2181"}, time.Second*10) - - if !ok { - t.Errorf("failure indicated on 'srvr' parsing") - } - - if len(statsSlice) == 0 { - t.Errorf("no *ServerStats instances returned") - } - - stats = statsSlice[0] - - if stats.Error != nil { - t.Fatalf("error seen in stats: %v", err.Error()) - } - - if stats.Sent != 4220 { - t.Errorf("Sent != 4220") - } - - if stats.Received != 4207 { - t.Errorf("Received != 4207") - } - - if stats.NodeCount != 306 { - t.Errorf("NodeCount != 306") - } - - if stats.MinLatency != 0 { - t.Errorf("MinLatency != 0") - } - - if stats.AvgLatency != 1 { - t.Errorf("AvgLatency != 1") - } - - if stats.MaxLatency != 10 { - t.Errorf("MaxLatency != 10") - } - - if stats.Connections != 81 { - t.Errorf("Connection != 81") - } - - if stats.Outstanding != 1 { - t.Errorf("Outstanding != 1") - } - - if stats.Epoch != 17 { - t.Errorf("Epoch != 17") - } - - if stats.Counter != 175804215 { - t.Errorf("Counter != 175804215") - } - - if stats.Mode != ModeLeader { - t.Errorf("Mode != ModeLeader") - } - - if stats.Version != "3.4.6-1569965" { - t.Errorf("Version expected: 3.4.6-1569965") - } - - buildTime, err := time.Parse("01/02/2006 15:04 MST", "02/20/2014 09:09 GMT") - - if !stats.BuildTime.Equal(buildTime) { - - } -} - -func TestFLWCons(t *testing.T) { - l, err := net.Listen("tcp", "127.0.0.1:2181") - - if err != nil { - t.Fatalf(err.Error()) - } - - defer l.Close() - - go tcpServer(l, "") - - var clients []*ServerClients - var ok bool - - clients, ok = FLWCons([]string{"127.0.0.1"}, time.Second*10) - - if !ok { - t.Errorf("failure indicated on 'cons' parsing") - } - - if len(clients) == 0 { - t.Errorf("no *ServerClients instances returned") - } - - results := []*ServerClient{ - &ServerClient{ - Queued: 0, - Received: 9435, - Sent: 9457, - SessionID: 669956116721374901, - LastOperation: "PING", - Established: time.Unix(1427238717217, 0), - Timeout: 20001, - Lcxid: 1427245333, - Lzxid: -1, - LastResponse: time.Unix(1427259255908, 0), - LastLatency: 0, - MinLatency: 0, - AvgLatency: 1, - MaxLatency: 17, - Addr: "10.42.45.231:45361", - }, - &ServerClient{ - Queued: 0, - Received: 9338, - Sent: 9350, - SessionID: 669956116721375025, - LastOperation: "PING", - Established: time.Unix(1427238849319, 0), - Timeout: 20001, - Lcxid: 1427245380, - Lzxid: -1, - LastResponse: time.Unix(1427259252294, 0), - LastLatency: 0, - MinLatency: 0, - AvgLatency: 1, - MaxLatency: 18, - Addr: "10.55.33.98:34342", - }, - &ServerClient{ - Queued: 0, - Received: 109253, - Sent: 109617, - SessionID: 669956116721374985, - LastOperation: "DELE", - Established: time.Unix(1427238791305, 0), - Timeout: 20001, - Lcxid: 1427346968, - Lzxid: 73190283389, - LastResponse: time.Unix(1427259257423, 0), - LastLatency: 2, - MinLatency: 0, - AvgLatency: 1, - MaxLatency: 23, - Addr: "10.44.145.114:46556", - }, - } - - for _, z := range clients { - if z.Error != nil { - t.Errorf("error seen: %v", err.Error()) - } - - for i, v := range z.Clients { - c := results[i] - - if v.Error != nil { - t.Errorf("client error seen: %v", err.Error()) - } - - if v.Queued != c.Queued { - t.Errorf("Queued value mismatch (%d/%d)", v.Queued, c.Queued) - } - - if v.Received != c.Received { - t.Errorf("Received value mismatch (%d/%d)", v.Received, c.Received) - } - - if v.Sent != c.Sent { - t.Errorf("Sent value mismatch (%d/%d)", v.Sent, c.Sent) - } - - if v.SessionID != c.SessionID { - 
t.Errorf("SessionID value mismatch (%d/%d)", v.SessionID, c.SessionID) - } - - if v.LastOperation != c.LastOperation { - t.Errorf("LastOperation value mismatch ('%v'/'%v')", v.LastOperation, c.LastOperation) - } - - if v.Timeout != c.Timeout { - t.Errorf("Timeout value mismatch (%d/%d)", v.Timeout, c.Timeout) - } - - if v.Lcxid != c.Lcxid { - t.Errorf("Lcxid value mismatch (%d/%d)", v.Lcxid, c.Lcxid) - } - - if v.Lzxid != c.Lzxid { - t.Errorf("Lzxid value mismatch (%d/%d)", v.Lzxid, c.Lzxid) - } - - if v.LastLatency != c.LastLatency { - t.Errorf("LastLatency value mismatch (%d/%d)", v.LastLatency, c.LastLatency) - } - - if v.MinLatency != c.MinLatency { - t.Errorf("MinLatency value mismatch (%d/%d)", v.MinLatency, c.MinLatency) - } - - if v.AvgLatency != c.AvgLatency { - t.Errorf("AvgLatency value mismatch (%d/%d)", v.AvgLatency, c.AvgLatency) - } - - if v.MaxLatency != c.MaxLatency { - t.Errorf("MaxLatency value mismatch (%d/%d)", v.MaxLatency, c.MaxLatency) - } - - if v.Addr != c.Addr { - t.Errorf("Addr value mismatch ('%v'/'%v')", v.Addr, c.Addr) - } - - if !c.Established.Equal(v.Established) { - t.Errorf("Established value mismatch (%v/%v)", c.Established, v.Established) - } - - if !c.LastResponse.Equal(v.LastResponse) { - t.Errorf("Established value mismatch (%v/%v)", c.LastResponse, v.LastResponse) - } - } - } -} - -func tcpServer(listener net.Listener, thing string) { - for { - conn, err := listener.Accept() - if err != nil { - return - } - go connHandler(conn, thing) - } -} - -func connHandler(conn net.Conn, thing string) { - defer conn.Close() - - data := make([]byte, 4) - - _, err := conn.Read(data) - - if err != nil { - return - } - - switch string(data) { - case "ruok": - switch thing { - case "dead": - return - default: - conn.Write([]byte("imok")) - } - case "srvr": - switch thing { - case "dead": - return - default: - conn.Write([]byte(zkSrvrOut)) - } - case "cons": - switch thing { - case "dead": - return - default: - conn.Write([]byte(zkConsOut)) - } - default: - conn.Write([]byte("This ZooKeeper instance is not currently serving requests.")) - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/lock_test.go b/libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/lock_test.go deleted file mode 100644 index 8a3478a336..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/lock_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package zk - -import ( - "testing" - "time" -) - -func TestLock(t *testing.T) { - ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "}) - if err != nil { - t.Fatal(err) - } - defer ts.Stop() - zk, _, err := ts.ConnectAll() - if err != nil { - t.Fatalf("Connect returned error: %+v", err) - } - defer zk.Close() - - acls := WorldACL(PermAll) - - l := NewLock(zk, "/test", acls) - if err := l.Lock(); err != nil { - t.Fatal(err) - } - if err := l.Unlock(); err != nil { - t.Fatal(err) - } - - val := make(chan int, 3) - - if err := l.Lock(); err != nil { - t.Fatal(err) - } - - l2 := NewLock(zk, "/test", acls) - go func() { - if err := l2.Lock(); err != nil { - t.Fatal(err) - } - val <- 2 - if err := l2.Unlock(); err != nil { - t.Fatal(err) - } - val <- 3 - }() - time.Sleep(time.Millisecond * 100) - - val <- 1 - if err := l.Unlock(); err != nil { - t.Fatal(err) - } - if x := <-val; x != 1 { - t.Fatalf("Expected 1 instead of %d", x) - } - if x := <-val; x != 2 { - t.Fatalf("Expected 2 instead of %d", x) - } - if x := <-val; x != 3 { - t.Fatalf("Expected 3 instead of %d", x) - } -} - 
-// This tests creating a lock with a path that's more than 1 node deep (e.g. "/test-multi-level/lock"), -// when a part of that path already exists (i.e. "/test-multi-level" node already exists). -func TestMultiLevelLock(t *testing.T) { - ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "}) - if err != nil { - t.Fatal(err) - } - defer ts.Stop() - zk, _, err := ts.ConnectAll() - if err != nil { - t.Fatalf("Connect returned error: %+v", err) - } - defer zk.Close() - - acls := WorldACL(PermAll) - path := "/test-multi-level" - if p, err := zk.Create(path, []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil { - t.Fatalf("Create returned error: %+v", err) - } else if p != path { - t.Fatalf("Create returned different path '%s' != '%s'", p, path) - } - l := NewLock(zk, "/test-multi-level/lock", acls) - defer zk.Delete("/test-multi-level", -1) // Clean up what we've created for this test - defer zk.Delete("/test-multi-level/lock", -1) - if err := l.Lock(); err != nil { - t.Fatal(err) - } - if err := l.Unlock(); err != nil { - t.Fatal(err) - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/structs_test.go b/libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/structs_test.go deleted file mode 100644 index 64f18e8d3e..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/structs_test.go +++ /dev/null @@ -1,60 +0,0 @@ -package zk - -import ( - "reflect" - "testing" -) - -func TestEncodeDecodePacket(t *testing.T) { - encodeDecodeTest(t, &requestHeader{-2, 5}) - encodeDecodeTest(t, &connectResponse{1, 2, 3, nil}) - encodeDecodeTest(t, &connectResponse{1, 2, 3, []byte{4, 5, 6}}) - encodeDecodeTest(t, &getAclResponse{[]ACL{{12, "s", "anyone"}}, Stat{}}) - encodeDecodeTest(t, &getChildrenResponse{[]string{"foo", "bar"}}) - encodeDecodeTest(t, &pathWatchRequest{"path", true}) - encodeDecodeTest(t, &pathWatchRequest{"path", false}) - encodeDecodeTest(t, &CheckVersionRequest{"/", -1}) - encodeDecodeTest(t, &multiRequest{Ops: []multiRequestOp{{multiHeader{opCheck, false, -1}, &CheckVersionRequest{"/", -1}}}}) -} - -func encodeDecodeTest(t *testing.T, r interface{}) { - buf := make([]byte, 1024) - n, err := encodePacket(buf, r) - if err != nil { - t.Errorf("encodePacket returned non-nil error %+v\n", err) - return - } - t.Logf("%+v %x", r, buf[:n]) - r2 := reflect.New(reflect.ValueOf(r).Elem().Type()).Interface() - n2, err := decodePacket(buf[:n], r2) - if err != nil { - t.Errorf("decodePacket returned non-nil error %+v\n", err) - return - } - if n != n2 { - t.Errorf("sizes don't match: %d != %d", n, n2) - return - } - if !reflect.DeepEqual(r, r2) { - t.Errorf("results don't match: %+v != %+v", r, r2) - return - } -} - -func TestEncodeShortBuffer(t *testing.T) { - buf := make([]byte, 0) - _, err := encodePacket(buf, &requestHeader{1, 2}) - if err != ErrShortBuffer { - t.Errorf("encodePacket should return ErrShortBuffer on a short buffer instead of '%+v'", err) - return - } -} - -func TestDecodeShortBuffer(t *testing.T) { - buf := make([]byte, 0) - _, err := decodePacket(buf, &responseHeader{}) - if err != ErrShortBuffer { - t.Errorf("decodePacket should return ErrShortBuffer on a short buffer instead of '%+v'", err) - return - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/util_test.go b/libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/util_test.go deleted file mode 100644 index b56f77550d..0000000000 --- 
a/libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/util_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package zk - -import "testing" - -func TestFormatServers(t *testing.T) { - servers := []string{"127.0.0.1:2181", "127.0.0.42", "127.0.42.1:8811"} - r := []string{"127.0.0.1:2181", "127.0.0.42:2181", "127.0.42.1:8811"} - - var s []string - s = FormatServers(servers) - - for i := range s { - if s[i] != r[i] { - t.Errorf("%v should equal %v", s[i], r[i]) - } - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/zk_test.go b/libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/zk_test.go deleted file mode 100644 index fdbe517273..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/zk_test.go +++ /dev/null @@ -1,518 +0,0 @@ -package zk - -import ( - "fmt" - "io" - "net" - "strings" - "testing" - "time" - - "camlistore.org/pkg/throttle" -) - -func TestCreate(t *testing.T) { - ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "}) - if err != nil { - t.Fatal(err) - } - defer ts.Stop() - zk, _, err := ts.ConnectAll() - if err != nil { - t.Fatalf("Connect returned error: %+v", err) - } - defer zk.Close() - - path := "/gozk-test" - - if err := zk.Delete(path, -1); err != nil && err != ErrNoNode { - t.Fatalf("Delete returned error: %+v", err) - } - if p, err := zk.Create(path, []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil { - t.Fatalf("Create returned error: %+v", err) - } else if p != path { - t.Fatalf("Create returned different path '%s' != '%s'", p, path) - } - if data, stat, err := zk.Get(path); err != nil { - t.Fatalf("Get returned error: %+v", err) - } else if stat == nil { - t.Fatal("Get returned nil stat") - } else if len(data) < 4 { - t.Fatal("Get returned wrong size data") - } -} - -func TestMulti(t *testing.T) { - ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "}) - if err != nil { - t.Fatal(err) - } - defer ts.Stop() - zk, _, err := ts.ConnectAll() - if err != nil { - t.Fatalf("Connect returned error: %+v", err) - } - defer zk.Close() - - path := "/gozk-test" - - if err := zk.Delete(path, -1); err != nil && err != ErrNoNode { - t.Fatalf("Delete returned error: %+v", err) - } - ops := []interface{}{ - &CreateRequest{Path: path, Data: []byte{1, 2, 3, 4}, Acl: WorldACL(PermAll)}, - &SetDataRequest{Path: path, Data: []byte{1, 2, 3, 4}, Version: -1}, - } - if res, err := zk.Multi(ops...); err != nil { - t.Fatalf("Multi returned error: %+v", err) - } else if len(res) != 2 { - t.Fatalf("Expected 2 responses got %d", len(res)) - } else { - t.Logf("%+v", res) - } - if data, stat, err := zk.Get(path); err != nil { - t.Fatalf("Get returned error: %+v", err) - } else if stat == nil { - t.Fatal("Get returned nil stat") - } else if len(data) < 4 { - t.Fatal("Get returned wrong size data") - } -} - -func TestGetSetACL(t *testing.T) { - ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "}) - if err != nil { - t.Fatal(err) - } - defer ts.Stop() - zk, _, err := ts.ConnectAll() - if err != nil { - t.Fatalf("Connect returned error: %+v", err) - } - defer zk.Close() - - if err := zk.AddAuth("digest", []byte("blah")); err != nil { - t.Fatalf("AddAuth returned error %+v", err) - } - - path := "/gozk-test" - - if err := zk.Delete(path, -1); err != nil && err != ErrNoNode { - t.Fatalf("Delete returned error: %+v", err) - } - if path, err := zk.Create(path, []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil { - t.Fatalf("Create returned error: %+v", err) - } else if path 
!= "/gozk-test" { - t.Fatalf("Create returned different path '%s' != '/gozk-test'", path) - } - - expected := WorldACL(PermAll) - - if acl, stat, err := zk.GetACL(path); err != nil { - t.Fatalf("GetACL returned error %+v", err) - } else if stat == nil { - t.Fatalf("GetACL returned nil Stat") - } else if len(acl) != 1 || expected[0] != acl[0] { - t.Fatalf("GetACL mismatch expected %+v instead of %+v", expected, acl) - } - - expected = []ACL{{PermAll, "ip", "127.0.0.1"}} - - if stat, err := zk.SetACL(path, expected, -1); err != nil { - t.Fatalf("SetACL returned error %+v", err) - } else if stat == nil { - t.Fatalf("SetACL returned nil Stat") - } - - if acl, stat, err := zk.GetACL(path); err != nil { - t.Fatalf("GetACL returned error %+v", err) - } else if stat == nil { - t.Fatalf("GetACL returned nil Stat") - } else if len(acl) != 1 || expected[0] != acl[0] { - t.Fatalf("GetACL mismatch expected %+v instead of %+v", expected, acl) - } -} - -func TestAuth(t *testing.T) { - ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "}) - if err != nil { - t.Fatal(err) - } - defer ts.Stop() - zk, _, err := ts.ConnectAll() - if err != nil { - t.Fatalf("Connect returned error: %+v", err) - } - defer zk.Close() - - path := "/gozk-digest-test" - if err := zk.Delete(path, -1); err != nil && err != ErrNoNode { - t.Fatalf("Delete returned error: %+v", err) - } - - acl := DigestACL(PermAll, "user", "password") - - if p, err := zk.Create(path, []byte{1, 2, 3, 4}, 0, acl); err != nil { - t.Fatalf("Create returned error: %+v", err) - } else if p != path { - t.Fatalf("Create returned different path '%s' != '%s'", p, path) - } - - if a, stat, err := zk.GetACL(path); err != nil { - t.Fatalf("GetACL returned error %+v", err) - } else if stat == nil { - t.Fatalf("GetACL returned nil Stat") - } else if len(a) != 1 || acl[0] != a[0] { - t.Fatalf("GetACL mismatch expected %+v instead of %+v", acl, a) - } - - if _, _, err := zk.Get(path); err != ErrNoAuth { - t.Fatalf("Get returned error %+v instead of ErrNoAuth", err) - } - - if err := zk.AddAuth("digest", []byte("user:password")); err != nil { - t.Fatalf("AddAuth returned error %+v", err) - } - - if data, stat, err := zk.Get(path); err != nil { - t.Fatalf("Get returned error %+v", err) - } else if stat == nil { - t.Fatalf("Get returned nil Stat") - } else if len(data) != 4 { - t.Fatalf("Get returned wrong data length") - } -} - -func TestChildWatch(t *testing.T) { - ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "}) - if err != nil { - t.Fatal(err) - } - defer ts.Stop() - zk, _, err := ts.ConnectAll() - if err != nil { - t.Fatalf("Connect returned error: %+v", err) - } - defer zk.Close() - - if err := zk.Delete("/gozk-test", -1); err != nil && err != ErrNoNode { - t.Fatalf("Delete returned error: %+v", err) - } - - children, stat, childCh, err := zk.ChildrenW("/") - if err != nil { - t.Fatalf("Children returned error: %+v", err) - } else if stat == nil { - t.Fatal("Children returned nil stat") - } else if len(children) < 1 { - t.Fatal("Children should return at least 1 child") - } - - if path, err := zk.Create("/gozk-test", []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil { - t.Fatalf("Create returned error: %+v", err) - } else if path != "/gozk-test" { - t.Fatalf("Create returned different path '%s' != '/gozk-test'", path) - } - - select { - case ev := <-childCh: - if ev.Err != nil { - t.Fatalf("Child watcher error %+v", ev.Err) - } - if ev.Path != "/" { - t.Fatalf("Child watcher wrong path %s instead of %s", ev.Path, "/") - } - case 
_ = <-time.After(time.Second * 2): - t.Fatal("Child watcher timed out") - } - - // Delete of the watched node should trigger the watch - - children, stat, childCh, err = zk.ChildrenW("/gozk-test") - if err != nil { - t.Fatalf("Children returned error: %+v", err) - } else if stat == nil { - t.Fatal("Children returned nil stat") - } else if len(children) != 0 { - t.Fatal("Children should return 0 children") - } - - if err := zk.Delete("/gozk-test", -1); err != nil && err != ErrNoNode { - t.Fatalf("Delete returned error: %+v", err) - } - - select { - case ev := <-childCh: - if ev.Err != nil { - t.Fatalf("Child watcher error %+v", ev.Err) - } - if ev.Path != "/gozk-test" { - t.Fatalf("Child watcher wrong path %s instead of %s", ev.Path, "/") - } - case _ = <-time.After(time.Second * 2): - t.Fatal("Child watcher timed out") - } -} - -func TestSetWatchers(t *testing.T) { - ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "}) - if err != nil { - t.Fatal(err) - } - defer ts.Stop() - zk, _, err := ts.ConnectAll() - if err != nil { - t.Fatalf("Connect returned error: %+v", err) - } - defer zk.Close() - - zk.reconnectDelay = time.Second - - zk2, _, err := ts.ConnectAll() - if err != nil { - t.Fatalf("Connect returned error: %+v", err) - } - defer zk2.Close() - - if err := zk.Delete("/gozk-test", -1); err != nil && err != ErrNoNode { - t.Fatalf("Delete returned error: %+v", err) - } - - testPath, err := zk.Create("/gozk-test-2", []byte{}, 0, WorldACL(PermAll)) - if err != nil { - t.Fatalf("Create returned: %+v", err) - } - - _, _, testEvCh, err := zk.GetW(testPath) - if err != nil { - t.Fatalf("GetW returned: %+v", err) - } - - children, stat, childCh, err := zk.ChildrenW("/") - if err != nil { - t.Fatalf("Children returned error: %+v", err) - } else if stat == nil { - t.Fatal("Children returned nil stat") - } else if len(children) < 1 { - t.Fatal("Children should return at least 1 child") - } - - zk.conn.Close() - if err := zk2.Delete(testPath, -1); err != nil && err != ErrNoNode { - t.Fatalf("Delete returned error: %+v", err) - } - time.Sleep(time.Millisecond * 100) - - if path, err := zk2.Create("/gozk-test", []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil { - t.Fatalf("Create returned error: %+v", err) - } else if path != "/gozk-test" { - t.Fatalf("Create returned different path '%s' != '/gozk-test'", path) - } - - select { - case ev := <-testEvCh: - if ev.Err != nil { - t.Fatalf("GetW watcher error %+v", ev.Err) - } - if ev.Path != testPath { - t.Fatalf("GetW watcher wrong path %s instead of %s", ev.Path, testPath) - } - case <-time.After(2 * time.Second): - t.Fatal("GetW watcher timed out") - } - - select { - case ev := <-childCh: - if ev.Err != nil { - t.Fatalf("Child watcher error %+v", ev.Err) - } - if ev.Path != "/" { - t.Fatalf("Child watcher wrong path %s instead of %s", ev.Path, "/") - } - case <-time.After(2 * time.Second): - t.Fatal("Child watcher timed out") - } -} - -func TestExpiringWatch(t *testing.T) { - ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "}) - if err != nil { - t.Fatal(err) - } - defer ts.Stop() - zk, _, err := ts.ConnectAll() - if err != nil { - t.Fatalf("Connect returned error: %+v", err) - } - defer zk.Close() - - if err := zk.Delete("/gozk-test", -1); err != nil && err != ErrNoNode { - t.Fatalf("Delete returned error: %+v", err) - } - - children, stat, childCh, err := zk.ChildrenW("/") - if err != nil { - t.Fatalf("Children returned error: %+v", err) - } else if stat == nil { - t.Fatal("Children returned nil stat") - } else if 
len(children) < 1 { - t.Fatal("Children should return at least 1 child") - } - - zk.sessionID = 99999 - zk.conn.Close() - - select { - case ev := <-childCh: - if ev.Err != ErrSessionExpired { - t.Fatalf("Child watcher error %+v instead of expected ErrSessionExpired", ev.Err) - } - if ev.Path != "/" { - t.Fatalf("Child watcher wrong path %s instead of %s", ev.Path, "/") - } - case <-time.After(2 * time.Second): - t.Fatal("Child watcher timed out") - } -} - -func TestRequestFail(t *testing.T) { - // If connecting fails to all servers in the list then pending requests - // should be errored out so they don't hang forever. - - zk, _, err := Connect([]string{"127.0.0.1:32444"}, time.Second*15) - if err != nil { - t.Fatal(err) - } - defer zk.Close() - - ch := make(chan error) - go func() { - _, _, err := zk.Get("/blah") - ch <- err - }() - select { - case err := <-ch: - if err == nil { - t.Fatal("Expected non-nil error on failed request due to connection failure") - } - case <-time.After(time.Second * 2): - t.Fatal("Get hung when connection could not be made") - } -} - -func TestSlowServer(t *testing.T) { - ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "}) - if err != nil { - t.Fatal(err) - } - defer ts.Stop() - - realAddr := fmt.Sprintf("127.0.0.1:%d", ts.Servers[0].Port) - proxyAddr, stopCh, err := startSlowProxy(t, - throttle.Rate{}, throttle.Rate{}, - realAddr, func(ln *throttle.Listener) { - if ln.Up.Latency == 0 { - ln.Up.Latency = time.Millisecond * 2000 - ln.Down.Latency = time.Millisecond * 2000 - } else { - ln.Up.Latency = 0 - ln.Down.Latency = 0 - } - }) - if err != nil { - t.Fatal(err) - } - defer close(stopCh) - - zk, _, err := Connect([]string{proxyAddr}, time.Millisecond*500) - if err != nil { - t.Fatal(err) - } - defer zk.Close() - - _, _, wch, err := zk.ChildrenW("/") - if err != nil { - t.Fatal(err) - } - - // Force a reconnect to get a throttled connection - zk.conn.Close() - - time.Sleep(time.Millisecond * 100) - - if err := zk.Delete("/gozk-test", -1); err == nil { - t.Fatal("Delete should have failed") - } - - // The previous request should have timed out causing the server to be disconnected and reconnected - - if _, err := zk.Create("/gozk-test", []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil { - t.Fatal(err) - } - - // Make sure event is still returned because the session should not have been affected - select { - case ev := <-wch: - t.Logf("Received event: %+v", ev) - case <-time.After(time.Second): - t.Fatal("Expected to receive a watch event") - } -} - -func startSlowProxy(t *testing.T, up, down throttle.Rate, upstream string, adj func(ln *throttle.Listener)) (string, chan bool, error) { - ln, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - return "", nil, err - } - tln := &throttle.Listener{ - Listener: ln, - Up: up, - Down: down, - } - stopCh := make(chan bool) - go func() { - <-stopCh - tln.Close() - }() - go func() { - for { - cn, err := tln.Accept() - if err != nil { - if !strings.Contains(err.Error(), "use of closed network connection") { - t.Fatalf("Accept failed: %s", err.Error()) - } - return - } - if adj != nil { - adj(tln) - } - go func(cn net.Conn) { - defer cn.Close() - upcn, err := net.Dial("tcp", upstream) - if err != nil { - t.Log(err) - return - } - // This will leave hanging goroutines util stopCh is closed - // but it doesn't matter in the context of running tests. 
- go func() { - <-stopCh - upcn.Close() - }() - go func() { - if _, err := io.Copy(upcn, cn); err != nil { - if !strings.Contains(err.Error(), "use of closed network connection") { - // log.Printf("Upstream write failed: %s", err.Error()) - } - } - }() - if _, err := io.Copy(cn, upcn); err != nil { - if !strings.Contains(err.Error(), "use of closed network connection") { - // log.Printf("Upstream read failed: %s", err.Error()) - } - } - }(cn) - } - }() - return ln.Addr().String(), stopCh, nil -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions_test.go b/libnetwork/Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions_test.go deleted file mode 100644 index d859c77b91..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions_test.go +++ /dev/null @@ -1,791 +0,0 @@ -package assert - -import ( - "errors" - "math" - "regexp" - "testing" - "time" -) - -// AssertionTesterInterface defines an interface to be used for testing assertion methods -type AssertionTesterInterface interface { - TestMethod() -} - -// AssertionTesterConformingObject is an object that conforms to the AssertionTesterInterface interface -type AssertionTesterConformingObject struct { -} - -func (a *AssertionTesterConformingObject) TestMethod() { -} - -// AssertionTesterNonConformingObject is an object that does not conform to the AssertionTesterInterface interface -type AssertionTesterNonConformingObject struct { -} - -func TestObjectsAreEqual(t *testing.T) { - - if !ObjectsAreEqual("Hello World", "Hello World") { - t.Error("objectsAreEqual should return true") - } - if !ObjectsAreEqual(123, 123) { - t.Error("objectsAreEqual should return true") - } - if !ObjectsAreEqual(123.5, 123.5) { - t.Error("objectsAreEqual should return true") - } - if !ObjectsAreEqual([]byte("Hello World"), []byte("Hello World")) { - t.Error("objectsAreEqual should return true") - } - if !ObjectsAreEqual(nil, nil) { - t.Error("objectsAreEqual should return true") - } - if ObjectsAreEqual(map[int]int{5: 10}, map[int]int{10: 20}) { - t.Error("objectsAreEqual should return false") - } - if ObjectsAreEqual('x', "x") { - t.Error("objectsAreEqual should return false") - } - if ObjectsAreEqual("x", 'x') { - t.Error("objectsAreEqual should return false") - } - if ObjectsAreEqual(0, 0.1) { - t.Error("objectsAreEqual should return false") - } - if ObjectsAreEqual(0.1, 0) { - t.Error("objectsAreEqual should return false") - } - if ObjectsAreEqual(uint32(10), int32(10)) { - t.Error("objectsAreEqual should return false") - } - if !ObjectsAreEqualValues(uint32(10), int32(10)) { - t.Error("ObjectsAreEqualValues should return true") - } - -} - -func TestImplements(t *testing.T) { - - mockT := new(testing.T) - - if !Implements(mockT, (*AssertionTesterInterface)(nil), new(AssertionTesterConformingObject)) { - t.Error("Implements method should return true: AssertionTesterConformingObject implements AssertionTesterInterface") - } - if Implements(mockT, (*AssertionTesterInterface)(nil), new(AssertionTesterNonConformingObject)) { - t.Error("Implements method should return false: AssertionTesterNonConformingObject does not implements AssertionTesterInterface") - } - -} - -func TestIsType(t *testing.T) { - - mockT := new(testing.T) - - if !IsType(mockT, new(AssertionTesterConformingObject), new(AssertionTesterConformingObject)) { - t.Error("IsType should return true: AssertionTesterConformingObject is the same type as AssertionTesterConformingObject") - } - if IsType(mockT, 
new(AssertionTesterConformingObject), new(AssertionTesterNonConformingObject)) { - t.Error("IsType should return false: AssertionTesterConformingObject is not the same type as AssertionTesterNonConformingObject") - } - -} - -func TestEqual(t *testing.T) { - - mockT := new(testing.T) - - if !Equal(mockT, "Hello World", "Hello World") { - t.Error("Equal should return true") - } - if !Equal(mockT, 123, 123) { - t.Error("Equal should return true") - } - if !Equal(mockT, 123.5, 123.5) { - t.Error("Equal should return true") - } - if !Equal(mockT, []byte("Hello World"), []byte("Hello World")) { - t.Error("Equal should return true") - } - if !Equal(mockT, nil, nil) { - t.Error("Equal should return true") - } - if !Equal(mockT, int32(123), int32(123)) { - t.Error("Equal should return true") - } - if !Equal(mockT, uint64(123), uint64(123)) { - t.Error("Equal should return true") - } - -} - -func TestNotNil(t *testing.T) { - - mockT := new(testing.T) - - if !NotNil(mockT, new(AssertionTesterConformingObject)) { - t.Error("NotNil should return true: object is not nil") - } - if NotNil(mockT, nil) { - t.Error("NotNil should return false: object is nil") - } - -} - -func TestNil(t *testing.T) { - - mockT := new(testing.T) - - if !Nil(mockT, nil) { - t.Error("Nil should return true: object is nil") - } - if Nil(mockT, new(AssertionTesterConformingObject)) { - t.Error("Nil should return false: object is not nil") - } - -} - -func TestTrue(t *testing.T) { - - mockT := new(testing.T) - - if !True(mockT, true) { - t.Error("True should return true") - } - if True(mockT, false) { - t.Error("True should return false") - } - -} - -func TestFalse(t *testing.T) { - - mockT := new(testing.T) - - if !False(mockT, false) { - t.Error("False should return true") - } - if False(mockT, true) { - t.Error("False should return false") - } - -} - -func TestExactly(t *testing.T) { - - mockT := new(testing.T) - - a := float32(1) - b := float64(1) - c := float32(1) - d := float32(2) - - if Exactly(mockT, a, b) { - t.Error("Exactly should return false") - } - if Exactly(mockT, a, d) { - t.Error("Exactly should return false") - } - if !Exactly(mockT, a, c) { - t.Error("Exactly should return true") - } - - if Exactly(mockT, nil, a) { - t.Error("Exactly should return false") - } - if Exactly(mockT, a, nil) { - t.Error("Exactly should return false") - } - -} - -func TestNotEqual(t *testing.T) { - - mockT := new(testing.T) - - if !NotEqual(mockT, "Hello World", "Hello World!") { - t.Error("NotEqual should return true") - } - if !NotEqual(mockT, 123, 1234) { - t.Error("NotEqual should return true") - } - if !NotEqual(mockT, 123.5, 123.55) { - t.Error("NotEqual should return true") - } - if !NotEqual(mockT, []byte("Hello World"), []byte("Hello World!")) { - t.Error("NotEqual should return true") - } - if !NotEqual(mockT, nil, new(AssertionTesterConformingObject)) { - t.Error("NotEqual should return true") - } - funcA := func() int { return 23 } - funcB := func() int { return 42 } - if !NotEqual(mockT, funcA, funcB) { - t.Error("NotEqual should return true") - } - - if NotEqual(mockT, "Hello World", "Hello World") { - t.Error("NotEqual should return false") - } - if NotEqual(mockT, 123, 123) { - t.Error("NotEqual should return false") - } - if NotEqual(mockT, 123.5, 123.5) { - t.Error("NotEqual should return false") - } - if NotEqual(mockT, []byte("Hello World"), []byte("Hello World")) { - t.Error("NotEqual should return false") - } - if NotEqual(mockT, new(AssertionTesterConformingObject), new(AssertionTesterConformingObject)) { - 
t.Error("NotEqual should return false") - } -} - -type A struct { - Name, Value string -} - -func TestContains(t *testing.T) { - - mockT := new(testing.T) - list := []string{"Foo", "Bar"} - complexList := []*A{ - {"b", "c"}, - {"d", "e"}, - {"g", "h"}, - {"j", "k"}, - } - - if !Contains(mockT, "Hello World", "Hello") { - t.Error("Contains should return true: \"Hello World\" contains \"Hello\"") - } - if Contains(mockT, "Hello World", "Salut") { - t.Error("Contains should return false: \"Hello World\" does not contain \"Salut\"") - } - - if !Contains(mockT, list, "Bar") { - t.Error("Contains should return true: \"[\"Foo\", \"Bar\"]\" contains \"Bar\"") - } - if Contains(mockT, list, "Salut") { - t.Error("Contains should return false: \"[\"Foo\", \"Bar\"]\" does not contain \"Salut\"") - } - if !Contains(mockT, complexList, &A{"g", "h"}) { - t.Error("Contains should return true: complexList contains {\"g\", \"h\"}") - } - if Contains(mockT, complexList, &A{"g", "e"}) { - t.Error("Contains should return false: complexList contains {\"g\", \"e\"}") - } -} - -func TestNotContains(t *testing.T) { - - mockT := new(testing.T) - list := []string{"Foo", "Bar"} - - if !NotContains(mockT, "Hello World", "Hello!") { - t.Error("NotContains should return true: \"Hello World\" does not contain \"Hello!\"") - } - if NotContains(mockT, "Hello World", "Hello") { - t.Error("NotContains should return false: \"Hello World\" contains \"Hello\"") - } - - if !NotContains(mockT, list, "Foo!") { - t.Error("NotContains should return true: \"[\"Foo\", \"Bar\"]\" does not contain \"Foo!\"") - } - if NotContains(mockT, list, "Foo") { - t.Error("NotContains should return false: \"[\"Foo\", \"Bar\"]\" contains \"Foo\"") - } - -} - -func Test_includeElement(t *testing.T) { - - list1 := []string{"Foo", "Bar"} - list2 := []int{1, 2} - - ok, found := includeElement("Hello World", "World") - True(t, ok) - True(t, found) - - ok, found = includeElement(list1, "Foo") - True(t, ok) - True(t, found) - - ok, found = includeElement(list1, "Bar") - True(t, ok) - True(t, found) - - ok, found = includeElement(list2, 1) - True(t, ok) - True(t, found) - - ok, found = includeElement(list2, 2) - True(t, ok) - True(t, found) - - ok, found = includeElement(list1, "Foo!") - True(t, ok) - False(t, found) - - ok, found = includeElement(list2, 3) - True(t, ok) - False(t, found) - - ok, found = includeElement(list2, "1") - True(t, ok) - False(t, found) - - ok, found = includeElement(1433, "1") - False(t, ok) - False(t, found) - -} - -func TestCondition(t *testing.T) { - mockT := new(testing.T) - - if !Condition(mockT, func() bool { return true }, "Truth") { - t.Error("Condition should return true") - } - - if Condition(mockT, func() bool { return false }, "Lie") { - t.Error("Condition should return false") - } - -} - -func TestDidPanic(t *testing.T) { - - if funcDidPanic, _ := didPanic(func() { - panic("Panic!") - }); !funcDidPanic { - t.Error("didPanic should return true") - } - - if funcDidPanic, _ := didPanic(func() { - }); funcDidPanic { - t.Error("didPanic should return false") - } - -} - -func TestPanics(t *testing.T) { - - mockT := new(testing.T) - - if !Panics(mockT, func() { - panic("Panic!") - }) { - t.Error("Panics should return true") - } - - if Panics(mockT, func() { - }) { - t.Error("Panics should return false") - } - -} - -func TestNotPanics(t *testing.T) { - - mockT := new(testing.T) - - if !NotPanics(mockT, func() { - }) { - t.Error("NotPanics should return true") - } - - if NotPanics(mockT, func() { - panic("Panic!") - }) { - 
t.Error("NotPanics should return false") - } - -} - -func TestNoError(t *testing.T) { - - mockT := new(testing.T) - - // start with a nil error - var err error - - True(t, NoError(mockT, err), "NoError should return True for nil arg") - - // now set an error - err = errors.New("some error") - - False(t, NoError(mockT, err), "NoError with error should return False") - -} - -func TestError(t *testing.T) { - - mockT := new(testing.T) - - // start with a nil error - var err error - - False(t, Error(mockT, err), "Error should return False for nil arg") - - // now set an error - err = errors.New("some error") - - True(t, Error(mockT, err), "Error with error should return True") - -} - -func TestEqualError(t *testing.T) { - mockT := new(testing.T) - - // start with a nil error - var err error - False(t, EqualError(mockT, err, ""), - "EqualError should return false for nil arg") - - // now set an error - err = errors.New("some error") - False(t, EqualError(mockT, err, "Not some error"), - "EqualError should return false for different error string") - True(t, EqualError(mockT, err, "some error"), - "EqualError should return true") -} - -func Test_isEmpty(t *testing.T) { - - chWithValue := make(chan struct{}, 1) - chWithValue <- struct{}{} - - True(t, isEmpty("")) - True(t, isEmpty(nil)) - True(t, isEmpty([]string{})) - True(t, isEmpty(0)) - True(t, isEmpty(int32(0))) - True(t, isEmpty(int64(0))) - True(t, isEmpty(false)) - True(t, isEmpty(map[string]string{})) - True(t, isEmpty(new(time.Time))) - True(t, isEmpty(make(chan struct{}))) - False(t, isEmpty("something")) - False(t, isEmpty(errors.New("something"))) - False(t, isEmpty([]string{"something"})) - False(t, isEmpty(1)) - False(t, isEmpty(true)) - False(t, isEmpty(map[string]string{"Hello": "World"})) - False(t, isEmpty(chWithValue)) - -} - -func TestEmpty(t *testing.T) { - - mockT := new(testing.T) - chWithValue := make(chan struct{}, 1) - chWithValue <- struct{}{} - - True(t, Empty(mockT, ""), "Empty string is empty") - True(t, Empty(mockT, nil), "Nil is empty") - True(t, Empty(mockT, []string{}), "Empty string array is empty") - True(t, Empty(mockT, 0), "Zero int value is empty") - True(t, Empty(mockT, false), "False value is empty") - True(t, Empty(mockT, make(chan struct{})), "Channel without values is empty") - - False(t, Empty(mockT, "something"), "Non Empty string is not empty") - False(t, Empty(mockT, errors.New("something")), "Non nil object is not empty") - False(t, Empty(mockT, []string{"something"}), "Non empty string array is not empty") - False(t, Empty(mockT, 1), "Non-zero int value is not empty") - False(t, Empty(mockT, true), "True value is not empty") - False(t, Empty(mockT, chWithValue), "Channel with values is not empty") -} - -func TestNotEmpty(t *testing.T) { - - mockT := new(testing.T) - chWithValue := make(chan struct{}, 1) - chWithValue <- struct{}{} - - False(t, NotEmpty(mockT, ""), "Empty string is empty") - False(t, NotEmpty(mockT, nil), "Nil is empty") - False(t, NotEmpty(mockT, []string{}), "Empty string array is empty") - False(t, NotEmpty(mockT, 0), "Zero int value is empty") - False(t, NotEmpty(mockT, false), "False value is empty") - False(t, NotEmpty(mockT, make(chan struct{})), "Channel without values is empty") - - True(t, NotEmpty(mockT, "something"), "Non Empty string is not empty") - True(t, NotEmpty(mockT, errors.New("something")), "Non nil object is not empty") - True(t, NotEmpty(mockT, []string{"something"}), "Non empty string array is not empty") - True(t, NotEmpty(mockT, 1), "Non-zero int value is 
not empty") - True(t, NotEmpty(mockT, true), "True value is not empty") - True(t, NotEmpty(mockT, chWithValue), "Channel with values is not empty") -} - -func Test_getLen(t *testing.T) { - falseCases := []interface{}{ - nil, - 0, - true, - false, - 'A', - struct{}{}, - } - for _, v := range falseCases { - ok, l := getLen(v) - False(t, ok, "Expected getLen fail to get length of %#v", v) - Equal(t, 0, l, "getLen should return 0 for %#v", v) - } - - ch := make(chan int, 5) - ch <- 1 - ch <- 2 - ch <- 3 - trueCases := []struct { - v interface{} - l int - }{ - {[]int{1, 2, 3}, 3}, - {[...]int{1, 2, 3}, 3}, - {"ABC", 3}, - {map[int]int{1: 2, 2: 4, 3: 6}, 3}, - {ch, 3}, - - {[]int{}, 0}, - {map[int]int{}, 0}, - {make(chan int), 0}, - - {[]int(nil), 0}, - {map[int]int(nil), 0}, - {(chan int)(nil), 0}, - } - - for _, c := range trueCases { - ok, l := getLen(c.v) - True(t, ok, "Expected getLen success to get length of %#v", c.v) - Equal(t, c.l, l) - } -} - -func TestLen(t *testing.T) { - mockT := new(testing.T) - - False(t, Len(mockT, nil, 0), "nil does not have length") - False(t, Len(mockT, 0, 0), "int does not have length") - False(t, Len(mockT, true, 0), "true does not have length") - False(t, Len(mockT, false, 0), "false does not have length") - False(t, Len(mockT, 'A', 0), "Rune does not have length") - False(t, Len(mockT, struct{}{}, 0), "Struct does not have length") - - ch := make(chan int, 5) - ch <- 1 - ch <- 2 - ch <- 3 - - cases := []struct { - v interface{} - l int - }{ - {[]int{1, 2, 3}, 3}, - {[...]int{1, 2, 3}, 3}, - {"ABC", 3}, - {map[int]int{1: 2, 2: 4, 3: 6}, 3}, - {ch, 3}, - - {[]int{}, 0}, - {map[int]int{}, 0}, - {make(chan int), 0}, - - {[]int(nil), 0}, - {map[int]int(nil), 0}, - {(chan int)(nil), 0}, - } - - for _, c := range cases { - True(t, Len(mockT, c.v, c.l), "%#v have %d items", c.v, c.l) - } - - cases = []struct { - v interface{} - l int - }{ - {[]int{1, 2, 3}, 4}, - {[...]int{1, 2, 3}, 2}, - {"ABC", 2}, - {map[int]int{1: 2, 2: 4, 3: 6}, 4}, - {ch, 2}, - - {[]int{}, 1}, - {map[int]int{}, 1}, - {make(chan int), 1}, - - {[]int(nil), 1}, - {map[int]int(nil), 1}, - {(chan int)(nil), 1}, - } - - for _, c := range cases { - False(t, Len(mockT, c.v, c.l), "%#v have %d items", c.v, c.l) - } -} - -func TestWithinDuration(t *testing.T) { - - mockT := new(testing.T) - a := time.Now() - b := a.Add(10 * time.Second) - - True(t, WithinDuration(mockT, a, b, 10*time.Second), "A 10s difference is within a 10s time difference") - True(t, WithinDuration(mockT, b, a, 10*time.Second), "A 10s difference is within a 10s time difference") - - False(t, WithinDuration(mockT, a, b, 9*time.Second), "A 10s difference is not within a 9s time difference") - False(t, WithinDuration(mockT, b, a, 9*time.Second), "A 10s difference is not within a 9s time difference") - - False(t, WithinDuration(mockT, a, b, -9*time.Second), "A 10s difference is not within a 9s time difference") - False(t, WithinDuration(mockT, b, a, -9*time.Second), "A 10s difference is not within a 9s time difference") - - False(t, WithinDuration(mockT, a, b, -11*time.Second), "A 10s difference is not within a 9s time difference") - False(t, WithinDuration(mockT, b, a, -11*time.Second), "A 10s difference is not within a 9s time difference") -} - -func TestInDelta(t *testing.T) { - mockT := new(testing.T) - - True(t, InDelta(mockT, 1.001, 1, 0.01), "|1.001 - 1| <= 0.01") - True(t, InDelta(mockT, 1, 1.001, 0.01), "|1 - 1.001| <= 0.01") - True(t, InDelta(mockT, 1, 2, 1), "|1 - 2| <= 1") - False(t, InDelta(mockT, 1, 2, 0.5), "Expected |1 
- 2| <= 0.5 to fail") - False(t, InDelta(mockT, 2, 1, 0.5), "Expected |2 - 1| <= 0.5 to fail") - False(t, InDelta(mockT, "", nil, 1), "Expected non numerals to fail") - False(t, InDelta(mockT, 42, math.NaN(), 0.01), "Expected NaN for actual to fail") - False(t, InDelta(mockT, math.NaN(), 42, 0.01), "Expected NaN for expected to fail") - - cases := []struct { - a, b interface{} - delta float64 - }{ - {uint8(2), uint8(1), 1}, - {uint16(2), uint16(1), 1}, - {uint32(2), uint32(1), 1}, - {uint64(2), uint64(1), 1}, - - {int(2), int(1), 1}, - {int8(2), int8(1), 1}, - {int16(2), int16(1), 1}, - {int32(2), int32(1), 1}, - {int64(2), int64(1), 1}, - - {float32(2), float32(1), 1}, - {float64(2), float64(1), 1}, - } - - for _, tc := range cases { - True(t, InDelta(mockT, tc.a, tc.b, tc.delta), "Expected |%V - %V| <= %v", tc.a, tc.b, tc.delta) - } -} - -func TestInDeltaSlice(t *testing.T) { - mockT := new(testing.T) - - True(t, InDeltaSlice(mockT, - []float64{1.001, 0.999}, - []float64{1, 1}, - 0.1), "{1.001, 0.009} is element-wise close to {1, 1} in delta=0.1") - - True(t, InDeltaSlice(mockT, - []float64{1, 2}, - []float64{0, 3}, - 1), "{1, 2} is element-wise close to {0, 3} in delta=1") - - False(t, InDeltaSlice(mockT, - []float64{1, 2}, - []float64{0, 3}, - 0.1), "{1, 2} is not element-wise close to {0, 3} in delta=0.1") - - False(t, InDeltaSlice(mockT, "", nil, 1), "Expected non numeral slices to fail") -} - -func TestInEpsilon(t *testing.T) { - mockT := new(testing.T) - - cases := []struct { - a, b interface{} - epsilon float64 - }{ - {uint8(2), uint16(2), .001}, - {2.1, 2.2, 0.1}, - {2.2, 2.1, 0.1}, - {-2.1, -2.2, 0.1}, - {-2.2, -2.1, 0.1}, - {uint64(100), uint8(101), 0.01}, - {0.1, -0.1, 2}, - } - - for _, tc := range cases { - True(t, InEpsilon(mockT, tc.a, tc.b, tc.epsilon, "Expected %V and %V to have a relative difference of %v", tc.a, tc.b, tc.epsilon)) - } - - cases = []struct { - a, b interface{} - epsilon float64 - }{ - {uint8(2), int16(-2), .001}, - {uint64(100), uint8(102), 0.01}, - {2.1, 2.2, 0.001}, - {2.2, 2.1, 0.001}, - {2.1, -2.2, 1}, - {2.1, "bla-bla", 0}, - {0.1, -0.1, 1.99}, - } - - for _, tc := range cases { - False(t, InEpsilon(mockT, tc.a, tc.b, tc.epsilon, "Expected %V and %V to have a relative difference of %v", tc.a, tc.b, tc.epsilon)) - } - -} - -func TestInEpsilonSlice(t *testing.T) { - mockT := new(testing.T) - - True(t, InEpsilonSlice(mockT, - []float64{2.2, 2.0}, - []float64{2.1, 2.1}, - 0.06), "{2.2, 2.0} is element-wise close to {2.1, 2.1} in espilon=0.06") - - False(t, InEpsilonSlice(mockT, - []float64{2.2, 2.0}, - []float64{2.1, 2.1}, - 0.04), "{2.2, 2.0} is not element-wise close to {2.1, 2.1} in espilon=0.04") - - False(t, InEpsilonSlice(mockT, "", nil, 1), "Expected non numeral slices to fail") -} - -func TestRegexp(t *testing.T) { - mockT := new(testing.T) - - cases := []struct { - rx, str string - }{ - {"^start", "start of the line"}, - {"end$", "in the end"}, - {"[0-9]{3}[.-]?[0-9]{2}[.-]?[0-9]{2}", "My phone number is 650.12.34"}, - } - - for _, tc := range cases { - True(t, Regexp(mockT, tc.rx, tc.str)) - True(t, Regexp(mockT, regexp.MustCompile(tc.rx), tc.str)) - False(t, NotRegexp(mockT, tc.rx, tc.str)) - False(t, NotRegexp(mockT, regexp.MustCompile(tc.rx), tc.str)) - } - - cases = []struct { - rx, str string - }{ - {"^asdfastart", "Not the start of the line"}, - {"end$", "in the end."}, - {"[0-9]{3}[.-]?[0-9]{2}[.-]?[0-9]{2}", "My phone number is 650.12a.34"}, - } - - for _, tc := range cases { - False(t, Regexp(mockT, tc.rx, tc.str), "Expected \"%s\" 
to not match \"%s\"", tc.rx, tc.str) - False(t, Regexp(mockT, regexp.MustCompile(tc.rx), tc.str)) - True(t, NotRegexp(mockT, tc.rx, tc.str)) - True(t, NotRegexp(mockT, regexp.MustCompile(tc.rx), tc.str)) - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions_test.go b/libnetwork/Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions_test.go deleted file mode 100644 index 3df3f3917a..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions_test.go +++ /dev/null @@ -1,511 +0,0 @@ -package assert - -import ( - "errors" - "regexp" - "testing" - "time" -) - -func TestImplementsWrapper(t *testing.T) { - assert := New(new(testing.T)) - - if !assert.Implements((*AssertionTesterInterface)(nil), new(AssertionTesterConformingObject)) { - t.Error("Implements method should return true: AssertionTesterConformingObject implements AssertionTesterInterface") - } - if assert.Implements((*AssertionTesterInterface)(nil), new(AssertionTesterNonConformingObject)) { - t.Error("Implements method should return false: AssertionTesterNonConformingObject does not implements AssertionTesterInterface") - } -} - -func TestIsTypeWrapper(t *testing.T) { - assert := New(new(testing.T)) - - if !assert.IsType(new(AssertionTesterConformingObject), new(AssertionTesterConformingObject)) { - t.Error("IsType should return true: AssertionTesterConformingObject is the same type as AssertionTesterConformingObject") - } - if assert.IsType(new(AssertionTesterConformingObject), new(AssertionTesterNonConformingObject)) { - t.Error("IsType should return false: AssertionTesterConformingObject is not the same type as AssertionTesterNonConformingObject") - } - -} - -func TestEqualWrapper(t *testing.T) { - assert := New(new(testing.T)) - - if !assert.Equal("Hello World", "Hello World") { - t.Error("Equal should return true") - } - if !assert.Equal(123, 123) { - t.Error("Equal should return true") - } - if !assert.Equal(123.5, 123.5) { - t.Error("Equal should return true") - } - if !assert.Equal([]byte("Hello World"), []byte("Hello World")) { - t.Error("Equal should return true") - } - if !assert.Equal(nil, nil) { - t.Error("Equal should return true") - } -} - -func TestEqualValuesWrapper(t *testing.T) { - assert := New(new(testing.T)) - - if !assert.EqualValues(uint32(10), int32(10)) { - t.Error("EqualValues should return true") - } -} - -func TestNotNilWrapper(t *testing.T) { - assert := New(new(testing.T)) - - if !assert.NotNil(new(AssertionTesterConformingObject)) { - t.Error("NotNil should return true: object is not nil") - } - if assert.NotNil(nil) { - t.Error("NotNil should return false: object is nil") - } - -} - -func TestNilWrapper(t *testing.T) { - assert := New(new(testing.T)) - - if !assert.Nil(nil) { - t.Error("Nil should return true: object is nil") - } - if assert.Nil(new(AssertionTesterConformingObject)) { - t.Error("Nil should return false: object is not nil") - } - -} - -func TestTrueWrapper(t *testing.T) { - assert := New(new(testing.T)) - - if !assert.True(true) { - t.Error("True should return true") - } - if assert.True(false) { - t.Error("True should return false") - } - -} - -func TestFalseWrapper(t *testing.T) { - assert := New(new(testing.T)) - - if !assert.False(false) { - t.Error("False should return true") - } - if assert.False(true) { - t.Error("False should return false") - } - -} - -func TestExactlyWrapper(t *testing.T) { - assert := New(new(testing.T)) - - a := float32(1) - b := float64(1) - 
c := float32(1) - d := float32(2) - - if assert.Exactly(a, b) { - t.Error("Exactly should return false") - } - if assert.Exactly(a, d) { - t.Error("Exactly should return false") - } - if !assert.Exactly(a, c) { - t.Error("Exactly should return true") - } - - if assert.Exactly(nil, a) { - t.Error("Exactly should return false") - } - if assert.Exactly(a, nil) { - t.Error("Exactly should return false") - } - -} - -func TestNotEqualWrapper(t *testing.T) { - - assert := New(new(testing.T)) - - if !assert.NotEqual("Hello World", "Hello World!") { - t.Error("NotEqual should return true") - } - if !assert.NotEqual(123, 1234) { - t.Error("NotEqual should return true") - } - if !assert.NotEqual(123.5, 123.55) { - t.Error("NotEqual should return true") - } - if !assert.NotEqual([]byte("Hello World"), []byte("Hello World!")) { - t.Error("NotEqual should return true") - } - if !assert.NotEqual(nil, new(AssertionTesterConformingObject)) { - t.Error("NotEqual should return true") - } -} - -func TestContainsWrapper(t *testing.T) { - - assert := New(new(testing.T)) - list := []string{"Foo", "Bar"} - - if !assert.Contains("Hello World", "Hello") { - t.Error("Contains should return true: \"Hello World\" contains \"Hello\"") - } - if assert.Contains("Hello World", "Salut") { - t.Error("Contains should return false: \"Hello World\" does not contain \"Salut\"") - } - - if !assert.Contains(list, "Foo") { - t.Error("Contains should return true: \"[\"Foo\", \"Bar\"]\" contains \"Foo\"") - } - if assert.Contains(list, "Salut") { - t.Error("Contains should return false: \"[\"Foo\", \"Bar\"]\" does not contain \"Salut\"") - } - -} - -func TestNotContainsWrapper(t *testing.T) { - - assert := New(new(testing.T)) - list := []string{"Foo", "Bar"} - - if !assert.NotContains("Hello World", "Hello!") { - t.Error("NotContains should return true: \"Hello World\" does not contain \"Hello!\"") - } - if assert.NotContains("Hello World", "Hello") { - t.Error("NotContains should return false: \"Hello World\" contains \"Hello\"") - } - - if !assert.NotContains(list, "Foo!") { - t.Error("NotContains should return true: \"[\"Foo\", \"Bar\"]\" does not contain \"Foo!\"") - } - if assert.NotContains(list, "Foo") { - t.Error("NotContains should return false: \"[\"Foo\", \"Bar\"]\" contains \"Foo\"") - } - -} - -func TestConditionWrapper(t *testing.T) { - - assert := New(new(testing.T)) - - if !assert.Condition(func() bool { return true }, "Truth") { - t.Error("Condition should return true") - } - - if assert.Condition(func() bool { return false }, "Lie") { - t.Error("Condition should return false") - } - -} - -func TestDidPanicWrapper(t *testing.T) { - - if funcDidPanic, _ := didPanic(func() { - panic("Panic!") - }); !funcDidPanic { - t.Error("didPanic should return true") - } - - if funcDidPanic, _ := didPanic(func() { - }); funcDidPanic { - t.Error("didPanic should return false") - } - -} - -func TestPanicsWrapper(t *testing.T) { - - assert := New(new(testing.T)) - - if !assert.Panics(func() { - panic("Panic!") - }) { - t.Error("Panics should return true") - } - - if assert.Panics(func() { - }) { - t.Error("Panics should return false") - } - -} - -func TestNotPanicsWrapper(t *testing.T) { - - assert := New(new(testing.T)) - - if !assert.NotPanics(func() { - }) { - t.Error("NotPanics should return true") - } - - if assert.NotPanics(func() { - panic("Panic!") - }) { - t.Error("NotPanics should return false") - } - -} - -func TestNoErrorWrapper(t *testing.T) { - assert := New(t) - mockAssert := New(new(testing.T)) - - // start with a nil 
error - var err error - - assert.True(mockAssert.NoError(err), "NoError should return True for nil arg") - - // now set an error - err = errors.New("Some error") - - assert.False(mockAssert.NoError(err), "NoError with error should return False") - -} - -func TestErrorWrapper(t *testing.T) { - assert := New(t) - mockAssert := New(new(testing.T)) - - // start with a nil error - var err error - - assert.False(mockAssert.Error(err), "Error should return False for nil arg") - - // now set an error - err = errors.New("Some error") - - assert.True(mockAssert.Error(err), "Error with error should return True") - -} - -func TestEqualErrorWrapper(t *testing.T) { - assert := New(t) - mockAssert := New(new(testing.T)) - - // start with a nil error - var err error - assert.False(mockAssert.EqualError(err, ""), - "EqualError should return false for nil arg") - - // now set an error - err = errors.New("some error") - assert.False(mockAssert.EqualError(err, "Not some error"), - "EqualError should return false for different error string") - assert.True(mockAssert.EqualError(err, "some error"), - "EqualError should return true") -} - -func TestEmptyWrapper(t *testing.T) { - assert := New(t) - mockAssert := New(new(testing.T)) - - assert.True(mockAssert.Empty(""), "Empty string is empty") - assert.True(mockAssert.Empty(nil), "Nil is empty") - assert.True(mockAssert.Empty([]string{}), "Empty string array is empty") - assert.True(mockAssert.Empty(0), "Zero int value is empty") - assert.True(mockAssert.Empty(false), "False value is empty") - - assert.False(mockAssert.Empty("something"), "Non Empty string is not empty") - assert.False(mockAssert.Empty(errors.New("something")), "Non nil object is not empty") - assert.False(mockAssert.Empty([]string{"something"}), "Non empty string array is not empty") - assert.False(mockAssert.Empty(1), "Non-zero int value is not empty") - assert.False(mockAssert.Empty(true), "True value is not empty") - -} - -func TestNotEmptyWrapper(t *testing.T) { - assert := New(t) - mockAssert := New(new(testing.T)) - - assert.False(mockAssert.NotEmpty(""), "Empty string is empty") - assert.False(mockAssert.NotEmpty(nil), "Nil is empty") - assert.False(mockAssert.NotEmpty([]string{}), "Empty string array is empty") - assert.False(mockAssert.NotEmpty(0), "Zero int value is empty") - assert.False(mockAssert.NotEmpty(false), "False value is empty") - - assert.True(mockAssert.NotEmpty("something"), "Non Empty string is not empty") - assert.True(mockAssert.NotEmpty(errors.New("something")), "Non nil object is not empty") - assert.True(mockAssert.NotEmpty([]string{"something"}), "Non empty string array is not empty") - assert.True(mockAssert.NotEmpty(1), "Non-zero int value is not empty") - assert.True(mockAssert.NotEmpty(true), "True value is not empty") - -} - -func TestLenWrapper(t *testing.T) { - assert := New(t) - mockAssert := New(new(testing.T)) - - assert.False(mockAssert.Len(nil, 0), "nil does not have length") - assert.False(mockAssert.Len(0, 0), "int does not have length") - assert.False(mockAssert.Len(true, 0), "true does not have length") - assert.False(mockAssert.Len(false, 0), "false does not have length") - assert.False(mockAssert.Len('A', 0), "Rune does not have length") - assert.False(mockAssert.Len(struct{}{}, 0), "Struct does not have length") - - ch := make(chan int, 5) - ch <- 1 - ch <- 2 - ch <- 3 - - cases := []struct { - v interface{} - l int - }{ - {[]int{1, 2, 3}, 3}, - {[...]int{1, 2, 3}, 3}, - {"ABC", 3}, - {map[int]int{1: 2, 2: 4, 3: 6}, 3}, - {ch, 3}, - - {[]int{}, 
0}, - {map[int]int{}, 0}, - {make(chan int), 0}, - - {[]int(nil), 0}, - {map[int]int(nil), 0}, - {(chan int)(nil), 0}, - } - - for _, c := range cases { - assert.True(mockAssert.Len(c.v, c.l), "%#v have %d items", c.v, c.l) - } -} - -func TestWithinDurationWrapper(t *testing.T) { - assert := New(t) - mockAssert := New(new(testing.T)) - a := time.Now() - b := a.Add(10 * time.Second) - - assert.True(mockAssert.WithinDuration(a, b, 10*time.Second), "A 10s difference is within a 10s time difference") - assert.True(mockAssert.WithinDuration(b, a, 10*time.Second), "A 10s difference is within a 10s time difference") - - assert.False(mockAssert.WithinDuration(a, b, 9*time.Second), "A 10s difference is not within a 9s time difference") - assert.False(mockAssert.WithinDuration(b, a, 9*time.Second), "A 10s difference is not within a 9s time difference") - - assert.False(mockAssert.WithinDuration(a, b, -9*time.Second), "A 10s difference is not within a 9s time difference") - assert.False(mockAssert.WithinDuration(b, a, -9*time.Second), "A 10s difference is not within a 9s time difference") - - assert.False(mockAssert.WithinDuration(a, b, -11*time.Second), "A 10s difference is not within a 9s time difference") - assert.False(mockAssert.WithinDuration(b, a, -11*time.Second), "A 10s difference is not within a 9s time difference") -} - -func TestInDeltaWrapper(t *testing.T) { - assert := New(new(testing.T)) - - True(t, assert.InDelta(1.001, 1, 0.01), "|1.001 - 1| <= 0.01") - True(t, assert.InDelta(1, 1.001, 0.01), "|1 - 1.001| <= 0.01") - True(t, assert.InDelta(1, 2, 1), "|1 - 2| <= 1") - False(t, assert.InDelta(1, 2, 0.5), "Expected |1 - 2| <= 0.5 to fail") - False(t, assert.InDelta(2, 1, 0.5), "Expected |2 - 1| <= 0.5 to fail") - False(t, assert.InDelta("", nil, 1), "Expected non numerals to fail") - - cases := []struct { - a, b interface{} - delta float64 - }{ - {uint8(2), uint8(1), 1}, - {uint16(2), uint16(1), 1}, - {uint32(2), uint32(1), 1}, - {uint64(2), uint64(1), 1}, - - {int(2), int(1), 1}, - {int8(2), int8(1), 1}, - {int16(2), int16(1), 1}, - {int32(2), int32(1), 1}, - {int64(2), int64(1), 1}, - - {float32(2), float32(1), 1}, - {float64(2), float64(1), 1}, - } - - for _, tc := range cases { - True(t, assert.InDelta(tc.a, tc.b, tc.delta), "Expected |%V - %V| <= %v", tc.a, tc.b, tc.delta) - } -} - -func TestInEpsilonWrapper(t *testing.T) { - assert := New(new(testing.T)) - - cases := []struct { - a, b interface{} - epsilon float64 - }{ - {uint8(2), uint16(2), .001}, - {2.1, 2.2, 0.1}, - {2.2, 2.1, 0.1}, - {-2.1, -2.2, 0.1}, - {-2.2, -2.1, 0.1}, - {uint64(100), uint8(101), 0.01}, - {0.1, -0.1, 2}, - } - - for _, tc := range cases { - True(t, assert.InEpsilon(tc.a, tc.b, tc.epsilon, "Expected %V and %V to have a relative difference of %v", tc.a, tc.b, tc.epsilon)) - } - - cases = []struct { - a, b interface{} - epsilon float64 - }{ - {uint8(2), int16(-2), .001}, - {uint64(100), uint8(102), 0.01}, - {2.1, 2.2, 0.001}, - {2.2, 2.1, 0.001}, - {2.1, -2.2, 1}, - {2.1, "bla-bla", 0}, - {0.1, -0.1, 1.99}, - } - - for _, tc := range cases { - False(t, assert.InEpsilon(tc.a, tc.b, tc.epsilon, "Expected %V and %V to have a relative difference of %v", tc.a, tc.b, tc.epsilon)) - } -} - -func TestRegexpWrapper(t *testing.T) { - - assert := New(new(testing.T)) - - cases := []struct { - rx, str string - }{ - {"^start", "start of the line"}, - {"end$", "in the end"}, - {"[0-9]{3}[.-]?[0-9]{2}[.-]?[0-9]{2}", "My phone number is 650.12.34"}, - } - - for _, tc := range cases { - True(t, assert.Regexp(tc.rx, tc.str)) - 
True(t, assert.Regexp(regexp.MustCompile(tc.rx), tc.str)) - False(t, assert.NotRegexp(tc.rx, tc.str)) - False(t, assert.NotRegexp(regexp.MustCompile(tc.rx), tc.str)) - } - - cases = []struct { - rx, str string - }{ - {"^asdfastart", "Not the start of the line"}, - {"end$", "in the end."}, - {"[0-9]{3}[.-]?[0-9]{2}[.-]?[0-9]{2}", "My phone number is 650.12a.34"}, - } - - for _, tc := range cases { - False(t, assert.Regexp(tc.rx, tc.str), "Expected \"%s\" to not match \"%s\"", tc.rx, tc.str) - False(t, assert.Regexp(regexp.MustCompile(tc.rx), tc.str)) - True(t, assert.NotRegexp(tc.rx, tc.str)) - True(t, assert.NotRegexp(regexp.MustCompile(tc.rx), tc.str)) - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/stretchr/testify/assert/http_assertions_test.go b/libnetwork/Godeps/_workspace/src/github.com/stretchr/testify/assert/http_assertions_test.go deleted file mode 100644 index 684c2d5d1c..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/stretchr/testify/assert/http_assertions_test.go +++ /dev/null @@ -1,86 +0,0 @@ -package assert - -import ( - "fmt" - "net/http" - "net/url" - "testing" -) - -func httpOK(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) -} - -func httpRedirect(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusTemporaryRedirect) -} - -func httpError(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusInternalServerError) -} - -func TestHTTPStatuses(t *testing.T) { - assert := New(t) - mockT := new(testing.T) - - assert.Equal(HTTPSuccess(mockT, httpOK, "GET", "/", nil), true) - assert.Equal(HTTPSuccess(mockT, httpRedirect, "GET", "/", nil), false) - assert.Equal(HTTPSuccess(mockT, httpError, "GET", "/", nil), false) - - assert.Equal(HTTPRedirect(mockT, httpOK, "GET", "/", nil), false) - assert.Equal(HTTPRedirect(mockT, httpRedirect, "GET", "/", nil), true) - assert.Equal(HTTPRedirect(mockT, httpError, "GET", "/", nil), false) - - assert.Equal(HTTPError(mockT, httpOK, "GET", "/", nil), false) - assert.Equal(HTTPError(mockT, httpRedirect, "GET", "/", nil), false) - assert.Equal(HTTPError(mockT, httpError, "GET", "/", nil), true) -} - -func TestHTTPStatusesWrapper(t *testing.T) { - assert := New(t) - mockAssert := New(new(testing.T)) - - assert.Equal(mockAssert.HTTPSuccess(httpOK, "GET", "/", nil), true) - assert.Equal(mockAssert.HTTPSuccess(httpRedirect, "GET", "/", nil), false) - assert.Equal(mockAssert.HTTPSuccess(httpError, "GET", "/", nil), false) - - assert.Equal(mockAssert.HTTPRedirect(httpOK, "GET", "/", nil), false) - assert.Equal(mockAssert.HTTPRedirect(httpRedirect, "GET", "/", nil), true) - assert.Equal(mockAssert.HTTPRedirect(httpError, "GET", "/", nil), false) - - assert.Equal(mockAssert.HTTPError(httpOK, "GET", "/", nil), false) - assert.Equal(mockAssert.HTTPError(httpRedirect, "GET", "/", nil), false) - assert.Equal(mockAssert.HTTPError(httpError, "GET", "/", nil), true) -} - -func httpHelloName(w http.ResponseWriter, r *http.Request) { - name := r.FormValue("name") - w.Write([]byte(fmt.Sprintf("Hello, %s!", name))) -} - -func TestHttpBody(t *testing.T) { - assert := New(t) - mockT := new(testing.T) - - assert.True(HTTPBodyContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "Hello, World!")) - assert.True(HTTPBodyContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "World")) - assert.False(HTTPBodyContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "world")) - - 
assert.False(HTTPBodyNotContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "Hello, World!")) - assert.False(HTTPBodyNotContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "World")) - assert.True(HTTPBodyNotContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "world")) -} - -func TestHttpBodyWrappers(t *testing.T) { - assert := New(t) - mockAssert := New(new(testing.T)) - - assert.True(mockAssert.HTTPBodyContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "Hello, World!")) - assert.True(mockAssert.HTTPBodyContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "World")) - assert.False(mockAssert.HTTPBodyContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "world")) - - assert.False(mockAssert.HTTPBodyNotContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "Hello, World!")) - assert.False(mockAssert.HTTPBodyNotContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "World")) - assert.True(mockAssert.HTTPBodyNotContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "world")) - -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/syndtr/gocapability/LICENSE b/libnetwork/Godeps/_workspace/src/github.com/syndtr/gocapability/LICENSE new file mode 100644 index 0000000000..80dd96de77 --- /dev/null +++ b/libnetwork/Godeps/_workspace/src/github.com/syndtr/gocapability/LICENSE @@ -0,0 +1,24 @@ +Copyright 2013 Suryandaru Triandana +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/libnetwork/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/capability_test.go b/libnetwork/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/capability_test.go deleted file mode 100644 index 8108655c05..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/capability_test.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package capability - -import "testing" - -func TestState(t *testing.T) { - testEmpty := func(name string, c Capabilities, whats CapType) { - for i := CapType(1); i <= BOUNDING; i <<= 1 { - if (i&whats) != 0 && !c.Empty(i) { - t.Errorf(name+": capabilities set %q wasn't empty", i) - } - } - } - testFull := func(name string, c Capabilities, whats CapType) { - for i := CapType(1); i <= BOUNDING; i <<= 1 { - if (i&whats) != 0 && !c.Full(i) { - t.Errorf(name+": capabilities set %q wasn't full", i) - } - } - } - testPartial := func(name string, c Capabilities, whats CapType) { - for i := CapType(1); i <= BOUNDING; i <<= 1 { - if (i&whats) != 0 && (c.Empty(i) || c.Full(i)) { - t.Errorf(name+": capabilities set %q wasn't partial", i) - } - } - } - testGet := func(name string, c Capabilities, whats CapType, max Cap) { - for i := CapType(1); i <= BOUNDING; i <<= 1 { - if (i & whats) == 0 { - continue - } - for j := Cap(0); j <= max; j++ { - if !c.Get(i, j) { - t.Errorf(name+": capability %q wasn't found on %q", j, i) - } - } - } - } - - capf := new(capsFile) - capf.data.version = 2 - for _, tc := range []struct { - name string - c Capabilities - sets CapType - max Cap - }{ - {"v1", new(capsV1), EFFECTIVE | PERMITTED, CAP_AUDIT_CONTROL}, - {"v3", new(capsV3), EFFECTIVE | PERMITTED | BOUNDING, CAP_LAST_CAP}, - {"file_v1", new(capsFile), EFFECTIVE | PERMITTED, CAP_AUDIT_CONTROL}, - {"file_v2", capf, EFFECTIVE | PERMITTED, CAP_LAST_CAP}, - } { - testEmpty(tc.name, tc.c, tc.sets) - tc.c.Fill(CAPS | BOUNDS) - testFull(tc.name, tc.c, tc.sets) - testGet(tc.name, tc.c, tc.sets, tc.max) - tc.c.Clear(CAPS | BOUNDS) - testEmpty(tc.name, tc.c, tc.sets) - for i := CapType(1); i <= BOUNDING; i <<= 1 { - for j := Cap(0); j <= CAP_LAST_CAP; j++ { - tc.c.Set(i, j) - } - } - testFull(tc.name, tc.c, tc.sets) - testGet(tc.name, tc.c, tc.sets, tc.max) - for i := CapType(1); i <= BOUNDING; i <<= 1 { - for j := Cap(0); j <= CAP_LAST_CAP; j++ { - tc.c.Unset(i, j) - } - } - testEmpty(tc.name, tc.c, tc.sets) - tc.c.Set(PERMITTED, CAP_CHOWN) - testPartial(tc.name, tc.c, PERMITTED) - tc.c.Clear(CAPS | BOUNDS) - testEmpty(tc.name, tc.c, tc.sets) - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/enumgen/gen.go b/libnetwork/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/enumgen/gen.go deleted file mode 100644 index 4c733809b1..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/enumgen/gen.go +++ /dev/null @@ -1,92 +0,0 @@ -package main - -import ( - "bytes" - "fmt" - "go/ast" - "go/format" - "go/parser" - "go/token" - "io/ioutil" - "log" - "os" - "strings" -) - -const fileName = "enum.go" -const genName = "enum_gen.go" - -type generator struct { - buf bytes.Buffer - caps []string -} - -func (g *generator) writeHeader() { - g.buf.WriteString("// generated file; DO NOT EDIT - use go generate in directory with source\n") - g.buf.WriteString("\n") - g.buf.WriteString("package capability") -} - -func (g *generator) writeStringFunc() { - g.buf.WriteString("\n") - g.buf.WriteString("func (c Cap) String() string {\n") - g.buf.WriteString("switch c {\n") - for _, cap := range g.caps { - fmt.Fprintf(&g.buf, "case %s:\n", cap) - fmt.Fprintf(&g.buf, "return \"%s\"\n", strings.ToLower(cap[4:])) - } - g.buf.WriteString("}\n") - g.buf.WriteString("return \"unknown\"\n") - g.buf.WriteString("}\n") -} - -func (g *generator) writeListFunc() { - g.buf.WriteString("\n") - g.buf.WriteString("// List returns list of all supported capabilities\n") - 
g.buf.WriteString("func List() []Cap {\n") - g.buf.WriteString("return []Cap{\n") - for _, cap := range g.caps { - fmt.Fprintf(&g.buf, "%s,\n", cap) - } - g.buf.WriteString("}\n") - g.buf.WriteString("}\n") -} - -func main() { - fs := token.NewFileSet() - parsedFile, err := parser.ParseFile(fs, fileName, nil, 0) - if err != nil { - log.Fatal(err) - } - var caps []string - for _, decl := range parsedFile.Decls { - decl, ok := decl.(*ast.GenDecl) - if !ok || decl.Tok != token.CONST { - continue - } - for _, spec := range decl.Specs { - vspec := spec.(*ast.ValueSpec) - name := vspec.Names[0].Name - if strings.HasPrefix(name, "CAP_") { - caps = append(caps, name) - } - } - } - g := &generator{caps: caps} - g.writeHeader() - g.writeStringFunc() - g.writeListFunc() - src, err := format.Source(g.buf.Bytes()) - if err != nil { - fmt.Println("generated invalid Go code") - fmt.Println(g.buf.String()) - log.Fatal(err) - } - fi, err := os.Stat(fileName) - if err != nil { - log.Fatal(err) - } - if err := ioutil.WriteFile(genName, src, fi.Mode().Perm()); err != nil { - log.Fatal(err) - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/ugorji/go/LICENSE b/libnetwork/Godeps/_workspace/src/github.com/ugorji/go/LICENSE new file mode 100644 index 0000000000..95a0f0541c --- /dev/null +++ b/libnetwork/Godeps/_workspace/src/github.com/ugorji/go/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2012-2015 Ugorji Nwoke. +All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/libnetwork/Godeps/_workspace/src/github.com/ugorji/go/codec/cbor_test.go b/libnetwork/Godeps/_workspace/src/github.com/ugorji/go/codec/cbor_test.go deleted file mode 100644 index 205dffa7d1..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/ugorji/go/codec/cbor_test.go +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. 
- -package codec - -import ( - "bufio" - "bytes" - "encoding/hex" - "math" - "os" - "regexp" - "strings" - "testing" -) - -func TestCborIndefiniteLength(t *testing.T) { - oldMapType := testCborH.MapType - defer func() { - testCborH.MapType = oldMapType - }() - testCborH.MapType = testMapStrIntfTyp - // var ( - // M1 map[string][]byte - // M2 map[uint64]bool - // L1 []interface{} - // S1 []string - // B1 []byte - // ) - var v, vv interface{} - // define it (v), encode it using indefinite lengths, decode it (vv), compare v to vv - v = map[string]interface{}{ - "one-byte-key": []byte{1, 2, 3, 4, 5, 6}, - "two-string-key": "two-value", - "three-list-key": []interface{}{true, false, uint64(1), int64(-1)}, - } - var buf bytes.Buffer - // buf.Reset() - e := NewEncoder(&buf, testCborH) - buf.WriteByte(cborBdIndefiniteMap) - //---- - buf.WriteByte(cborBdIndefiniteString) - e.MustEncode("one-") - e.MustEncode("byte-") - e.MustEncode("key") - buf.WriteByte(cborBdBreak) - - buf.WriteByte(cborBdIndefiniteBytes) - e.MustEncode([]byte{1, 2, 3}) - e.MustEncode([]byte{4, 5, 6}) - buf.WriteByte(cborBdBreak) - - //---- - buf.WriteByte(cborBdIndefiniteString) - e.MustEncode("two-") - e.MustEncode("string-") - e.MustEncode("key") - buf.WriteByte(cborBdBreak) - - buf.WriteByte(cborBdIndefiniteString) - e.MustEncode([]byte("two-")) // encode as bytes, to check robustness of code - e.MustEncode([]byte("value")) - buf.WriteByte(cborBdBreak) - - //---- - buf.WriteByte(cborBdIndefiniteString) - e.MustEncode("three-") - e.MustEncode("list-") - e.MustEncode("key") - buf.WriteByte(cborBdBreak) - - buf.WriteByte(cborBdIndefiniteArray) - e.MustEncode(true) - e.MustEncode(false) - e.MustEncode(uint64(1)) - e.MustEncode(int64(-1)) - buf.WriteByte(cborBdBreak) - - buf.WriteByte(cborBdBreak) // close map - - NewDecoderBytes(buf.Bytes(), testCborH).MustDecode(&vv) - if err := deepEqual(v, vv); err != nil { - logT(t, "-------- Before and After marshal do not match: Error: %v", err) - logT(t, " ....... GOLDEN: (%T) %#v", v, v) - logT(t, " ....... 
DECODED: (%T) %#v", vv, vv) - failT(t) - } -} - -type testCborGolden struct { - Base64 string `codec:"cbor"` - Hex string `codec:"hex"` - Roundtrip bool `codec:"roundtrip"` - Decoded interface{} `codec:"decoded"` - Diagnostic string `codec:"diagnostic"` - Skip bool `codec:"skip"` -} - -// Some tests are skipped because they include numbers outside the range of int64/uint64 -func doTestCborGoldens(t *testing.T) { - oldMapType := testCborH.MapType - defer func() { - testCborH.MapType = oldMapType - }() - testCborH.MapType = testMapStrIntfTyp - // decode test-cbor-goldens.json into a list of []*testCborGolden - // for each one, - // - decode hex into []byte bs - // - decode bs into interface{} v - // - compare both using deepequal - // - for any miss, record it - var gs []*testCborGolden - f, err := os.Open("test-cbor-goldens.json") - if err != nil { - logT(t, "error opening test-cbor-goldens.json: %v", err) - failT(t) - } - defer f.Close() - jh := new(JsonHandle) - jh.MapType = testMapStrIntfTyp - // d := NewDecoder(f, jh) - d := NewDecoder(bufio.NewReader(f), jh) - // err = d.Decode(&gs) - d.MustDecode(&gs) - if err != nil { - logT(t, "error json decoding test-cbor-goldens.json: %v", err) - failT(t) - } - - tagregex := regexp.MustCompile(`[\d]+\(.+?\)`) - hexregex := regexp.MustCompile(`h'([0-9a-fA-F]*)'`) - for i, g := range gs { - // fmt.Printf("%v, skip: %v, isTag: %v, %s\n", i, g.Skip, tagregex.MatchString(g.Diagnostic), g.Diagnostic) - // skip tags or simple or those with prefix, as we can't verify them. - if g.Skip || strings.HasPrefix(g.Diagnostic, "simple(") || tagregex.MatchString(g.Diagnostic) { - // fmt.Printf("%v: skipped\n", i) - logT(t, "[%v] skipping because skip=true OR unsupported simple value or Tag Value", i) - continue - } - // println("++++++++++++", i, "g.Diagnostic", g.Diagnostic) - if hexregex.MatchString(g.Diagnostic) { - // println(i, "g.Diagnostic matched hex") - if s2 := g.Diagnostic[2 : len(g.Diagnostic)-1]; s2 == "" { - g.Decoded = zeroByteSlice - } else if bs2, err2 := hex.DecodeString(s2); err2 == nil { - g.Decoded = bs2 - } - // fmt.Printf("%v: hex: %v\n", i, g.Decoded) - } - bs, err := hex.DecodeString(g.Hex) - if err != nil { - logT(t, "[%v] error hex decoding %s [%v]: %v", i, g.Hex, err) - failT(t) - } - var v interface{} - NewDecoderBytes(bs, testCborH).MustDecode(&v) - if _, ok := v.(RawExt); ok { - continue - } - // check the diagnostics to compare - switch g.Diagnostic { - case "Infinity": - b := math.IsInf(v.(float64), 1) - testCborError(t, i, math.Inf(1), v, nil, &b) - case "-Infinity": - b := math.IsInf(v.(float64), -1) - testCborError(t, i, math.Inf(-1), v, nil, &b) - case "NaN": - // println(i, "checking NaN") - b := math.IsNaN(v.(float64)) - testCborError(t, i, math.NaN(), v, nil, &b) - case "undefined": - b := v == nil - testCborError(t, i, nil, v, nil, &b) - default: - v0 := g.Decoded - // testCborCoerceJsonNumber(reflect.ValueOf(&v0)) - testCborError(t, i, v0, v, deepEqual(v0, v), nil) - } - } -} - -func testCborError(t *testing.T, i int, v0, v1 interface{}, err error, equal *bool) { - if err == nil && equal == nil { - // fmt.Printf("%v testCborError passed (err and equal nil)\n", i) - return - } - if err != nil { - logT(t, "[%v] deepEqual error: %v", i, err) - logT(t, " ....... GOLDEN: (%T) %#v", v0, v0) - logT(t, " ....... DECODED: (%T) %#v", v1, v1) - failT(t) - } - if equal != nil && !*equal { - logT(t, "[%v] values not equal", i) - logT(t, " ....... GOLDEN: (%T) %#v", v0, v0) - logT(t, " ....... 
DECODED: (%T) %#v", v1, v1) - failT(t) - } - // fmt.Printf("%v testCborError passed (checks passed)\n", i) -} - -func TestCborGoldens(t *testing.T) { - doTestCborGoldens(t) -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/ugorji/go/codec/codec_test.go b/libnetwork/Godeps/_workspace/src/github.com/ugorji/go/codec/codec_test.go deleted file mode 100644 index cd93556f7d..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/ugorji/go/codec/codec_test.go +++ /dev/null @@ -1,1117 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -// Test works by using a slice of interfaces. -// It can test for encoding/decoding into/from a nil interface{} -// or passing the object to encode/decode into. -// -// There are basically 2 main tests here. -// First test internally encodes and decodes things and verifies that -// the artifact was as expected. -// Second test will use python msgpack to create a bunch of golden files, -// read those files, and compare them to what it should be. It then -// writes those files back out and compares the byte streams. -// -// Taken together, the tests are pretty extensive. -// -// The following manual tests must be done: -// - TestCodecUnderlyingType -// - Set fastpathEnabled to false and run tests (to ensure that regular reflection works). -// We don't want to use a variable there so that code is ellided. - -import ( - "bytes" - "encoding/gob" - "flag" - "fmt" - "io/ioutil" - "math" - "net" - "net/rpc" - "os" - "os/exec" - "path/filepath" - "reflect" - "runtime" - "strconv" - "sync/atomic" - "testing" - "time" -) - -func init() { - testInitFlags() - testPreInitFns = append(testPreInitFns, testInit) -} - -type testVerifyArg int - -const ( - testVerifyMapTypeSame testVerifyArg = iota - testVerifyMapTypeStrIntf - testVerifyMapTypeIntfIntf - // testVerifySliceIntf - testVerifyForPython -) - -const testSkipRPCTests = false - -var ( - testVerbose bool - testInitDebug bool - testUseIoEncDec bool - testStructToArray bool - testCanonical bool - testWriteNoSymbols bool - testSkipIntf bool - - skipVerifyVal interface{} = &(struct{}{}) - - testMapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil)) - - // For Go Time, do not use a descriptive timezone. - // It's unnecessary, and makes it harder to do a reflect.DeepEqual. - // The Offset already tells what the offset should be, if not on UTC and unknown zone name. 
- timeLoc = time.FixedZone("", -8*60*60) // UTC-08:00 //time.UTC-8 - timeToCompare1 = time.Date(2012, 2, 2, 2, 2, 2, 2000, timeLoc).UTC() - timeToCompare2 = time.Date(1900, 2, 2, 2, 2, 2, 2000, timeLoc).UTC() - timeToCompare3 = time.Unix(0, 270).UTC() // use value that must be encoded as uint64 for nanoseconds (for cbor/msgpack comparison) - //timeToCompare4 = time.Time{}.UTC() // does not work well with simple cbor time encoding (overflow) - timeToCompare4 = time.Unix(-2013855848, 4223).UTC() - - table []interface{} // main items we encode - tableVerify []interface{} // we verify encoded things against this after decode - tableTestNilVerify []interface{} // for nil interface, use this to verify (rules are different) - tablePythonVerify []interface{} // for verifying for python, since Python sometimes - // will encode a float32 as float64, or large int as uint - testRpcInt = new(TestRpcInt) -) - -func testInitFlags() { - // delete(testDecOpts.ExtFuncs, timeTyp) - flag.BoolVar(&testVerbose, "tv", false, "Test Verbose") - flag.BoolVar(&testInitDebug, "tg", false, "Test Init Debug") - flag.BoolVar(&testUseIoEncDec, "ti", false, "Use IO Reader/Writer for Marshal/Unmarshal") - flag.BoolVar(&testStructToArray, "ts", false, "Set StructToArray option") - flag.BoolVar(&testWriteNoSymbols, "tn", false, "Set NoSymbols option") - flag.BoolVar(&testCanonical, "tc", false, "Set Canonical option") - flag.BoolVar(&testSkipIntf, "tf", false, "Skip Interfaces") -} - -type TestABC struct { - A, B, C string -} - -type TestRpcInt struct { - i int -} - -func (r *TestRpcInt) Update(n int, res *int) error { r.i = n; *res = r.i; return nil } -func (r *TestRpcInt) Square(ignore int, res *int) error { *res = r.i * r.i; return nil } -func (r *TestRpcInt) Mult(n int, res *int) error { *res = r.i * n; return nil } -func (r *TestRpcInt) EchoStruct(arg TestABC, res *string) error { - *res = fmt.Sprintf("%#v", arg) - return nil -} -func (r *TestRpcInt) Echo123(args []string, res *string) error { - *res = fmt.Sprintf("%#v", args) - return nil -} - -type testUnixNanoTimeExt struct{} - -func (x testUnixNanoTimeExt) WriteExt(interface{}) []byte { panic("unsupported") } -func (x testUnixNanoTimeExt) ReadExt(interface{}, []byte) { panic("unsupported") } -func (x testUnixNanoTimeExt) ConvertExt(v interface{}) interface{} { - switch v2 := v.(type) { - case time.Time: - return v2.UTC().UnixNano() - case *time.Time: - return v2.UTC().UnixNano() - default: - panic(fmt.Sprintf("unsupported format for time conversion: expecting time.Time; got %T", v)) - } -} -func (x testUnixNanoTimeExt) UpdateExt(dest interface{}, v interface{}) { - // fmt.Printf("testUnixNanoTimeExt.UpdateExt: v: %v\n", v) - tt := dest.(*time.Time) - switch v2 := v.(type) { - case int64: - *tt = time.Unix(0, v2).UTC() - case uint64: - *tt = time.Unix(0, int64(v2)).UTC() - //case float64: - //case string: - default: - panic(fmt.Sprintf("unsupported format for time conversion: expecting int64/uint64; got %T", v)) - } - // fmt.Printf("testUnixNanoTimeExt.UpdateExt: v: %v, tt: %#v\n", v, tt) -} - -func testVerifyVal(v interface{}, arg testVerifyArg) (v2 interface{}) { - //for python msgpack, - // - all positive integers are unsigned 64-bit ints - // - all floats are float64 - switch iv := v.(type) { - case int8: - if iv >= 0 { - v2 = uint64(iv) - } else { - v2 = int64(iv) - } - case int16: - if iv >= 0 { - v2 = uint64(iv) - } else { - v2 = int64(iv) - } - case int32: - if iv >= 0 { - v2 = uint64(iv) - } else { - v2 = int64(iv) - } - case int64: - if iv >= 0 { - v2 = 
uint64(iv) - } else { - v2 = int64(iv) - } - case uint8: - v2 = uint64(iv) - case uint16: - v2 = uint64(iv) - case uint32: - v2 = uint64(iv) - case uint64: - v2 = uint64(iv) - case float32: - v2 = float64(iv) - case float64: - v2 = float64(iv) - case []interface{}: - m2 := make([]interface{}, len(iv)) - for j, vj := range iv { - m2[j] = testVerifyVal(vj, arg) - } - v2 = m2 - case map[string]bool: - switch arg { - case testVerifyMapTypeSame: - m2 := make(map[string]bool) - for kj, kv := range iv { - m2[kj] = kv - } - v2 = m2 - case testVerifyMapTypeStrIntf, testVerifyForPython: - m2 := make(map[string]interface{}) - for kj, kv := range iv { - m2[kj] = kv - } - v2 = m2 - case testVerifyMapTypeIntfIntf: - m2 := make(map[interface{}]interface{}) - for kj, kv := range iv { - m2[kj] = kv - } - v2 = m2 - } - case map[string]interface{}: - switch arg { - case testVerifyMapTypeSame: - m2 := make(map[string]interface{}) - for kj, kv := range iv { - m2[kj] = testVerifyVal(kv, arg) - } - v2 = m2 - case testVerifyMapTypeStrIntf, testVerifyForPython: - m2 := make(map[string]interface{}) - for kj, kv := range iv { - m2[kj] = testVerifyVal(kv, arg) - } - v2 = m2 - case testVerifyMapTypeIntfIntf: - m2 := make(map[interface{}]interface{}) - for kj, kv := range iv { - m2[kj] = testVerifyVal(kv, arg) - } - v2 = m2 - } - case map[interface{}]interface{}: - m2 := make(map[interface{}]interface{}) - for kj, kv := range iv { - m2[testVerifyVal(kj, arg)] = testVerifyVal(kv, arg) - } - v2 = m2 - case time.Time: - switch arg { - case testVerifyForPython: - if iv2 := iv.UnixNano(); iv2 >= 0 { - v2 = uint64(iv2) - } else { - v2 = int64(iv2) - } - default: - v2 = v - } - default: - v2 = v - } - return -} - -func testInit() { - gob.Register(new(TestStruc)) - if testInitDebug { - ts0 := newTestStruc(2, false, !testSkipIntf, false) - fmt.Printf("====> depth: %v, ts: %#v\n", 2, ts0) - } - - testJsonH.Canonical = testCanonical - testCborH.Canonical = testCanonical - testSimpleH.Canonical = testCanonical - testBincH.Canonical = testCanonical - testMsgpackH.Canonical = testCanonical - - testJsonH.StructToArray = testStructToArray - testCborH.StructToArray = testStructToArray - testSimpleH.StructToArray = testStructToArray - testBincH.StructToArray = testStructToArray - testMsgpackH.StructToArray = testStructToArray - - testMsgpackH.RawToString = true - - if testWriteNoSymbols { - testBincH.AsSymbols = AsSymbolNone - } else { - testBincH.AsSymbols = AsSymbolAll - } - - // testMsgpackH.AddExt(byteSliceTyp, 0, testMsgpackH.BinaryEncodeExt, testMsgpackH.BinaryDecodeExt) - // testMsgpackH.AddExt(timeTyp, 1, testMsgpackH.TimeEncodeExt, testMsgpackH.TimeDecodeExt) - timeEncExt := func(rv reflect.Value) (bs []byte, err error) { - switch v2 := rv.Interface().(type) { - case time.Time: - bs = encodeTime(v2) - case *time.Time: - bs = encodeTime(*v2) - default: - err = fmt.Errorf("unsupported format for time conversion: expecting time.Time; got %T", v2) - } - return - } - timeDecExt := func(rv reflect.Value, bs []byte) (err error) { - tt, err := decodeTime(bs) - if err == nil { - *(rv.Interface().(*time.Time)) = tt - } - return - } - - // add extensions for msgpack, simple for time.Time, so we can encode/decode same way. 
- testMsgpackH.AddExt(timeTyp, 1, timeEncExt, timeDecExt) - testSimpleH.AddExt(timeTyp, 1, timeEncExt, timeDecExt) - testCborH.SetExt(timeTyp, 1, &testUnixNanoTimeExt{}) - testJsonH.SetExt(timeTyp, 1, &testUnixNanoTimeExt{}) - - primitives := []interface{}{ - int8(-8), - int16(-1616), - int32(-32323232), - int64(-6464646464646464), - uint8(192), - uint16(1616), - uint32(32323232), - uint64(6464646464646464), - byte(192), - float32(-3232.0), - float64(-6464646464.0), - float32(3232.0), - float64(6464646464.0), - false, - true, - nil, - "someday", - "", - "bytestring", - timeToCompare1, - timeToCompare2, - timeToCompare3, - timeToCompare4, - } - mapsAndStrucs := []interface{}{ - map[string]bool{ - "true": true, - "false": false, - }, - map[string]interface{}{ - "true": "True", - "false": false, - "uint16(1616)": uint16(1616), - }, - //add a complex combo map in here. (map has list which has map) - //note that after the first thing, everything else should be generic. - map[string]interface{}{ - "list": []interface{}{ - int16(1616), - int32(32323232), - true, - float32(-3232.0), - map[string]interface{}{ - "TRUE": true, - "FALSE": false, - }, - []interface{}{true, false}, - }, - "int32": int32(32323232), - "bool": true, - "LONG STRING": "123456789012345678901234567890123456789012345678901234567890", - "SHORT STRING": "1234567890", - }, - map[interface{}]interface{}{ - true: "true", - uint8(138): false, - "false": uint8(200), - }, - newTestStruc(0, false, !testSkipIntf, false), - } - - table = []interface{}{} - table = append(table, primitives...) //0-19 are primitives - table = append(table, primitives) //20 is a list of primitives - table = append(table, mapsAndStrucs...) //21-24 are maps. 25 is a *struct - - tableVerify = make([]interface{}, len(table)) - tableTestNilVerify = make([]interface{}, len(table)) - tablePythonVerify = make([]interface{}, len(table)) - - lp := len(primitives) - av := tableVerify - for i, v := range table { - if i == lp+3 { - av[i] = skipVerifyVal - continue - } - //av[i] = testVerifyVal(v, testVerifyMapTypeSame) - switch v.(type) { - case []interface{}: - av[i] = testVerifyVal(v, testVerifyMapTypeSame) - case map[string]interface{}: - av[i] = testVerifyVal(v, testVerifyMapTypeSame) - case map[interface{}]interface{}: - av[i] = testVerifyVal(v, testVerifyMapTypeSame) - default: - av[i] = v - } - } - - av = tableTestNilVerify - for i, v := range table { - if i > lp+3 { - av[i] = skipVerifyVal - continue - } - av[i] = testVerifyVal(v, testVerifyMapTypeStrIntf) - } - - av = tablePythonVerify - for i, v := range table { - if i > lp+3 { - av[i] = skipVerifyVal - continue - } - av[i] = testVerifyVal(v, testVerifyForPython) - } - - tablePythonVerify = tablePythonVerify[:24] -} - -func testUnmarshal(v interface{}, data []byte, h Handle) (err error) { - if testUseIoEncDec { - NewDecoder(bytes.NewBuffer(data), h).MustDecode(v) - } else { - NewDecoderBytes(data, h).MustDecode(v) - } - return -} - -func testMarshal(v interface{}, h Handle) (bs []byte, err error) { - if testUseIoEncDec { - var buf bytes.Buffer - NewEncoder(&buf, h).MustEncode(v) - bs = buf.Bytes() - return - } - NewEncoderBytes(&bs, h).MustEncode(v) - return -} - -func testMarshalErr(v interface{}, h Handle, t *testing.T, name string) (bs []byte, err error) { - if bs, err = testMarshal(v, h); err != nil { - logT(t, "Error encoding %s: %v, Err: %v", name, v, err) - t.FailNow() - } - return -} - -func testUnmarshalErr(v interface{}, data []byte, h Handle, t *testing.T, name string) (err error) { - if err = 
testUnmarshal(v, data, h); err != nil { - logT(t, "Error Decoding into %s: %v, Err: %v", name, v, err) - t.FailNow() - } - return -} - -// doTestCodecTableOne allows us test for different variations based on arguments passed. -func doTestCodecTableOne(t *testing.T, testNil bool, h Handle, - vs []interface{}, vsVerify []interface{}) { - //if testNil, then just test for when a pointer to a nil interface{} is passed. It should work. - //Current setup allows us test (at least manually) the nil interface or typed interface. - logT(t, "================ TestNil: %v ================\n", testNil) - for i, v0 := range vs { - logT(t, "..............................................") - logT(t, " Testing: #%d:, %T, %#v\n", i, v0, v0) - b0, err := testMarshalErr(v0, h, t, "v0") - if err != nil { - continue - } - if h.isBinary() { - logT(t, " Encoded bytes: len: %v, %v\n", len(b0), b0) - } else { - logT(t, " Encoded string: len: %v, %v\n", len(string(b0)), string(b0)) - // println("########### encoded string: " + string(b0)) - } - var v1 interface{} - - if testNil { - err = testUnmarshal(&v1, b0, h) - } else { - if v0 != nil { - v0rt := reflect.TypeOf(v0) // ptr - rv1 := reflect.New(v0rt) - err = testUnmarshal(rv1.Interface(), b0, h) - v1 = rv1.Elem().Interface() - // v1 = reflect.Indirect(reflect.ValueOf(v1)).Interface() - } - } - - logT(t, " v1 returned: %T, %#v", v1, v1) - // if v1 != nil { - // logT(t, " v1 returned: %T, %#v", v1, v1) - // //we always indirect, because ptr to typed value may be passed (if not testNil) - // v1 = reflect.Indirect(reflect.ValueOf(v1)).Interface() - // } - if err != nil { - logT(t, "-------- Error: %v. Partial return: %v", err, v1) - failT(t) - continue - } - v0check := vsVerify[i] - if v0check == skipVerifyVal { - logT(t, " Nil Check skipped: Decoded: %T, %#v\n", v1, v1) - continue - } - - if err = deepEqual(v0check, v1); err == nil { - logT(t, "++++++++ Before and After marshal matched\n") - } else { - // logT(t, "-------- Before and After marshal do not match: Error: %v"+ - // " ====> GOLDEN: (%T) %#v, DECODED: (%T) %#v\n", err, v0check, v0check, v1, v1) - logT(t, "-------- Before and After marshal do not match: Error: %v", err) - logT(t, " ....... GOLDEN: (%T) %#v", v0check, v0check) - logT(t, " ....... DECODED: (%T) %#v", v1, v1) - failT(t) - } - } -} - -func testCodecTableOne(t *testing.T, h Handle) { - testOnce.Do(testInitAll) - // func TestMsgpackAllExperimental(t *testing.T) { - // dopts := testDecOpts(nil, nil, false, true, true), - - idxTime, numPrim, numMap := 19, 23, 4 - //println("#################") - switch v := h.(type) { - case *MsgpackHandle: - var oldWriteExt, oldRawToString bool - oldWriteExt, v.WriteExt = v.WriteExt, true - oldRawToString, v.RawToString = v.RawToString, true - doTestCodecTableOne(t, false, h, table, tableVerify) - v.WriteExt, v.RawToString = oldWriteExt, oldRawToString - case *JsonHandle: - //skip []interface{} containing time.Time, as it encodes as a number, but cannot decode back to time.Time. - //As there is no real support for extension tags in json, this must be skipped. 
- doTestCodecTableOne(t, false, h, table[:numPrim], tableVerify[:numPrim]) - doTestCodecTableOne(t, false, h, table[numPrim+1:], tableVerify[numPrim+1:]) - default: - doTestCodecTableOne(t, false, h, table, tableVerify) - } - // func TestMsgpackAll(t *testing.T) { - - // //skip []interface{} containing time.Time - // doTestCodecTableOne(t, false, h, table[:numPrim], tableVerify[:numPrim]) - // doTestCodecTableOne(t, false, h, table[numPrim+1:], tableVerify[numPrim+1:]) - // func TestMsgpackNilStringMap(t *testing.T) { - var oldMapType reflect.Type - v := h.getBasicHandle() - - oldMapType, v.MapType = v.MapType, testMapStrIntfTyp - - //skip time.Time, []interface{} containing time.Time, last map, and newStruc - doTestCodecTableOne(t, true, h, table[:idxTime], tableTestNilVerify[:idxTime]) - doTestCodecTableOne(t, true, h, table[numPrim+1:numPrim+numMap], tableTestNilVerify[numPrim+1:numPrim+numMap]) - - v.MapType = oldMapType - - // func TestMsgpackNilIntf(t *testing.T) { - - //do newTestStruc and last element of map - doTestCodecTableOne(t, true, h, table[numPrim+numMap:], tableTestNilVerify[numPrim+numMap:]) - //TODO? What is this one? - //doTestCodecTableOne(t, true, h, table[17:18], tableTestNilVerify[17:18]) -} - -func testCodecMiscOne(t *testing.T, h Handle) { - testOnce.Do(testInitAll) - b, err := testMarshalErr(32, h, t, "32") - // Cannot do this nil one, because faster type assertion decoding will panic - // var i *int32 - // if err = testUnmarshal(b, i, nil); err == nil { - // logT(t, "------- Expecting error because we cannot unmarshal to int32 nil ptr") - // t.FailNow() - // } - var i2 int32 = 0 - err = testUnmarshalErr(&i2, b, h, t, "int32-ptr") - if i2 != int32(32) { - logT(t, "------- didn't unmarshal to 32: Received: %d", i2) - t.FailNow() - } - - // func TestMsgpackDecodePtr(t *testing.T) { - ts := newTestStruc(0, false, !testSkipIntf, false) - b, err = testMarshalErr(ts, h, t, "pointer-to-struct") - if len(b) < 40 { - logT(t, "------- Size must be > 40. Size: %d", len(b)) - t.FailNow() - } - if h.isBinary() { - logT(t, "------- b: %v", b) - } else { - logT(t, "------- b: %s", b) - } - ts2 := new(TestStruc) - err = testUnmarshalErr(ts2, b, h, t, "pointer-to-struct") - if ts2.I64 != math.MaxInt64*2/3 { - logT(t, "------- Unmarshal wrong. Expect I64 = 64. Got: %v", ts2.I64) - t.FailNow() - } - - // func TestMsgpackIntfDecode(t *testing.T) { - m := map[string]int{"A": 2, "B": 3} - p := []interface{}{m} - bs, err := testMarshalErr(p, h, t, "p") - - m2 := map[string]int{} - p2 := []interface{}{m2} - err = testUnmarshalErr(&p2, bs, h, t, "&p2") - - if m2["A"] != 2 || m2["B"] != 3 { - logT(t, "m2 not as expected: expecting: %v, got: %v", m, m2) - t.FailNow() - } - // log("m: %v, m2: %v, p: %v, p2: %v", m, m2, p, p2) - checkEqualT(t, p, p2, "p=p2") - checkEqualT(t, m, m2, "m=m2") - if err = deepEqual(p, p2); err == nil { - logT(t, "p and p2 match") - } else { - logT(t, "Not Equal: %v. p: %v, p2: %v", err, p, p2) - t.FailNow() - } - if err = deepEqual(m, m2); err == nil { - logT(t, "m and m2 match") - } else { - logT(t, "Not Equal: %v. 
m: %v, m2: %v", err, m, m2) - t.FailNow() - } - - // func TestMsgpackDecodeStructSubset(t *testing.T) { - // test that we can decode a subset of the stream - mm := map[string]interface{}{"A": 5, "B": 99, "C": 333} - bs, err = testMarshalErr(mm, h, t, "mm") - type ttt struct { - A uint8 - C int32 - } - var t2 ttt - testUnmarshalErr(&t2, bs, h, t, "t2") - t3 := ttt{5, 333} - checkEqualT(t, t2, t3, "t2=t3") - - // println(">>>>>") - // test simple arrays, non-addressable arrays, slices - type tarr struct { - A int64 - B [3]int64 - C []byte - D [3]byte - } - var tarr0 = tarr{1, [3]int64{2, 3, 4}, []byte{4, 5, 6}, [3]byte{7, 8, 9}} - // test both pointer and non-pointer (value) - for _, tarr1 := range []interface{}{tarr0, &tarr0} { - bs, err = testMarshalErr(tarr1, h, t, "tarr1") - var tarr2 tarr - testUnmarshalErr(&tarr2, bs, h, t, "tarr2") - checkEqualT(t, tarr0, tarr2, "tarr0=tarr2") - // fmt.Printf(">>>> err: %v. tarr1: %v, tarr2: %v\n", err, tarr0, tarr2) - } - - // test byte array, even if empty (msgpack only) - if h == testMsgpackH { - type ystruct struct { - Anarray []byte - } - var ya = ystruct{} - testUnmarshalErr(&ya, []byte{0x91, 0x90}, h, t, "ya") - } -} - -func testCodecEmbeddedPointer(t *testing.T, h Handle) { - testOnce.Do(testInitAll) - type Z int - type A struct { - AnInt int - } - type B struct { - *Z - *A - MoreInt int - } - var z Z = 4 - x1 := &B{&z, &A{5}, 6} - bs, err := testMarshalErr(x1, h, t, "x1") - // fmt.Printf("buf: len(%v): %x\n", buf.Len(), buf.Bytes()) - var x2 = new(B) - err = testUnmarshalErr(x2, bs, h, t, "x2") - err = checkEqualT(t, x1, x2, "x1=x2") - _ = err -} - -func testCodecUnderlyingType(t *testing.T, h Handle) { - testOnce.Do(testInitAll) - // Manual Test. - // Run by hand, with accompanying print statements in fast-path.go - // to ensure that the fast functions are called. - type T1 map[string]string - v := T1{"1": "1s", "2": "2s"} - var bs []byte - var err error - NewEncoderBytes(&bs, h).MustEncode(v) - if err != nil { - logT(t, "Error during encode: %v", err) - failT(t) - } - var v2 T1 - NewDecoderBytes(bs, h).MustDecode(&v2) - if err != nil { - logT(t, "Error during decode: %v", err) - failT(t) - } -} - -func testCodecChan(t *testing.T, h Handle) { - // - send a slice []*int64 (sl1) into an chan (ch1) with cap > len(s1) - // - encode ch1 as a stream array - // - decode a chan (ch2), with cap > len(s1) from the stream array - // - receive from ch2 into slice sl2 - // - compare sl1 and sl2 - // - do this for codecs: json, cbor (covers all types) - sl1 := make([]*int64, 4) - for i := range sl1 { - var j int64 = int64(i) - sl1[i] = &j - } - ch1 := make(chan *int64, 4) - for _, j := range sl1 { - ch1 <- j - } - var bs []byte - NewEncoderBytes(&bs, h).MustEncode(ch1) - // if !h.isBinary() { - // fmt.Printf("before: len(ch1): %v, bs: %s\n", len(ch1), bs) - // } - // var ch2 chan *int64 // this will block if json, etc. - ch2 := make(chan *int64, 8) - NewDecoderBytes(bs, h).MustDecode(&ch2) - // logT(t, "Len(ch2): %v", len(ch2)) - // fmt.Printf("after: len(ch2): %v, ch2: %v\n", len(ch2), ch2) - close(ch2) - var sl2 []*int64 - for j := range ch2 { - sl2 = append(sl2, j) - } - if err := deepEqual(sl1, sl2); err != nil { - logT(t, "Not Match: %v; len: %v, %v", err, len(sl1), len(sl2)) - failT(t) - } -} - -func testCodecRpcOne(t *testing.T, rr Rpc, h Handle, doRequest bool, exitSleepMs time.Duration, -) (port int) { - testOnce.Do(testInitAll) - if testSkipRPCTests { - return - } - // rpc needs EOF, which is sent via a panic, and so must be recovered. 
- if !recoverPanicToErr { - logT(t, "EXPECTED. set recoverPanicToErr=true, since rpc needs EOF") - t.FailNow() - } - srv := rpc.NewServer() - srv.Register(testRpcInt) - ln, err := net.Listen("tcp", "127.0.0.1:0") - // log("listener: %v", ln.Addr()) - checkErrT(t, err) - port = (ln.Addr().(*net.TCPAddr)).Port - // var opts *DecoderOptions - // opts := testDecOpts - // opts.MapType = mapStrIntfTyp - // opts.RawToString = false - serverExitChan := make(chan bool, 1) - var serverExitFlag uint64 = 0 - serverFn := func() { - for { - conn1, err1 := ln.Accept() - // if err1 != nil { - // //fmt.Printf("accept err1: %v\n", err1) - // continue - // } - if atomic.LoadUint64(&serverExitFlag) == 1 { - serverExitChan <- true - conn1.Close() - return // exit serverFn goroutine - } - if err1 == nil { - var sc rpc.ServerCodec = rr.ServerCodec(conn1, h) - srv.ServeCodec(sc) - } - } - } - - clientFn := func(cc rpc.ClientCodec) { - cl := rpc.NewClientWithCodec(cc) - defer cl.Close() - // defer func() { println("##### client closing"); cl.Close() }() - var up, sq, mult int - var rstr string - // log("Calling client") - checkErrT(t, cl.Call("TestRpcInt.Update", 5, &up)) - // log("Called TestRpcInt.Update") - checkEqualT(t, testRpcInt.i, 5, "testRpcInt.i=5") - checkEqualT(t, up, 5, "up=5") - checkErrT(t, cl.Call("TestRpcInt.Square", 1, &sq)) - checkEqualT(t, sq, 25, "sq=25") - checkErrT(t, cl.Call("TestRpcInt.Mult", 20, &mult)) - checkEqualT(t, mult, 100, "mult=100") - checkErrT(t, cl.Call("TestRpcInt.EchoStruct", TestABC{"Aa", "Bb", "Cc"}, &rstr)) - checkEqualT(t, rstr, fmt.Sprintf("%#v", TestABC{"Aa", "Bb", "Cc"}), "rstr=") - checkErrT(t, cl.Call("TestRpcInt.Echo123", []string{"A1", "B2", "C3"}, &rstr)) - checkEqualT(t, rstr, fmt.Sprintf("%#v", []string{"A1", "B2", "C3"}), "rstr=") - } - - connFn := func() (bs net.Conn) { - // log("calling f1") - bs, err2 := net.Dial(ln.Addr().Network(), ln.Addr().String()) - //fmt.Printf("f1. bs: %v, err2: %v\n", bs, err2) - checkErrT(t, err2) - return - } - - exitFn := func() { - atomic.StoreUint64(&serverExitFlag, 1) - bs := connFn() - <-serverExitChan - bs.Close() - // serverExitChan <- true - } - - go serverFn() - runtime.Gosched() - //time.Sleep(100 * time.Millisecond) - if exitSleepMs == 0 { - defer ln.Close() - defer exitFn() - } - if doRequest { - bs := connFn() - cc := rr.ClientCodec(bs, h) - clientFn(cc) - } - if exitSleepMs != 0 { - go func() { - defer ln.Close() - time.Sleep(exitSleepMs) - exitFn() - }() - } - return -} - -// Comprehensive testing that generates data encoded from python handle (cbor, msgpack), -// and validates that our code can read and write it out accordingly. -// We keep this unexported here, and put actual test in ext_dep_test.go. -// This way, it can be excluded by excluding file completely. -func doTestPythonGenStreams(t *testing.T, name string, h Handle) { - logT(t, "TestPythonGenStreams-%v", name) - tmpdir, err := ioutil.TempDir("", "golang-"+name+"-test") - if err != nil { - logT(t, "-------- Unable to create temp directory\n") - t.FailNow() - } - defer os.RemoveAll(tmpdir) - logT(t, "tmpdir: %v", tmpdir) - cmd := exec.Command("python", "test.py", "testdata", tmpdir) - //cmd.Stdin = strings.NewReader("some input") - //cmd.Stdout = &out - var cmdout []byte - if cmdout, err = cmd.CombinedOutput(); err != nil { - logT(t, "-------- Error running test.py testdata. 
Err: %v", err) - logT(t, " %v", string(cmdout)) - t.FailNow() - } - - bh := h.getBasicHandle() - - oldMapType := bh.MapType - for i, v := range tablePythonVerify { - // if v == uint64(0) && h == testMsgpackH { - // v = int64(0) - // } - bh.MapType = oldMapType - //load up the golden file based on number - //decode it - //compare to in-mem object - //encode it again - //compare to output stream - logT(t, "..............................................") - logT(t, " Testing: #%d: %T, %#v\n", i, v, v) - var bss []byte - bss, err = ioutil.ReadFile(filepath.Join(tmpdir, strconv.Itoa(i)+"."+name+".golden")) - if err != nil { - logT(t, "-------- Error reading golden file: %d. Err: %v", i, err) - failT(t) - continue - } - bh.MapType = testMapStrIntfTyp - - var v1 interface{} - if err = testUnmarshal(&v1, bss, h); err != nil { - logT(t, "-------- Error decoding stream: %d: Err: %v", i, err) - failT(t) - continue - } - if v == skipVerifyVal { - continue - } - //no need to indirect, because we pass a nil ptr, so we already have the value - //if v1 != nil { v1 = reflect.Indirect(reflect.ValueOf(v1)).Interface() } - if err = deepEqual(v, v1); err == nil { - logT(t, "++++++++ Objects match: %T, %v", v, v) - } else { - logT(t, "-------- Objects do not match: %v. Source: %T. Decoded: %T", err, v, v1) - logT(t, "-------- GOLDEN: %#v", v) - // logT(t, "-------- DECODED: %#v <====> %#v", v1, reflect.Indirect(reflect.ValueOf(v1)).Interface()) - logT(t, "-------- DECODED: %#v <====> %#v", v1, reflect.Indirect(reflect.ValueOf(v1)).Interface()) - failT(t) - } - bsb, err := testMarshal(v1, h) - if err != nil { - logT(t, "Error encoding to stream: %d: Err: %v", i, err) - failT(t) - continue - } - if err = deepEqual(bsb, bss); err == nil { - logT(t, "++++++++ Bytes match") - } else { - logT(t, "???????? Bytes do not match. %v.", err) - xs := "--------" - if reflect.ValueOf(v).Kind() == reflect.Map { - xs = " " - logT(t, "%s It's a map. Ok that they don't match (dependent on ordering).", xs) - } else { - logT(t, "%s It's not a map. 
They should match.", xs) - failT(t) - } - logT(t, "%s FROM_FILE: %4d] %v", xs, len(bss), bss) - logT(t, "%s ENCODED: %4d] %v", xs, len(bsb), bsb) - } - } - bh.MapType = oldMapType -} - -// To test MsgpackSpecRpc, we test 3 scenarios: -// - Go Client to Go RPC Service (contained within TestMsgpackRpcSpec) -// - Go client to Python RPC Service (contained within doTestMsgpackRpcSpecGoClientToPythonSvc) -// - Python Client to Go RPC Service (contained within doTestMsgpackRpcSpecPythonClientToGoSvc) -// -// This allows us test the different calling conventions -// - Go Service requires only one argument -// - Python Service allows multiple arguments - -func doTestMsgpackRpcSpecGoClientToPythonSvc(t *testing.T) { - if testSkipRPCTests { - return - } - openPort := "6789" - cmd := exec.Command("python", "test.py", "rpc-server", openPort, "2") - checkErrT(t, cmd.Start()) - time.Sleep(100 * time.Millisecond) // time for python rpc server to start - bs, err2 := net.Dial("tcp", ":"+openPort) - checkErrT(t, err2) - cc := MsgpackSpecRpc.ClientCodec(bs, testMsgpackH) - cl := rpc.NewClientWithCodec(cc) - defer cl.Close() - var rstr string - checkErrT(t, cl.Call("EchoStruct", TestABC{"Aa", "Bb", "Cc"}, &rstr)) - //checkEqualT(t, rstr, "{'A': 'Aa', 'B': 'Bb', 'C': 'Cc'}") - var mArgs MsgpackSpecRpcMultiArgs = []interface{}{"A1", "B2", "C3"} - checkErrT(t, cl.Call("Echo123", mArgs, &rstr)) - checkEqualT(t, rstr, "1:A1 2:B2 3:C3", "rstr=") -} - -func doTestMsgpackRpcSpecPythonClientToGoSvc(t *testing.T) { - if testSkipRPCTests { - return - } - port := testCodecRpcOne(t, MsgpackSpecRpc, testMsgpackH, false, 1*time.Second) - //time.Sleep(1000 * time.Millisecond) - cmd := exec.Command("python", "test.py", "rpc-client-go-service", strconv.Itoa(port)) - var cmdout []byte - var err error - if cmdout, err = cmd.CombinedOutput(); err != nil { - logT(t, "-------- Error running test.py rpc-client-go-service. 
Err: %v", err) - logT(t, " %v", string(cmdout)) - t.FailNow() - } - checkEqualT(t, string(cmdout), - fmt.Sprintf("%#v\n%#v\n", []string{"A1", "B2", "C3"}, TestABC{"Aa", "Bb", "Cc"}), "cmdout=") -} - -func TestBincCodecsTable(t *testing.T) { - testCodecTableOne(t, testBincH) -} - -func TestBincCodecsMisc(t *testing.T) { - testCodecMiscOne(t, testBincH) -} - -func TestBincCodecsEmbeddedPointer(t *testing.T) { - testCodecEmbeddedPointer(t, testBincH) -} - -func TestSimpleCodecsTable(t *testing.T) { - testCodecTableOne(t, testSimpleH) -} - -func TestSimpleCodecsMisc(t *testing.T) { - testCodecMiscOne(t, testSimpleH) -} - -func TestSimpleCodecsEmbeddedPointer(t *testing.T) { - testCodecEmbeddedPointer(t, testSimpleH) -} - -func TestMsgpackCodecsTable(t *testing.T) { - testCodecTableOne(t, testMsgpackH) -} - -func TestMsgpackCodecsMisc(t *testing.T) { - testCodecMiscOne(t, testMsgpackH) -} - -func TestMsgpackCodecsEmbeddedPointer(t *testing.T) { - testCodecEmbeddedPointer(t, testMsgpackH) -} - -func TestCborCodecsTable(t *testing.T) { - testCodecTableOne(t, testCborH) -} - -func TestCborCodecsMisc(t *testing.T) { - testCodecMiscOne(t, testCborH) -} - -func TestCborCodecsEmbeddedPointer(t *testing.T) { - testCodecEmbeddedPointer(t, testCborH) -} - -func TestJsonCodecsTable(t *testing.T) { - testCodecTableOne(t, testJsonH) -} - -func TestJsonCodecsMisc(t *testing.T) { - testCodecMiscOne(t, testJsonH) -} - -func TestJsonCodecsEmbeddedPointer(t *testing.T) { - testCodecEmbeddedPointer(t, testJsonH) -} - -func TestJsonCodecChan(t *testing.T) { - testCodecChan(t, testJsonH) -} - -func TestCborCodecChan(t *testing.T) { - testCodecChan(t, testCborH) -} - -// ----- RPC ----- - -func TestBincRpcGo(t *testing.T) { - testCodecRpcOne(t, GoRpc, testBincH, true, 0) -} - -func TestSimpleRpcGo(t *testing.T) { - testCodecRpcOne(t, GoRpc, testSimpleH, true, 0) -} - -func TestMsgpackRpcGo(t *testing.T) { - testCodecRpcOne(t, GoRpc, testMsgpackH, true, 0) -} - -func TestCborRpcGo(t *testing.T) { - testCodecRpcOne(t, GoRpc, testCborH, true, 0) -} - -func TestJsonRpcGo(t *testing.T) { - testCodecRpcOne(t, GoRpc, testJsonH, true, 0) -} - -func TestMsgpackRpcSpec(t *testing.T) { - testCodecRpcOne(t, MsgpackSpecRpc, testMsgpackH, true, 0) -} - -func TestBincUnderlyingType(t *testing.T) { - testCodecUnderlyingType(t, testBincH) -} - -// TODO: -// Add Tests for: -// - decoding empty list/map in stream into a nil slice/map -// - binary(M|Unm)arsher support for time.Time (e.g. cbor encoding) -// - text(M|Unm)arshaler support for time.Time (e.g. json encoding) -// - non fast-path scenarios e.g. map[string]uint16, []customStruct. -// Expand cbor to include indefinite length stuff for this non-fast-path types. -// This may not be necessary, since we have the manual tests (fastpathEnabled=false) to test/validate with. -// - CodecSelfer -// Ensure it is called when (en|de)coding interface{} or reflect.Value (2 different codepaths). -// - interfaces: textMarshaler, binaryMarshaler, codecSelfer -// - struct tags: -// on anonymous fields, _struct (all fields), etc -// - codecgen of struct containing channels. 
-// -// Cleanup tests: -// - The are brittle in their handling of validation and skipping diff --git a/libnetwork/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen/README.md b/libnetwork/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen/README.md deleted file mode 100644 index 3ae8a056f9..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen/README.md +++ /dev/null @@ -1,36 +0,0 @@ -# codecgen tool - -Generate is given a list of *.go files to parse, and an output file (fout), -codecgen will create an output file __file.go__ which -contains `codec.Selfer` implementations for the named types found -in the files parsed. - -Using codecgen is very straightforward. - -**Download and install the tool** - -`go get -u github.com/ugorji/go/codec/codecgen` - -**Run the tool on your files** - -The command line format is: - -`codecgen [options] (-o outfile) (infile ...)` - -```sh -% codecgen -? -Usage of codecgen: - -c="github.com/ugorji/go/codec": codec path - -o="": out file - -r=".*": regex for type name to match - -rt="": tags for go run - -t="": build tag to put in file - -u=false: Use unsafe, e.g. to avoid unnecessary allocation on []byte->string - -x=false: keep temp file - -% codecgen -o values_codecgen.go values.go values2.go moretypedefs.go -``` - -Please see the [blog article](http://ugorji.net/blog/go-codecgen) -for more information on how to use the tool. - diff --git a/libnetwork/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen/gen.go b/libnetwork/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen/gen.go deleted file mode 100644 index 892df598f8..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen/gen.go +++ /dev/null @@ -1,271 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// codecgen generates codec.Selfer implementations for a set of types. -package main - -import ( - "bufio" - "bytes" - "errors" - "flag" - "fmt" - "go/ast" - "go/build" - "go/parser" - "go/token" - "os" - "os/exec" - "path/filepath" - "regexp" - "strconv" - "text/template" - "time" -) - -const genFrunMainTmpl = `//+build ignore - -package main -{{ if .Types }}import "{{ .ImportPath }}"{{ end }} -func main() { - {{ $.PackageName }}.CodecGenTempWrite{{ .RandString }}() -} -` - -// const genFrunPkgTmpl = `//+build codecgen -const genFrunPkgTmpl = ` -package {{ $.PackageName }} - -import ( - {{ if not .CodecPkgFiles }}{{ .CodecPkgName }} "{{ .CodecImportPath }}"{{ end }} -{{/* - {{ if .Types }}"{{ .ImportPath }}"{{ end }} - "io" -*/}} - "os" - "reflect" - "bytes" - "go/format" -) - -{{/* This is not used anymore. Remove it. -func write(w io.Writer, s string) { - if _, err := io.WriteString(w, s); err != nil { - panic(err) - } -} -*/}} - -func CodecGenTempWrite{{ .RandString }}() { - fout, err := os.Create("{{ .OutFile }}") - if err != nil { - panic(err) - } - defer fout.Close() - var out bytes.Buffer - - var typs []reflect.Type -{{ range $index, $element := .Types }} - var t{{ $index }} {{ . }} - typs = append(typs, reflect.TypeOf(t{{ $index }})) -{{ end }} - {{ if not .CodecPkgFiles }}{{ .CodecPkgName }}.{{ end }}Gen(&out, "{{ .BuildTag }}", "{{ .PackageName }}", {{ .UseUnsafe }}, typs...) - bout, err := format.Source(out.Bytes()) - if err != nil { - fout.Write(out.Bytes()) - panic(err) - } - fout.Write(bout) -} - -` - -// Generate is given a list of *.go files to parse, and an output file (fout). 
-// -// It finds all types T in the files, and it creates 2 tmp files (frun). -// - main package file passed to 'go run' -// - package level file which calls *genRunner.Selfer to write Selfer impls for each T. -// We use a package level file so that it can reference unexported types in the package being worked on. -// Tool then executes: "go run __frun__" which creates fout. -// fout contains Codec(En|De)codeSelf implementations for every type T. -// -func Generate(outfile, buildTag, codecPkgPath string, useUnsafe bool, goRunTag string, - regexName *regexp.Regexp, deleteTempFile bool, infiles ...string) (err error) { - // For each file, grab AST, find each type, and write a call to it. - if len(infiles) == 0 { - return - } - if outfile == "" || codecPkgPath == "" { - err = errors.New("outfile and codec package path cannot be blank") - return - } - // We have to parse dir for package, before opening the temp file for writing (else ImportDir fails). - // Also, ImportDir(...) must take an absolute path. - lastdir := filepath.Dir(outfile) - absdir, err := filepath.Abs(lastdir) - if err != nil { - return - } - pkg, err := build.Default.ImportDir(absdir, build.AllowBinary) - if err != nil { - return - } - type tmplT struct { - CodecPkgName string - CodecImportPath string - ImportPath string - OutFile string - PackageName string - RandString string - BuildTag string - Types []string - CodecPkgFiles bool - UseUnsafe bool - } - tv := tmplT{ - CodecPkgName: "codec1978", - OutFile: outfile, - CodecImportPath: codecPkgPath, - BuildTag: buildTag, - UseUnsafe: useUnsafe, - RandString: strconv.FormatInt(time.Now().UnixNano(), 10), - } - tv.ImportPath = pkg.ImportPath - if tv.ImportPath == tv.CodecImportPath { - tv.CodecPkgFiles = true - tv.CodecPkgName = "codec" - } - astfiles := make([]*ast.File, len(infiles)) - for i, infile := range infiles { - if filepath.Dir(infile) != lastdir { - err = errors.New("in files must all be in same directory as outfile") - return - } - fset := token.NewFileSet() - astfiles[i], err = parser.ParseFile(fset, infile, nil, 0) - if err != nil { - return - } - if i == 0 { - tv.PackageName = astfiles[i].Name.Name - if tv.PackageName == "main" { - // codecgen cannot be run on types in the 'main' package. - // A temporary 'main' package must be created, and should reference the fully built - // package containing the types. - // Also, the temporary main package will conflict with the main package which already has a main method. - err = errors.New("codecgen cannot be run on types in the 'main' package") - return - } - } - } - - for _, f := range astfiles { - for _, d := range f.Decls { - if gd, ok := d.(*ast.GenDecl); ok { - for _, dd := range gd.Specs { - if td, ok := dd.(*ast.TypeSpec); ok { - // if len(td.Name.Name) == 0 || td.Name.Name[0] > 'Z' || td.Name.Name[0] < 'A' { - if len(td.Name.Name) == 0 { - continue - } - - // only generate for: - // struct: StructType - // primitives (numbers, bool, string): Ident - // map: MapType - // slice, array: ArrayType - // chan: ChanType - // do not generate: - // FuncType, InterfaceType, StarExpr (ptr), etc - switch td.Type.(type) { - case *ast.StructType, *ast.Ident, *ast.MapType, *ast.ArrayType, *ast.ChanType: - if regexName.FindStringIndex(td.Name.Name) != nil { - tv.Types = append(tv.Types, td.Name.Name) - } - } - } - } - } - } - } - - if len(tv.Types) == 0 { - return - } - - // we cannot use ioutil.TempFile, because we cannot guarantee the file suffix (.go). 
- // Also, we cannot create file in temp directory, - // because go run will not work (as it needs to see the types here). - // Consequently, create the temp file in the current directory, and remove when done. - - // frun, err = ioutil.TempFile("", "codecgen-") - // frunName := filepath.Join(os.TempDir(), "codecgen-"+strconv.FormatInt(time.Now().UnixNano(), 10)+".go") - - frunMainName := "codecgen-main-" + tv.RandString + ".generated.go" - frunPkgName := "codecgen-pkg-" + tv.RandString + ".generated.go" - if deleteTempFile { - defer os.Remove(frunMainName) - defer os.Remove(frunPkgName) - } - // var frunMain, frunPkg *os.File - if _, err = gen1(frunMainName, genFrunMainTmpl, &tv); err != nil { - return - } - if _, err = gen1(frunPkgName, genFrunPkgTmpl, &tv); err != nil { - return - } - - // remove outfile, so "go run ..." will not think that types in outfile already exist. - os.Remove(outfile) - - // execute go run frun - cmd := exec.Command("go", "run", "-tags="+goRunTag, frunMainName) //, frunPkg.Name()) - var buf bytes.Buffer - cmd.Stdout = &buf - cmd.Stderr = &buf - if err = cmd.Run(); err != nil { - err = fmt.Errorf("error running 'go run %s': %v, console: %s", - frunMainName, err, buf.Bytes()) - return - } - os.Stdout.Write(buf.Bytes()) - return -} - -func gen1(frunName, tmplStr string, tv interface{}) (frun *os.File, err error) { - os.Remove(frunName) - if frun, err = os.Create(frunName); err != nil { - return - } - defer frun.Close() - - t := template.New("") - if t, err = t.Parse(tmplStr); err != nil { - return - } - bw := bufio.NewWriter(frun) - if err = t.Execute(bw, tv); err != nil { - return - } - if err = bw.Flush(); err != nil { - return - } - return -} - -func main() { - o := flag.String("o", "", "out file") - c := flag.String("c", genCodecPath, "codec path") - t := flag.String("t", "", "build tag to put in file") - r := flag.String("r", ".*", "regex for type name to match") - rt := flag.String("rt", "", "tags for go run") - x := flag.Bool("x", false, "keep temp file") - u := flag.Bool("u", false, "Use unsafe, e.g. 
to avoid unnecessary allocation on []byte->string") - - flag.Parse() - if err := Generate(*o, *t, *c, *u, *rt, - regexp.MustCompile(*r), !*x, flag.Args()...); err != nil { - fmt.Fprintf(os.Stderr, "codecgen error: %v\n", err) - os.Exit(1) - } -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen/z.go b/libnetwork/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen/z.go deleted file mode 100644 index e120a4eb9e..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen/z.go +++ /dev/null @@ -1,3 +0,0 @@ -package main - -const genCodecPath = "github.com/ugorji/go/codec" diff --git a/libnetwork/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen_test.go b/libnetwork/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen_test.go deleted file mode 100644 index 2fdfd161d1..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/ugorji/go/codec/codecgen_test.go +++ /dev/null @@ -1,22 +0,0 @@ -//+build x,codecgen - -package codec - -import ( - "fmt" - "testing" -) - -func TestCodecgenJson1(t *testing.T) { - const callCodecgenDirect bool = true - v := newTestStruc(2, false, !testSkipIntf, false) - var bs []byte - e := NewEncoderBytes(&bs, testJsonH) - if callCodecgenDirect { - v.CodecEncodeSelf(e) - e.w.atEndOfEncode() - } else { - e.MustEncode(v) - } - fmt.Printf("%s\n", bs) -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/ugorji/go/codec/helper_test.go b/libnetwork/Godeps/_workspace/src/github.com/ugorji/go/codec/helper_test.go deleted file mode 100644 index 685c576c4e..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/ugorji/go/codec/helper_test.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -// All non-std package dependencies related to testing live in this file, -// so porting to different environment is easy (just update functions). -// -// This file sets up the variables used, including testInitFns. -// Each file should add initialization that should be performed -// after flags are parsed. -// -// init is a multi-step process: -// - setup vars (handled by init functions in each file) -// - parse flags -// - setup derived vars (handled by pre-init registered functions - registered in init function) -// - post init (handled by post-init registered functions - registered in init function) -// This way, no one has to manage carefully control the initialization -// using file names, etc. -// -// Tests which require external dependencies need the -tag=x parameter. -// They should be run as: -// go test -tags=x -run=. -// Benchmarks should also take this parameter, to include the sereal, xdr, etc. -// To run against codecgen, etc, make sure you pass extra parameters. -// Example usage: -// go test "-tags=x codecgen unsafe" -bench=. -// -// To fully test everything: -// go test -tags=x -benchtime=100ms -tv -bg -bi -brw -bu -v -run=. -bench=. 
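A minimal sketch of how a test file plugs into the init scheme the header comment above describes, assuming the package-level hooks declared just below in this same file (testPreInitFns, testOnce, testInitAll); the test name here is hypothetical.

```go
func init() {
	// Register a setup step that must run only after flags are parsed.
	testPreInitFns = append(testPreInitFns, func() {
		// derive test vars from parsed flag values here
	})
}

func TestHypotheticalRoundtrip(t *testing.T) {
	// The first test to run triggers the one-time initialization:
	// flags are parsed, then the registered pre- and post-init hooks run.
	testOnce.Do(testInitAll)
	// ... exercise an encode/decode round trip with one of the handles ...
}
```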
- -import ( - "errors" - "flag" - "fmt" - "reflect" - "sync" - "testing" -) - -const ( - testLogToT = true - failNowOnFail = true -) - -var ( - testNoopH = NoopHandle(8) - testMsgpackH = &MsgpackHandle{} - testBincH = &BincHandle{} - testBincHNoSym = &BincHandle{} - testBincHSym = &BincHandle{} - testSimpleH = &SimpleHandle{} - testCborH = &CborHandle{} - testJsonH = &JsonHandle{} - - testPreInitFns []func() - testPostInitFns []func() - - testOnce sync.Once -) - -func init() { - testBincHSym.AsSymbols = AsSymbolAll - testBincHNoSym.AsSymbols = AsSymbolNone -} - -func testInitAll() { - flag.Parse() - for _, f := range testPreInitFns { - f() - } - for _, f := range testPostInitFns { - f() - } -} - -func logT(x interface{}, format string, args ...interface{}) { - if t, ok := x.(*testing.T); ok && t != nil && testLogToT { - if testVerbose { - t.Logf(format, args...) - } - } else if b, ok := x.(*testing.B); ok && b != nil && testLogToT { - b.Logf(format, args...) - } else { - if len(format) == 0 || format[len(format)-1] != '\n' { - format = format + "\n" - } - fmt.Printf(format, args...) - } -} - -func approxDataSize(rv reflect.Value) (sum int) { - switch rk := rv.Kind(); rk { - case reflect.Invalid: - case reflect.Ptr, reflect.Interface: - sum += int(rv.Type().Size()) - sum += approxDataSize(rv.Elem()) - case reflect.Slice: - sum += int(rv.Type().Size()) - for j := 0; j < rv.Len(); j++ { - sum += approxDataSize(rv.Index(j)) - } - case reflect.String: - sum += int(rv.Type().Size()) - sum += rv.Len() - case reflect.Map: - sum += int(rv.Type().Size()) - for _, mk := range rv.MapKeys() { - sum += approxDataSize(mk) - sum += approxDataSize(rv.MapIndex(mk)) - } - case reflect.Struct: - //struct size already includes the full data size. - //sum += int(rv.Type().Size()) - for j := 0; j < rv.NumField(); j++ { - sum += approxDataSize(rv.Field(j)) - } - default: - //pure value types - sum += int(rv.Type().Size()) - } - return -} - -// ----- functions below are used only by tests (not benchmarks) - -func checkErrT(t *testing.T, err error) { - if err != nil { - logT(t, err.Error()) - failT(t) - } -} - -func checkEqualT(t *testing.T, v1 interface{}, v2 interface{}, desc string) (err error) { - if err = deepEqual(v1, v2); err != nil { - logT(t, "Not Equal: %s: %v. v1: %v, v2: %v", desc, err, v1, v2) - failT(t) - } - return -} - -func failT(t *testing.T) { - if failNowOnFail { - t.FailNow() - } else { - t.Fail() - } -} - -func deepEqual(v1, v2 interface{}) (err error) { - if !reflect.DeepEqual(v1, v2) { - err = errors.New("Not Match") - } - return -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/ugorji/go/codec/py_test.go b/libnetwork/Godeps/_workspace/src/github.com/ugorji/go/codec/py_test.go deleted file mode 100644 index be0374c990..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/ugorji/go/codec/py_test.go +++ /dev/null @@ -1,29 +0,0 @@ -//+build x - -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -// These tests are used to verify msgpack and cbor implementations against their python libraries. -// If you have the library installed, you can enable the tests back by removing the //+build ignore. 
- -import ( - "testing" -) - -func TestMsgpackPythonGenStreams(t *testing.T) { - doTestPythonGenStreams(t, "msgpack", testMsgpackH) -} - -func TestCborPythonGenStreams(t *testing.T) { - doTestPythonGenStreams(t, "cbor", testCborH) -} - -func TestMsgpackRpcSpecGoClientToPythonSvc(t *testing.T) { - doTestMsgpackRpcSpecGoClientToPythonSvc(t) -} - -func TestMsgpackRpcSpecPythonClientToGoSvc(t *testing.T) { - doTestMsgpackRpcSpecPythonClientToGoSvc(t) -} diff --git a/libnetwork/Godeps/_workspace/src/github.com/ugorji/go/codec/values_test.go b/libnetwork/Godeps/_workspace/src/github.com/ugorji/go/codec/values_test.go deleted file mode 100644 index 4ec28e131c..0000000000 --- a/libnetwork/Godeps/_workspace/src/github.com/ugorji/go/codec/values_test.go +++ /dev/null @@ -1,203 +0,0 @@ -// // +build testing - -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -// This file contains values used by tests and benchmarks. -// JSON/BSON do not like maps with keys that are not strings, -// so we only use maps with string keys here. - -import ( - "math" - "time" -) - -var testStrucTime = time.Date(2012, 2, 2, 2, 2, 2, 2000, time.UTC).UTC() - -type AnonInTestStruc struct { - AS string - AI64 int64 - AI16 int16 - AUi64 uint64 - ASslice []string - AI64slice []int64 - AF64slice []float64 - // AMI32U32 map[int32]uint32 - // AMU32F64 map[uint32]float64 // json/bson do not like it - AMSU16 map[string]uint16 -} - -type AnonInTestStrucIntf struct { - Islice []interface{} - Ms map[string]interface{} - Nintf interface{} //don't set this, so we can test for nil - T time.Time -} - -type TestStruc struct { - _struct struct{} `codec:",omitempty"` //set omitempty for every field - - S string - I64 int64 - I16 int16 - Ui64 uint64 - Ui8 uint8 - B bool - By uint8 // byte: msgp doesn't like byte - - Sslice []string - I64slice []int64 - I16slice []int16 - Ui64slice []uint64 - Ui8slice []uint8 - Bslice []bool - Byslice []byte - - Iptrslice []*int64 - - // TODO: test these separately, specifically for reflection and codecgen. - // Unfortunately, ffjson doesn't support these. Its compilation even fails. - // Ui64array [4]uint64 - // Ui64slicearray [][4]uint64 - - AnonInTestStruc - - //M map[interface{}]interface{} `json:"-",bson:"-"` - Msi64 map[string]int64 - - // make this a ptr, so that it could be set or not. - // for comparison (e.g. with msgp), give it a struct tag (so it is not inlined), - // make this one omitempty (so it is included if nil). 
- *AnonInTestStrucIntf `codec:",omitempty"` - - Nmap map[string]bool //don't set this, so we can test for nil - Nslice []byte //don't set this, so we can test for nil - Nint64 *int64 //don't set this, so we can test for nil - Mtsptr map[string]*TestStruc - Mts map[string]TestStruc - Its []*TestStruc - Nteststruc *TestStruc -} - -// small struct for testing that codecgen works for unexported types -type tLowerFirstLetter struct { - I int - u uint64 - S string - b []byte -} - -func newTestStruc(depth int, bench bool, useInterface, useStringKeyOnly bool) (ts *TestStruc) { - var i64a, i64b, i64c, i64d int64 = 64, 6464, 646464, 64646464 - - ts = &TestStruc{ - S: "some string", - I64: math.MaxInt64 * 2 / 3, // 64, - I16: 1616, - Ui64: uint64(int64(math.MaxInt64 * 2 / 3)), // 64, //don't use MaxUint64, as bson can't write it - Ui8: 160, - B: true, - By: 5, - - Sslice: []string{"one", "two", "three"}, - I64slice: []int64{1111, 2222, 3333}, - I16slice: []int16{44, 55, 66}, - Ui64slice: []uint64{12121212, 34343434, 56565656}, - Ui8slice: []uint8{210, 211, 212}, - Bslice: []bool{true, false, true, false}, - Byslice: []byte{13, 14, 15}, - - Msi64: map[string]int64{ - "one": 1, - "two": 2, - }, - AnonInTestStruc: AnonInTestStruc{ - // There's more leeway in altering this. - AS: "A-String", - AI64: -64646464, - AI16: 1616, - AUi64: 64646464, - // (U+1D11E)G-clef character may be represented in json as "\uD834\uDD1E". - // single reverse solidus character may be represented in json as "\u005C". - // include these in ASslice below. - ASslice: []string{"Aone", "Atwo", "Athree", - "Afour.reverse_solidus.\u005c", "Afive.Gclef.\U0001d11E"}, - AI64slice: []int64{1, -22, 333, -4444, 55555, -666666}, - AMSU16: map[string]uint16{"1": 1, "22": 2, "333": 3, "4444": 4}, - AF64slice: []float64{11.11e-11, 22.22E+22, 33.33E-33, 44.44e+44, 555.55E-6, 666.66E6}, - }, - } - if useInterface { - ts.AnonInTestStrucIntf = &AnonInTestStrucIntf{ - Islice: []interface{}{"true", true, "no", false, uint64(288), float64(0.4)}, - Ms: map[string]interface{}{ - "true": "true", - "int64(9)": false, - }, - T: testStrucTime, - } - } - - //For benchmarks, some things will not work. 
- if !bench { - //json and bson require string keys in maps - //ts.M = map[interface{}]interface{}{ - // true: "true", - // int8(9): false, - //} - //gob cannot encode nil in element in array (encodeArray: nil element) - ts.Iptrslice = []*int64{nil, &i64a, nil, &i64b, nil, &i64c, nil, &i64d, nil} - // ts.Iptrslice = nil - } - if !useStringKeyOnly { - // ts.AnonInTestStruc.AMU32F64 = map[uint32]float64{1: 1, 2: 2, 3: 3} // Json/Bson barf - } - if depth > 0 { - depth-- - if ts.Mtsptr == nil { - ts.Mtsptr = make(map[string]*TestStruc) - } - if ts.Mts == nil { - ts.Mts = make(map[string]TestStruc) - } - ts.Mtsptr["0"] = newTestStruc(depth, bench, useInterface, useStringKeyOnly) - ts.Mts["0"] = *(ts.Mtsptr["0"]) - ts.Its = append(ts.Its, ts.Mtsptr["0"]) - } - return -} - -// Some other types - -type Sstring string -type Bbool bool -type Sstructsmall struct { - A int -} - -type Sstructbig struct { - A int - B bool - c string - // Sval Sstruct - Ssmallptr *Sstructsmall - Ssmall *Sstructsmall - Sptr *Sstructbig -} - -type SstructbigMapBySlice struct { - _struct struct{} `codec:",toarray"` - A int - B bool - c string - // Sval Sstruct - Ssmallptr *Sstructsmall - Ssmall *Sstructsmall - Sptr *Sstructbig -} - -type Sinterface interface { - Noop() -} diff --git a/libnetwork/Godeps/_workspace/src/golang.org/x/net/context/context_test.go b/libnetwork/Godeps/_workspace/src/golang.org/x/net/context/context_test.go deleted file mode 100644 index 82d2494a49..0000000000 --- a/libnetwork/Godeps/_workspace/src/golang.org/x/net/context/context_test.go +++ /dev/null @@ -1,553 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package context - -import ( - "fmt" - "math/rand" - "runtime" - "strings" - "sync" - "testing" - "time" -) - -// otherContext is a Context that's not one of the types defined in context.go. -// This lets us test code paths that differ based on the underlying type of the -// Context. 
-type otherContext struct { - Context -} - -func TestBackground(t *testing.T) { - c := Background() - if c == nil { - t.Fatalf("Background returned nil") - } - select { - case x := <-c.Done(): - t.Errorf("<-c.Done() == %v want nothing (it should block)", x) - default: - } - if got, want := fmt.Sprint(c), "context.Background"; got != want { - t.Errorf("Background().String() = %q want %q", got, want) - } -} - -func TestTODO(t *testing.T) { - c := TODO() - if c == nil { - t.Fatalf("TODO returned nil") - } - select { - case x := <-c.Done(): - t.Errorf("<-c.Done() == %v want nothing (it should block)", x) - default: - } - if got, want := fmt.Sprint(c), "context.TODO"; got != want { - t.Errorf("TODO().String() = %q want %q", got, want) - } -} - -func TestWithCancel(t *testing.T) { - c1, cancel := WithCancel(Background()) - - if got, want := fmt.Sprint(c1), "context.Background.WithCancel"; got != want { - t.Errorf("c1.String() = %q want %q", got, want) - } - - o := otherContext{c1} - c2, _ := WithCancel(o) - contexts := []Context{c1, o, c2} - - for i, c := range contexts { - if d := c.Done(); d == nil { - t.Errorf("c[%d].Done() == %v want non-nil", i, d) - } - if e := c.Err(); e != nil { - t.Errorf("c[%d].Err() == %v want nil", i, e) - } - - select { - case x := <-c.Done(): - t.Errorf("<-c.Done() == %v want nothing (it should block)", x) - default: - } - } - - cancel() - time.Sleep(100 * time.Millisecond) // let cancelation propagate - - for i, c := range contexts { - select { - case <-c.Done(): - default: - t.Errorf("<-c[%d].Done() blocked, but shouldn't have", i) - } - if e := c.Err(); e != Canceled { - t.Errorf("c[%d].Err() == %v want %v", i, e, Canceled) - } - } -} - -func TestParentFinishesChild(t *testing.T) { - // Context tree: - // parent -> cancelChild - // parent -> valueChild -> timerChild - parent, cancel := WithCancel(Background()) - cancelChild, stop := WithCancel(parent) - defer stop() - valueChild := WithValue(parent, "key", "value") - timerChild, stop := WithTimeout(valueChild, 10000*time.Hour) - defer stop() - - select { - case x := <-parent.Done(): - t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) - case x := <-cancelChild.Done(): - t.Errorf("<-cancelChild.Done() == %v want nothing (it should block)", x) - case x := <-timerChild.Done(): - t.Errorf("<-timerChild.Done() == %v want nothing (it should block)", x) - case x := <-valueChild.Done(): - t.Errorf("<-valueChild.Done() == %v want nothing (it should block)", x) - default: - } - - // The parent's children should contain the two cancelable children. - pc := parent.(*cancelCtx) - cc := cancelChild.(*cancelCtx) - tc := timerChild.(*timerCtx) - pc.mu.Lock() - if len(pc.children) != 2 || !pc.children[cc] || !pc.children[tc] { - t.Errorf("bad linkage: pc.children = %v, want %v and %v", - pc.children, cc, tc) - } - pc.mu.Unlock() - - if p, ok := parentCancelCtx(cc.Context); !ok || p != pc { - t.Errorf("bad linkage: parentCancelCtx(cancelChild.Context) = %v, %v want %v, true", p, ok, pc) - } - if p, ok := parentCancelCtx(tc.Context); !ok || p != pc { - t.Errorf("bad linkage: parentCancelCtx(timerChild.Context) = %v, %v want %v, true", p, ok, pc) - } - - cancel() - - pc.mu.Lock() - if len(pc.children) != 0 { - t.Errorf("pc.cancel didn't clear pc.children = %v", pc.children) - } - pc.mu.Unlock() - - // parent and children should all be finished. 
- check := func(ctx Context, name string) { - select { - case <-ctx.Done(): - default: - t.Errorf("<-%s.Done() blocked, but shouldn't have", name) - } - if e := ctx.Err(); e != Canceled { - t.Errorf("%s.Err() == %v want %v", name, e, Canceled) - } - } - check(parent, "parent") - check(cancelChild, "cancelChild") - check(valueChild, "valueChild") - check(timerChild, "timerChild") - - // WithCancel should return a canceled context on a canceled parent. - precanceledChild := WithValue(parent, "key", "value") - select { - case <-precanceledChild.Done(): - default: - t.Errorf("<-precanceledChild.Done() blocked, but shouldn't have") - } - if e := precanceledChild.Err(); e != Canceled { - t.Errorf("precanceledChild.Err() == %v want %v", e, Canceled) - } -} - -func TestChildFinishesFirst(t *testing.T) { - cancelable, stop := WithCancel(Background()) - defer stop() - for _, parent := range []Context{Background(), cancelable} { - child, cancel := WithCancel(parent) - - select { - case x := <-parent.Done(): - t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) - case x := <-child.Done(): - t.Errorf("<-child.Done() == %v want nothing (it should block)", x) - default: - } - - cc := child.(*cancelCtx) - pc, pcok := parent.(*cancelCtx) // pcok == false when parent == Background() - if p, ok := parentCancelCtx(cc.Context); ok != pcok || (ok && pc != p) { - t.Errorf("bad linkage: parentCancelCtx(cc.Context) = %v, %v want %v, %v", p, ok, pc, pcok) - } - - if pcok { - pc.mu.Lock() - if len(pc.children) != 1 || !pc.children[cc] { - t.Errorf("bad linkage: pc.children = %v, cc = %v", pc.children, cc) - } - pc.mu.Unlock() - } - - cancel() - - if pcok { - pc.mu.Lock() - if len(pc.children) != 0 { - t.Errorf("child's cancel didn't remove self from pc.children = %v", pc.children) - } - pc.mu.Unlock() - } - - // child should be finished. - select { - case <-child.Done(): - default: - t.Errorf("<-child.Done() blocked, but shouldn't have") - } - if e := child.Err(); e != Canceled { - t.Errorf("child.Err() == %v want %v", e, Canceled) - } - - // parent should not be finished. 
- select { - case x := <-parent.Done(): - t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) - default: - } - if e := parent.Err(); e != nil { - t.Errorf("parent.Err() == %v want nil", e) - } - } -} - -func testDeadline(c Context, wait time.Duration, t *testing.T) { - select { - case <-time.After(wait): - t.Fatalf("context should have timed out") - case <-c.Done(): - } - if e := c.Err(); e != DeadlineExceeded { - t.Errorf("c.Err() == %v want %v", e, DeadlineExceeded) - } -} - -func TestDeadline(t *testing.T) { - c, _ := WithDeadline(Background(), time.Now().Add(100*time.Millisecond)) - if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) { - t.Errorf("c.String() = %q want prefix %q", got, prefix) - } - testDeadline(c, 200*time.Millisecond, t) - - c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond)) - o := otherContext{c} - testDeadline(o, 200*time.Millisecond, t) - - c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond)) - o = otherContext{c} - c, _ = WithDeadline(o, time.Now().Add(300*time.Millisecond)) - testDeadline(c, 200*time.Millisecond, t) -} - -func TestTimeout(t *testing.T) { - c, _ := WithTimeout(Background(), 100*time.Millisecond) - if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) { - t.Errorf("c.String() = %q want prefix %q", got, prefix) - } - testDeadline(c, 200*time.Millisecond, t) - - c, _ = WithTimeout(Background(), 100*time.Millisecond) - o := otherContext{c} - testDeadline(o, 200*time.Millisecond, t) - - c, _ = WithTimeout(Background(), 100*time.Millisecond) - o = otherContext{c} - c, _ = WithTimeout(o, 300*time.Millisecond) - testDeadline(c, 200*time.Millisecond, t) -} - -func TestCanceledTimeout(t *testing.T) { - c, _ := WithTimeout(Background(), 200*time.Millisecond) - o := otherContext{c} - c, cancel := WithTimeout(o, 400*time.Millisecond) - cancel() - time.Sleep(100 * time.Millisecond) // let cancelation propagate - select { - case <-c.Done(): - default: - t.Errorf("<-c.Done() blocked, but shouldn't have") - } - if e := c.Err(); e != Canceled { - t.Errorf("c.Err() == %v want %v", e, Canceled) - } -} - -type key1 int -type key2 int - -var k1 = key1(1) -var k2 = key2(1) // same int as k1, different type -var k3 = key2(3) // same type as k2, different int - -func TestValues(t *testing.T) { - check := func(c Context, nm, v1, v2, v3 string) { - if v, ok := c.Value(k1).(string); ok == (len(v1) == 0) || v != v1 { - t.Errorf(`%s.Value(k1).(string) = %q, %t want %q, %t`, nm, v, ok, v1, len(v1) != 0) - } - if v, ok := c.Value(k2).(string); ok == (len(v2) == 0) || v != v2 { - t.Errorf(`%s.Value(k2).(string) = %q, %t want %q, %t`, nm, v, ok, v2, len(v2) != 0) - } - if v, ok := c.Value(k3).(string); ok == (len(v3) == 0) || v != v3 { - t.Errorf(`%s.Value(k3).(string) = %q, %t want %q, %t`, nm, v, ok, v3, len(v3) != 0) - } - } - - c0 := Background() - check(c0, "c0", "", "", "") - - c1 := WithValue(Background(), k1, "c1k1") - check(c1, "c1", "c1k1", "", "") - - if got, want := fmt.Sprint(c1), `context.Background.WithValue(1, "c1k1")`; got != want { - t.Errorf("c.String() = %q want %q", got, want) - } - - c2 := WithValue(c1, k2, "c2k2") - check(c2, "c2", "c1k1", "c2k2", "") - - c3 := WithValue(c2, k3, "c3k3") - check(c3, "c2", "c1k1", "c2k2", "c3k3") - - c4 := WithValue(c3, k1, nil) - check(c4, "c4", "", "c2k2", "c3k3") - - o0 := otherContext{Background()} - check(o0, "o0", "", "", "") - - o1 := otherContext{WithValue(Background(), 
k1, "c1k1")} - check(o1, "o1", "c1k1", "", "") - - o2 := WithValue(o1, k2, "o2k2") - check(o2, "o2", "c1k1", "o2k2", "") - - o3 := otherContext{c4} - check(o3, "o3", "", "c2k2", "c3k3") - - o4 := WithValue(o3, k3, nil) - check(o4, "o4", "", "c2k2", "") -} - -func TestAllocs(t *testing.T) { - bg := Background() - for _, test := range []struct { - desc string - f func() - limit float64 - gccgoLimit float64 - }{ - { - desc: "Background()", - f: func() { Background() }, - limit: 0, - gccgoLimit: 0, - }, - { - desc: fmt.Sprintf("WithValue(bg, %v, nil)", k1), - f: func() { - c := WithValue(bg, k1, nil) - c.Value(k1) - }, - limit: 3, - gccgoLimit: 3, - }, - { - desc: "WithTimeout(bg, 15*time.Millisecond)", - f: func() { - c, _ := WithTimeout(bg, 15*time.Millisecond) - <-c.Done() - }, - limit: 8, - gccgoLimit: 13, - }, - { - desc: "WithCancel(bg)", - f: func() { - c, cancel := WithCancel(bg) - cancel() - <-c.Done() - }, - limit: 5, - gccgoLimit: 8, - }, - { - desc: "WithTimeout(bg, 100*time.Millisecond)", - f: func() { - c, cancel := WithTimeout(bg, 100*time.Millisecond) - cancel() - <-c.Done() - }, - limit: 8, - gccgoLimit: 25, - }, - } { - limit := test.limit - if runtime.Compiler == "gccgo" { - // gccgo does not yet do escape analysis. - // TOOD(iant): Remove this when gccgo does do escape analysis. - limit = test.gccgoLimit - } - if n := testing.AllocsPerRun(100, test.f); n > limit { - t.Errorf("%s allocs = %f want %d", test.desc, n, int(limit)) - } - } -} - -func TestSimultaneousCancels(t *testing.T) { - root, cancel := WithCancel(Background()) - m := map[Context]CancelFunc{root: cancel} - q := []Context{root} - // Create a tree of contexts. - for len(q) != 0 && len(m) < 100 { - parent := q[0] - q = q[1:] - for i := 0; i < 4; i++ { - ctx, cancel := WithCancel(parent) - m[ctx] = cancel - q = append(q, ctx) - } - } - // Start all the cancels in a random order. - var wg sync.WaitGroup - wg.Add(len(m)) - for _, cancel := range m { - go func(cancel CancelFunc) { - cancel() - wg.Done() - }(cancel) - } - // Wait on all the contexts in a random order. - for ctx := range m { - select { - case <-ctx.Done(): - case <-time.After(1 * time.Second): - buf := make([]byte, 10<<10) - n := runtime.Stack(buf, true) - t.Fatalf("timed out waiting for <-ctx.Done(); stacks:\n%s", buf[:n]) - } - } - // Wait for all the cancel functions to return. - done := make(chan struct{}) - go func() { - wg.Wait() - close(done) - }() - select { - case <-done: - case <-time.After(1 * time.Second): - buf := make([]byte, 10<<10) - n := runtime.Stack(buf, true) - t.Fatalf("timed out waiting for cancel functions; stacks:\n%s", buf[:n]) - } -} - -func TestInterlockedCancels(t *testing.T) { - parent, cancelParent := WithCancel(Background()) - child, cancelChild := WithCancel(parent) - go func() { - parent.Done() - cancelChild() - }() - cancelParent() - select { - case <-child.Done(): - case <-time.After(1 * time.Second): - buf := make([]byte, 10<<10) - n := runtime.Stack(buf, true) - t.Fatalf("timed out waiting for child.Done(); stacks:\n%s", buf[:n]) - } -} - -func TestLayersCancel(t *testing.T) { - testLayers(t, time.Now().UnixNano(), false) -} - -func TestLayersTimeout(t *testing.T) { - testLayers(t, time.Now().UnixNano(), true) -} - -func testLayers(t *testing.T, seed int64, testTimeout bool) { - rand.Seed(seed) - errorf := func(format string, a ...interface{}) { - t.Errorf(fmt.Sprintf("seed=%d: %s", seed, format), a...) 
- } - const ( - timeout = 200 * time.Millisecond - minLayers = 30 - ) - type value int - var ( - vals []*value - cancels []CancelFunc - numTimers int - ctx = Background() - ) - for i := 0; i < minLayers || numTimers == 0 || len(cancels) == 0 || len(vals) == 0; i++ { - switch rand.Intn(3) { - case 0: - v := new(value) - ctx = WithValue(ctx, v, v) - vals = append(vals, v) - case 1: - var cancel CancelFunc - ctx, cancel = WithCancel(ctx) - cancels = append(cancels, cancel) - case 2: - var cancel CancelFunc - ctx, cancel = WithTimeout(ctx, timeout) - cancels = append(cancels, cancel) - numTimers++ - } - } - checkValues := func(when string) { - for _, key := range vals { - if val := ctx.Value(key).(*value); key != val { - errorf("%s: ctx.Value(%p) = %p want %p", when, key, val, key) - } - } - } - select { - case <-ctx.Done(): - errorf("ctx should not be canceled yet") - default: - } - if s, prefix := fmt.Sprint(ctx), "context.Background."; !strings.HasPrefix(s, prefix) { - t.Errorf("ctx.String() = %q want prefix %q", s, prefix) - } - t.Log(ctx) - checkValues("before cancel") - if testTimeout { - select { - case <-ctx.Done(): - case <-time.After(timeout + timeout/10): - errorf("ctx should have timed out") - } - checkValues("after timeout") - } else { - cancel := cancels[rand.Intn(len(cancels))] - cancel() - select { - case <-ctx.Done(): - default: - errorf("ctx should be canceled") - } - checkValues("after cancel") - } -} diff --git a/libnetwork/Godeps/_workspace/src/golang.org/x/net/context/withtimeout_test.go b/libnetwork/Godeps/_workspace/src/golang.org/x/net/context/withtimeout_test.go deleted file mode 100644 index a6754dc368..0000000000 --- a/libnetwork/Godeps/_workspace/src/golang.org/x/net/context/withtimeout_test.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package context_test - -import ( - "fmt" - "time" - - "golang.org/x/net/context" -) - -func ExampleWithTimeout() { - // Pass a context with a timeout to tell a blocking function that it - // should abandon its work after the timeout elapses. - ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond) - select { - case <-time.After(200 * time.Millisecond): - fmt.Println("overslept") - case <-ctx.Done(): - fmt.Println(ctx.Err()) // prints "context deadline exceeded" - } - // Output: - // context deadline exceeded -}