Godeps update on libcontainer

Signed-off-by: Madhu Venugopal <madhu@docker.com>

parent 52bb21b7bd
commit 9ea1f56cdf
236 changed files with 38852 additions and 0 deletions

libnetwork/Godeps/Godeps.json (generated; 47 lines changed)
@@ -132,6 +132,53 @@
		{
			"ImportPath": "github.com/vishvananda/netlink",
			"Rev": "4b5dce31de6d42af5bb9811c6d265472199e0fec"
		},
		{
			"ImportPath": "github.com/opencontainers/runc/libcontainer",
			"Comment": "v0.0.3",
			"Rev": "072fa6fdccaba49b11ba91ad4265b1ec1043787e"
		},
		{
			"ImportPath": "github.com/coreos/go-systemd/dbus",
			"Comment": "v3",
			"Rev": "be94bc700879ae8217780e9d141789a2defa302b"
		},
		{
			"ImportPath": "github.com/coreos/go-systemd/util",
			"Comment": "v3",
			"Rev": "be94bc700879ae8217780e9d141789a2defa302b"
		},
		{
			"ImportPath": "github.com/docker/docker/pkg/mount",
			"Comment": "v1.4.1-4127-gb81f2ee",
			"Rev": "b81f2ee5f20d094c13893f565ce716595c539d22"
		},
		{
			"ImportPath": "github.com/docker/docker/pkg/symlink",
			"Comment": "v1.4.1-4127-gb81f2ee",
			"Rev": "b81f2ee5f20d094c13893f565ce716595c539d22"
		},
		{
			"ImportPath": "github.com/docker/docker/pkg/term",
			"Comment": "v1.4.1-4127-gb81f2ee",
			"Rev": "b81f2ee5f20d094c13893f565ce716595c539d22"
		},
		{
			"ImportPath": "github.com/docker/docker/pkg/units",
			"Comment": "v1.4.1-4127-gb81f2ee",
			"Rev": "b81f2ee5f20d094c13893f565ce716595c539d22"
		},
		{
			"ImportPath": "github.com/golang/protobuf/proto",
			"Rev": "f7137ae6b19afbfd61a94b746fda3b3fe0491874"
		},
		{
			"ImportPath": "github.com/opencontainers/specs",
			"Rev": "5b31bb2b7771e5074a4eb14eca432da1ca5182d6"
		},
		{
			"ImportPath": "github.com/syndtr/gocapability/capability",
			"Rev": "e55e5833692b49e49a0073ad5baf7803f21bebf4"
		}
	]
}
libnetwork/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/dbus.go (new file, generated, vendored; 163 lines)
@@ -0,0 +1,163 @@
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Integration with the systemd D-Bus API. See http://www.freedesktop.org/wiki/Software/systemd/dbus/
|
||||
package dbus
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/godbus/dbus"
|
||||
)
|
||||
|
||||
const (
|
||||
alpha = `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ`
|
||||
num = `0123456789`
|
||||
alphanum = alpha + num
|
||||
signalBuffer = 100
|
||||
)
|
||||
|
||||
// needsEscape checks whether a byte in a potential dbus ObjectPath needs to be escaped
|
||||
func needsEscape(i int, b byte) bool {
|
||||
// Escape everything that is not a-z-A-Z-0-9
|
||||
// Also escape 0-9 if it's the first character
|
||||
return strings.IndexByte(alphanum, b) == -1 ||
|
||||
(i == 0 && strings.IndexByte(num, b) != -1)
|
||||
}
|
||||
|
||||
// PathBusEscape sanitizes a constituent string of a dbus ObjectPath using the
|
||||
// rules that systemd uses for serializing special characters.
|
||||
func PathBusEscape(path string) string {
|
||||
// Special case the empty string
|
||||
if len(path) == 0 {
|
||||
return "_"
|
||||
}
|
||||
n := []byte{}
|
||||
for i := 0; i < len(path); i++ {
|
||||
c := path[i]
|
||||
if needsEscape(i, c) {
|
||||
e := fmt.Sprintf("_%x", c)
|
||||
n = append(n, []byte(e)...)
|
||||
} else {
|
||||
n = append(n, c)
|
||||
}
|
||||
}
|
||||
return string(n)
|
||||
}
|
||||
|
||||
// Conn is a connection to systemd's dbus endpoint.
|
||||
type Conn struct {
|
||||
// sysconn/sysobj are only used to call dbus methods
|
||||
sysconn *dbus.Conn
|
||||
sysobj *dbus.Object
|
||||
|
||||
// sigconn/sigobj are only used to receive dbus signals
|
||||
sigconn *dbus.Conn
|
||||
sigobj *dbus.Object
|
||||
|
||||
jobListener struct {
|
||||
jobs map[dbus.ObjectPath]chan<- string
|
||||
sync.Mutex
|
||||
}
|
||||
subscriber struct {
|
||||
updateCh chan<- *SubStateUpdate
|
||||
errCh chan<- error
|
||||
sync.Mutex
|
||||
ignore map[dbus.ObjectPath]int64
|
||||
cleanIgnore int64
|
||||
}
|
||||
}
|
||||
|
||||
// New establishes a connection to the system bus and authenticates.
|
||||
// Callers should call Close() when done with the connection.
|
||||
func New() (*Conn, error) {
|
||||
return newConnection(dbus.SystemBusPrivate)
|
||||
}
|
||||
|
||||
// NewUserConnection establishes a connection to the session bus and
|
||||
// authenticates. This can be used to connect to systemd user instances.
|
||||
// Callers should call Close() when done with the connection.
|
||||
func NewUserConnection() (*Conn, error) {
|
||||
return newConnection(dbus.SessionBusPrivate)
|
||||
}
|
||||
|
||||
// Close closes an established connection
|
||||
func (c *Conn) Close() {
|
||||
c.sysconn.Close()
|
||||
c.sigconn.Close()
|
||||
}
|
||||
|
||||
func newConnection(createBus func() (*dbus.Conn, error)) (*Conn, error) {
|
||||
sysconn, err := dbusConnection(createBus)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sigconn, err := dbusConnection(createBus)
|
||||
if err != nil {
|
||||
sysconn.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c := &Conn{
|
||||
sysconn: sysconn,
|
||||
sysobj: systemdObject(sysconn),
|
||||
sigconn: sigconn,
|
||||
sigobj: systemdObject(sigconn),
|
||||
}
|
||||
|
||||
c.subscriber.ignore = make(map[dbus.ObjectPath]int64)
|
||||
c.jobListener.jobs = make(map[dbus.ObjectPath]chan<- string)
|
||||
|
||||
// Setup the listeners on jobs so that we can get completions
|
||||
c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
|
||||
"type='signal', interface='org.freedesktop.systemd1.Manager', member='JobRemoved'")
|
||||
|
||||
c.dispatch()
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func dbusConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) {
|
||||
conn, err := createBus()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Only use EXTERNAL method, and hardcode the uid (not username)
|
||||
// to avoid a username lookup (which requires a dynamically linked
|
||||
// libc)
|
||||
methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))}
|
||||
|
||||
err = conn.Auth(methods)
|
||||
if err != nil {
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = conn.Hello()
|
||||
if err != nil {
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
func systemdObject(conn *dbus.Conn) *dbus.Object {
|
||||
return conn.Object("org.freedesktop.systemd1", dbus.ObjectPath("/org/freedesktop/systemd1"))
|
||||
}
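
As a quick orientation for the vendored API above, here is a minimal, illustrative sketch of opening and closing a connection (not part of the commit; it assumes a running systemd and uses the import path pinned in Godeps.json):

package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-systemd/dbus"
)

func main() {
	// New opens a private connection to the system bus and authenticates
	// with the EXTERNAL mechanism, as implemented in dbus.go above.
	conn, err := dbus.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// PathBusEscape applies systemd's escaping rules for object paths;
	// "foo.service" becomes "foo_2eservice" (see dbus_test.go below).
	fmt.Println(dbus.PathBusEscape("foo.service"))
}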
libnetwork/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/dbus_test.go (new file, generated, vendored; 77 lines)
@@ -0,0 +1,77 @@
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package dbus
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestNeedsEscape(t *testing.T) {
|
||||
// Anything not 0-9a-zA-Z should always be escaped
|
||||
for want, vals := range map[bool][]byte{
|
||||
false: []byte{'a', 'b', 'z', 'A', 'Q', '1', '4', '9'},
|
||||
true: []byte{'#', '%', '$', '!', '.', '_', '-', '%', '\\'},
|
||||
} {
|
||||
for i := 1; i < 10; i++ {
|
||||
for _, b := range vals {
|
||||
got := needsEscape(i, b)
|
||||
if got != want {
|
||||
t.Errorf("needsEscape(%d, %c) returned %t, want %t", i, b, got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 0-9 in position 0 should be escaped
|
||||
for want, vals := range map[bool][]byte{
|
||||
false: []byte{'A', 'a', 'e', 'x', 'Q', 'Z'},
|
||||
true: []byte{'0', '4', '5', '9'},
|
||||
} {
|
||||
for _, b := range vals {
|
||||
got := needsEscape(0, b)
|
||||
if got != want {
|
||||
t.Errorf("needsEscape(0, %c) returned %t, want %t", b, got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestPathBusEscape(t *testing.T) {
|
||||
for in, want := range map[string]string{
|
||||
"": "_",
|
||||
"foo.service": "foo_2eservice",
|
||||
"foobar": "foobar",
|
||||
"woof@woof.service": "woof_40woof_2eservice",
|
||||
"0123456": "_30123456",
|
||||
"account_db.service": "account_5fdb_2eservice",
|
||||
"got-dashes": "got_2ddashes",
|
||||
} {
|
||||
got := PathBusEscape(in)
|
||||
if got != want {
|
||||
t.Errorf("bad result for PathBusEscape(%s): got %q, want %q", in, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// TestNew ensures that New() works without errors.
|
||||
func TestNew(t *testing.T) {
|
||||
_, err := New()
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
libnetwork/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/methods.go (new file, generated, vendored; 410 lines)
@@ -0,0 +1,410 @@
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package dbus
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"path"
|
||||
"strconv"
|
||||
|
||||
"github.com/godbus/dbus"
|
||||
)
|
||||
|
||||
func (c *Conn) jobComplete(signal *dbus.Signal) {
|
||||
var id uint32
|
||||
var job dbus.ObjectPath
|
||||
var unit string
|
||||
var result string
|
||||
dbus.Store(signal.Body, &id, &job, &unit, &result)
|
||||
c.jobListener.Lock()
|
||||
out, ok := c.jobListener.jobs[job]
|
||||
if ok {
|
||||
out <- result
|
||||
delete(c.jobListener.jobs, job)
|
||||
}
|
||||
c.jobListener.Unlock()
|
||||
}
|
||||
|
||||
func (c *Conn) startJob(ch chan<- string, job string, args ...interface{}) (int, error) {
|
||||
if ch != nil {
|
||||
c.jobListener.Lock()
|
||||
defer c.jobListener.Unlock()
|
||||
}
|
||||
|
||||
var p dbus.ObjectPath
|
||||
err := c.sysobj.Call(job, 0, args...).Store(&p)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if ch != nil {
|
||||
c.jobListener.jobs[p] = ch
|
||||
}
|
||||
|
||||
// ignore error since 0 is fine if conversion fails
|
||||
jobID, _ := strconv.Atoi(path.Base(string(p)))
|
||||
|
||||
return jobID, nil
|
||||
}
|
||||
|
||||
// StartUnit enqueues a start job and depending jobs, if any (unless otherwise
|
||||
// specified by the mode string).
|
||||
//
|
||||
// Takes the unit to activate, plus a mode string. The mode needs to be one of
|
||||
// replace, fail, isolate, ignore-dependencies, ignore-requirements. If
|
||||
// "replace" the call will start the unit and its dependencies, possibly
|
||||
// replacing already queued jobs that conflict with this. If "fail" the call
|
||||
// will start the unit and its dependencies, but will fail if this would change
|
||||
// an already queued job. If "isolate" the call will start the unit in question
|
||||
// and terminate all units that aren't dependencies of it. If
|
||||
// "ignore-dependencies" it will start a unit but ignore all its dependencies.
|
||||
// If "ignore-requirements" it will start a unit but only ignore the
|
||||
// requirement dependencies. It is not recommended to make use of the latter
|
||||
// two options.
|
||||
//
|
||||
// If the provided channel is non-nil, a result string will be sent to it upon
|
||||
// job completion: one of done, canceled, timeout, failed, dependency, skipped.
|
||||
// done indicates successful execution of a job. canceled indicates that a job
|
||||
// has been canceled before it finished execution. timeout indicates that the
|
||||
// job timeout was reached. failed indicates that the job failed. dependency
|
||||
// indicates that a job this job has been depending on failed and the job hence
|
||||
// has been removed too. skipped indicates that a job was skipped because it
|
||||
// didn't apply to the units current state.
|
||||
//
|
||||
// If no error occurs, the ID of the underlying systemd job will be returned. There
|
||||
// does exist the possibility for no error to be returned, but for the returned job
|
||||
// ID to be 0. In this case, the actual underlying ID is not 0 and this datapoint
|
||||
// should not be considered authoritative.
|
||||
//
|
||||
// If an error does occur, it will be returned to the user alongside a job ID of 0.
|
||||
func (c *Conn) StartUnit(name string, mode string, ch chan<- string) (int, error) {
|
||||
return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartUnit", name, mode)
|
||||
}
|
||||
|
||||
// StopUnit is similar to StartUnit but stops the specified unit rather
|
||||
// than starting it.
|
||||
func (c *Conn) StopUnit(name string, mode string, ch chan<- string) (int, error) {
|
||||
return c.startJob(ch, "org.freedesktop.systemd1.Manager.StopUnit", name, mode)
|
||||
}
|
||||
|
||||
// ReloadUnit reloads a unit. Reloading is done only if the unit is already running and fails otherwise.
|
||||
func (c *Conn) ReloadUnit(name string, mode string, ch chan<- string) (int, error) {
|
||||
return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadUnit", name, mode)
|
||||
}
|
||||
|
||||
// RestartUnit restarts a service. If a service is restarted that isn't
|
||||
// running it will be started.
|
||||
func (c *Conn) RestartUnit(name string, mode string, ch chan<- string) (int, error) {
|
||||
return c.startJob(ch, "org.freedesktop.systemd1.Manager.RestartUnit", name, mode)
|
||||
}
|
||||
|
||||
// TryRestartUnit is like RestartUnit, except that a service that isn't running
|
||||
// is not affected by the restart.
|
||||
func (c *Conn) TryRestartUnit(name string, mode string, ch chan<- string) (int, error) {
|
||||
return c.startJob(ch, "org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode)
|
||||
}
|
||||
|
||||
// ReloadOrRestart attempts a reload if the unit supports it and use a restart
|
||||
// otherwise.
|
||||
func (c *Conn) ReloadOrRestartUnit(name string, mode string, ch chan<- string) (int, error) {
|
||||
return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode)
|
||||
}
|
||||
|
||||
// ReloadOrTryRestart attempts a reload if the unit supports it and use a "Try"
|
||||
// flavored restart otherwise.
|
||||
func (c *Conn) ReloadOrTryRestartUnit(name string, mode string, ch chan<- string) (int, error) {
|
||||
return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode)
|
||||
}
|
||||
|
||||
// StartTransientUnit() may be used to create and start a transient unit, which
|
||||
// will be released as soon as it is not running or referenced anymore or the
|
||||
// system is rebooted. name is the unit name including suffix, and must be
|
||||
// unique. mode is the same as in StartUnit(), properties contains properties
|
||||
// of the unit.
|
||||
func (c *Conn) StartTransientUnit(name string, mode string, properties []Property, ch chan<- string) (int, error) {
|
||||
return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0))
|
||||
}
|
||||
|
||||
// KillUnit takes the unit name and a UNIX signal number to send. All of the unit's
|
||||
// processes are killed.
|
||||
func (c *Conn) KillUnit(name string, signal int32) {
|
||||
c.sysobj.Call("org.freedesktop.systemd1.Manager.KillUnit", 0, name, "all", signal).Store()
|
||||
}
|
||||
|
||||
// ResetFailedUnit resets the "failed" state of a specific unit.
|
||||
func (c *Conn) ResetFailedUnit(name string) error {
|
||||
return c.sysobj.Call("org.freedesktop.systemd1.Manager.ResetFailedUnit", 0, name).Store()
|
||||
}
|
||||
|
||||
// getProperties takes the unit name and returns all of its dbus object properties, for the given dbus interface
|
||||
func (c *Conn) getProperties(unit string, dbusInterface string) (map[string]interface{}, error) {
|
||||
var err error
|
||||
var props map[string]dbus.Variant
|
||||
|
||||
path := unitPath(unit)
|
||||
if !path.IsValid() {
|
||||
return nil, errors.New("invalid unit name: " + unit)
|
||||
}
|
||||
|
||||
obj := c.sysconn.Object("org.freedesktop.systemd1", path)
|
||||
err = obj.Call("org.freedesktop.DBus.Properties.GetAll", 0, dbusInterface).Store(&props)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
out := make(map[string]interface{}, len(props))
|
||||
for k, v := range props {
|
||||
out[k] = v.Value()
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// GetUnitProperties takes the unit name and returns all of its dbus object properties.
|
||||
func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) {
|
||||
return c.getProperties(unit, "org.freedesktop.systemd1.Unit")
|
||||
}
|
||||
|
||||
func (c *Conn) getProperty(unit string, dbusInterface string, propertyName string) (*Property, error) {
|
||||
var err error
|
||||
var prop dbus.Variant
|
||||
|
||||
path := unitPath(unit)
|
||||
if !path.IsValid() {
|
||||
return nil, errors.New("invalid unit name: " + unit)
|
||||
}
|
||||
|
||||
obj := c.sysconn.Object("org.freedesktop.systemd1", path)
|
||||
err = obj.Call("org.freedesktop.DBus.Properties.Get", 0, dbusInterface, propertyName).Store(&prop)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Property{Name: propertyName, Value: prop}, nil
|
||||
}
|
||||
|
||||
func (c *Conn) GetUnitProperty(unit string, propertyName string) (*Property, error) {
|
||||
return c.getProperty(unit, "org.freedesktop.systemd1.Unit", propertyName)
|
||||
}
|
||||
|
||||
// GetUnitTypeProperties returns the extra properties for a unit, specific to the unit type.
|
||||
// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope
|
||||
// return "dbus.Error: Unknown interface" if the unitType is not the correct type of the unit
|
||||
func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]interface{}, error) {
|
||||
return c.getProperties(unit, "org.freedesktop.systemd1."+unitType)
|
||||
}
|
||||
|
||||
// SetUnitProperties() may be used to modify certain unit properties at runtime.
|
||||
// Not all properties may be changed at runtime, but many resource management
|
||||
// settings (primarily those in systemd.cgroup(5)) may. The changes are applied
|
||||
// instantly, and stored on disk for future boots, unless runtime is true, in which
|
||||
// case the settings only apply until the next reboot. name is the name of the unit
|
||||
// to modify. properties are the settings to set, encoded as an array of property
|
||||
// name and value pairs.
|
||||
func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error {
|
||||
return c.sysobj.Call("org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store()
|
||||
}
|
||||
|
||||
func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) {
|
||||
return c.getProperty(unit, "org.freedesktop.systemd1."+unitType, propertyName)
|
||||
}
|
||||
|
||||
// ListUnits returns an array with all currently loaded units. Note that
|
||||
// units may be known by multiple names at the same time, and hence there might
|
||||
// be more unit names loaded than actual units behind them.
|
||||
func (c *Conn) ListUnits() ([]UnitStatus, error) {
|
||||
result := make([][]interface{}, 0)
|
||||
err := c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnits", 0).Store(&result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resultInterface := make([]interface{}, len(result))
|
||||
for i := range result {
|
||||
resultInterface[i] = result[i]
|
||||
}
|
||||
|
||||
status := make([]UnitStatus, len(result))
|
||||
statusInterface := make([]interface{}, len(status))
|
||||
for i := range status {
|
||||
statusInterface[i] = &status[i]
|
||||
}
|
||||
|
||||
err = dbus.Store(resultInterface, statusInterface...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return status, nil
|
||||
}
|
||||
|
||||
type UnitStatus struct {
|
||||
Name string // The primary unit name as string
|
||||
Description string // The human readable description string
|
||||
LoadState string // The load state (i.e. whether the unit file has been loaded successfully)
|
||||
ActiveState string // The active state (i.e. whether the unit is currently started or not)
|
||||
SubState string // The sub state (a more fine-grained version of the active state that is specific to the unit type, which the active state is not)
|
||||
Followed string // A unit that is being followed in its state by this unit, if there is any, otherwise the empty string.
|
||||
Path dbus.ObjectPath // The unit object path
|
||||
JobId uint32 // If there is a job queued for the job unit the numeric job id, 0 otherwise
|
||||
JobType string // The job type as string
|
||||
JobPath dbus.ObjectPath // The job object path
|
||||
}
|
||||
|
||||
type LinkUnitFileChange EnableUnitFileChange
|
||||
|
||||
// LinkUnitFiles() links unit files (that are located outside of the
|
||||
// usual unit search paths) into the unit search path.
|
||||
//
|
||||
// It takes a list of absolute paths to unit files to link and two
|
||||
// booleans. The first boolean controls whether the unit shall be
|
||||
// enabled for runtime only (true, /run), or persistently (false,
|
||||
// /etc).
|
||||
// The second controls whether symlinks pointing to other units shall
|
||||
// be replaced if necessary.
|
||||
//
|
||||
// This call returns a list of the changes made. The list consists of
|
||||
// structures with three strings: the type of the change (one of symlink
|
||||
// or unlink), the file name of the symlink and the destination of the
|
||||
// symlink.
|
||||
func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) {
|
||||
result := make([][]interface{}, 0)
|
||||
err := c.sysobj.Call("org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resultInterface := make([]interface{}, len(result))
|
||||
for i := range result {
|
||||
resultInterface[i] = result[i]
|
||||
}
|
||||
|
||||
changes := make([]LinkUnitFileChange, len(result))
|
||||
changesInterface := make([]interface{}, len(changes))
|
||||
for i := range changes {
|
||||
changesInterface[i] = &changes[i]
|
||||
}
|
||||
|
||||
err = dbus.Store(resultInterface, changesInterface...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return changes, nil
|
||||
}
|
||||
|
||||
// EnableUnitFiles() may be used to enable one or more units in the system (by
|
||||
// creating symlinks to them in /etc or /run).
|
||||
//
|
||||
// It takes a list of unit files to enable (either just file names or full
|
||||
// absolute paths if the unit files are residing outside the usual unit
|
||||
// search paths), and two booleans: the first controls whether the unit shall
|
||||
// be enabled for runtime only (true, /run), or persistently (false, /etc).
|
||||
// The second one controls whether symlinks pointing to other units shall
|
||||
// be replaced if necessary.
|
||||
//
|
||||
// This call returns one boolean and an array with the changes made. The
|
||||
// boolean signals whether the unit files contained any enablement
|
||||
// information (i.e. an [Install]) section. The changes list consists of
|
||||
// structures with three strings: the type of the change (one of symlink
|
||||
// or unlink), the file name of the symlink and the destination of the
|
||||
// symlink.
|
||||
func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) {
|
||||
var carries_install_info bool
|
||||
|
||||
result := make([][]interface{}, 0)
|
||||
err := c.sysobj.Call("org.freedesktop.systemd1.Manager.EnableUnitFiles", 0, files, runtime, force).Store(&carries_install_info, &result)
|
||||
if err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
|
||||
resultInterface := make([]interface{}, len(result))
|
||||
for i := range result {
|
||||
resultInterface[i] = result[i]
|
||||
}
|
||||
|
||||
changes := make([]EnableUnitFileChange, len(result))
|
||||
changesInterface := make([]interface{}, len(changes))
|
||||
for i := range changes {
|
||||
changesInterface[i] = &changes[i]
|
||||
}
|
||||
|
||||
err = dbus.Store(resultInterface, changesInterface...)
|
||||
if err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
|
||||
return carries_install_info, changes, nil
|
||||
}
|
||||
|
||||
type EnableUnitFileChange struct {
|
||||
Type string // Type of the change (one of symlink or unlink)
|
||||
Filename string // File name of the symlink
|
||||
Destination string // Destination of the symlink
|
||||
}
|
||||
|
||||
// DisableUnitFiles() may be used to disable one or more units in the system (by
|
||||
// removing symlinks to them from /etc or /run).
|
||||
//
|
||||
// It takes a list of unit files to disable (either just file names or full
|
||||
// absolute paths if the unit files are residing outside the usual unit
|
||||
// search paths), and one boolean: whether the unit was enabled for runtime
|
||||
// only (true, /run), or persistently (false, /etc).
|
||||
//
|
||||
// This call returns an array with the changes made. The changes list
|
||||
// consists of structures with three strings: the type of the change (one of
|
||||
// symlink or unlink), the file name of the symlink and the destination of the
|
||||
// symlink.
|
||||
func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) {
|
||||
result := make([][]interface{}, 0)
|
||||
err := c.sysobj.Call("org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resultInterface := make([]interface{}, len(result))
|
||||
for i := range result {
|
||||
resultInterface[i] = result[i]
|
||||
}
|
||||
|
||||
changes := make([]DisableUnitFileChange, len(result))
|
||||
changesInterface := make([]interface{}, len(changes))
|
||||
for i := range changes {
|
||||
changesInterface[i] = &changes[i]
|
||||
}
|
||||
|
||||
err = dbus.Store(resultInterface, changesInterface...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return changes, nil
|
||||
}
|
||||
|
||||
type DisableUnitFileChange struct {
|
||||
Type string // Type of the change (one of symlink or unlink)
|
||||
Filename string // File name of the symlink
|
||||
Destination string // Destination of the symlink
|
||||
}
|
||||
|
||||
// Reload instructs systemd to scan for and reload unit files. This is
|
||||
// equivalent to a 'systemctl daemon-reload'.
|
||||
func (c *Conn) Reload() error {
|
||||
return c.sysobj.Call("org.freedesktop.systemd1.Manager.Reload", 0).Store()
|
||||
}
|
||||
|
||||
func unitPath(name string) dbus.ObjectPath {
|
||||
return dbus.ObjectPath("/org/freedesktop/systemd1/unit/" + PathBusEscape(name))
|
||||
}
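
The job methods above are callback-free: results arrive on the caller's channel once systemd emits JobRemoved. A minimal, illustrative sketch (not part of the commit; "example.service" is a placeholder unit name, and the program assumes a running systemd with permission to manage units):

package main

import (
	"log"

	"github.com/coreos/go-systemd/dbus"
)

func main() {
	conn, err := dbus.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// StartUnit returns once the job is queued; the result string
	// ("done", "canceled", "timeout", "failed", ...) is delivered on
	// the channel when the queued job completes.
	reschan := make(chan string)
	if _, err := conn.StartUnit("example.service", "replace", reschan); err != nil {
		log.Fatal(err)
	}
	if result := <-reschan; result != "done" {
		log.Fatalf("start job ended with result %q", result)
	}
}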
libnetwork/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/methods_test.go (new file, generated, vendored; 345 lines)
@@ -0,0 +1,345 @@
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package dbus
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/godbus/dbus"
|
||||
)
|
||||
|
||||
func setupConn(t *testing.T) *Conn {
|
||||
conn, err := New()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
return conn
|
||||
}
|
||||
|
||||
func findFixture(target string, t *testing.T) string {
|
||||
abs, err := filepath.Abs("../fixtures/" + target)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return abs
|
||||
}
|
||||
|
||||
func setupUnit(target string, conn *Conn, t *testing.T) {
|
||||
// Blindly stop the unit in case it is running
|
||||
conn.StopUnit(target, "replace", nil)
|
||||
|
||||
// Blindly remove the symlink in case it exists
|
||||
targetRun := filepath.Join("/run/systemd/system/", target)
|
||||
os.Remove(targetRun)
|
||||
}
|
||||
|
||||
func linkUnit(target string, conn *Conn, t *testing.T) {
|
||||
abs := findFixture(target, t)
|
||||
fixture := []string{abs}
|
||||
|
||||
changes, err := conn.LinkUnitFiles(fixture, true, true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(changes) < 1 {
|
||||
t.Fatalf("Expected one change, got %v", changes)
|
||||
}
|
||||
|
||||
runPath := filepath.Join("/run/systemd/system/", target)
|
||||
if changes[0].Filename != runPath {
|
||||
t.Fatal("Unexpected target filename")
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that basic unit starting and stopping works.
|
||||
func TestStartStopUnit(t *testing.T) {
|
||||
target := "start-stop.service"
|
||||
conn := setupConn(t)
|
||||
|
||||
setupUnit(target, conn, t)
|
||||
linkUnit(target, conn, t)
|
||||
|
||||
// 2. Start the unit
|
||||
reschan := make(chan string)
|
||||
_, err := conn.StartUnit(target, "replace", reschan)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
job := <-reschan
|
||||
if job != "done" {
|
||||
t.Fatal("Job is not done:", job)
|
||||
}
|
||||
|
||||
units, err := conn.ListUnits()
|
||||
|
||||
var unit *UnitStatus
|
||||
for _, u := range units {
|
||||
if u.Name == target {
|
||||
unit = &u
|
||||
}
|
||||
}
|
||||
|
||||
if unit == nil {
|
||||
t.Fatalf("Test unit not found in list")
|
||||
}
|
||||
|
||||
if unit.ActiveState != "active" {
|
||||
t.Fatalf("Test unit not active")
|
||||
}
|
||||
|
||||
// 3. Stop the unit
|
||||
_, err = conn.StopUnit(target, "replace", reschan)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// wait for StopUnit job to complete
|
||||
<-reschan
|
||||
|
||||
units, err = conn.ListUnits()
|
||||
|
||||
unit = nil
|
||||
for _, u := range units {
|
||||
if u.Name == target {
|
||||
unit = &u
|
||||
}
|
||||
}
|
||||
|
||||
if unit != nil {
|
||||
t.Fatalf("Test unit found in list, should be stopped")
|
||||
}
|
||||
}
|
||||
|
||||
// Enables a unit and then immediately tears it down
|
||||
func TestEnableDisableUnit(t *testing.T) {
|
||||
target := "enable-disable.service"
|
||||
conn := setupConn(t)
|
||||
|
||||
setupUnit(target, conn, t)
|
||||
abs := findFixture(target, t)
|
||||
runPath := filepath.Join("/run/systemd/system/", target)
|
||||
|
||||
// 1. Enable the unit
|
||||
install, changes, err := conn.EnableUnitFiles([]string{abs}, true, true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if install != false {
|
||||
t.Fatal("Install was true")
|
||||
}
|
||||
|
||||
if len(changes) < 1 {
|
||||
t.Fatalf("Expected one change, got %v", changes)
|
||||
}
|
||||
|
||||
if changes[0].Filename != runPath {
|
||||
t.Fatal("Unexpected target filename")
|
||||
}
|
||||
|
||||
// 2. Disable the unit
|
||||
dChanges, err := conn.DisableUnitFiles([]string{abs}, true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(dChanges) != 1 {
|
||||
t.Fatalf("Changes should include the path, %v", dChanges)
|
||||
}
|
||||
if dChanges[0].Filename != runPath {
|
||||
t.Fatalf("Change should include correct filename, %+v", dChanges[0])
|
||||
}
|
||||
if dChanges[0].Destination != "" {
|
||||
t.Fatalf("Change destination should be empty, %+v", dChanges[0])
|
||||
}
|
||||
}
|
||||
|
||||
// TestGetUnitProperties reads the `-.mount` which should exist on all systemd
|
||||
// systems and ensures that one of its properties is valid.
|
||||
func TestGetUnitProperties(t *testing.T) {
|
||||
conn := setupConn(t)
|
||||
|
||||
unit := "-.mount"
|
||||
|
||||
info, err := conn.GetUnitProperties(unit)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
names := info["Wants"].([]string)
|
||||
|
||||
if len(names) < 1 {
|
||||
t.Fatal("/ is unwanted")
|
||||
}
|
||||
|
||||
if names[0] != "system.slice" {
|
||||
t.Fatal("unexpected wants for /")
|
||||
}
|
||||
|
||||
prop, err := conn.GetUnitProperty(unit, "Wants")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if prop.Name != "Wants" {
|
||||
t.Fatal("unexpected property name")
|
||||
}
|
||||
|
||||
val := prop.Value.Value().([]string)
|
||||
if !reflect.DeepEqual(val, names) {
|
||||
t.Fatal("unexpected property value")
|
||||
}
|
||||
}
|
||||
|
||||
// TestGetUnitPropertiesRejectsInvalidName attempts to get the properties for a
|
||||
// unit with an invalid name. This test should be run with --test.timeout set,
|
||||
// as a fail will manifest as GetUnitProperties hanging indefinitely.
|
||||
func TestGetUnitPropertiesRejectsInvalidName(t *testing.T) {
|
||||
conn := setupConn(t)
|
||||
|
||||
unit := "//invalid#$^/"
|
||||
|
||||
_, err := conn.GetUnitProperties(unit)
|
||||
if err == nil {
|
||||
t.Fatal("Expected an error, got nil")
|
||||
}
|
||||
|
||||
_, err = conn.GetUnitProperty(unit, "Wants")
|
||||
if err == nil {
|
||||
t.Fatal("Expected an error, got nil")
|
||||
}
|
||||
}
|
||||
|
||||
// TestSetUnitProperties changes a cgroup setting on the `tmp.mount`
|
||||
// which should exist on all systemd systems and ensures that the
|
||||
// property was set.
|
||||
func TestSetUnitProperties(t *testing.T) {
|
||||
conn := setupConn(t)
|
||||
|
||||
unit := "tmp.mount"
|
||||
|
||||
if err := conn.SetUnitProperties(unit, true, Property{"CPUShares", dbus.MakeVariant(uint64(1023))}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
info, err := conn.GetUnitTypeProperties(unit, "Mount")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
value := info["CPUShares"].(uint64)
|
||||
if value != 1023 {
|
||||
t.Fatal("CPUShares of unit is not 1023:", value)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that basic transient unit starting and stopping works.
|
||||
func TestStartStopTransientUnit(t *testing.T) {
|
||||
conn := setupConn(t)
|
||||
|
||||
props := []Property{
|
||||
PropExecStart([]string{"/bin/sleep", "400"}, false),
|
||||
}
|
||||
target := fmt.Sprintf("testing-transient-%d.service", rand.Int())
|
||||
|
||||
// Start the unit
|
||||
reschan := make(chan string)
|
||||
_, err := conn.StartTransientUnit(target, "replace", props, reschan)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
job := <-reschan
|
||||
if job != "done" {
|
||||
t.Fatal("Job is not done:", job)
|
||||
}
|
||||
|
||||
units, err := conn.ListUnits()
|
||||
|
||||
var unit *UnitStatus
|
||||
for _, u := range units {
|
||||
if u.Name == target {
|
||||
unit = &u
|
||||
}
|
||||
}
|
||||
|
||||
if unit == nil {
|
||||
t.Fatalf("Test unit not found in list")
|
||||
}
|
||||
|
||||
if unit.ActiveState != "active" {
|
||||
t.Fatalf("Test unit not active")
|
||||
}
|
||||
|
||||
// 3. Stop the unit
|
||||
_, err = conn.StopUnit(target, "replace", reschan)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// wait for StopUnit job to complete
|
||||
<-reschan
|
||||
|
||||
units, err = conn.ListUnits()
|
||||
|
||||
unit = nil
|
||||
for _, u := range units {
|
||||
if u.Name == target {
|
||||
unit = &u
|
||||
}
|
||||
}
|
||||
|
||||
if unit != nil {
|
||||
t.Fatalf("Test unit found in list, should be stopped")
|
||||
}
|
||||
}
|
||||
|
||||
func TestConnJobListener(t *testing.T) {
|
||||
target := "start-stop.service"
|
||||
conn := setupConn(t)
|
||||
|
||||
setupUnit(target, conn, t)
|
||||
linkUnit(target, conn, t)
|
||||
|
||||
jobSize := len(conn.jobListener.jobs)
|
||||
|
||||
reschan := make(chan string)
|
||||
_, err := conn.StartUnit(target, "replace", reschan)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
<-reschan
|
||||
|
||||
_, err = conn.StopUnit(target, "replace", reschan)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
<-reschan
|
||||
|
||||
currentJobSize := len(conn.jobListener.jobs)
|
||||
if jobSize != currentJobSize {
|
||||
t.Fatal("JobListener jobs leaked")
|
||||
}
|
||||
}
libnetwork/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/properties.go (new file, generated, vendored; 218 lines)
@@ -0,0 +1,218 @@
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package dbus
|
||||
|
||||
import (
|
||||
"github.com/godbus/dbus"
|
||||
)
|
||||
|
||||
// From the systemd docs:
|
||||
//
|
||||
// The properties array of StartTransientUnit() may take many of the settings
|
||||
// that may also be configured in unit files. Not all parameters are currently
|
||||
// accepted though, but we plan to cover more properties with future release.
|
||||
// Currently you may set the Description, Slice and all dependency types of
|
||||
// units, as well as RemainAfterExit, ExecStart for service units,
|
||||
// TimeoutStopUSec and PIDs for scope units, and CPUAccounting, CPUShares,
|
||||
// BlockIOAccounting, BlockIOWeight, BlockIOReadBandwidth,
|
||||
// BlockIOWriteBandwidth, BlockIODeviceWeight, MemoryAccounting, MemoryLimit,
|
||||
// DevicePolicy, DeviceAllow for services/scopes/slices. These fields map
|
||||
// directly to their counterparts in unit files and as normal D-Bus object
|
||||
// properties. The exception here is the PIDs field of scope units which is
|
||||
// used for construction of the scope only and specifies the initial PIDs to
|
||||
// add to the scope object.
|
||||
|
||||
type Property struct {
|
||||
Name string
|
||||
Value dbus.Variant
|
||||
}
|
||||
|
||||
type PropertyCollection struct {
|
||||
Name string
|
||||
Properties []Property
|
||||
}
|
||||
|
||||
type execStart struct {
|
||||
Path string // the binary path to execute
|
||||
Args []string // an array with all arguments to pass to the executed command, starting with argument 0
|
||||
UncleanIsFailure bool // a boolean whether it should be considered a failure if the process exits uncleanly
|
||||
}
|
||||
|
||||
// PropExecStart sets the ExecStart service property. The first argument is a
|
||||
// slice with the binary path to execute followed by the arguments to pass to
|
||||
// the executed command. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.service.html#ExecStart=
|
||||
func PropExecStart(command []string, uncleanIsFailure bool) Property {
|
||||
execStarts := []execStart{
|
||||
execStart{
|
||||
Path: command[0],
|
||||
Args: command,
|
||||
UncleanIsFailure: uncleanIsFailure,
|
||||
},
|
||||
}
|
||||
|
||||
return Property{
|
||||
Name: "ExecStart",
|
||||
Value: dbus.MakeVariant(execStarts),
|
||||
}
|
||||
}
|
||||
|
||||
// PropRemainAfterExit sets the RemainAfterExit service property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.service.html#RemainAfterExit=
|
||||
func PropRemainAfterExit(b bool) Property {
|
||||
return Property{
|
||||
Name: "RemainAfterExit",
|
||||
Value: dbus.MakeVariant(b),
|
||||
}
|
||||
}
|
||||
|
||||
// PropDescription sets the Description unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit#Description=
|
||||
func PropDescription(desc string) Property {
|
||||
return Property{
|
||||
Name: "Description",
|
||||
Value: dbus.MakeVariant(desc),
|
||||
}
|
||||
}
|
||||
|
||||
func propDependency(name string, units []string) Property {
|
||||
return Property{
|
||||
Name: name,
|
||||
Value: dbus.MakeVariant(units),
|
||||
}
|
||||
}
|
||||
|
||||
// PropRequires sets the Requires unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requires=
|
||||
func PropRequires(units ...string) Property {
|
||||
return propDependency("Requires", units)
|
||||
}
|
||||
|
||||
// PropRequiresOverridable sets the RequiresOverridable unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresOverridable=
|
||||
func PropRequiresOverridable(units ...string) Property {
|
||||
return propDependency("RequiresOverridable", units)
|
||||
}
|
||||
|
||||
// PropRequisite sets the Requisite unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requisite=
|
||||
func PropRequisite(units ...string) Property {
|
||||
return propDependency("Requisite", units)
|
||||
}
|
||||
|
||||
// PropRequisiteOverridable sets the RequisiteOverridable unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequisiteOverridable=
|
||||
func PropRequisiteOverridable(units ...string) Property {
|
||||
return propDependency("RequisiteOverridable", units)
|
||||
}
|
||||
|
||||
// PropWants sets the Wants unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Wants=
|
||||
func PropWants(units ...string) Property {
|
||||
return propDependency("Wants", units)
|
||||
}
|
||||
|
||||
// PropBindsTo sets the BindsTo unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BindsTo=
|
||||
func PropBindsTo(units ...string) Property {
|
||||
return propDependency("BindsTo", units)
|
||||
}
|
||||
|
||||
// PropRequiredBy sets the RequiredBy unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredBy=
|
||||
func PropRequiredBy(units ...string) Property {
|
||||
return propDependency("RequiredBy", units)
|
||||
}
|
||||
|
||||
// PropRequiredByOverridable sets the RequiredByOverridable unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredByOverridable=
|
||||
func PropRequiredByOverridable(units ...string) Property {
|
||||
return propDependency("RequiredByOverridable", units)
|
||||
}
|
||||
|
||||
// PropWantedBy sets the WantedBy unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#WantedBy=
|
||||
func PropWantedBy(units ...string) Property {
|
||||
return propDependency("WantedBy", units)
|
||||
}
|
||||
|
||||
// PropBoundBy sets the BoundBy unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/main/systemd.unit.html#BoundBy=
|
||||
func PropBoundBy(units ...string) Property {
|
||||
return propDependency("BoundBy", units)
|
||||
}
|
||||
|
||||
// PropConflicts sets the Conflicts unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Conflicts=
|
||||
func PropConflicts(units ...string) Property {
|
||||
return propDependency("Conflicts", units)
|
||||
}
|
||||
|
||||
// PropConflictedBy sets the ConflictedBy unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#ConflictedBy=
|
||||
func PropConflictedBy(units ...string) Property {
|
||||
return propDependency("ConflictedBy", units)
|
||||
}
|
||||
|
||||
// PropBefore sets the Before unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before=
|
||||
func PropBefore(units ...string) Property {
|
||||
return propDependency("Before", units)
|
||||
}
|
||||
|
||||
// PropAfter sets the After unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#After=
|
||||
func PropAfter(units ...string) Property {
|
||||
return propDependency("After", units)
|
||||
}
|
||||
|
||||
// PropOnFailure sets the OnFailure unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#OnFailure=
|
||||
func PropOnFailure(units ...string) Property {
|
||||
return propDependency("OnFailure", units)
|
||||
}
|
||||
|
||||
// PropTriggers sets the Triggers unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Triggers=
|
||||
func PropTriggers(units ...string) Property {
|
||||
return propDependency("Triggers", units)
|
||||
}
|
||||
|
||||
// PropTriggeredBy sets the TriggeredBy unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#TriggeredBy=
|
||||
func PropTriggeredBy(units ...string) Property {
|
||||
return propDependency("TriggeredBy", units)
|
||||
}
|
||||
|
||||
// PropPropagatesReloadTo sets the PropagatesReloadTo unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#PropagatesReloadTo=
|
||||
func PropPropagatesReloadTo(units ...string) Property {
|
||||
return propDependency("PropagatesReloadTo", units)
|
||||
}
|
||||
|
||||
// PropRequiresMountsFor sets the RequiresMountsFor unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresMountsFor=
|
||||
func PropRequiresMountsFor(units ...string) Property {
|
||||
return propDependency("RequiresMountsFor", units)
|
||||
}
|
||||
|
||||
// PropSlice sets the Slice unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#Slice=
|
||||
func PropSlice(slice string) Property {
|
||||
return Property{
|
||||
Name: "Slice",
|
||||
Value: dbus.MakeVariant(slice),
|
||||
}
|
||||
}
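
The property helpers above pair with StartTransientUnit from methods.go; the following illustrative sketch mirrors TestStartStopTransientUnit in methods_test.go (the unit name is a placeholder and the call assumes a running systemd):

package main

import (
	"log"

	"github.com/coreos/go-systemd/dbus"
)

func main() {
	conn, err := dbus.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Properties map to unit-file settings: ExecStart is built from the
	// command slice, Description is a plain string variant.
	props := []dbus.Property{
		dbus.PropDescription("transient sleep unit (example)"),
		dbus.PropExecStart([]string{"/bin/sleep", "400"}, false),
	}

	reschan := make(chan string)
	if _, err := conn.StartTransientUnit("example-transient.service", "replace", props, reschan); err != nil {
		log.Fatal(err)
	}
	log.Println("transient unit job finished:", <-reschan)
}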
libnetwork/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/set.go (new file, generated, vendored; 47 lines)
@@ -0,0 +1,47 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package dbus

type set struct {
	data map[string]bool
}

func (s *set) Add(value string) {
	s.data[value] = true
}

func (s *set) Remove(value string) {
	delete(s.data, value)
}

func (s *set) Contains(value string) (exists bool) {
	_, exists = s.data[value]
	return
}

func (s *set) Length() int {
	return len(s.data)
}

func (s *set) Values() (values []string) {
	for val, _ := range s.data {
		values = append(values, val)
	}
	return
}

func newSet() *set {
	return &set{make(map[string]bool)}
}
libnetwork/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/set_test.go (new file, generated, vendored; 53 lines)
@@ -0,0 +1,53 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package dbus

import (
	"testing"
)

// TestBasicSetActions asserts that Add & Remove behavior is correct
func TestBasicSetActions(t *testing.T) {
	s := newSet()

	if s.Contains("foo") {
		t.Fatal("set should not contain 'foo'")
	}

	s.Add("foo")

	if !s.Contains("foo") {
		t.Fatal("set should contain 'foo'")
	}

	v := s.Values()
	if len(v) != 1 {
		t.Fatal("set.Values did not report correct number of values")
	}
	if v[0] != "foo" {
		t.Fatal("set.Values did not report value")
	}

	s.Remove("foo")

	if s.Contains("foo") {
		t.Fatal("set should not contain 'foo'")
	}

	v = s.Values()
	if len(v) != 0 {
		t.Fatal("set.Values did not report correct number of values")
	}
}
libnetwork/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/subscription.go (new file, generated, vendored; 250 lines)
@@ -0,0 +1,250 @@
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package dbus
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"github.com/godbus/dbus"
|
||||
)
|
||||
|
||||
const (
|
||||
cleanIgnoreInterval = int64(10 * time.Second)
|
||||
ignoreInterval = int64(30 * time.Millisecond)
|
||||
)
|
||||
|
||||
// Subscribe sets up this connection to subscribe to all systemd dbus events.
|
||||
// This is required before calling SubscribeUnits. When the connection closes
|
||||
// systemd will automatically stop sending signals so there is no need to
|
||||
// explicitly call Unsubscribe().
func (c *Conn) Subscribe() error {
	c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
		"type='signal',interface='org.freedesktop.systemd1.Manager',member='UnitNew'")
	c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
		"type='signal',interface='org.freedesktop.DBus.Properties',member='PropertiesChanged'")

	err := c.sigobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store()
	if err != nil {
		return err
	}

	return nil
}

// Unsubscribe this connection from systemd dbus events.
func (c *Conn) Unsubscribe() error {
	err := c.sigobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store()
	if err != nil {
		return err
	}

	return nil
}

func (c *Conn) dispatch() {
	ch := make(chan *dbus.Signal, signalBuffer)

	c.sigconn.Signal(ch)

	go func() {
		for {
			signal, ok := <-ch
			if !ok {
				return
			}

			if signal.Name == "org.freedesktop.systemd1.Manager.JobRemoved" {
				c.jobComplete(signal)
			}

			if c.subscriber.updateCh == nil {
				continue
			}

			var unitPath dbus.ObjectPath
			switch signal.Name {
			case "org.freedesktop.systemd1.Manager.JobRemoved":
				unitName := signal.Body[2].(string)
				c.sysobj.Call("org.freedesktop.systemd1.Manager.GetUnit", 0, unitName).Store(&unitPath)
			case "org.freedesktop.systemd1.Manager.UnitNew":
				unitPath = signal.Body[1].(dbus.ObjectPath)
			case "org.freedesktop.DBus.Properties.PropertiesChanged":
				if signal.Body[0].(string) == "org.freedesktop.systemd1.Unit" {
					unitPath = signal.Path
				}
			}

			if unitPath == dbus.ObjectPath("") {
				continue
			}

			c.sendSubStateUpdate(unitPath)
		}
	}()
}

// Returns two unbuffered channels which will receive all changed units every
// interval. Deleted units are sent as nil.
func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitStatus, <-chan error) {
	return c.SubscribeUnitsCustom(interval, 0, func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, nil)
}

// SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer
// size of the channels, the comparison function for detecting changes and a filter
// function for cutting down on the noise that your channel receives.
func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func(string) bool) (<-chan map[string]*UnitStatus, <-chan error) {
	old := make(map[string]*UnitStatus)
	statusChan := make(chan map[string]*UnitStatus, buffer)
	errChan := make(chan error, buffer)

	go func() {
		for {
			timerChan := time.After(interval)

			units, err := c.ListUnits()
			if err == nil {
				cur := make(map[string]*UnitStatus)
				for i := range units {
					if filterUnit != nil && filterUnit(units[i].Name) {
						continue
					}
					cur[units[i].Name] = &units[i]
				}

				// add all new or changed units
				changed := make(map[string]*UnitStatus)
				for n, u := range cur {
					if oldU, ok := old[n]; !ok || isChanged(oldU, u) {
						changed[n] = u
					}
					delete(old, n)
				}

				// add all deleted units
				for oldN := range old {
					changed[oldN] = nil
				}

				old = cur

				if len(changed) != 0 {
					statusChan <- changed
				}
			} else {
				errChan <- err
			}

			<-timerChan
		}
	}()

	return statusChan, errChan
}

type SubStateUpdate struct {
	UnitName string
	SubState string
}

// SetSubStateSubscriber writes to updateCh when any unit's substate changes.
// Although this writes to updateCh on every state change, the reported state
// may be more recent than the change that generated it (due to an unavoidable
// race in the systemd dbus interface). That is, this method provides a good
// way to keep a current view of all units' states, but is not guaranteed to
// show every state transition they go through. Furthermore, state changes
// will only be written to the channel with non-blocking writes. If updateCh
// is full, it attempts to write an error to errCh; if errCh is full, the error
// passes silently.
func (c *Conn) SetSubStateSubscriber(updateCh chan<- *SubStateUpdate, errCh chan<- error) {
	c.subscriber.Lock()
	defer c.subscriber.Unlock()
	c.subscriber.updateCh = updateCh
	c.subscriber.errCh = errCh
}

func (c *Conn) sendSubStateUpdate(path dbus.ObjectPath) {
	c.subscriber.Lock()
	defer c.subscriber.Unlock()

	if c.shouldIgnore(path) {
		return
	}

	info, err := c.GetUnitProperties(string(path))
	if err != nil {
		select {
		case c.subscriber.errCh <- err:
		default:
		}
	}

	name := info["Id"].(string)
	substate := info["SubState"].(string)

	update := &SubStateUpdate{name, substate}
	select {
	case c.subscriber.updateCh <- update:
	default:
		select {
		case c.subscriber.errCh <- errors.New("update channel full!"):
		default:
		}
	}

	c.updateIgnore(path, info)
}

// The ignore functions work around a wart in the systemd dbus interface.
// Requesting the properties of an unloaded unit will cause systemd to send a
// pair of UnitNew/UnitRemoved signals. Because we need to get a unit's
// properties on UnitNew (as that's the only indication of a new unit coming up
// for the first time), we would enter an infinite loop if we did not attempt
// to detect and ignore these spurious signals. The signals themselves are
// indistinguishable from relevant ones, so we (somewhat hackishly) ignore an
// unloaded unit's signals for a short time after requesting its properties.
// This means that we will miss e.g. a transient unit being restarted
// *immediately* upon failure and also a transient unit being started
// immediately after requesting its status (with systemctl status, for example,
// because this causes a UnitNew signal to be sent which then causes us to fetch
// the properties).

func (c *Conn) shouldIgnore(path dbus.ObjectPath) bool {
	t, ok := c.subscriber.ignore[path]
	return ok && t >= time.Now().UnixNano()
}

func (c *Conn) updateIgnore(path dbus.ObjectPath, info map[string]interface{}) {
	c.cleanIgnore()

	// unit is unloaded - it will trigger bad systemd dbus behavior
	if info["LoadState"].(string) == "not-found" {
		c.subscriber.ignore[path] = time.Now().UnixNano() + ignoreInterval
	}
}

// without this, ignore would grow unboundedly over time
func (c *Conn) cleanIgnore() {
	now := time.Now().UnixNano()
	if c.subscriber.cleanIgnore < now {
		c.subscriber.cleanIgnore = now + cleanIgnoreInterval

		for p, t := range c.subscriber.ignore {
			if t < now {
				delete(c.subscriber.ignore, p)
			}
		}
	}
}
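For context, a minimal consumer of the subscription API above might look like the following sketch. It assumes the vendored package is imported under its canonical path github.com/coreos/go-systemd/dbus; the printed messages are illustrative only.

package main

import (
    "fmt"
    "time"

    "github.com/coreos/go-systemd/dbus"
)

func main() {
    conn, err := dbus.New()
    if err != nil {
        panic(err)
    }

    // Ask systemd to emit UnitNew/PropertiesChanged signals on this connection.
    if err := conn.Subscribe(); err != nil {
        panic(err)
    }

    // Receive a map of changed units once per second; deleted units arrive as nil.
    statusCh, errCh := conn.SubscribeUnits(time.Second)
    for {
        select {
        case changed := <-statusCh:
            for name, status := range changed {
                if status == nil {
                    fmt.Println(name, "was removed")
                    continue
                }
                fmt.Println(name, "is now", status.ActiveState+"/"+status.SubState)
            }
        case err := <-errCh:
            fmt.Println("subscription error:", err)
        }
    }
}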
57
libnetwork/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/subscription_set.go
generated
vendored
Normal file
@ -0,0 +1,57 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package dbus

import (
    "time"
)

// SubscriptionSet returns a subscription set which is like conn.Subscribe but
// can filter to only return events for a set of units.
type SubscriptionSet struct {
    *set
    conn *Conn
}

func (s *SubscriptionSet) filter(unit string) bool {
    return !s.Contains(unit)
}

// Subscribe starts listening for dbus events for all of the units in the set.
// Returns channels identical to conn.SubscribeUnits.
func (s *SubscriptionSet) Subscribe() (<-chan map[string]*UnitStatus, <-chan error) {
    // TODO: Make fully evented by using systemd 209 with properties changed values
    return s.conn.SubscribeUnitsCustom(time.Second, 0,
        mismatchUnitStatus,
        func(unit string) bool { return s.filter(unit) },
    )
}

// NewSubscriptionSet returns a new subscription set.
func (conn *Conn) NewSubscriptionSet() *SubscriptionSet {
    return &SubscriptionSet{newSet(), conn}
}

// mismatchUnitStatus returns true if the provided UnitStatus objects
// are not equivalent. false is returned if the objects are equivalent.
// Only the Name, Description and state-related fields are used in
// the comparison.
func mismatchUnitStatus(u1, u2 *UnitStatus) bool {
    return u1.Name != u2.Name ||
        u1.Description != u2.Description ||
        u1.LoadState != u2.LoadState ||
        u1.ActiveState != u2.ActiveState ||
        u1.SubState != u2.SubState
}
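A sketch of how this set-based filter might be consumed, under the same import-path assumption as above; "docker.service" is just an illustrative unit name.

package main

import (
    "fmt"

    "github.com/coreos/go-systemd/dbus"
)

func main() {
    conn, err := dbus.New()
    if err != nil {
        panic(err)
    }
    if err := conn.Subscribe(); err != nil {
        panic(err)
    }

    // Only events for units added to the set are delivered on statusCh.
    set := conn.NewSubscriptionSet()
    set.Add("docker.service")

    statusCh, errCh := set.Subscribe()
    for {
        select {
        case changed := <-statusCh:
            fmt.Println(changed)
        case err := <-errCh:
            fmt.Println("subscription error:", err)
        }
    }
}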
82
libnetwork/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/subscription_set_test.go
generated
vendored
Normal file
@ -0,0 +1,82 @@
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package dbus
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TestSubscribeUnit exercises the basics of subscription of a particular unit.
|
||||
func TestSubscriptionSetUnit(t *testing.T) {
|
||||
target := "subscribe-events-set.service"
|
||||
|
||||
conn, err := New()
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = conn.Subscribe()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
subSet := conn.NewSubscriptionSet()
|
||||
evChan, errChan := subSet.Subscribe()
|
||||
|
||||
subSet.Add(target)
|
||||
setupUnit(target, conn, t)
|
||||
linkUnit(target, conn, t)
|
||||
|
||||
reschan := make(chan string)
|
||||
_, err = conn.StartUnit(target, "replace", reschan)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
job := <-reschan
|
||||
if job != "done" {
|
||||
t.Fatal("Couldn't start", target)
|
||||
}
|
||||
|
||||
timeout := make(chan bool, 1)
|
||||
go func() {
|
||||
time.Sleep(3 * time.Second)
|
||||
close(timeout)
|
||||
}()
|
||||
|
||||
for {
|
||||
select {
|
||||
case changes := <-evChan:
|
||||
tCh, ok := changes[target]
|
||||
|
||||
if !ok {
|
||||
t.Fatal("Unexpected event:", changes)
|
||||
}
|
||||
|
||||
if tCh.ActiveState == "active" && tCh.Name == target {
|
||||
goto success
|
||||
}
|
||||
case err = <-errChan:
|
||||
t.Fatal(err)
|
||||
case <-timeout:
|
||||
t.Fatal("Reached timeout")
|
||||
}
|
||||
}
|
||||
|
||||
success:
|
||||
return
|
||||
}
|
105
libnetwork/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/subscription_test.go
generated
vendored
Normal file
@ -0,0 +1,105 @@
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package dbus
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TestSubscribe exercises the basics of subscription
|
||||
func TestSubscribe(t *testing.T) {
|
||||
conn, err := New()
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = conn.Subscribe()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = conn.Unsubscribe()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestSubscribeUnit exercises the basics of subscription of a particular unit.
|
||||
func TestSubscribeUnit(t *testing.T) {
|
||||
target := "subscribe-events.service"
|
||||
|
||||
conn, err := New()
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = conn.Subscribe()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = conn.Unsubscribe()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
evChan, errChan := conn.SubscribeUnits(time.Second)
|
||||
|
||||
setupUnit(target, conn, t)
|
||||
linkUnit(target, conn, t)
|
||||
|
||||
reschan := make(chan string)
|
||||
_, err = conn.StartUnit(target, "replace", reschan)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
job := <-reschan
|
||||
if job != "done" {
|
||||
t.Fatal("Couldn't start", target)
|
||||
}
|
||||
|
||||
timeout := make(chan bool, 1)
|
||||
go func() {
|
||||
time.Sleep(3 * time.Second)
|
||||
close(timeout)
|
||||
}()
|
||||
|
||||
for {
|
||||
select {
|
||||
case changes := <-evChan:
|
||||
tCh, ok := changes[target]
|
||||
|
||||
// Just continue until we see our event.
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
if tCh.ActiveState == "active" && tCh.Name == target {
|
||||
goto success
|
||||
}
|
||||
case err = <-errChan:
|
||||
t.Fatal(err)
|
||||
case <-timeout:
|
||||
t.Fatal("Reached timeout")
|
||||
}
|
||||
}
|
||||
|
||||
success:
|
||||
return
|
||||
}
|
33
libnetwork/Godeps/_workspace/src/github.com/coreos/go-systemd/util/util.go
generated
vendored
Normal file
@ -0,0 +1,33 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package util contains utility functions related to systemd that applications
// can use to check things like whether systemd is running.
package util

import (
    "os"
)

// IsRunningSystemd checks whether the host was booted with systemd as its init
// system. This functions similarly to systemd's `sd_booted(3)`: internally, it
// checks whether /run/systemd/system/ exists and is a directory.
// http://www.freedesktop.org/software/systemd/man/sd_booted.html
func IsRunningSystemd() bool {
    fi, err := os.Lstat("/run/systemd/system")
    if err != nil {
        return false
    }
    return fi.IsDir()
}
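A quick sketch of the guard this helper enables, assuming the package is imported as github.com/coreos/go-systemd/util; the printed messages are illustrative.

package main

import (
    "fmt"

    "github.com/coreos/go-systemd/util"
)

func main() {
    if util.IsRunningSystemd() {
        fmt.Println("host was booted with systemd as init")
    } else {
        fmt.Println("systemd not detected; skip systemd-specific integration")
    }
}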
69
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags.go
generated
vendored
Normal file
@ -0,0 +1,69 @@
package mount

import (
    "strings"
)

// Parse fstab type mount options into mount() flags
// and device specific data
func parseOptions(options string) (int, string) {
    var (
        flag int
        data []string
    )

    flags := map[string]struct {
        clear bool
        flag  int
    }{
        "defaults":      {false, 0},
        "ro":            {false, RDONLY},
        "rw":            {true, RDONLY},
        "suid":          {true, NOSUID},
        "nosuid":        {false, NOSUID},
        "dev":           {true, NODEV},
        "nodev":         {false, NODEV},
        "exec":          {true, NOEXEC},
        "noexec":        {false, NOEXEC},
        "sync":          {false, SYNCHRONOUS},
        "async":         {true, SYNCHRONOUS},
        "dirsync":       {false, DIRSYNC},
        "remount":       {false, REMOUNT},
        "mand":          {false, MANDLOCK},
        "nomand":        {true, MANDLOCK},
        "atime":         {true, NOATIME},
        "noatime":       {false, NOATIME},
        "diratime":      {true, NODIRATIME},
        "nodiratime":    {false, NODIRATIME},
        "bind":          {false, BIND},
        "rbind":         {false, RBIND},
        "unbindable":    {false, UNBINDABLE},
        "runbindable":   {false, RUNBINDABLE},
        "private":       {false, PRIVATE},
        "rprivate":      {false, RPRIVATE},
        "shared":        {false, SHARED},
        "rshared":       {false, RSHARED},
        "slave":         {false, SLAVE},
        "rslave":        {false, RSLAVE},
        "relatime":      {false, RELATIME},
        "norelatime":    {true, RELATIME},
        "strictatime":   {false, STRICTATIME},
        "nostrictatime": {true, STRICTATIME},
    }

    for _, o := range strings.Split(options, ",") {
        // If the option does not exist in the flags table or the flag
        // is not supported on the platform,
        // then it is a data value for a specific fs type
        if f, exists := flags[o]; exists && f.flag != 0 {
            if f.clear {
                flag &= ^f.flag
            } else {
                flag |= f.flag
            }
        } else {
            data = append(data, o)
        }
    }
    return flag, strings.Join(data, ",")
}
48
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_freebsd.go
generated
vendored
Normal file
@ -0,0 +1,48 @@
// +build freebsd,cgo

package mount

/*
#include <sys/mount.h>
*/
import "C"

const (
    // RDONLY will mount the filesystem as read-only.
    RDONLY = C.MNT_RDONLY

    // NOSUID will not allow set-user-identifier or set-group-identifier bits to
    // take effect.
    NOSUID = C.MNT_NOSUID

    // NOEXEC will not allow execution of any binaries on the mounted file system.
    NOEXEC = C.MNT_NOEXEC

    // SYNCHRONOUS will allow any I/O to the file system to be done synchronously.
    SYNCHRONOUS = C.MNT_SYNCHRONOUS

    // NOATIME will not update the file access time when reading from a file.
    NOATIME = C.MNT_NOATIME
)

// These flags are unsupported.
const (
    BIND        = 0
    DIRSYNC     = 0
    MANDLOCK    = 0
    NODEV       = 0
    NODIRATIME  = 0
    UNBINDABLE  = 0
    RUNBINDABLE = 0
    PRIVATE     = 0
    RPRIVATE    = 0
    SHARED      = 0
    RSHARED     = 0
    SLAVE       = 0
    RSLAVE      = 0
    RBIND       = 0
    RELATIVE    = 0
    RELATIME    = 0
    REMOUNT     = 0
    STRICTATIME = 0
)
85
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_linux.go
generated
vendored
Normal file
@ -0,0 +1,85 @@
package mount

import (
    "syscall"
)

const (
    // RDONLY will mount the file system read-only.
    RDONLY = syscall.MS_RDONLY

    // NOSUID will not allow set-user-identifier or set-group-identifier bits to
    // take effect.
    NOSUID = syscall.MS_NOSUID

    // NODEV will not interpret character or block special devices on the file
    // system.
    NODEV = syscall.MS_NODEV

    // NOEXEC will not allow execution of any binaries on the mounted file system.
    NOEXEC = syscall.MS_NOEXEC

    // SYNCHRONOUS will allow I/O to the file system to be done synchronously.
    SYNCHRONOUS = syscall.MS_SYNCHRONOUS

    // DIRSYNC will force all directory updates within the file system to be done
    // synchronously. This affects the following system calls: creat, link,
    // unlink, symlink, mkdir, rmdir, mknod and rename.
    DIRSYNC = syscall.MS_DIRSYNC

    // REMOUNT will attempt to remount an already-mounted file system. This is
    // commonly used to change the mount flags for a file system, especially to
    // make a readonly file system writeable. It does not change device or mount
    // point.
    REMOUNT = syscall.MS_REMOUNT

    // MANDLOCK will force mandatory locks on a filesystem.
    MANDLOCK = syscall.MS_MANDLOCK

    // NOATIME will not update the file access time when reading from a file.
    NOATIME = syscall.MS_NOATIME

    // NODIRATIME will not update the directory access time.
    NODIRATIME = syscall.MS_NODIRATIME

    // BIND remounts a subtree somewhere else.
    BIND = syscall.MS_BIND

    // RBIND remounts a subtree and all possible submounts somewhere else.
    RBIND = syscall.MS_BIND | syscall.MS_REC

    // UNBINDABLE creates a mount which cannot be cloned through a bind operation.
    UNBINDABLE = syscall.MS_UNBINDABLE

    // RUNBINDABLE marks the entire mount tree as UNBINDABLE.
    RUNBINDABLE = syscall.MS_UNBINDABLE | syscall.MS_REC

    // PRIVATE creates a mount which carries no propagation abilities.
    PRIVATE = syscall.MS_PRIVATE

    // RPRIVATE marks the entire mount tree as PRIVATE.
    RPRIVATE = syscall.MS_PRIVATE | syscall.MS_REC

    // SLAVE creates a mount which receives propagation from its master, but not
    // vice versa.
    SLAVE = syscall.MS_SLAVE

    // RSLAVE marks the entire mount tree as SLAVE.
    RSLAVE = syscall.MS_SLAVE | syscall.MS_REC

    // SHARED creates a mount which provides the ability to create mirrors of
    // that mount such that mounts and unmounts within any of the mirrors
    // propagate to the other mirrors.
    SHARED = syscall.MS_SHARED

    // RSHARED marks the entire mount tree as SHARED.
    RSHARED = syscall.MS_SHARED | syscall.MS_REC

    // RELATIME updates inode access times relative to modify or change time.
    RELATIME = syscall.MS_RELATIME

    // STRICTATIME allows to explicitly request full atime updates. This makes
    // it possible for the kernel to default to relatime or noatime but still
    // allow userspace to override it.
    STRICTATIME = syscall.MS_STRICTATIME
)
30
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_unsupported.go
generated
vendored
Normal file
@ -0,0 +1,30 @@
// +build !linux,!freebsd freebsd,!cgo

package mount

// These flags are unsupported.
const (
    BIND        = 0
    DIRSYNC     = 0
    MANDLOCK    = 0
    NOATIME     = 0
    NODEV       = 0
    NODIRATIME  = 0
    NOEXEC      = 0
    NOSUID      = 0
    UNBINDABLE  = 0
    RUNBINDABLE = 0
    PRIVATE     = 0
    RPRIVATE    = 0
    SHARED      = 0
    RSHARED     = 0
    SLAVE       = 0
    RSLAVE      = 0
    RBIND       = 0
    RELATIME    = 0
    RELATIVE    = 0
    REMOUNT     = 0
    STRICTATIME = 0
    SYNCHRONOUS = 0
    RDONLY      = 0
)
74
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mount.go
generated
vendored
Normal file
@ -0,0 +1,74 @@
package mount

import (
    "time"
)

// GetMounts retrieves a list of mounts for the current running process.
func GetMounts() ([]*MountInfo, error) {
    return parseMountTable()
}

// Mounted looks at /proc/self/mountinfo to determine whether the specified
// mountpoint has been mounted.
func Mounted(mountpoint string) (bool, error) {
    entries, err := parseMountTable()
    if err != nil {
        return false, err
    }

    // Search the table for the mountpoint
    for _, e := range entries {
        if e.Mountpoint == mountpoint {
            return true, nil
        }
    }
    return false, nil
}

// Mount will mount filesystem according to the specified configuration, on the
// condition that the target path is *not* already mounted. Options must be
// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
// flags.go for supported option flags.
func Mount(device, target, mType, options string) error {
    flag, _ := parseOptions(options)
    if flag&REMOUNT != REMOUNT {
        if mounted, err := Mounted(target); err != nil || mounted {
            return err
        }
    }
    return ForceMount(device, target, mType, options)
}

// ForceMount will mount a filesystem according to the specified configuration,
// *regardless* of whether the target path is already mounted. Options must be
// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
// flags.go for supported option flags.
func ForceMount(device, target, mType, options string) error {
    flag, data := parseOptions(options)
    if err := mount(device, target, mType, uintptr(flag), data); err != nil {
        return err
    }
    return nil
}

// Unmount will unmount the target filesystem, so long as it is mounted.
func Unmount(target string) error {
    if mounted, err := Mounted(target); err != nil || !mounted {
        return err
    }
    return ForceUnmount(target)
}

// ForceUnmount will force an unmount of the target filesystem, regardless if
// it is mounted or not.
func ForceUnmount(target string) (err error) {
    // Simple retry logic for unmount
    for i := 0; i < 10; i++ {
        if err = unmount(target, 0); err == nil {
            return nil
        }
        time.Sleep(100 * time.Millisecond)
    }
    return
}
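A sketch of the higher-level flow these helpers enable: create a read-only bind mount, verify it with Mounted, and tear it down again. The paths are illustrative, root privileges are assumed, and the import path assumes the package's canonical location.

package main

import (
    "fmt"

    "github.com/docker/docker/pkg/mount"
)

func main() {
    src, dst := "/tmp/mount-src", "/tmp/mount-dst" // both directories must already exist

    // "bind,ro" is parsed by parseOptions into MS_BIND|MS_RDONLY; on Linux the
    // mounter then issues the extra MS_REMOUNT pass a read-only bind mount needs.
    if err := mount.Mount(src, dst, "none", "bind,ro"); err != nil {
        panic(err)
    }

    mounted, err := mount.Mounted(dst)
    if err != nil {
        panic(err)
    }
    fmt.Println("mounted:", mounted)

    // Unmount is a no-op if dst is not mounted; ForceUnmount retries regardless.
    if err := mount.Unmount(dst); err != nil {
        panic(err)
    }
}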
137
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mount_test.go
generated
vendored
Normal file
@ -0,0 +1,137 @@
package mount
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestMountOptionsParsing(t *testing.T) {
|
||||
options := "noatime,ro,size=10k"
|
||||
|
||||
flag, data := parseOptions(options)
|
||||
|
||||
if data != "size=10k" {
|
||||
t.Fatalf("Expected size=10 got %s", data)
|
||||
}
|
||||
|
||||
expectedFlag := NOATIME | RDONLY
|
||||
|
||||
if flag != expectedFlag {
|
||||
t.Fatalf("Expected %d got %d", expectedFlag, flag)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMounted(t *testing.T) {
|
||||
tmp := path.Join(os.TempDir(), "mount-tests")
|
||||
if err := os.MkdirAll(tmp, 0777); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
var (
|
||||
sourceDir = path.Join(tmp, "source")
|
||||
targetDir = path.Join(tmp, "target")
|
||||
sourcePath = path.Join(sourceDir, "file.txt")
|
||||
targetPath = path.Join(targetDir, "file.txt")
|
||||
)
|
||||
|
||||
os.Mkdir(sourceDir, 0777)
|
||||
os.Mkdir(targetDir, 0777)
|
||||
|
||||
f, err := os.Create(sourcePath)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
f.WriteString("hello")
|
||||
f.Close()
|
||||
|
||||
f, err = os.Create(targetPath)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
f.Close()
|
||||
|
||||
if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer func() {
|
||||
if err := Unmount(targetDir); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}()
|
||||
|
||||
mounted, err := Mounted(targetDir)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !mounted {
|
||||
t.Fatalf("Expected %s to be mounted", targetDir)
|
||||
}
|
||||
if _, err := os.Stat(targetDir); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMountReadonly(t *testing.T) {
|
||||
tmp := path.Join(os.TempDir(), "mount-tests")
|
||||
if err := os.MkdirAll(tmp, 0777); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
var (
|
||||
sourceDir = path.Join(tmp, "source")
|
||||
targetDir = path.Join(tmp, "target")
|
||||
sourcePath = path.Join(sourceDir, "file.txt")
|
||||
targetPath = path.Join(targetDir, "file.txt")
|
||||
)
|
||||
|
||||
os.Mkdir(sourceDir, 0777)
|
||||
os.Mkdir(targetDir, 0777)
|
||||
|
||||
f, err := os.Create(sourcePath)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
f.WriteString("hello")
|
||||
f.Close()
|
||||
|
||||
f, err = os.Create(targetPath)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
f.Close()
|
||||
|
||||
if err := Mount(sourceDir, targetDir, "none", "bind,ro"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer func() {
|
||||
if err := Unmount(targetDir); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}()
|
||||
|
||||
f, err = os.OpenFile(targetPath, os.O_RDWR, 0777)
|
||||
if err == nil {
|
||||
t.Fatal("Should not be able to open a ro file as rw")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetMounts(t *testing.T) {
|
||||
mounts, err := GetMounts()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
root := false
|
||||
for _, entry := range mounts {
|
||||
if entry.Mountpoint == "/" {
|
||||
root = true
|
||||
}
|
||||
}
|
||||
|
||||
if !root {
|
||||
t.Fatal("/ should be mounted at least")
|
||||
}
|
||||
}
|
59
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_freebsd.go
generated
vendored
Normal file
@ -0,0 +1,59 @@
package mount

/*
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/_iovec.h>
#include <sys/mount.h>
#include <sys/param.h>
*/
import "C"

import (
    "fmt"
    "strings"
    "syscall"
    "unsafe"
)

func allocateIOVecs(options []string) []C.struct_iovec {
    out := make([]C.struct_iovec, len(options))
    for i, option := range options {
        out[i].iov_base = unsafe.Pointer(C.CString(option))
        out[i].iov_len = C.size_t(len(option) + 1)
    }
    return out
}

func mount(device, target, mType string, flag uintptr, data string) error {
    isNullFS := false

    xs := strings.Split(data, ",")
    for _, x := range xs {
        if x == "bind" {
            isNullFS = true
        }
    }

    options := []string{"fspath", target}
    if isNullFS {
        options = append(options, "fstype", "nullfs", "target", device)
    } else {
        options = append(options, "fstype", mType, "from", device)
    }
    rawOptions := allocateIOVecs(options)
    for _, rawOption := range rawOptions {
        defer C.free(rawOption.iov_base)
    }

    if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 {
        reason := C.GoString(C.strerror(*C.__error()))
        return fmt.Errorf("Failed to call nmount: %s", reason)
    }
    return nil
}

func unmount(target string, flag int) error {
    return syscall.Unmount(target, flag)
}
21
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_linux.go
generated
vendored
Normal file
@ -0,0 +1,21 @@
package mount

import (
    "syscall"
)

func mount(device, target, mType string, flag uintptr, data string) error {
    if err := syscall.Mount(device, target, mType, flag, data); err != nil {
        return err
    }

    // If we have a bind mount or remount, remount...
    if flag&syscall.MS_BIND == syscall.MS_BIND && flag&syscall.MS_RDONLY == syscall.MS_RDONLY {
        return syscall.Mount(device, target, mType, flag|syscall.MS_REMOUNT, data)
    }
    return nil
}

func unmount(target string, flag int) error {
    return syscall.Unmount(target, flag)
}
11
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_unsupported.go
generated
vendored
Normal file
@ -0,0 +1,11 @@
// +build !linux,!freebsd freebsd,!cgo

package mount

func mount(device, target, mType string, flag uintptr, data string) error {
    panic("Not implemented")
}

func unmount(target string, flag int) error {
    panic("Not implemented")
}
40
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo.go
generated
vendored
Normal file
@ -0,0 +1,40 @@
package mount

// MountInfo reveals information about a particular mounted filesystem. This
// struct is populated from the content in the /proc/<pid>/mountinfo file.
type MountInfo struct {
    // Id is a unique identifier of the mount (may be reused after umount).
    Id int

    // Parent indicates the ID of the mount parent (or of self for the top of the
    // mount tree).
    Parent int

    // Major indicates one half of the device ID which identifies the device class.
    Major int

    // Minor indicates one half of the device ID which identifies a specific
    // instance of device.
    Minor int

    // Root of the mount within the filesystem.
    Root string

    // Mountpoint indicates the mount point relative to the process's root.
    Mountpoint string

    // Opts represents mount-specific options.
    Opts string

    // Optional represents optional fields.
    Optional string

    // Fstype indicates the type of filesystem, such as EXT3.
    Fstype string

    // Source indicates filesystem specific information or "none".
    Source string

    // VfsOpts represents per super block options.
    VfsOpts string
}
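For reference, a small sketch of walking these entries to report how the root filesystem is mounted, using only the fields defined above (the import path assumes the package's canonical location).

package main

import (
    "fmt"

    "github.com/docker/docker/pkg/mount"
)

func main() {
    infos, err := mount.GetMounts()
    if err != nil {
        panic(err)
    }

    for _, m := range infos {
        if m.Mountpoint == "/" {
            // e.g. "/ is ext4, mounted from /dev/sda1 with opts rw,relatime"
            fmt.Printf("/ is %s, mounted from %s with opts %s\n", m.Fstype, m.Source, m.Opts)
        }
    }
}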
41
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go
generated
vendored
Normal file
@ -0,0 +1,41 @@
package mount

/*
#include <sys/param.h>
#include <sys/ucred.h>
#include <sys/mount.h>
*/
import "C"

import (
    "fmt"
    "reflect"
    "unsafe"
)

// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
// bind mounts.
func parseMountTable() ([]*MountInfo, error) {
    var rawEntries *C.struct_statfs

    count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT))
    if count == 0 {
        return nil, fmt.Errorf("Failed to call getmntinfo")
    }

    var entries []C.struct_statfs
    header := (*reflect.SliceHeader)(unsafe.Pointer(&entries))
    header.Cap = count
    header.Len = count
    header.Data = uintptr(unsafe.Pointer(rawEntries))

    var out []*MountInfo
    for _, entry := range entries {
        var mountinfo MountInfo
        mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0])
        mountinfo.Source = C.GoString(&entry.f_mntfromname[0])
        mountinfo.Fstype = C.GoString(&entry.f_fstypename[0])
        out = append(out, &mountinfo)
    }
    return out, nil
}
95
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_linux.go
generated
vendored
Normal file
@ -0,0 +1,95 @@
// +build linux

package mount

import (
    "bufio"
    "fmt"
    "io"
    "os"
    "strings"
)

const (
    /* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
       (1)(2)(3)   (4)   (5)      (6)      (7)   (8) (9)   (10)         (11)

       (1) mount ID:  unique identifier of the mount (may be reused after umount)
       (2) parent ID:  ID of parent (or of self for the top of the mount tree)
       (3) major:minor:  value of st_dev for files on filesystem
       (4) root:  root of the mount within the filesystem
       (5) mount point:  mount point relative to the process's root
       (6) mount options:  per mount options
       (7) optional fields:  zero or more fields of the form "tag[:value]"
       (8) separator:  marks the end of the optional fields
       (9) filesystem type:  name of filesystem of the form "type[.subtype]"
       (10) mount source:  filesystem specific information or "none"
       (11) super options:  per super block options*/
    mountinfoFormat = "%d %d %d:%d %s %s %s %s"
)

// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
// bind mounts
func parseMountTable() ([]*MountInfo, error) {
    f, err := os.Open("/proc/self/mountinfo")
    if err != nil {
        return nil, err
    }
    defer f.Close()

    return parseInfoFile(f)
}

func parseInfoFile(r io.Reader) ([]*MountInfo, error) {
    var (
        s   = bufio.NewScanner(r)
        out = []*MountInfo{}
    )

    for s.Scan() {
        if err := s.Err(); err != nil {
            return nil, err
        }

        var (
            p              = &MountInfo{}
            text           = s.Text()
            optionalFields string
        )

        if _, err := fmt.Sscanf(text, mountinfoFormat,
            &p.Id, &p.Parent, &p.Major, &p.Minor,
            &p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil {
            return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err)
        }
        // Safe as mountinfo encodes mountpoints with spaces as \040.
        index := strings.Index(text, " - ")
        postSeparatorFields := strings.Fields(text[index+3:])
        if len(postSeparatorFields) < 3 {
            return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text)
        }

        if optionalFields != "-" {
            p.Optional = optionalFields
        }

        p.Fstype = postSeparatorFields[0]
        p.Source = postSeparatorFields[1]
        p.VfsOpts = strings.Join(postSeparatorFields[2:], " ")
        out = append(out, p)
    }
    return out, nil
}

// PidMountInfo collects the mounts for a specific process ID. If the process
// ID is unknown, it is better to use `GetMounts` which will inspect
// "/proc/self/mountinfo" instead.
func PidMountInfo(pid int) ([]*MountInfo, error) {
    f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
    if err != nil {
        return nil, err
    }
    defer f.Close()

    return parseInfoFile(f)
}
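A minimal in-package sketch of the parser on the sample line from the format comment above. It calls the unexported parseInfoFile, so it assumes it is compiled into package mount, for example as an extra _test.go file; the expected values follow directly from the format described above.

package mount

import (
    "strings"
    "testing"
)

func TestParseInfoFileSketch(t *testing.T) {
    // The sample line documented in mountinfo_linux.go.
    line := "36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue"

    infos, err := parseInfoFile(strings.NewReader(line))
    if err != nil {
        t.Fatal(err)
    }

    m := infos[0]
    // Fields before " - " fill Id..Opts and the optional fields; the rest fill
    // Fstype, Source and VfsOpts.
    if m.Mountpoint != "/mnt2" || m.Fstype != "ext3" || m.Source != "/dev/root" {
        t.Fatalf("unexpected parse result: %+v", m)
    }
}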
477
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_linux_test.go
generated
vendored
Normal file
@ -0,0 +1,477 @@
// +build linux
|
||||
|
||||
package mount
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
)
|
||||
|
||||
const (
|
||||
fedoraMountinfo = `15 35 0:3 / /proc rw,nosuid,nodev,noexec,relatime shared:5 - proc proc rw
|
||||
16 35 0:14 / /sys rw,nosuid,nodev,noexec,relatime shared:6 - sysfs sysfs rw,seclabel
|
||||
17 35 0:5 / /dev rw,nosuid shared:2 - devtmpfs devtmpfs rw,seclabel,size=8056484k,nr_inodes=2014121,mode=755
|
||||
18 16 0:15 / /sys/kernel/security rw,nosuid,nodev,noexec,relatime shared:7 - securityfs securityfs rw
|
||||
19 16 0:13 / /sys/fs/selinux rw,relatime shared:8 - selinuxfs selinuxfs rw
|
||||
20 17 0:16 / /dev/shm rw,nosuid,nodev shared:3 - tmpfs tmpfs rw,seclabel
|
||||
21 17 0:10 / /dev/pts rw,nosuid,noexec,relatime shared:4 - devpts devpts rw,seclabel,gid=5,mode=620,ptmxmode=000
|
||||
22 35 0:17 / /run rw,nosuid,nodev shared:21 - tmpfs tmpfs rw,seclabel,mode=755
|
||||
23 16 0:18 / /sys/fs/cgroup rw,nosuid,nodev,noexec shared:9 - tmpfs tmpfs rw,seclabel,mode=755
|
||||
24 23 0:19 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime shared:10 - cgroup cgroup rw,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd
|
||||
25 16 0:20 / /sys/fs/pstore rw,nosuid,nodev,noexec,relatime shared:20 - pstore pstore rw
|
||||
26 23 0:21 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:11 - cgroup cgroup rw,cpuset,clone_children
|
||||
27 23 0:22 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:12 - cgroup cgroup rw,cpuacct,cpu,clone_children
|
||||
28 23 0:23 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:13 - cgroup cgroup rw,memory,clone_children
|
||||
29 23 0:24 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:14 - cgroup cgroup rw,devices,clone_children
|
||||
30 23 0:25 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime shared:15 - cgroup cgroup rw,freezer,clone_children
|
||||
31 23 0:26 / /sys/fs/cgroup/net_cls rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,net_cls,clone_children
|
||||
32 23 0:27 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime shared:17 - cgroup cgroup rw,blkio,clone_children
|
||||
33 23 0:28 / /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,perf_event,clone_children
|
||||
34 23 0:29 / /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime shared:19 - cgroup cgroup rw,hugetlb,clone_children
|
||||
35 1 253:2 / / rw,relatime shared:1 - ext4 /dev/mapper/ssd-root--f20 rw,seclabel,data=ordered
|
||||
36 15 0:30 / /proc/sys/fs/binfmt_misc rw,relatime shared:22 - autofs systemd-1 rw,fd=38,pgrp=1,timeout=300,minproto=5,maxproto=5,direct
|
||||
37 17 0:12 / /dev/mqueue rw,relatime shared:23 - mqueue mqueue rw,seclabel
|
||||
38 35 0:31 / /tmp rw shared:24 - tmpfs tmpfs rw,seclabel
|
||||
39 17 0:32 / /dev/hugepages rw,relatime shared:25 - hugetlbfs hugetlbfs rw,seclabel
|
||||
40 16 0:7 / /sys/kernel/debug rw,relatime shared:26 - debugfs debugfs rw
|
||||
41 16 0:33 / /sys/kernel/config rw,relatime shared:27 - configfs configfs rw
|
||||
42 35 0:34 / /var/lib/nfs/rpc_pipefs rw,relatime shared:28 - rpc_pipefs sunrpc rw
|
||||
43 15 0:35 / /proc/fs/nfsd rw,relatime shared:29 - nfsd sunrpc rw
|
||||
45 35 8:17 / /boot rw,relatime shared:30 - ext4 /dev/sdb1 rw,seclabel,data=ordered
|
||||
46 35 253:4 / /home rw,relatime shared:31 - ext4 /dev/mapper/ssd-home rw,seclabel,data=ordered
|
||||
47 35 253:5 / /var/lib/libvirt/images rw,noatime,nodiratime shared:32 - ext4 /dev/mapper/ssd-virt rw,seclabel,discard,data=ordered
|
||||
48 35 253:12 / /mnt/old rw,relatime shared:33 - ext4 /dev/mapper/HelpDeskRHEL6-FedoraRoot rw,seclabel,data=ordered
|
||||
121 22 0:36 / /run/user/1000/gvfs rw,nosuid,nodev,relatime shared:104 - fuse.gvfsd-fuse gvfsd-fuse rw,user_id=1000,group_id=1000
|
||||
124 16 0:37 / /sys/fs/fuse/connections rw,relatime shared:107 - fusectl fusectl rw
|
||||
165 38 253:3 / /tmp/mnt rw,relatime shared:147 - ext4 /dev/mapper/ssd-root rw,seclabel,data=ordered
|
||||
167 35 253:15 / /var/lib/docker/devicemapper/mnt/aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,relatime shared:149 - ext4 /dev/mapper/docker-253:2-425882-aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,seclabel,discard,stripe=16,data=ordered
|
||||
171 35 253:16 / /var/lib/docker/devicemapper/mnt/c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,relatime shared:153 - ext4 /dev/mapper/docker-253:2-425882-c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,seclabel,discard,stripe=16,data=ordered
|
||||
175 35 253:17 / /var/lib/docker/devicemapper/mnt/1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,relatime shared:157 - ext4 /dev/mapper/docker-253:2-425882-1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,seclabel,discard,stripe=16,data=ordered
|
||||
179 35 253:18 / /var/lib/docker/devicemapper/mnt/d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,relatime shared:161 - ext4 /dev/mapper/docker-253:2-425882-d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,seclabel,discard,stripe=16,data=ordered
|
||||
183 35 253:19 / /var/lib/docker/devicemapper/mnt/6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,relatime shared:165 - ext4 /dev/mapper/docker-253:2-425882-6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,seclabel,discard,stripe=16,data=ordered
|
||||
187 35 253:20 / /var/lib/docker/devicemapper/mnt/8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,relatime shared:169 - ext4 /dev/mapper/docker-253:2-425882-8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,seclabel,discard,stripe=16,data=ordered
|
||||
191 35 253:21 / /var/lib/docker/devicemapper/mnt/c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,relatime shared:173 - ext4 /dev/mapper/docker-253:2-425882-c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,seclabel,discard,stripe=16,data=ordered
|
||||
195 35 253:22 / /var/lib/docker/devicemapper/mnt/2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,relatime shared:177 - ext4 /dev/mapper/docker-253:2-425882-2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,seclabel,discard,stripe=16,data=ordered
|
||||
199 35 253:23 / /var/lib/docker/devicemapper/mnt/37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,relatime shared:181 - ext4 /dev/mapper/docker-253:2-425882-37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,seclabel,discard,stripe=16,data=ordered
|
||||
203 35 253:24 / /var/lib/docker/devicemapper/mnt/aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,relatime shared:185 - ext4 /dev/mapper/docker-253:2-425882-aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,seclabel,discard,stripe=16,data=ordered
|
||||
207 35 253:25 / /var/lib/docker/devicemapper/mnt/928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,relatime shared:189 - ext4 /dev/mapper/docker-253:2-425882-928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,seclabel,discard,stripe=16,data=ordered
|
||||
211 35 253:26 / /var/lib/docker/devicemapper/mnt/0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,relatime shared:193 - ext4 /dev/mapper/docker-253:2-425882-0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,seclabel,discard,stripe=16,data=ordered
|
||||
215 35 253:27 / /var/lib/docker/devicemapper/mnt/d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,relatime shared:197 - ext4 /dev/mapper/docker-253:2-425882-d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,seclabel,discard,stripe=16,data=ordered
|
||||
219 35 253:28 / /var/lib/docker/devicemapper/mnt/bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,relatime shared:201 - ext4 /dev/mapper/docker-253:2-425882-bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,seclabel,discard,stripe=16,data=ordered
|
||||
223 35 253:29 / /var/lib/docker/devicemapper/mnt/7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,relatime shared:205 - ext4 /dev/mapper/docker-253:2-425882-7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,seclabel,discard,stripe=16,data=ordered
|
||||
227 35 253:30 / /var/lib/docker/devicemapper/mnt/c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,relatime shared:209 - ext4 /dev/mapper/docker-253:2-425882-c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,seclabel,discard,stripe=16,data=ordered
|
||||
231 35 253:31 / /var/lib/docker/devicemapper/mnt/8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,relatime shared:213 - ext4 /dev/mapper/docker-253:2-425882-8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,seclabel,discard,stripe=16,data=ordered
|
||||
235 35 253:32 / /var/lib/docker/devicemapper/mnt/1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,relatime shared:217 - ext4 /dev/mapper/docker-253:2-425882-1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,seclabel,discard,stripe=16,data=ordered
|
||||
239 35 253:33 / /var/lib/docker/devicemapper/mnt/e9aa60c60128cad1 rw,relatime shared:221 - ext4 /dev/mapper/docker-253:2-425882-e9aa60c60128cad1 rw,seclabel,discard,stripe=16,data=ordered
|
||||
243 35 253:34 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,relatime shared:225 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,seclabel,discard,stripe=16,data=ordered
|
||||
247 35 253:35 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,relatime shared:229 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,seclabel,discard,stripe=16,data=ordered
|
||||
31 21 0:23 / /DATA/foo_bla_bla rw,relatime - cifs //foo/BLA\040BLA\040BLA/ rw,sec=ntlm,cache=loose,unc=\\foo\BLA BLA BLA,username=my_login,domain=mydomain.com,uid=12345678,forceuid,gid=12345678,forcegid,addr=10.1.30.10,file_mode=0755,dir_mode=0755,nounix,rsize=61440,wsize=65536,actimeo=1`
|
||||
|
||||
ubuntuMountInfo = `15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw
|
||||
16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw
|
||||
17 20 0:5 / /dev rw,relatime - devtmpfs udev rw,size=1015140k,nr_inodes=253785,mode=755
|
||||
18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000
|
||||
19 20 0:15 / /run rw,nosuid,noexec,relatime - tmpfs tmpfs rw,size=205044k,mode=755
|
||||
20 1 253:0 / / rw,relatime - ext4 /dev/disk/by-label/DOROOT rw,errors=remount-ro,data=ordered
|
||||
21 15 0:16 / /sys/fs/cgroup rw,relatime - tmpfs none rw,size=4k,mode=755
|
||||
22 15 0:17 / /sys/fs/fuse/connections rw,relatime - fusectl none rw
|
||||
23 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw
|
||||
24 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw
|
||||
25 19 0:18 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k
|
||||
26 21 0:19 / /sys/fs/cgroup/cpuset rw,relatime - cgroup cgroup rw,cpuset,clone_children
|
||||
27 19 0:20 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw
|
||||
28 21 0:21 / /sys/fs/cgroup/cpu rw,relatime - cgroup cgroup rw,cpu
|
||||
29 19 0:22 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=102400k,mode=755
|
||||
30 15 0:23 / /sys/fs/pstore rw,relatime - pstore none rw
|
||||
31 21 0:24 / /sys/fs/cgroup/cpuacct rw,relatime - cgroup cgroup rw,cpuacct
|
||||
32 21 0:25 / /sys/fs/cgroup/memory rw,relatime - cgroup cgroup rw,memory
|
||||
33 21 0:26 / /sys/fs/cgroup/devices rw,relatime - cgroup cgroup rw,devices
|
||||
34 21 0:27 / /sys/fs/cgroup/freezer rw,relatime - cgroup cgroup rw,freezer
|
||||
35 21 0:28 / /sys/fs/cgroup/blkio rw,relatime - cgroup cgroup rw,blkio
|
||||
36 21 0:29 / /sys/fs/cgroup/perf_event rw,relatime - cgroup cgroup rw,perf_event
|
||||
37 21 0:30 / /sys/fs/cgroup/hugetlb rw,relatime - cgroup cgroup rw,hugetlb
|
||||
38 21 0:31 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime - cgroup systemd rw,name=systemd
|
||||
39 20 0:32 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=caafa54fdc06525
|
||||
40 20 0:33 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8-init rw,relatime - aufs none rw,si=caafa54f882b525
|
||||
41 20 0:34 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8 rw,relatime - aufs none rw,si=caafa54f8829525
|
||||
42 20 0:35 / /var/lib/docker/aufs/mnt/16f4d7e96dd612903f425bfe856762f291ff2e36a8ecd55a2209b7d7cd81c30b rw,relatime - aufs none rw,si=caafa54f882d525
|
||||
43 20 0:36 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e-init rw,relatime - aufs none rw,si=caafa54f882f525
|
||||
44 20 0:37 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e rw,relatime - aufs none rw,si=caafa54f88ba525
|
||||
45 20 0:38 / /var/lib/docker/aufs/mnt/283f35a910233c756409313be71ecd8fcfef0df57108b8d740b61b3e88860452 rw,relatime - aufs none rw,si=caafa54f88b8525
|
||||
46 20 0:39 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1-init rw,relatime - aufs none rw,si=caafa54f88be525
|
||||
47 20 0:40 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1 rw,relatime - aufs none rw,si=caafa54f882c525
|
||||
48 20 0:41 / /var/lib/docker/aufs/mnt/de2b538c97d6366cc80e8658547c923ea1d042f85580df379846f36a4df7049d rw,relatime - aufs none rw,si=caafa54f85bb525
|
||||
49 20 0:42 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49-init rw,relatime - aufs none rw,si=caafa54fdc00525
|
||||
50 20 0:43 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49 rw,relatime - aufs none rw,si=caafa54fbaec525
|
||||
51 20 0:44 / /var/lib/docker/aufs/mnt/6ac1cace985c9fc9bea32234de8b36dba49bdd5e29a2972b327ff939d78a6274 rw,relatime - aufs none rw,si=caafa54f8e1a525
|
||||
52 20 0:45 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b-init rw,relatime - aufs none rw,si=caafa54f8e1d525
|
||||
53 20 0:46 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b rw,relatime - aufs none rw,si=caafa54f8e1b525
|
||||
54 20 0:47 / /var/lib/docker/aufs/mnt/cabb117d997f0f93519185aea58389a9762770b7496ed0b74a3e4a083fa45902 rw,relatime - aufs none rw,si=caafa54f810a525
|
||||
55 20 0:48 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33-init rw,relatime - aufs none rw,si=caafa54f8529525
|
||||
56 20 0:49 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33 rw,relatime - aufs none rw,si=caafa54f852f525
|
||||
57 20 0:50 / /var/lib/docker/aufs/mnt/16a1526fa445b84ce84f89506d219e87fa488a814063baf045d88b02f21166b3 rw,relatime - aufs none rw,si=caafa54f9e1d525
|
||||
58 20 0:51 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f-init rw,relatime - aufs none rw,si=caafa54f854d525
|
||||
59 20 0:52 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f rw,relatime - aufs none rw,si=caafa54f854e525
|
||||
60 20 0:53 / /var/lib/docker/aufs/mnt/e370c3e286bea027917baa0e4d251262681a472a87056e880dfd0513516dffd9 rw,relatime - aufs none rw,si=caafa54f840a525
|
||||
61 20 0:54 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e-init rw,relatime - aufs none rw,si=caafa54f8408525
|
||||
62 20 0:55 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e rw,relatime - aufs none rw,si=caafa54f8409525
|
||||
63 20 0:56 / /var/lib/docker/aufs/mnt/abd0b5ea5d355a67f911475e271924a5388ee60c27185fcd60d095afc4a09dc7 rw,relatime - aufs none rw,si=caafa54f9eb1525
|
||||
64 20 0:57 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2-init rw,relatime - aufs none rw,si=caafa54f85bf525
|
||||
65 20 0:58 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2 rw,relatime - aufs none rw,si=caafa54f85b8525
|
||||
66 20 0:59 / /var/lib/docker/aufs/mnt/912e1bf28b80a09644503924a8a1a4fb8ed10b808ca847bda27a369919aa52fa rw,relatime - aufs none rw,si=caafa54fbaea525
|
||||
67 20 0:60 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576-init rw,relatime - aufs none rw,si=caafa54f8472525
|
||||
68 20 0:61 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576 rw,relatime - aufs none rw,si=caafa54f8474525
|
||||
69 20 0:62 / /var/lib/docker/aufs/mnt/5aaebb79ef3097dfca377889aeb61a0c9d5e3795117d2b08d0751473c671dfb2 rw,relatime - aufs none rw,si=caafa54f8c5e525
|
||||
70 20 0:63 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2-init rw,relatime - aufs none rw,si=caafa54f8c3b525
|
||||
71 20 0:64 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2 rw,relatime - aufs none rw,si=caafa54f8c3d525
|
||||
72 20 0:65 / /var/lib/docker/aufs/mnt/2777f0763da4de93f8bebbe1595cc77f739806a158657b033eca06f827b6028a rw,relatime - aufs none rw,si=caafa54f8c3e525
|
||||
73 20 0:66 / /var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e-init rw,relatime - aufs none rw,si=caafa54f8c39525
|
||||
74 20 0:67 / /var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e rw,relatime - aufs none rw,si=caafa54f854f525
|
||||
75 20 0:68 / /var/lib/docker/aufs/mnt/06400b526ec18b66639c96efc41a84f4ae0b117cb28dafd56be420651b4084a0 rw,relatime - aufs none rw,si=caafa54f840b525
|
||||
76 20 0:69 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785-init rw,relatime - aufs none rw,si=caafa54fdddf525
|
||||
77 20 0:70 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785 rw,relatime - aufs none rw,si=caafa54f854b525
|
||||
78 20 0:71 / /var/lib/docker/aufs/mnt/1ff414fa93fd61ec81b0ab7b365a841ff6545accae03cceac702833aaeaf718f rw,relatime - aufs none rw,si=caafa54f8d85525
|
||||
79 20 0:72 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8-init rw,relatime - aufs none rw,si=caafa54f8da3525
|
||||
80 20 0:73 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8 rw,relatime - aufs none rw,si=caafa54f8da2525
|
||||
81 20 0:74 / /var/lib/docker/aufs/mnt/b68b1d4fe4d30016c552398e78b379a39f651661d8e1fa5f2460c24a5e723420 rw,relatime - aufs none rw,si=caafa54f8d81525
|
||||
82 20 0:75 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739-init rw,relatime - aufs none rw,si=caafa54f8da1525
|
||||
83 20 0:76 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739 rw,relatime - aufs none rw,si=caafa54f8da0525
|
||||
84 20 0:77 / /var/lib/docker/aufs/mnt/53e10b0329afc0e0d3322d31efaed4064139dc7027fe6ae445cffd7104bcc94f rw,relatime - aufs none rw,si=caafa54f8c35525
|
||||
85 20 0:78 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494-init rw,relatime - aufs none rw,si=caafa54f8db8525
|
||||
86 20 0:79 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494 rw,relatime - aufs none rw,si=caafa54f8dba525
|
||||
87 20 0:80 / /var/lib/docker/aufs/mnt/90fdd2c03eeaf65311f88f4200e18aef6d2772482712d9aea01cd793c64781b5 rw,relatime - aufs none rw,si=caafa54f8315525
|
||||
88 20 0:81 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f-init rw,relatime - aufs none rw,si=caafa54f8fc6525
|
||||
89 20 0:82 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f rw,relatime - aufs none rw,si=caafa54f8468525
|
||||
90 20 0:83 / /var/lib/docker/aufs/mnt/8cf9a993f50f3305abad3da268c0fc44ff78a1e7bba595ef9de963497496c3f9 rw,relatime - aufs none rw,si=caafa54f8c59525
|
||||
91 20 0:84 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173-init rw,relatime - aufs none rw,si=caafa54f846a525
|
||||
92 20 0:85 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173 rw,relatime - aufs none rw,si=caafa54f846b525
|
||||
93 20 0:86 / /var/lib/docker/aufs/mnt/d8c8288ec920439a48b5796bab5883ee47a019240da65e8d8f33400c31bac5df rw,relatime - aufs none rw,si=caafa54f8dbf525
|
||||
94 20 0:87 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6-init rw,relatime - aufs none rw,si=caafa54f810f525
|
||||
95 20 0:88 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6 rw,relatime - aufs none rw,si=caafa54fbae9525
|
||||
96 20 0:89 / /var/lib/docker/aufs/mnt/befc1c67600df449dddbe796c0d06da7caff1d2bbff64cde1f0ba82d224996b5 rw,relatime - aufs none rw,si=caafa54f8dab525
|
||||
97 20 0:90 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562-init rw,relatime - aufs none rw,si=caafa54fdc02525
|
||||
98 20 0:91 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562 rw,relatime - aufs none rw,si=caafa54f9eb0525
|
||||
99 20 0:92 / /var/lib/docker/aufs/mnt/2a31f10029f04ff9d4381167a9b739609853d7220d55a56cb654779a700ee246 rw,relatime - aufs none rw,si=caafa54f8c37525
|
||||
100 20 0:93 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927-init rw,relatime - aufs none rw,si=caafa54fd173525
|
||||
101 20 0:94 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927 rw,relatime - aufs none rw,si=caafa54f8108525
|
||||
102 20 0:95 / /var/lib/docker/aufs/mnt/eaa0f57403a3dc685268f91df3fbcd7a8423cee50e1a9ee5c3e1688d9d676bb4 rw,relatime - aufs none rw,si=caafa54f852d525
|
||||
103 20 0:96 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b-init rw,relatime - aufs none rw,si=caafa54f8d80525
|
||||
104 20 0:97 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b rw,relatime - aufs none rw,si=caafa54f8fc3525
|
||||
105 20 0:98 / /var/lib/docker/aufs/mnt/d1b322ae17613c6adee84e709641a9244ac56675244a89a64dc0075075fcbb83 rw,relatime - aufs none rw,si=caafa54f8c58525
|
||||
106 20 0:99 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd-init rw,relatime - aufs none rw,si=caafa54f8c63525
|
||||
107 20 0:100 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd rw,relatime - aufs none rw,si=caafa54f8c67525
|
||||
108 20 0:101 / /var/lib/docker/aufs/mnt/bc9d2a264158f83a617a069bf17cbbf2a2ba453db7d3951d9dc63cc1558b1c2b rw,relatime - aufs none rw,si=caafa54f8dbe525
|
||||
109 20 0:102 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99-init rw,relatime - aufs none rw,si=caafa54f9e0d525
|
||||
110 20 0:103 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99 rw,relatime - aufs none rw,si=caafa54f9e1b525
|
||||
111 20 0:104 / /var/lib/docker/aufs/mnt/d4dca7b02569c732e740071e1c654d4ad282de5c41edb619af1f0aafa618be26 rw,relatime - aufs none rw,si=caafa54f8dae525
|
||||
112 20 0:105 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7-init rw,relatime - aufs none rw,si=caafa54f8c5c525
|
||||
113 20 0:106 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7 rw,relatime - aufs none rw,si=caafa54fd172525
|
||||
114 20 0:107 / /var/lib/docker/aufs/mnt/e60c57499c0b198a6734f77f660cdbbd950a5b78aa23f470ca4f0cfcc376abef rw,relatime - aufs none rw,si=caafa54909c4525
|
||||
115 20 0:108 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35-init rw,relatime - aufs none rw,si=caafa54909c3525
|
||||
116 20 0:109 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35 rw,relatime - aufs none rw,si=caafa54909c7525
|
||||
117 20 0:110 / /var/lib/docker/aufs/mnt/2997be666d58b9e71469759bcb8bd9608dad0e533a1a7570a896919ba3388825 rw,relatime - aufs none rw,si=caafa54f8557525
|
||||
118 20 0:111 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93-init rw,relatime - aufs none rw,si=caafa54c6e88525
|
||||
119 20 0:112 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93 rw,relatime - aufs none rw,si=caafa54c6e8e525
|
||||
120 20 0:113 / /var/lib/docker/aufs/mnt/a672a1e2f2f051f6e19ed1dfbe80860a2d774174c49f7c476695f5dd1d5b2f67 rw,relatime - aufs none rw,si=caafa54c6e15525
|
||||
121 20 0:114 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420-init rw,relatime - aufs none rw,si=caafa54f8dad525
|
||||
122 20 0:115 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420 rw,relatime - aufs none rw,si=caafa54f8d84525
|
||||
123 20 0:116 / /var/lib/docker/aufs/mnt/2abc86007aca46fb4a817a033e2a05ccacae40b78ea4b03f8ea616b9ada40e2e rw,relatime - aufs none rw,si=caafa54c6e8b525
|
||||
124 20 0:117 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374-init rw,relatime - aufs none rw,si=caafa54c6e8d525
|
||||
125 20 0:118 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374 rw,relatime - aufs none rw,si=caafa54f8c34525
|
||||
126 20 0:119 / /var/lib/docker/aufs/mnt/2f95ca1a629cea8363b829faa727dd52896d5561f2c96ddee4f697ea2fc872c2 rw,relatime - aufs none rw,si=caafa54c6e8a525
|
||||
127 20 0:120 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2-init rw,relatime - aufs none rw,si=caafa54f8e19525
|
||||
128 20 0:121 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2 rw,relatime - aufs none rw,si=caafa54fa8c6525
|
||||
129 20 0:122 / /var/lib/docker/aufs/mnt/c1d04dfdf8cccb3676d5a91e84e9b0781ce40623d127d038bcfbe4c761b27401 rw,relatime - aufs none rw,si=caafa54f8c30525
|
||||
130 20 0:123 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a-init rw,relatime - aufs none rw,si=caafa54c6e1a525
|
||||
131 20 0:124 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a rw,relatime - aufs none rw,si=caafa54c6e1c525
|
||||
132 20 0:125 / /var/lib/docker/aufs/mnt/5ae3b6fccb1539fc02d420e86f3e9637bef5b711fed2ca31a2f426c8f5deddbf rw,relatime - aufs none rw,si=caafa54c4fea525
|
||||
133 20 0:126 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0-init rw,relatime - aufs none rw,si=caafa54c6e1e525
|
||||
134 20 0:127 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0 rw,relatime - aufs none rw,si=caafa54fa8c0525
|
||||
135 20 0:128 / /var/lib/docker/aufs/mnt/f382bd5aaccaf2d04a59089ac7cb12ec87efd769fd0c14d623358fbfd2a3f896 rw,relatime - aufs none rw,si=caafa54c4fec525
|
||||
136 20 0:129 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735-init rw,relatime - aufs none rw,si=caafa54c4fef525
|
||||
137 20 0:130 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735 rw,relatime - aufs none rw,si=caafa54c4feb525
|
||||
138 20 0:131 / /var/lib/docker/aufs/mnt/a9c5ee0854dc083b6bf62b7eb1e5291aefbb10702289a446471ce73aba0d5d7d rw,relatime - aufs none rw,si=caafa54909c6525
|
||||
139 20 0:134 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0-init rw,relatime - aufs none rw,si=caafa54804fe525
|
||||
140 20 0:135 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0 rw,relatime - aufs none rw,si=caafa54804fa525
|
||||
141 20 0:136 / /var/lib/docker/aufs/mnt/7ec3277e5c04c907051caf9c9c35889f5fcd6463e5485971b25404566830bb70 rw,relatime - aufs none rw,si=caafa54804f9525
|
||||
142 20 0:139 / /var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8-init rw,relatime - aufs none rw,si=caafa54c6ef6525
|
||||
143 20 0:140 / /var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8 rw,relatime - aufs none rw,si=caafa54c6ef5525
|
||||
144 20 0:356 / /var/lib/docker/aufs/mnt/e6ecde9e2c18cd3c75f424c67b6d89685cfee0fc67abf2cb6bdc0867eb998026 rw,relatime - aufs none rw,si=caafa548068e525`
|
||||
|
||||
gentooMountinfo = `15 1 8:6 / / rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered
|
||||
16 15 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw
|
||||
17 15 0:14 / /run rw,nosuid,nodev,relatime - tmpfs tmpfs rw,size=3292172k,mode=755
|
||||
18 15 0:5 / /dev rw,nosuid,relatime - devtmpfs udev rw,size=10240k,nr_inodes=4106451,mode=755
|
||||
19 18 0:12 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw
|
||||
20 18 0:10 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000
|
||||
21 18 0:15 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw
|
||||
22 15 0:16 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw
|
||||
23 22 0:7 / /sys/kernel/debug rw,nosuid,nodev,noexec,relatime - debugfs debugfs rw
|
||||
24 22 0:17 / /sys/fs/cgroup rw,nosuid,nodev,noexec,relatime - tmpfs cgroup_root rw,size=10240k,mode=755
|
||||
25 24 0:18 / /sys/fs/cgroup/openrc rw,nosuid,nodev,noexec,relatime - cgroup openrc rw,release_agent=/lib64/rc/sh/cgroup-release-agent.sh,name=openrc
|
||||
26 24 0:19 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime - cgroup cpuset rw,cpuset,clone_children
|
||||
27 24 0:20 / /sys/fs/cgroup/cpu rw,nosuid,nodev,noexec,relatime - cgroup cpu rw,cpu,clone_children
|
||||
28 24 0:21 / /sys/fs/cgroup/cpuacct rw,nosuid,nodev,noexec,relatime - cgroup cpuacct rw,cpuacct,clone_children
|
||||
29 24 0:22 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime - cgroup memory rw,memory,clone_children
|
||||
30 24 0:23 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime - cgroup devices rw,devices,clone_children
|
||||
31 24 0:24 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime - cgroup freezer rw,freezer,clone_children
|
||||
32 24 0:25 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime - cgroup blkio rw,blkio,clone_children
|
||||
33 15 8:1 / /boot rw,noatime,nodiratime - vfat /dev/sda1 rw,fmask=0022,dmask=0022,codepage=437,iocharset=iso8859-1,shortname=mixed,errors=remount-ro
|
||||
34 15 8:18 / /mnt/xfs rw,noatime,nodiratime - xfs /dev/sdb2 rw,attr2,inode64,noquota
|
||||
35 15 0:26 / /tmp rw,relatime - tmpfs tmpfs rw
|
||||
36 16 0:27 / /proc/sys/fs/binfmt_misc rw,nosuid,nodev,noexec,relatime - binfmt_misc binfmt_misc rw
|
||||
42 15 0:33 / /var/lib/nfs/rpc_pipefs rw,relatime - rpc_pipefs rpc_pipefs rw
|
||||
43 16 0:34 / /proc/fs/nfsd rw,nosuid,nodev,noexec,relatime - nfsd nfsd rw
|
||||
44 15 0:35 / /home/tianon/.gvfs rw,nosuid,nodev,relatime - fuse.gvfs-fuse-daemon gvfs-fuse-daemon rw,user_id=1000,group_id=1000
|
||||
68 15 0:3336 / /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd rw,relatime - aufs none rw,si=9b4a7640128db39c
|
||||
85 68 8:6 /var/lib/docker/init/dockerinit-0.7.2-dev//deleted /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/.dockerinit rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered
|
||||
86 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/config.env /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/.dockerenv rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered
|
||||
87 68 8:6 /etc/resolv.conf /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/resolv.conf rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered
|
||||
88 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hostname /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hostname rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered
|
||||
89 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hosts /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hosts rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered
|
||||
38 15 0:3384 / /var/lib/docker/aufs/mnt/0292005a9292401bb5197657f2b682d97d8edcb3b72b5e390d2a680139985b55 rw,relatime - aufs none rw,si=9b4a7642b584939c
|
||||
39 15 0:3385 / /var/lib/docker/aufs/mnt/59db98c889de5f71b70cfb82c40cbe47b64332f0f56042a2987a9e5df6e5e3aa rw,relatime - aufs none rw,si=9b4a7642b584e39c
|
||||
40 15 0:3386 / /var/lib/docker/aufs/mnt/0545f0f2b6548eb9601d08f35a08f5a0a385407d36027a28f58e06e9f61e0278 rw,relatime - aufs none rw,si=9b4a7642b584b39c
|
||||
41 15 0:3387 / /var/lib/docker/aufs/mnt/d882cfa16d1aa8fe0331a36e79be3d80b151e49f24fc39a39c3fed1735d5feb5 rw,relatime - aufs none rw,si=9b4a76453040039c
|
||||
45 15 0:3388 / /var/lib/docker/aufs/mnt/055ca3befcb1626e74f5344b3398724ff05c0de0e20021683d04305c9e70a3f6 rw,relatime - aufs none rw,si=9b4a76453040739c
|
||||
46 15 0:3389 / /var/lib/docker/aufs/mnt/b899e4567a351745d4285e7f1c18fdece75d877deb3041981cd290be348b7aa6 rw,relatime - aufs none rw,si=9b4a7647def4039c
|
||||
47 15 0:3390 / /var/lib/docker/aufs/mnt/067ca040292c58954c5129f953219accfae0d40faca26b4d05e76ca76a998f16 rw,relatime - aufs none rw,si=9b4a7647def4239c
|
||||
48 15 0:3391 / /var/lib/docker/aufs/mnt/8c995e7cb6e5082742daeea720e340b021d288d25d92e0412c03d200df308a11 rw,relatime - aufs none rw,si=9b4a764479c1639c
|
||||
49 15 0:3392 / /var/lib/docker/aufs/mnt/07cc54dfae5b45300efdacdd53cc72c01b9044956a86ce7bff42d087e426096d rw,relatime - aufs none rw,si=9b4a764479c1739c
|
||||
50 15 0:3393 / /var/lib/docker/aufs/mnt/0a9c95cf4c589c05b06baa79150b0cc1d8e7102759fe3ce4afaabb8247ca4f85 rw,relatime - aufs none rw,si=9b4a7644059c839c
|
||||
51 15 0:3394 / /var/lib/docker/aufs/mnt/468fa98cececcf4e226e8370f18f4f848d63faf287fb8321a07f73086441a3a0 rw,relatime - aufs none rw,si=9b4a7644059ca39c
|
||||
52 15 0:3395 / /var/lib/docker/aufs/mnt/0b826192231c5ce066fffb5beff4397337b5fc19a377aa7c6282c7c0ce7f111f rw,relatime - aufs none rw,si=9b4a764479c1339c
|
||||
53 15 0:3396 / /var/lib/docker/aufs/mnt/93b8ba1b772fbe79709b909c43ea4b2c30d712e53548f467db1ffdc7a384f196 rw,relatime - aufs none rw,si=9b4a7640798a739c
|
||||
54 15 0:3397 / /var/lib/docker/aufs/mnt/0c0d0acfb506859b12ef18cdfef9ebed0b43a611482403564224bde9149d373c rw,relatime - aufs none rw,si=9b4a7640798a039c
|
||||
55 15 0:3398 / /var/lib/docker/aufs/mnt/33648c39ab6c7c74af0243d6d6a81b052e9e25ad1e04b19892eb2dde013e358b rw,relatime - aufs none rw,si=9b4a7644b439b39c
|
||||
56 15 0:3399 / /var/lib/docker/aufs/mnt/0c12bea97a1c958a3c739fb148536c1c89351d48e885ecda8f0499b5cc44407e rw,relatime - aufs none rw,si=9b4a7640798a239c
|
||||
57 15 0:3400 / /var/lib/docker/aufs/mnt/ed443988ce125f172d7512e84a4de2627405990fd767a16adefa8ce700c19ce8 rw,relatime - aufs none rw,si=9b4a7644c8ed339c
|
||||
59 15 0:3402 / /var/lib/docker/aufs/mnt/f61612c324ff3c924d3f7a82fb00a0f8d8f73c248c41897061949e9f5ab7e3b1 rw,relatime - aufs none rw,si=9b4a76442810c39c
|
||||
60 15 0:3403 / /var/lib/docker/aufs/mnt/0f1ee55c6c4e25027b80de8e64b8b6fb542b3b41aa0caab9261da75752e22bfd rw,relatime - aufs none rw,si=9b4a76442810e39c
|
||||
61 15 0:3404 / /var/lib/docker/aufs/mnt/956f6cc4af5785cb3ee6963dcbca668219437d9b28f513290b1453ac64a34f97 rw,relatime - aufs none rw,si=9b4a7644303ec39c
|
||||
62 15 0:3405 / /var/lib/docker/aufs/mnt/1099769158c4b4773e2569e38024e8717e400f87a002c41d8cf47cb81b051ba6 rw,relatime - aufs none rw,si=9b4a7644303ee39c
|
||||
63 15 0:3406 / /var/lib/docker/aufs/mnt/11890ceb98d4442595b676085cd7b21550ab85c5df841e0fba997ff54e3d522d rw,relatime - aufs none rw,si=9b4a7644303ed39c
|
||||
64 15 0:3407 / /var/lib/docker/aufs/mnt/acdb90dc378e8ed2420b43a6d291f1c789a081cd1904018780cc038fcd7aae53 rw,relatime - aufs none rw,si=9b4a76434be2139c
|
||||
65 15 0:3408 / /var/lib/docker/aufs/mnt/120e716f19d4714fbe63cc1ed246204f2c1106eefebc6537ba2587d7e7711959 rw,relatime - aufs none rw,si=9b4a76434be2339c
|
||||
66 15 0:3409 / /var/lib/docker/aufs/mnt/b197b7fffb61d89e0ba1c40de9a9fc0d912e778b3c1bd828cf981ff37c1963bc rw,relatime - aufs none rw,si=9b4a76434be2039c
|
||||
70 15 0:3412 / /var/lib/docker/aufs/mnt/1434b69d2e1bb18a9f0b96b9cdac30132b2688f5d1379f68a39a5e120c2f93eb rw,relatime - aufs none rw,si=9b4a76434be2639c
|
||||
71 15 0:3413 / /var/lib/docker/aufs/mnt/16006e83caf33ab5eb0cd6afc92ea2ee8edeff897496b0bb3ec3a75b767374b3 rw,relatime - aufs none rw,si=9b4a7644d790439c
|
||||
72 15 0:3414 / /var/lib/docker/aufs/mnt/55bfa5f44e94d27f91f79ba901b118b15098449165c87abf1b53ffff147ff164 rw,relatime - aufs none rw,si=9b4a7644d790239c
|
||||
73 15 0:3415 / /var/lib/docker/aufs/mnt/1912b97a07ab21ccd98a2a27bc779bf3cf364a3138afa3c3e6f7f169a3c3eab5 rw,relatime - aufs none rw,si=9b4a76441822739c
|
||||
76 15 0:3418 / /var/lib/docker/aufs/mnt/1a7c3292e8879bd91ffd9282e954f643b1db5683093574c248ff14a9609f2f56 rw,relatime - aufs none rw,si=9b4a76438cb7239c
|
||||
77 15 0:3419 / /var/lib/docker/aufs/mnt/bb1faaf0d076ddba82c2318305a85f490dafa4e8a8640a8db8ed657c439120cc rw,relatime - aufs none rw,si=9b4a76438cb7339c
|
||||
78 15 0:3420 / /var/lib/docker/aufs/mnt/1ab869f21d2241a73ac840c7f988490313f909ac642eba71d092204fec66dd7c rw,relatime - aufs none rw,si=9b4a76438cb7639c
|
||||
79 15 0:3421 / /var/lib/docker/aufs/mnt/fd7245b2cfe3890fa5f5b452260e4edf9e7fb7746532ed9d83f7a0d7dbaa610e rw,relatime - aufs none rw,si=9b4a7644bdc0139c
|
||||
80 15 0:3422 / /var/lib/docker/aufs/mnt/1e5686c5301f26b9b3cd24e322c608913465cc6c5d0dcd7c5e498d1314747d61 rw,relatime - aufs none rw,si=9b4a7644bdc0639c
|
||||
81 15 0:3423 / /var/lib/docker/aufs/mnt/52edf6ee6e40bfec1e9301a4d4a92ab83d144e2ae4ce5099e99df6138cb844bf rw,relatime - aufs none rw,si=9b4a7644bdc0239c
|
||||
82 15 0:3424 / /var/lib/docker/aufs/mnt/1ea10fb7085d28cda4904657dff0454e52598d28e1d77e4f2965bbc3666e808f rw,relatime - aufs none rw,si=9b4a76438cb7139c
|
||||
83 15 0:3425 / /var/lib/docker/aufs/mnt/9c03e98c3593946dbd4087f8d83f9ca262f4a2efdc952ce60690838b9ba6c526 rw,relatime - aufs none rw,si=9b4a76443020639c
|
||||
84 15 0:3426 / /var/lib/docker/aufs/mnt/220a2344d67437602c6d2cee9a98c46be13f82c2a8063919dd2fad52bf2fb7dd rw,relatime - aufs none rw,si=9b4a76434bff339c
|
||||
94 15 0:3427 / /var/lib/docker/aufs/mnt/3b32876c5b200312c50baa476ff342248e88c8ea96e6a1032cd53a88738a1cf2 rw,relatime - aufs none rw,si=9b4a76434bff139c
|
||||
95 15 0:3428 / /var/lib/docker/aufs/mnt/23ee2b8b0d4ae8db6f6d1e168e2c6f79f8a18f953b09f65e0d22cc1e67a3a6fa rw,relatime - aufs none rw,si=9b4a7646c305c39c
|
||||
96 15 0:3429 / /var/lib/docker/aufs/mnt/e86e6daa70b61b57945fa178222615f3c3d6bcef12c9f28e9f8623d44dc2d429 rw,relatime - aufs none rw,si=9b4a7646c305f39c
|
||||
97 15 0:3430 / /var/lib/docker/aufs/mnt/2413d07623e80860bb2e9e306fbdee699afd07525785c025c591231e864aa162 rw,relatime - aufs none rw,si=9b4a76434bff039c
|
||||
98 15 0:3431 / /var/lib/docker/aufs/mnt/adfd622eb22340fc80b429e5564b125668e260bf9068096c46dd59f1386a4b7d rw,relatime - aufs none rw,si=9b4a7646a7a1039c
|
||||
102 15 0:3435 / /var/lib/docker/aufs/mnt/27cd92e7a91d02e2d6b44d16679a00fb6d169b19b88822891084e7fd1a84882d rw,relatime - aufs none rw,si=9b4a7646f25ec39c
|
||||
103 15 0:3436 / /var/lib/docker/aufs/mnt/27dfdaf94cfbf45055c748293c37dd68d9140240bff4c646cb09216015914a88 rw,relatime - aufs none rw,si=9b4a7646732f939c
|
||||
104 15 0:3437 / /var/lib/docker/aufs/mnt/5ed7524aff68dfbf0fc601cbaeac01bab14391850a973dabf3653282a627920f rw,relatime - aufs none rw,si=9b4a7646732f839c
|
||||
105 15 0:3438 / /var/lib/docker/aufs/mnt/2a0d4767e536beb5785b60e071e3ac8e5e812613ab143a9627bee77d0c9ab062 rw,relatime - aufs none rw,si=9b4a7646732fe39c
|
||||
106 15 0:3439 / /var/lib/docker/aufs/mnt/dea3fc045d9f4ae51ba952450b948a822cf85c39411489ca5224f6d9a8d02bad rw,relatime - aufs none rw,si=9b4a764012ad839c
|
||||
107 15 0:3440 / /var/lib/docker/aufs/mnt/2d140a787160798da60cb67c21b1210054ad4dafecdcf832f015995b9aa99cfd rw,relatime - aufs none rw,si=9b4a764012add39c
|
||||
108 15 0:3441 / /var/lib/docker/aufs/mnt/cb190b2a8e984475914430fbad2382e0d20b9b659f8ef83ae8d170cc672e519c rw,relatime - aufs none rw,si=9b4a76454d9c239c
|
||||
109 15 0:3442 / /var/lib/docker/aufs/mnt/2f4a012d5a7ffd90256a6e9aa479054b3dddbc3c6a343f26dafbf3196890223b rw,relatime - aufs none rw,si=9b4a76454d9c439c
|
||||
110 15 0:3443 / /var/lib/docker/aufs/mnt/63cc77904b80c4ffbf49cb974c5d8733dc52ad7640d3ae87554b325d7312d87f rw,relatime - aufs none rw,si=9b4a76454d9c339c
|
||||
111 15 0:3444 / /var/lib/docker/aufs/mnt/30333e872c451482ea2d235ff2192e875bd234006b238ae2bdde3b91a86d7522 rw,relatime - aufs none rw,si=9b4a76422cebf39c
|
||||
112 15 0:3445 / /var/lib/docker/aufs/mnt/6c54fc1125da3925cae65b5c9a98f3be55b0a2c2666082e5094a4ba71beb5bff rw,relatime - aufs none rw,si=9b4a7646dd5a439c
|
||||
113 15 0:3446 / /var/lib/docker/aufs/mnt/3087d48cb01cda9d0a83a9ca301e6ea40e8593d18c4921be4794c91a420ab9a3 rw,relatime - aufs none rw,si=9b4a7646dd5a739c
|
||||
114 15 0:3447 / /var/lib/docker/aufs/mnt/cc2607462a8f55b179a749b144c3fdbb50678e1a4f3065ea04e283e9b1f1d8e2 rw,relatime - aufs none rw,si=9b4a7646dd5a239c
|
||||
117 15 0:3450 / /var/lib/docker/aufs/mnt/310c5e8392b29e8658a22e08d96d63936633b7e2c38e8d220047928b00a03d24 rw,relatime - aufs none rw,si=9b4a7647932d739c
|
||||
118 15 0:3451 / /var/lib/docker/aufs/mnt/38a1f0029406ba9c3b6058f2f406d8a1d23c855046cf355c91d87d446fcc1460 rw,relatime - aufs none rw,si=9b4a76445abc939c
|
||||
119 15 0:3452 / /var/lib/docker/aufs/mnt/42e109ab7914ae997a11ccd860fd18e4d488c50c044c3240423ce15774b8b62e rw,relatime - aufs none rw,si=9b4a76445abca39c
|
||||
120 15 0:3453 / /var/lib/docker/aufs/mnt/365d832af0402d052b389c1e9c0d353b48487533d20cd4351df8e24ec4e4f9d8 rw,relatime - aufs none rw,si=9b4a7644066aa39c
|
||||
121 15 0:3454 / /var/lib/docker/aufs/mnt/d3fa8a24d695b6cda9b64f96188f701963d28bef0473343f8b212df1a2cf1d2b rw,relatime - aufs none rw,si=9b4a7644066af39c
|
||||
122 15 0:3455 / /var/lib/docker/aufs/mnt/37d4f491919abc49a15d0c7a7cc8383f087573525d7d288accd14f0b4af9eae0 rw,relatime - aufs none rw,si=9b4a7644066ad39c
|
||||
123 15 0:3456 / /var/lib/docker/aufs/mnt/93902707fe12cbdd0068ce73f2baad4b3a299189b1b19cb5f8a2025e106ae3f5 rw,relatime - aufs none rw,si=9b4a76444445f39c
|
||||
126 15 0:3459 / /var/lib/docker/aufs/mnt/3b49291670a625b9bbb329ffba99bf7fa7abff80cefef040f8b89e2b3aad4f9f rw,relatime - aufs none rw,si=9b4a7640798a339c
|
||||
127 15 0:3460 / /var/lib/docker/aufs/mnt/8d9c7b943cc8f854f4d0d4ec19f7c16c13b0cc4f67a41472a072648610cecb59 rw,relatime - aufs none rw,si=9b4a76427383039c
|
||||
128 15 0:3461 / /var/lib/docker/aufs/mnt/3b6c90036526c376307df71d49c9f5fce334c01b926faa6a78186842de74beac rw,relatime - aufs none rw,si=9b4a7644badd439c
|
||||
130 15 0:3463 / /var/lib/docker/aufs/mnt/7b24158eeddfb5d31b7e932e406ea4899fd728344335ff8e0765e89ddeb351dd rw,relatime - aufs none rw,si=9b4a7644badd539c
|
||||
131 15 0:3464 / /var/lib/docker/aufs/mnt/3ead6dd5773765c74850cf6c769f21fe65c29d622ffa712664f9f5b80364ce27 rw,relatime - aufs none rw,si=9b4a7642f469939c
|
||||
132 15 0:3465 / /var/lib/docker/aufs/mnt/3f825573b29547744a37b65597a9d6d15a8350be4429b7038d126a4c9a8e178f rw,relatime - aufs none rw,si=9b4a7642f469c39c
|
||||
133 15 0:3466 / /var/lib/docker/aufs/mnt/f67aaaeb3681e5dcb99a41f847087370bd1c206680cb8c7b6a9819fd6c97a331 rw,relatime - aufs none rw,si=9b4a7647cc25939c
|
||||
134 15 0:3467 / /var/lib/docker/aufs/mnt/41afe6cfb3c1fc2280b869db07699da88552786e28793f0bc048a265c01bd942 rw,relatime - aufs none rw,si=9b4a7647cc25c39c
|
||||
135 15 0:3468 / /var/lib/docker/aufs/mnt/b8092ea59da34a40b120e8718c3ae9fa8436996edc4fc50e4b99c72dfd81e1af rw,relatime - aufs none rw,si=9b4a76445abc439c
|
||||
136 15 0:3469 / /var/lib/docker/aufs/mnt/42c69d2cc179e2684458bb8596a9da6dad182c08eae9b74d5f0e615b399f75a5 rw,relatime - aufs none rw,si=9b4a76455ddbe39c
|
||||
137 15 0:3470 / /var/lib/docker/aufs/mnt/ea0871954acd2d62a211ac60e05969622044d4c74597870c4f818fbb0c56b09b rw,relatime - aufs none rw,si=9b4a76455ddbf39c
|
||||
138 15 0:3471 / /var/lib/docker/aufs/mnt/4307906b275ab3fc971786b3841ae3217ac85b6756ddeb7ad4ba09cd044c2597 rw,relatime - aufs none rw,si=9b4a76455ddb839c
|
||||
139 15 0:3472 / /var/lib/docker/aufs/mnt/4390b872928c53500a5035634f3421622ed6299dc1472b631fc45de9f56dc180 rw,relatime - aufs none rw,si=9b4a76402f2fd39c
|
||||
140 15 0:3473 / /var/lib/docker/aufs/mnt/6bb41e78863b85e4aa7da89455314855c8c3bda64e52a583bab15dc1fa2e80c2 rw,relatime - aufs none rw,si=9b4a76402f2fa39c
|
||||
141 15 0:3474 / /var/lib/docker/aufs/mnt/4444f583c2a79c66608f4673a32c9c812154f027045fbd558c2d69920c53f835 rw,relatime - aufs none rw,si=9b4a764479dbd39c
|
||||
142 15 0:3475 / /var/lib/docker/aufs/mnt/6f11883af4a05ea362e0c54df89058da4859f977efd07b6f539e1f55c1d2a668 rw,relatime - aufs none rw,si=9b4a76402f30b39c
|
||||
143 15 0:3476 / /var/lib/docker/aufs/mnt/453490dd32e7c2e9ef906f995d8fb3c2753923d1a5e0ba3fd3296e2e4dc238e7 rw,relatime - aufs none rw,si=9b4a76402f30c39c
|
||||
144 15 0:3477 / /var/lib/docker/aufs/mnt/45e5945735ee102b5e891c91650c57ec4b52bb53017d68f02d50ea8a6e230610 rw,relatime - aufs none rw,si=9b4a76423260739c
|
||||
147 15 0:3480 / /var/lib/docker/aufs/mnt/4727a64a5553a1125f315b96bed10d3073d6988225a292cce732617c925b56ab rw,relatime - aufs none rw,si=9b4a76443030339c
|
||||
150 15 0:3483 / /var/lib/docker/aufs/mnt/4e348b5187b9a567059306afc72d42e0ec5c893b0d4abd547526d5f9b6fb4590 rw,relatime - aufs none rw,si=9b4a7644f5d8c39c
|
||||
151 15 0:3484 / /var/lib/docker/aufs/mnt/4efc616bfbc3f906718b052da22e4335f8e9f91ee9b15866ed3a8029645189ef rw,relatime - aufs none rw,si=9b4a7644f5d8939c
|
||||
152 15 0:3485 / /var/lib/docker/aufs/mnt/83e730ae9754d5adb853b64735472d98dfa17136b8812ac9cfcd1eba7f4e7d2d rw,relatime - aufs none rw,si=9b4a76469aa7139c
|
||||
153 15 0:3486 / /var/lib/docker/aufs/mnt/4fc5ba8a5b333be2b7eefacccb626772eeec0ae8a6975112b56c9fb36c0d342f rw,relatime - aufs none rw,si=9b4a7640128dc39c
|
||||
154 15 0:3487 / /var/lib/docker/aufs/mnt/50200d5edff5dfe8d1ef3c78b0bbd709793ac6e936aa16d74ff66f7ea577b6f9 rw,relatime - aufs none rw,si=9b4a7640128da39c
|
||||
155 15 0:3488 / /var/lib/docker/aufs/mnt/51e5e51604361448f0b9777f38329f414bc5ba9cf238f26d465ff479bd574b61 rw,relatime - aufs none rw,si=9b4a76444f68939c
|
||||
156 15 0:3489 / /var/lib/docker/aufs/mnt/52a142149aa98bba83df8766bbb1c629a97b9799944ead90dd206c4bdf0b8385 rw,relatime - aufs none rw,si=9b4a76444f68b39c
|
||||
157 15 0:3490 / /var/lib/docker/aufs/mnt/52dd21a94a00f58a1ed489312fcfffb91578089c76c5650364476f1d5de031bc rw,relatime - aufs none rw,si=9b4a76444f68f39c
|
||||
158 15 0:3491 / /var/lib/docker/aufs/mnt/ee562415ddaad353ed22c88d0ca768a0c74bfba6333b6e25c46849ee22d990da rw,relatime - aufs none rw,si=9b4a7640128d839c
|
||||
159 15 0:3492 / /var/lib/docker/aufs/mnt/db47a9e87173f7554f550c8a01891de79cf12acdd32e01f95c1a527a08bdfb2c rw,relatime - aufs none rw,si=9b4a764405a1d39c
|
||||
160 15 0:3493 / /var/lib/docker/aufs/mnt/55e827bf6d44d930ec0b827c98356eb8b68c3301e2d60d1429aa72e05b4c17df rw,relatime - aufs none rw,si=9b4a764405a1a39c
|
||||
162 15 0:3495 / /var/lib/docker/aufs/mnt/578dc4e0a87fc37ec081ca098430499a59639c09f6f12a8f48de29828a091aa6 rw,relatime - aufs none rw,si=9b4a76406d7d439c
|
||||
163 15 0:3496 / /var/lib/docker/aufs/mnt/728cc1cb04fa4bc6f7bf7a90980beda6d8fc0beb71630874c0747b994efb0798 rw,relatime - aufs none rw,si=9b4a76444f20e39c
|
||||
164 15 0:3497 / /var/lib/docker/aufs/mnt/5850cc4bd9b55aea46c7ad598f1785117607974084ea643580f58ce3222e683a rw,relatime - aufs none rw,si=9b4a7644a824239c
|
||||
165 15 0:3498 / /var/lib/docker/aufs/mnt/89443b3f766d5a37bc8b84e29da8b84e6a3ea8486d3cf154e2aae1816516e4a8 rw,relatime - aufs none rw,si=9b4a7644a824139c
|
||||
166 15 0:3499 / /var/lib/docker/aufs/mnt/f5ae8fd5a41a337907d16515bc3162525154b59c32314c695ecd092c3b47943d rw,relatime - aufs none rw,si=9b4a7644a824439c
|
||||
167 15 0:3500 / /var/lib/docker/aufs/mnt/5a430854f2a03a9e5f7cbc9f3fb46a8ebca526a5b3f435236d8295e5998798f5 rw,relatime - aufs none rw,si=9b4a7647fc82439c
|
||||
168 15 0:3501 / /var/lib/docker/aufs/mnt/eda16901ae4cead35070c39845cbf1e10bd6b8cb0ffa7879ae2d8a186e460f91 rw,relatime - aufs none rw,si=9b4a76441e0df39c
|
||||
169 15 0:3502 / /var/lib/docker/aufs/mnt/5a593721430c2a51b119ff86a7e06ea2b37e3b4131f8f1344d402b61b0c8d868 rw,relatime - aufs none rw,si=9b4a764248bad39c
|
||||
170 15 0:3503 / /var/lib/docker/aufs/mnt/d662ad0a30fbfa902e0962108685b9330597e1ee2abb16dc9462eb5a67fdd23f rw,relatime - aufs none rw,si=9b4a764248bae39c
|
||||
171 15 0:3504 / /var/lib/docker/aufs/mnt/5bc9de5c79812843fb36eee96bef1ddba812407861f572e33242f4ee10da2c15 rw,relatime - aufs none rw,si=9b4a764248ba839c
|
||||
172 15 0:3505 / /var/lib/docker/aufs/mnt/5e763de8e9b0f7d58d2e12a341e029ab4efb3b99788b175090d8209e971156c1 rw,relatime - aufs none rw,si=9b4a764248baa39c
|
||||
173 15 0:3506 / /var/lib/docker/aufs/mnt/b4431dc2739936f1df6387e337f5a0c99cf051900c896bd7fd46a870ce61c873 rw,relatime - aufs none rw,si=9b4a76401263539c
|
||||
174 15 0:3507 / /var/lib/docker/aufs/mnt/5f37830e5a02561ab8c67ea3113137ba69f67a60e41c05cb0e7a0edaa1925b24 rw,relatime - aufs none rw,si=9b4a76401263639c
|
||||
184 15 0:3508 / /var/lib/docker/aufs/mnt/62ea10b957e6533538a4633a1e1d678502f50ddcdd354b2ca275c54dd7a7793a rw,relatime - aufs none rw,si=9b4a76401263039c
|
||||
187 15 0:3509 / /var/lib/docker/aufs/mnt/d56ee9d44195fe390e042fda75ec15af5132adb6d5c69468fa8792f4e54a6953 rw,relatime - aufs none rw,si=9b4a76401263239c
|
||||
188 15 0:3510 / /var/lib/docker/aufs/mnt/6a300930673174549c2b62f36c933f0332a20735978c007c805a301f897146c5 rw,relatime - aufs none rw,si=9b4a76455d4c539c
|
||||
189 15 0:3511 / /var/lib/docker/aufs/mnt/64496c45c84d348c24d410015456d101601c30cab4d1998c395591caf7e57a70 rw,relatime - aufs none rw,si=9b4a76455d4c639c
|
||||
190 15 0:3512 / /var/lib/docker/aufs/mnt/65a6a645883fe97a7422cd5e71ebe0bc17c8e6302a5361edf52e89747387e908 rw,relatime - aufs none rw,si=9b4a76455d4c039c
|
||||
191 15 0:3513 / /var/lib/docker/aufs/mnt/672be40695f7b6e13b0a3ed9fc996c73727dede3481f58155950fcfad57ed616 rw,relatime - aufs none rw,si=9b4a76455d4c239c
|
||||
192 15 0:3514 / /var/lib/docker/aufs/mnt/d42438acb2bfb2169e1c0d8e917fc824f7c85d336dadb0b0af36dfe0f001b3ba rw,relatime - aufs none rw,si=9b4a7642bfded39c
|
||||
193 15 0:3515 / /var/lib/docker/aufs/mnt/b48a54abf26d01cb2ddd908b1ed6034d17397c1341bf0eb2b251a3e5b79be854 rw,relatime - aufs none rw,si=9b4a7642bfdee39c
|
||||
194 15 0:3516 / /var/lib/docker/aufs/mnt/76f27134491f052bfb87f59092126e53ef875d6851990e59195a9da16a9412f8 rw,relatime - aufs none rw,si=9b4a7642bfde839c
|
||||
195 15 0:3517 / /var/lib/docker/aufs/mnt/6bd626a5462b4f8a8e1cc7d10351326dca97a59b2758e5ea549a4f6350ce8a90 rw,relatime - aufs none rw,si=9b4a7642bfdea39c
|
||||
196 15 0:3518 / /var/lib/docker/aufs/mnt/f1fe3549dbd6f5ca615e9139d9b53f0c83a3b825565df37628eacc13e70cbd6d rw,relatime - aufs none rw,si=9b4a7642bfdf539c
|
||||
197 15 0:3519 / /var/lib/docker/aufs/mnt/6d0458c8426a9e93d58d0625737e6122e725c9408488ed9e3e649a9984e15c34 rw,relatime - aufs none rw,si=9b4a7642bfdf639c
|
||||
198 15 0:3520 / /var/lib/docker/aufs/mnt/6e4c97db83aa82145c9cf2bafc20d500c0b5389643b689e3ae84188c270a48c5 rw,relatime - aufs none rw,si=9b4a7642bfdf039c
|
||||
199 15 0:3521 / /var/lib/docker/aufs/mnt/eb94d6498f2c5969eaa9fa11ac2934f1ab90ef88e2d002258dca08e5ba74ea27 rw,relatime - aufs none rw,si=9b4a7642bfdf239c
|
||||
200 15 0:3522 / /var/lib/docker/aufs/mnt/fe3f88f0c511608a2eec5f13a98703aa16e55dbf930309723d8a37101f539fe1 rw,relatime - aufs none rw,si=9b4a7642bfc3539c
|
||||
201 15 0:3523 / /var/lib/docker/aufs/mnt/6f40c229fb9cad85fabf4b64a2640a5403ec03fe5ac1a57d0609fb8b606b9c83 rw,relatime - aufs none rw,si=9b4a7642bfc3639c
|
||||
202 15 0:3524 / /var/lib/docker/aufs/mnt/7513e9131f7a8acf58ff15248237feb767c78732ca46e159f4d791e6ef031dbc rw,relatime - aufs none rw,si=9b4a7642bfc3039c
|
||||
203 15 0:3525 / /var/lib/docker/aufs/mnt/79f48b00aa713cdf809c6bb7c7cb911b66e9a8076c81d6c9d2504139984ea2da rw,relatime - aufs none rw,si=9b4a7642bfc3239c
|
||||
204 15 0:3526 / /var/lib/docker/aufs/mnt/c3680418350d11358f0a96c676bc5aa74fa00a7c89e629ef5909d3557b060300 rw,relatime - aufs none rw,si=9b4a7642f47cd39c
|
||||
205 15 0:3527 / /var/lib/docker/aufs/mnt/7a1744dd350d7fcc0cccb6f1757ca4cbe5453f203a5888b0f1014d96ad5a5ef9 rw,relatime - aufs none rw,si=9b4a7642f47ce39c
|
||||
206 15 0:3528 / /var/lib/docker/aufs/mnt/7fa99662db046be9f03c33c35251afda9ccdc0085636bbba1d90592cec3ff68d rw,relatime - aufs none rw,si=9b4a7642f47c839c
|
||||
207 15 0:3529 / /var/lib/docker/aufs/mnt/f815021ef20da9c9b056bd1d52d8aaf6e2c0c19f11122fc793eb2b04eb995e35 rw,relatime - aufs none rw,si=9b4a7642f47ca39c
|
||||
208 15 0:3530 / /var/lib/docker/aufs/mnt/801086ae3110192d601dfcebdba2db92e86ce6b6a9dba6678ea04488e4513669 rw,relatime - aufs none rw,si=9b4a7642dc6dd39c
|
||||
209 15 0:3531 / /var/lib/docker/aufs/mnt/822ba7db69f21daddda87c01cfbfbf73013fc03a879daf96d16cdde6f9b1fbd6 rw,relatime - aufs none rw,si=9b4a7642dc6de39c
|
||||
210 15 0:3532 / /var/lib/docker/aufs/mnt/834227c1a950fef8cae3827489129d0dd220541e60c6b731caaa765bf2e6a199 rw,relatime - aufs none rw,si=9b4a7642dc6d839c
|
||||
211 15 0:3533 / /var/lib/docker/aufs/mnt/83dccbc385299bd1c7cf19326e791b33a544eea7b4cdfb6db70ea94eed4389fb rw,relatime - aufs none rw,si=9b4a7642dc6da39c
|
||||
212 15 0:3534 / /var/lib/docker/aufs/mnt/f1b8e6f0e7c8928b5dcdab944db89306ebcae3e0b32f9ff40d2daa8329f21600 rw,relatime - aufs none rw,si=9b4a7645a126039c
|
||||
213 15 0:3535 / /var/lib/docker/aufs/mnt/970efb262c7a020c2404cbcc5b3259efba0d110a786079faeef05bc2952abf3a rw,relatime - aufs none rw,si=9b4a7644c8ed139c
|
||||
214 15 0:3536 / /var/lib/docker/aufs/mnt/84b6d73af7450f3117a77e15a5ca1255871fea6182cd8e8a7be6bc744be18c2c rw,relatime - aufs none rw,si=9b4a76406559139c
|
||||
215 15 0:3537 / /var/lib/docker/aufs/mnt/88be2716e026bc681b5e63fe7942068773efbd0b6e901ca7ba441412006a96b6 rw,relatime - aufs none rw,si=9b4a76406559339c
|
||||
216 15 0:3538 / /var/lib/docker/aufs/mnt/c81939aa166ce50cd8bca5cfbbcc420a78e0318dd5cd7c755209b9166a00a752 rw,relatime - aufs none rw,si=9b4a76406559239c
|
||||
217 15 0:3539 / /var/lib/docker/aufs/mnt/e0f241645d64b7dc5ff6a8414087cca226be08fb54ce987d1d1f6350c57083aa rw,relatime - aufs none rw,si=9b4a7647cfc0f39c
|
||||
218 15 0:3540 / /var/lib/docker/aufs/mnt/e10e2bf75234ed51d8a6a4bb39e465404fecbe318e54400d3879cdb2b0679c78 rw,relatime - aufs none rw,si=9b4a7647cfc0939c
|
||||
219 15 0:3541 / /var/lib/docker/aufs/mnt/8f71d74c8cfc3228b82564aa9f09b2e576cff0083ddfb6aa5cb350346063f080 rw,relatime - aufs none rw,si=9b4a7647cfc0a39c
|
||||
220 15 0:3542 / /var/lib/docker/aufs/mnt/9159f1eba2aef7f5205cc18d015cda7f5933cd29bba3b1b8aed5ccb5824c69ee rw,relatime - aufs none rw,si=9b4a76468cedd39c
|
||||
221 15 0:3543 / /var/lib/docker/aufs/mnt/932cad71e652e048e500d9fbb5b8ea4fc9a269d42a3134ce527ceef42a2be56b rw,relatime - aufs none rw,si=9b4a76468cede39c
|
||||
222 15 0:3544 / /var/lib/docker/aufs/mnt/bf1e1b5f529e8943cc0144ee86dbaaa37885c1ddffcef29537e0078ee7dd316a rw,relatime - aufs none rw,si=9b4a76468ced839c
|
||||
223 15 0:3545 / /var/lib/docker/aufs/mnt/949d93ecf3322e09f858ce81d5f4b434068ec44ff84c375de03104f7b45ee955 rw,relatime - aufs none rw,si=9b4a76468ceda39c
|
||||
224 15 0:3546 / /var/lib/docker/aufs/mnt/d65c6087f92dc2a3841b5251d2fe9ca07d4c6e5b021597692479740816e4e2a1 rw,relatime - aufs none rw,si=9b4a7645a126239c
|
||||
225 15 0:3547 / /var/lib/docker/aufs/mnt/98a0153119d0651c193d053d254f6e16a68345a141baa80c87ae487e9d33f290 rw,relatime - aufs none rw,si=9b4a7640787cf39c
|
||||
226 15 0:3548 / /var/lib/docker/aufs/mnt/99daf7fe5847c017392f6e59aa9706b3dfdd9e6d1ba11dae0f7fffde0a60b5e5 rw,relatime - aufs none rw,si=9b4a7640787c839c
|
||||
227 15 0:3549 / /var/lib/docker/aufs/mnt/9ad1f2fe8a5599d4e10c5a6effa7f03d932d4e92ee13149031a372087a359079 rw,relatime - aufs none rw,si=9b4a7640787ca39c
|
||||
228 15 0:3550 / /var/lib/docker/aufs/mnt/c26d64494da782ddac26f8370d86ac93e7c1666d88a7b99110fc86b35ea6a85d rw,relatime - aufs none rw,si=9b4a7642fc6b539c
|
||||
229 15 0:3551 / /var/lib/docker/aufs/mnt/a49e4a8275133c230ec640997f35f172312eb0ea5bd2bbe10abf34aae98f30eb rw,relatime - aufs none rw,si=9b4a7642fc6b639c
|
||||
230 15 0:3552 / /var/lib/docker/aufs/mnt/b5e2740c867ed843025f49d84e8d769de9e8e6039b3c8cb0735b5bf358994bc7 rw,relatime - aufs none rw,si=9b4a7642fc6b039c
|
||||
231 15 0:3553 / /var/lib/docker/aufs/mnt/a826fdcf3a7039b30570054579b65763db605a314275d7aef31b872c13311b4b rw,relatime - aufs none rw,si=9b4a7642fc6b239c
|
||||
232 15 0:3554 / /var/lib/docker/aufs/mnt/addf3025babf5e43b5a3f4a0da7ad863dda3c01fb8365c58fd8d28bb61dc11bc rw,relatime - aufs none rw,si=9b4a76407871d39c
|
||||
233 15 0:3555 / /var/lib/docker/aufs/mnt/c5b6c6813ab3e5ebdc6d22cb2a3d3106a62095f2c298be52b07a3b0fa20ff690 rw,relatime - aufs none rw,si=9b4a76407871e39c
|
||||
234 15 0:3556 / /var/lib/docker/aufs/mnt/af0609eaaf64e2392060cb46f5a9f3d681a219bb4c651d4f015bf573fbe6c4cf rw,relatime - aufs none rw,si=9b4a76407871839c
|
||||
235 15 0:3557 / /var/lib/docker/aufs/mnt/e7f20e3c37ecad39cd90a97cd3549466d0d106ce4f0a930b8495442634fa4a1f rw,relatime - aufs none rw,si=9b4a76407871a39c
|
||||
237 15 0:3559 / /var/lib/docker/aufs/mnt/b57a53d440ffd0c1295804fa68cdde35d2fed5409484627e71b9c37e4249fd5c rw,relatime - aufs none rw,si=9b4a76444445a39c
|
||||
238 15 0:3560 / /var/lib/docker/aufs/mnt/b5e7d7b8f35e47efbba3d80c5d722f5e7bd43e54c824e54b4a4b351714d36d42 rw,relatime - aufs none rw,si=9b4a7647932d439c
|
||||
239 15 0:3561 / /var/lib/docker/aufs/mnt/f1b136def157e9465640658f277f3347de593c6ae76412a2e79f7002f091cae2 rw,relatime - aufs none rw,si=9b4a76445abcd39c
|
||||
240 15 0:3562 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=9b4a7644403b339c
|
||||
241 15 0:3563 / /var/lib/docker/aufs/mnt/b89b140cdbc95063761864e0a23346207fa27ee4c5c63a1ae85c9069a9d9cf1d rw,relatime - aufs none rw,si=9b4a7644aa19739c
|
||||
242 15 0:3564 / /var/lib/docker/aufs/mnt/bc6a69ed51c07f5228f6b4f161c892e6a949c0e7e86a9c3432049d4c0e5cd298 rw,relatime - aufs none rw,si=9b4a7644aa19139c
|
||||
243 15 0:3565 / /var/lib/docker/aufs/mnt/be4e2ba3f136933e239f7cf3d136f484fb9004f1fbdfee24a62a2c7b0ab30670 rw,relatime - aufs none rw,si=9b4a7644aa19339c
|
||||
244 15 0:3566 / /var/lib/docker/aufs/mnt/e04ca1a4a5171e30d20f0c92f90a50b8b6f8600af5459c4b4fb25e42e864dfe1 rw,relatime - aufs none rw,si=9b4a7647932d139c
|
||||
245 15 0:3567 / /var/lib/docker/aufs/mnt/be61576b31db893129aaffcd3dcb5ce35e49c4b71b30c392a78609a45c7323d8 rw,relatime - aufs none rw,si=9b4a7642d85f739c
|
||||
246 15 0:3568 / /var/lib/docker/aufs/mnt/dda42c191e56becf672327658ab84fcb563322db3764b91c2fefe4aaef04c624 rw,relatime - aufs none rw,si=9b4a7642d85f139c
|
||||
247 15 0:3569 / /var/lib/docker/aufs/mnt/c0a7995053330f3d88969247a2e72b07e2dd692133f5668a4a35ea3905561072 rw,relatime - aufs none rw,si=9b4a7642d85f339c
|
||||
249 15 0:3571 / /var/lib/docker/aufs/mnt/c3594b2e5f08c59ff5ed338a1ba1eceeeb1f7fc5d180068338110c00b1eb8502 rw,relatime - aufs none rw,si=9b4a7642738c739c
|
||||
250 15 0:3572 / /var/lib/docker/aufs/mnt/c58dce03a0ab0a7588393880379dc3bce9f96ec08ed3f99cf1555260ff0031e8 rw,relatime - aufs none rw,si=9b4a7642738c139c
|
||||
251 15 0:3573 / /var/lib/docker/aufs/mnt/c73e9f1d109c9d14cb36e1c7489df85649be3911116d76c2fd3648ec8fd94e23 rw,relatime - aufs none rw,si=9b4a7642738c339c
|
||||
252 15 0:3574 / /var/lib/docker/aufs/mnt/c9eef28c344877cd68aa09e543c0710ab2b305a0ff96dbb859bfa7808c3e8d01 rw,relatime - aufs none rw,si=9b4a7642d85f439c
|
||||
253 15 0:3575 / /var/lib/docker/aufs/mnt/feb67148f548d70cb7484f2aaad2a86051cd6867a561741a2f13b552457d666e rw,relatime - aufs none rw,si=9b4a76468c55739c
|
||||
254 15 0:3576 / /var/lib/docker/aufs/mnt/cdf1f96c36d35a96041a896bf398ec0f7dc3b0fb0643612a0f4b6ff96e04e1bb rw,relatime - aufs none rw,si=9b4a76468c55139c
|
||||
255 15 0:3577 / /var/lib/docker/aufs/mnt/ec6e505872353268451ac4bc034c1df00f3bae4a3ea2261c6e48f7bd5417c1b3 rw,relatime - aufs none rw,si=9b4a76468c55339c
|
||||
256 15 0:3578 / /var/lib/docker/aufs/mnt/d6dc8aca64efd90e0bc10274001882d0efb310d42ccbf5712b99b169053b8b1a rw,relatime - aufs none rw,si=9b4a7642738c439c
|
||||
257 15 0:3579 / /var/lib/docker/aufs/mnt/d712594e2ff6eaeb895bfd150d694bd1305fb927e7a186b2dab7df2ea95f8f81 rw,relatime - aufs none rw,si=9b4a76401268f39c
|
||||
259 15 0:3581 / /var/lib/docker/aufs/mnt/dbfa1174cd78cde2d7410eae442af0b416c4a0e6f87ed4ff1e9f169a0029abc0 rw,relatime - aufs none rw,si=9b4a76401268b39c
|
||||
260 15 0:3582 / /var/lib/docker/aufs/mnt/e883f5a82316d7856fbe93ee8c0af5a920b7079619dd95c4ffd88bbd309d28dd rw,relatime - aufs none rw,si=9b4a76468c55439c
|
||||
261 15 0:3583 / /var/lib/docker/aufs/mnt/fdec3eff581c4fc2b09f87befa2fa021f3f2d373bea636a87f1fb5b367d6347a rw,relatime - aufs none rw,si=9b4a7644aa1af39c
|
||||
262 15 0:3584 / /var/lib/docker/aufs/mnt/ef764e26712184653067ecf7afea18a80854c41331ca0f0ef03e1bacf90a6ffc rw,relatime - aufs none rw,si=9b4a7644aa1a939c
|
||||
263 15 0:3585 / /var/lib/docker/aufs/mnt/f3176b40c41fce8ce6942936359a2001a6f1b5c1bb40ee224186db0789ec2f76 rw,relatime - aufs none rw,si=9b4a7644aa1ab39c
|
||||
264 15 0:3586 / /var/lib/docker/aufs/mnt/f5daf06785d3565c6dd18ea7d953d9a8b9606107781e63270fe0514508736e6a rw,relatime - aufs none rw,si=9b4a76401268c39c
|
||||
58 15 0:3587 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8-init rw,relatime - aufs none rw,si=9b4a76444445839c
|
||||
67 15 0:3588 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8 rw,relatime - aufs none rw,si=9b4a7644badd339c
|
||||
265 15 0:3610 / /var/lib/docker/aufs/mnt/e812472cd2c8c4748d1ef71fac4e77e50d661b9349abe66ce3e23511ed44f414 rw,relatime - aufs none rw,si=9b4a76427937d39c
|
||||
270 15 0:3615 / /var/lib/docker/aufs/mnt/997636e7c5c9d0d1376a217e295c14c205350b62bc12052804fb5f90abe6f183 rw,relatime - aufs none rw,si=9b4a76406540739c
|
||||
273 15 0:3618 / /var/lib/docker/aufs/mnt/d5794d080417b6e52e69227c3873e0e4c1ff0d5a845ebe3860ec2f89a47a2a1e rw,relatime - aufs none rw,si=9b4a76454814039c
|
||||
278 15 0:3623 / /var/lib/docker/aufs/mnt/586bdd48baced671bb19bc4d294ec325f26c55545ae267db426424f157d59c48 rw,relatime - aufs none rw,si=9b4a7644b439f39c
|
||||
281 15 0:3626 / /var/lib/docker/aufs/mnt/69739d022f89f8586908bbd5edbbdd95ea5256356f177f9ffcc6ef9c0ea752d2 rw,relatime - aufs none rw,si=9b4a7644a0f1b39c
|
||||
286 15 0:3631 / /var/lib/docker/aufs/mnt/ff28c27d5f894363993622de26d5dd352dba072f219e4691d6498c19bbbc15a9 rw,relatime - aufs none rw,si=9b4a7642265b339c
|
||||
289 15 0:3634 / /var/lib/docker/aufs/mnt/aa128fe0e64fdede333aa48fd9de39530c91a9244a0f0649a3c411c61e372daa rw,relatime - aufs none rw,si=9b4a764012ada39c
|
||||
99 15 8:33 / /media/REMOVE\040ME rw,nosuid,nodev,relatime - fuseblk /dev/sdc1 rw,user_id=0,group_id=0,allow_other,blksize=4096`
)

func TestParseFedoraMountinfo(t *testing.T) {
	r := bytes.NewBuffer([]byte(fedoraMountinfo))
	_, err := parseInfoFile(r)
	if err != nil {
		t.Fatal(err)
	}
}

func TestParseUbuntuMountinfo(t *testing.T) {
	r := bytes.NewBuffer([]byte(ubuntuMountInfo))
	_, err := parseInfoFile(r)
	if err != nil {
		t.Fatal(err)
	}
}

func TestParseGentooMountinfo(t *testing.T) {
	r := bytes.NewBuffer([]byte(gentooMountinfo))
	_, err := parseInfoFile(r)
	if err != nil {
		t.Fatal(err)
	}
}

func TestParseFedoraMountinfoFields(t *testing.T) {
	r := bytes.NewBuffer([]byte(fedoraMountinfo))
	infos, err := parseInfoFile(r)
	if err != nil {
		t.Fatal(err)
	}
	expectedLength := 58
	if len(infos) != expectedLength {
		t.Fatalf("Expected %d entries, got %d", expectedLength, len(infos))
	}
	mi := MountInfo{
		Id:         15,
		Parent:     35,
		Major:      0,
		Minor:      3,
		Root:       "/",
		Mountpoint: "/proc",
		Opts:       "rw,nosuid,nodev,noexec,relatime",
		Optional:   "shared:5",
		Fstype:     "proc",
		Source:     "proc",
		VfsOpts:    "rw",
	}

	if *infos[0] != mi {
		t.Fatalf("expected %#v, got %#v", mi, infos[0])
	}
}
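
The field-by-field assertions in TestParseFedoraMountinfoFields encode how one /proc/self/mountinfo record decomposes into a MountInfo value. The following is a minimal sketch of that mapping, written as if it lived next to these tests in the same package; the helper name and the sample line are illustrative and are not part of the vendored code.

package mount

import (
	"bytes"
	"fmt"
)

// demoParseOneLine feeds a single mountinfo record through parseInfoFile and
// prints the fields the test above asserts on. Record layout:
//
//	id parent major:minor root mountpoint opts optional... - fstype source vfsopts
func demoParseOneLine() error {
	line := "15 35 0:3 / /proc rw,nosuid,nodev,noexec,relatime shared:5 - proc proc rw\n"
	infos, err := parseInfoFile(bytes.NewBufferString(line))
	if err != nil {
		return err
	}
	mi := infos[0]
	fmt.Printf("%s on %s type %s (%s), optional fields: %s\n",
		mi.Source, mi.Mountpoint, mi.Fstype, mi.Opts, mi.Optional)
	return nil
}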
12
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go
generated vendored Normal file
@@ -0,0 +1,12 @@
// +build !linux,!freebsd freebsd,!cgo

package mount

import (
	"fmt"
	"runtime"
)

func parseMountTable() ([]*MountInfo, error) {
	return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
}
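
This stub only exists so the package still compiles on platforms without a mountinfo parser; callers see the error at runtime. A hedged usage sketch follows: the import path and mount point are illustrative, and the assumption is that the exported Mounted helper surfaces this error on unsupported platforms because it relies on the mount-table parser.

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/mount" // illustrative; vendored here under Godeps/_workspace
)

func main() {
	mounted, err := mount.Mounted("/var/lib/docker")
	if err != nil {
		// On platforms covered by mountinfo_unsupported.go this is expected to be
		// the "mount.parseMountTable is not implemented on GOOS/GOARCH" error.
		fmt.Println("cannot inspect the mount table:", err)
		return
	}
	fmt.Println("/var/lib/docker mounted:", mounted)
}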
70
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go
generated vendored Normal file
@@ -0,0 +1,70 @@
// +build linux

package mount

// MakeShared ensures a mounted filesystem has the SHARED mount option enabled.
// See the supported options in flags.go for further reference.
func MakeShared(mountPoint string) error {
	return ensureMountedAs(mountPoint, "shared")
}

// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled.
// See the supported options in flags.go for further reference.
func MakeRShared(mountPoint string) error {
	return ensureMountedAs(mountPoint, "rshared")
}

// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled.
// See the supported options in flags.go for further reference.
func MakePrivate(mountPoint string) error {
	return ensureMountedAs(mountPoint, "private")
}

// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option
// enabled. See the supported options in flags.go for further reference.
func MakeRPrivate(mountPoint string) error {
	return ensureMountedAs(mountPoint, "rprivate")
}

// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled.
// See the supported options in flags.go for further reference.
func MakeSlave(mountPoint string) error {
	return ensureMountedAs(mountPoint, "slave")
}

// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled.
// See the supported options in flags.go for further reference.
func MakeRSlave(mountPoint string) error {
	return ensureMountedAs(mountPoint, "rslave")
}

// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option
// enabled. See the supported options in flags.go for further reference.
func MakeUnbindable(mountPoint string) error {
	return ensureMountedAs(mountPoint, "unbindable")
}

// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount
// option enabled. See the supported options in flags.go for further reference.
func MakeRUnbindable(mountPoint string) error {
	return ensureMountedAs(mountPoint, "runbindable")
}

func ensureMountedAs(mountPoint, options string) error {
	mounted, err := Mounted(mountPoint)
	if err != nil {
		return err
	}

	if !mounted {
		if err := Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil {
			return err
		}
	}
	mounted, err = Mounted(mountPoint)
	if err != nil {
		return err
	}

	return ForceMount("", mountPoint, "none", options)
}
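
All of the helpers above funnel into ensureMountedAs, which bind-mounts the path onto itself if it is not already a mount point and then re-mounts it with the requested propagation option. A short usage sketch follows, assuming the vendored import path and illustrative mount points; these calls require root privileges and a Linux kernel with shared-subtree support.

package main

import (
	"log"

	"github.com/docker/docker/pkg/mount" // illustrative; vendored here under Godeps/_workspace
)

func main() {
	// Let mounts under /var/lib/docker propagate to and from peer mount namespaces.
	if err := mount.MakeRShared("/var/lib/docker"); err != nil {
		log.Fatal(err)
	}

	// Isolate a scratch directory so nothing propagates in either direction.
	if err := mount.MakePrivate("/mnt/scratch"); err != nil {
		log.Fatal(err)
	}
}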
331
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/sharedsubtree_linux_test.go
generated vendored Normal file
@@ -0,0 +1,331 @@
// +build linux

package mount

import (
	"os"
	"path"
	"syscall"
	"testing"
)

// nothing is propagated in or out
func TestSubtreePrivate(t *testing.T) {
	tmp := path.Join(os.TempDir(), "mount-tests")
	if err := os.MkdirAll(tmp, 0777); err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmp)

	var (
		sourceDir   = path.Join(tmp, "source")
		targetDir   = path.Join(tmp, "target")
		outside1Dir = path.Join(tmp, "outside1")
		outside2Dir = path.Join(tmp, "outside2")

		outside1Path      = path.Join(outside1Dir, "file.txt")
		outside2Path      = path.Join(outside2Dir, "file.txt")
		outside1CheckPath = path.Join(targetDir, "a", "file.txt")
		outside2CheckPath = path.Join(sourceDir, "b", "file.txt")
	)
	if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil {
		t.Fatal(err)
	}
	if err := os.MkdirAll(path.Join(sourceDir, "b"), 0777); err != nil {
		t.Fatal(err)
	}
	if err := os.Mkdir(targetDir, 0777); err != nil {
		t.Fatal(err)
	}
	if err := os.Mkdir(outside1Dir, 0777); err != nil {
		t.Fatal(err)
	}
	if err := os.Mkdir(outside2Dir, 0777); err != nil {
		t.Fatal(err)
	}

	if err := createFile(outside1Path); err != nil {
		t.Fatal(err)
	}
	if err := createFile(outside2Path); err != nil {
		t.Fatal(err)
	}

	// mount the shared directory to a target
	if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := Unmount(targetDir); err != nil {
			t.Fatal(err)
		}
	}()

	// next, make the target private
	if err := MakePrivate(targetDir); err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := Unmount(targetDir); err != nil {
			t.Fatal(err)
		}
	}()

	// mount in an outside path to a mounted path inside the _source_
	if err := Mount(outside1Dir, path.Join(sourceDir, "a"), "none", "bind,rw"); err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := Unmount(path.Join(sourceDir, "a")); err != nil {
			t.Fatal(err)
		}
	}()

	// check that this file _does_not_ show in the _target_
	if _, err := os.Stat(outside1CheckPath); err != nil && !os.IsNotExist(err) {
		t.Fatal(err)
	} else if err == nil {
		t.Fatalf("%q should not be visible, but is", outside1CheckPath)
	}

	// next mount outside2Dir into the _target_
	if err := Mount(outside2Dir, path.Join(targetDir, "b"), "none", "bind,rw"); err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := Unmount(path.Join(targetDir, "b")); err != nil {
			t.Fatal(err)
		}
	}()

	// check that this file _does_not_ show in the _source_
	if _, err := os.Stat(outside2CheckPath); err != nil && !os.IsNotExist(err) {
		t.Fatal(err)
	} else if err == nil {
		t.Fatalf("%q should not be visible, but is", outside2CheckPath)
	}
}

// Testing that when a target is a shared mount,
// then child mounts propagate to the source
func TestSubtreeShared(t *testing.T) {
	tmp := path.Join(os.TempDir(), "mount-tests")
	if err := os.MkdirAll(tmp, 0777); err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmp)

	var (
		sourceDir  = path.Join(tmp, "source")
		targetDir  = path.Join(tmp, "target")
		outsideDir = path.Join(tmp, "outside")

		outsidePath     = path.Join(outsideDir, "file.txt")
		sourceCheckPath = path.Join(sourceDir, "a", "file.txt")
	)

	if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil {
		t.Fatal(err)
	}
	if err := os.Mkdir(targetDir, 0777); err != nil {
		t.Fatal(err)
	}
	if err := os.Mkdir(outsideDir, 0777); err != nil {
		t.Fatal(err)
	}

	if err := createFile(outsidePath); err != nil {
		t.Fatal(err)
	}

	// mount the source as shared
	if err := MakeShared(sourceDir); err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := Unmount(sourceDir); err != nil {
			t.Fatal(err)
		}
	}()

	// mount the shared directory to a target
	if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := Unmount(targetDir); err != nil {
			t.Fatal(err)
		}
	}()

	// mount in an outside path to a mounted path inside the target
	if err := Mount(outsideDir, path.Join(targetDir, "a"), "none", "bind,rw"); err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := Unmount(path.Join(targetDir, "a")); err != nil {
			t.Fatal(err)
		}
	}()

	// NOW, check that the file from the outside directory is available in the source directory
	if _, err := os.Stat(sourceCheckPath); err != nil {
		t.Fatal(err)
	}
}

// testing that mounts to a shared source show up in the slave target,
// and that mounts into a slave target do _not_ show up in the shared source
func TestSubtreeSharedSlave(t *testing.T) {
	tmp := path.Join(os.TempDir(), "mount-tests")
	if err := os.MkdirAll(tmp, 0777); err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmp)

	var (
		sourceDir   = path.Join(tmp, "source")
		targetDir   = path.Join(tmp, "target")
		outside1Dir = path.Join(tmp, "outside1")
		outside2Dir = path.Join(tmp, "outside2")

		outside1Path      = path.Join(outside1Dir, "file.txt")
		outside2Path      = path.Join(outside2Dir, "file.txt")
		outside1CheckPath = path.Join(targetDir, "a", "file.txt")
		outside2CheckPath = path.Join(sourceDir, "b", "file.txt")
	)
	if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil {
		t.Fatal(err)
	}
	if err := os.MkdirAll(path.Join(sourceDir, "b"), 0777); err != nil {
		t.Fatal(err)
	}
	if err := os.Mkdir(targetDir, 0777); err != nil {
		t.Fatal(err)
	}
	if err := os.Mkdir(outside1Dir, 0777); err != nil {
		t.Fatal(err)
	}
	if err := os.Mkdir(outside2Dir, 0777); err != nil {
		t.Fatal(err)
	}

	if err := createFile(outside1Path); err != nil {
		t.Fatal(err)
	}
	if err := createFile(outside2Path); err != nil {
		t.Fatal(err)
	}

	// mount the source as shared
	if err := MakeShared(sourceDir); err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := Unmount(sourceDir); err != nil {
			t.Fatal(err)
		}
	}()

	// mount the shared directory to a target
	if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := Unmount(targetDir); err != nil {
			t.Fatal(err)
		}
	}()

	// next, make the target slave
	if err := MakeSlave(targetDir); err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := Unmount(targetDir); err != nil {
			t.Fatal(err)
		}
	}()

	// mount in an outside path to a mounted path inside the _source_
	if err := Mount(outside1Dir, path.Join(sourceDir, "a"), "none", "bind,rw"); err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := Unmount(path.Join(sourceDir, "a")); err != nil {
			t.Fatal(err)
		}
	}()

	// check that this file _does_ show in the _target_
	if _, err := os.Stat(outside1CheckPath); err != nil {
		t.Fatal(err)
	}

	// next mount outside2Dir into the _target_
	if err := Mount(outside2Dir, path.Join(targetDir, "b"), "none", "bind,rw"); err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := Unmount(path.Join(targetDir, "b")); err != nil {
			t.Fatal(err)
		}
	}()

	// check that this file _does_not_ show in the _source_
	if _, err := os.Stat(outside2CheckPath); err != nil && !os.IsNotExist(err) {
		t.Fatal(err)
	} else if err == nil {
		t.Fatalf("%q should not be visible, but is", outside2CheckPath)
	}
}

func TestSubtreeUnbindable(t *testing.T) {
|
||||
tmp := path.Join(os.TempDir(), "mount-tests")
|
||||
if err := os.MkdirAll(tmp, 0777); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
var (
|
||||
sourceDir = path.Join(tmp, "source")
|
||||
targetDir = path.Join(tmp, "target")
|
||||
)
|
||||
if err := os.MkdirAll(sourceDir, 0777); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := os.MkdirAll(targetDir, 0777); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// next, make the source unbindable
|
||||
if err := MakeUnbindable(sourceDir); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer func() {
|
||||
if err := Unmount(sourceDir); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}()
|
||||
|
||||
// then attempt to mount it to the target; it should fail
|
||||
if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil && err != syscall.EINVAL {
|
||||
t.Fatal(err)
|
||||
} else if err == nil {
|
||||
t.Fatalf("%q should not have been bindable", sourceDir)
|
||||
}
|
||||
defer func() {
|
||||
if err := Unmount(targetDir); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func createFile(path string) error {
|
||||
f, err := os.Create(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.WriteString("hello world!")
|
||||
return f.Close()
|
||||
}
|
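The tests above drive the package's propagation helpers (MakeShared, MakeSlave, MakeUnbindable, Mount, Unmount). A minimal sketch of the same flow outside the test suite, assuming the vendored import path github.com/docker/docker/pkg/mount, a Linux host with sufficient privileges, and illustrative directories that already exist:

package main

import (
	"log"

	"github.com/docker/docker/pkg/mount"
)

func main() {
	source := "/tmp/mount-demo/source" // illustrative paths; both must already exist
	target := "/tmp/mount-demo/target"

	// Turn the source into a shared mount point, so mounts created under it
	// propagate to bind mounts taken from it (as TestSubtreeShared checks).
	if err := mount.MakeShared(source); err != nil {
		log.Fatal(err)
	}
	defer mount.Unmount(source)

	// Bind the shared source onto the target.
	if err := mount.Mount(source, target, "none", "bind,rw"); err != nil {
		log.Fatal(err)
	}
	defer mount.Unmount(target)

	// Downgrade the target to a slave: it still receives mounts made under
	// the source, but mounts made under the target no longer propagate back
	// (the asymmetry TestSubtreeSharedSlave checks).
	if err := mount.MakeSlave(target); err != nil {
		log.Fatal(err)
	}

	log.Println("shared source bound to slave target")
}
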
191
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.APACHE
generated
vendored
Normal file
|
@@ -0,0 +1,191 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
Copyright 2014-2015 Docker, Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
27
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.BSD
generated
vendored
Normal file
|
@@ -0,0 +1,27 @@
|
|||
Copyright (c) 2014-2015 The Docker & Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
5
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/README.md
generated
vendored
Normal file
|
@@ -0,0 +1,5 @@
|
|||
Package symlink implements EvalSymlinksInScope which is an extension of filepath.EvalSymlinks
|
||||
from the [Go standard library](https://golang.org/pkg/path/filepath).
|
||||
|
||||
The code from filepath.EvalSymlinks has been adapted in fs.go.
|
||||
Please read the LICENSE.BSD file that governs fs.go and LICENSE.APACHE for fs_test.go.
|
131
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/fs.go
generated
vendored
Normal file
|
@@ -0,0 +1,131 @@
|
|||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.BSD file.
|
||||
|
||||
// This code is a modified version of path/filepath/symlink.go from the Go standard library.
|
||||
|
||||
package symlink
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// FollowSymlinkInScope is a wrapper around evalSymlinksInScope that returns an absolute path
|
||||
func FollowSymlinkInScope(path, root string) (string, error) {
|
||||
path, err := filepath.Abs(path)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
root, err = filepath.Abs(root)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return evalSymlinksInScope(path, root)
|
||||
}
|
||||
|
||||
// evalSymlinksInScope will evaluate symlinks in `path` within a scope `root` and return
|
||||
// a result guaranteed to be contained within the scope `root`, at the time of the call.
|
||||
// Symlinks in `root` are not evaluated and left as-is.
|
||||
// Errors encountered while attempting to evaluate symlinks in path will be returned.
|
||||
// Non-existing paths are valid and do not constitute an error.
|
||||
// `path` has to contain `root` as a prefix, or else an error will be returned.
|
||||
// Trying to break out from `root` does not constitute an error.
|
||||
//
|
||||
// Example:
|
||||
// If /foo/bar -> /outside,
|
||||
// FollowSymlinkInScope("/foo/bar", "/foo") == "/foo/outside" instead of "/oustide"
|
||||
//
|
||||
// IMPORTANT: it is the caller's responsibility to call evalSymlinksInScope *after* relevant symlinks
|
||||
// are created, and not to subsequently create additional symlinks that could potentially make a
|
||||
// previously-safe path unsafe. Example: if /foo/bar does not exist, evalSymlinksInScope("/foo/bar", "/foo")
|
||||
// would return "/foo/bar". If one makes /foo/bar a symlink to /baz subsequently, then "/foo/bar" should
|
||||
// no longer be considered safely contained in "/foo".
|
||||
func evalSymlinksInScope(path, root string) (string, error) {
|
||||
root = filepath.Clean(root)
|
||||
if path == root {
|
||||
return path, nil
|
||||
}
|
||||
if !strings.HasPrefix(path, root) {
|
||||
return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root)
|
||||
}
|
||||
const maxIter = 255
|
||||
originalPath := path
|
||||
// given root of "/a" and path of "/a/b/../../c" we want path to be "/b/../../c"
|
||||
path = path[len(root):]
|
||||
if root == string(filepath.Separator) {
|
||||
path = string(filepath.Separator) + path
|
||||
}
|
||||
if !strings.HasPrefix(path, string(filepath.Separator)) {
|
||||
return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root)
|
||||
}
|
||||
path = filepath.Clean(path)
|
||||
// consume path by taking each frontmost path element,
|
||||
// expanding it if it's a symlink, and appending it to b
|
||||
var b bytes.Buffer
|
||||
// b here will always be considered to be the "current absolute path inside
|
||||
// root" when we append paths to it, we also append a slash and use
|
||||
// filepath.Clean after the loop to trim the trailing slash
|
||||
for n := 0; path != ""; n++ {
|
||||
if n > maxIter {
|
||||
return "", errors.New("evalSymlinksInScope: too many links in " + originalPath)
|
||||
}
|
||||
|
||||
// find next path component, p
|
||||
i := strings.IndexRune(path, filepath.Separator)
|
||||
var p string
|
||||
if i == -1 {
|
||||
p, path = path, ""
|
||||
} else {
|
||||
p, path = path[:i], path[i+1:]
|
||||
}
|
||||
|
||||
if p == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
// this takes a b.String() like "b/../" and a p like "c" and turns it
|
||||
// into "/b/../c" which then gets filepath.Cleaned into "/c" and then
|
||||
// root gets prepended and we Clean again (to remove any trailing slash
|
||||
// if the first Clean gave us just "/")
|
||||
cleanP := filepath.Clean(string(filepath.Separator) + b.String() + p)
|
||||
if cleanP == string(filepath.Separator) {
|
||||
// never Lstat "/" itself
|
||||
b.Reset()
|
||||
continue
|
||||
}
|
||||
fullP := filepath.Clean(root + cleanP)
|
||||
|
||||
fi, err := os.Lstat(fullP)
|
||||
if os.IsNotExist(err) {
|
||||
// if p does not exist, accept it
|
||||
b.WriteString(p)
|
||||
b.WriteRune(filepath.Separator)
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if fi.Mode()&os.ModeSymlink == 0 {
|
||||
b.WriteString(p + string(filepath.Separator))
|
||||
continue
|
||||
}
|
||||
|
||||
// it's a symlink, put it at the front of path
|
||||
dest, err := os.Readlink(fullP)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if filepath.IsAbs(dest) {
|
||||
b.Reset()
|
||||
}
|
||||
path = dest + string(filepath.Separator) + path
|
||||
}
|
||||
|
||||
// see note above on "fullP := ..." for why this is double-cleaned and
|
||||
// what's happening here
|
||||
return filepath.Clean(root + filepath.Clean(string(filepath.Separator)+b.String())), nil
|
||||
}
|
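As a rough usage sketch of FollowSymlinkInScope, mirroring the /foo/bar -> /outside example in the comment above; the import path and the /tmp/scope-demo layout are only assumptions for illustration:

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/symlink"
)

func main() {
	// Assume /tmp/scope-demo/foo/bar is a symlink pointing at /outside.
	// Resolving it inside the scope /tmp/scope-demo keeps the result under
	// the scope rather than letting it escape to /outside.
	resolved, err := symlink.FollowSymlinkInScope("/tmp/scope-demo/foo/bar", "/tmp/scope-demo")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resolved) // /tmp/scope-demo/outside
}
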
402
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/fs_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,402 @@
|
|||
// Licensed under the Apache License, Version 2.0; See LICENSE.APACHE
|
||||
|
||||
package symlink
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type dirOrLink struct {
|
||||
path string
|
||||
target string
|
||||
}
|
||||
|
||||
func makeFs(tmpdir string, fs []dirOrLink) error {
|
||||
for _, s := range fs {
|
||||
s.path = filepath.Join(tmpdir, s.path)
|
||||
if s.target == "" {
|
||||
os.MkdirAll(s.path, 0755)
|
||||
continue
|
||||
}
|
||||
if err := os.MkdirAll(filepath.Dir(s.path), 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := os.Symlink(s.target, s.path); err != nil && !os.IsExist(err) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func testSymlink(tmpdir, path, expected, scope string) error {
|
||||
rewrite, err := FollowSymlinkInScope(filepath.Join(tmpdir, path), filepath.Join(tmpdir, scope))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
expected, err = filepath.Abs(filepath.Join(tmpdir, expected))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if expected != rewrite {
|
||||
return fmt.Errorf("Expected %q got %q", expected, rewrite)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestFollowSymlinkAbsolute(t *testing.T) {
|
||||
tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkAbsolute")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpdir)
|
||||
if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/d", target: "/b"}}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := testSymlink(tmpdir, "testdata/fs/a/d/c/data", "testdata/b/c/data", "testdata"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFollowSymlinkRelativePath(t *testing.T) {
|
||||
tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpdir)
|
||||
if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/i", target: "a"}}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := testSymlink(tmpdir, "testdata/fs/i", "testdata/fs/a", "testdata"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFollowSymlinkSkipSymlinksOutsideScope(t *testing.T) {
|
||||
tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSkipSymlinksOutsideScope")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpdir)
|
||||
if err := makeFs(tmpdir, []dirOrLink{
|
||||
{path: "linkdir", target: "realdir"},
|
||||
{path: "linkdir/foo/bar"},
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := testSymlink(tmpdir, "linkdir/foo/bar", "linkdir/foo/bar", "linkdir/foo"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFollowSymlinkInvalidScopePathPair(t *testing.T) {
|
||||
if _, err := FollowSymlinkInScope("toto", "testdata"); err == nil {
|
||||
t.Fatal("expected an error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFollowSymlinkLastLink(t *testing.T) {
|
||||
tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkLastLink")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpdir)
|
||||
if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/d", target: "/b"}}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := testSymlink(tmpdir, "testdata/fs/a/d", "testdata/b", "testdata"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFollowSymlinkRelativeLinkChangeScope(t *testing.T) {
|
||||
tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChangeScope")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpdir)
|
||||
if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/e", target: "../b"}}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := testSymlink(tmpdir, "testdata/fs/a/e/c/data", "testdata/fs/b/c/data", "testdata"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// avoid letting symlink e lead us to ../b
|
||||
// normalize to the "testdata/fs/a"
|
||||
if err := testSymlink(tmpdir, "testdata/fs/a/e", "testdata/fs/a/b", "testdata/fs/a"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFollowSymlinkDeepRelativeLinkChangeScope(t *testing.T) {
|
||||
tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDeepRelativeLinkChangeScope")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpdir)
|
||||
|
||||
if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/f", target: "../../../../test"}}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// avoid letting symlink f lead us out of the "testdata" scope
|
||||
// we don't normalize because symlink f is in scope and there is no
|
||||
// information leak
|
||||
if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/test", "testdata"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// avoid letting symlink f lead us out of the "testdata/fs" scope
|
||||
// we don't normalize because symlink f is in scope and there is no
|
||||
// information leak
|
||||
if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/fs/test", "testdata/fs"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFollowSymlinkRelativeLinkChain(t *testing.T) {
|
||||
tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChain")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpdir)
|
||||
|
||||
// avoid letting symlink g (pointed at by symlink h) take us out of scope
|
||||
// TODO: we should probably normalize to scope here because ../[....]/root
|
||||
// is out of scope and we leak information
|
||||
if err := makeFs(tmpdir, []dirOrLink{
|
||||
{path: "testdata/fs/b/h", target: "../g"},
|
||||
{path: "testdata/fs/g", target: "../../../../../../../../../../../../root"},
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := testSymlink(tmpdir, "testdata/fs/b/h", "testdata/root", "testdata"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFollowSymlinkBreakoutPath(t *testing.T) {
|
||||
tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkBreakoutPath")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpdir)
|
||||
|
||||
// avoid letting symlink -> ../directory/file escape from scope
|
||||
// normalize to "testdata/fs/j"
|
||||
if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/j/k", target: "../i/a"}}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := testSymlink(tmpdir, "testdata/fs/j/k", "testdata/fs/j/i/a", "testdata/fs/j"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFollowSymlinkToRoot(t *testing.T) {
|
||||
tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkToRoot")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpdir)
|
||||
|
||||
// make sure we don't allow escaping to /
|
||||
// normalize to dir
|
||||
if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/"}}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := testSymlink(tmpdir, "foo", "", ""); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFollowSymlinkSlashDotdot(t *testing.T) {
|
||||
tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSlashDotdot")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpdir)
|
||||
tmpdir = filepath.Join(tmpdir, "dir", "subdir")
|
||||
|
||||
// make sure we don't allow escaping to /
|
||||
// normalize to dir
|
||||
if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/../../"}}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := testSymlink(tmpdir, "foo", "", ""); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFollowSymlinkDotdot(t *testing.T) {
|
||||
tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDotdot")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpdir)
|
||||
tmpdir = filepath.Join(tmpdir, "dir", "subdir")
|
||||
|
||||
// make sure we stay in scope without leaking information
|
||||
// this also checks for escaping to /
|
||||
// normalize to dir
|
||||
if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "../../"}}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := testSymlink(tmpdir, "foo", "", ""); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFollowSymlinkRelativePath2(t *testing.T) {
|
||||
tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath2")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpdir)
|
||||
|
||||
if err := makeFs(tmpdir, []dirOrLink{{path: "bar/foo", target: "baz/target"}}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := testSymlink(tmpdir, "bar/foo", "bar/baz/target", ""); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFollowSymlinkScopeLink(t *testing.T) {
|
||||
tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkScopeLink")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpdir)
|
||||
|
||||
if err := makeFs(tmpdir, []dirOrLink{
|
||||
{path: "root2"},
|
||||
{path: "root", target: "root2"},
|
||||
{path: "root2/foo", target: "../bar"},
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := testSymlink(tmpdir, "root/foo", "root/bar", "root"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFollowSymlinkRootScope(t *testing.T) {
|
||||
tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRootScope")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpdir)
|
||||
|
||||
expected, err := filepath.EvalSymlinks(tmpdir)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
rewrite, err := FollowSymlinkInScope(tmpdir, "/")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if rewrite != expected {
|
||||
t.Fatalf("expected %q got %q", expected, rewrite)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFollowSymlinkEmpty(t *testing.T) {
|
||||
res, err := FollowSymlinkInScope("", "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
wd, err := os.Getwd()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if res != wd {
|
||||
t.Fatalf("expected %q got %q", wd, res)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFollowSymlinkCircular(t *testing.T) {
|
||||
tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkCircular")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpdir)
|
||||
|
||||
if err := makeFs(tmpdir, []dirOrLink{{path: "root/foo", target: "foo"}}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := testSymlink(tmpdir, "root/foo", "", "root"); err == nil {
|
||||
t.Fatal("expected an error for foo -> foo")
|
||||
}
|
||||
|
||||
if err := makeFs(tmpdir, []dirOrLink{
|
||||
{path: "root/bar", target: "baz"},
|
||||
{path: "root/baz", target: "../bak"},
|
||||
{path: "root/bak", target: "/bar"},
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := testSymlink(tmpdir, "root/foo", "", "root"); err == nil {
|
||||
t.Fatal("expected an error for bar -> baz -> bak -> bar")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFollowSymlinkComplexChainWithTargetPathsContainingLinks(t *testing.T) {
|
||||
tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkComplexChainWithTargetPathsContainingLinks")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpdir)
|
||||
|
||||
if err := makeFs(tmpdir, []dirOrLink{
|
||||
{path: "root2"},
|
||||
{path: "root", target: "root2"},
|
||||
{path: "root/a", target: "r/s"},
|
||||
{path: "root/r", target: "../root/t"},
|
||||
{path: "root/root/t/s/b", target: "/../u"},
|
||||
{path: "root/u/c", target: "."},
|
||||
{path: "root/u/x/y", target: "../v"},
|
||||
{path: "root/u/v", target: "/../w"},
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := testSymlink(tmpdir, "root/a/b/c/x/y/z", "root/w/z", "root"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFollowSymlinkBreakoutNonExistent(t *testing.T) {
|
||||
tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkBreakoutNonExistent")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpdir)
|
||||
|
||||
if err := makeFs(tmpdir, []dirOrLink{
|
||||
{path: "root/slash", target: "/"},
|
||||
{path: "root/sym", target: "/idontexist/../slash"},
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := testSymlink(tmpdir, "root/sym/file", "root/file", "root"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFollowSymlinkNoLexicalCleaning(t *testing.T) {
|
||||
tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkNoLexicalCleaning")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpdir)
|
||||
|
||||
if err := makeFs(tmpdir, []dirOrLink{
|
||||
{path: "root/sym", target: "/foo/bar"},
|
||||
{path: "root/hello", target: "/sym/../baz"},
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := testSymlink(tmpdir, "root/hello", "root/foo/baz", "root"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
31
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration.go
generated
vendored
Normal file
|
@@ -0,0 +1,31 @@
|
|||
package units
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// HumanDuration returns a human-readable approximation of a duration
|
||||
// (eg. "About a minute", "4 hours ago", etc.)
|
||||
func HumanDuration(d time.Duration) string {
|
||||
if seconds := int(d.Seconds()); seconds < 1 {
|
||||
return "Less than a second"
|
||||
} else if seconds < 60 {
|
||||
return fmt.Sprintf("%d seconds", seconds)
|
||||
} else if minutes := int(d.Minutes()); minutes == 1 {
|
||||
return "About a minute"
|
||||
} else if minutes < 60 {
|
||||
return fmt.Sprintf("%d minutes", minutes)
|
||||
} else if hours := int(d.Hours()); hours == 1 {
|
||||
return "About an hour"
|
||||
} else if hours < 48 {
|
||||
return fmt.Sprintf("%d hours", hours)
|
||||
} else if hours < 24*7*2 {
|
||||
return fmt.Sprintf("%d days", hours/24)
|
||||
} else if hours < 24*30*3 {
|
||||
return fmt.Sprintf("%d weeks", hours/24/7)
|
||||
} else if hours < 24*365*2 {
|
||||
return fmt.Sprintf("%d months", hours/24/30)
|
||||
}
|
||||
return fmt.Sprintf("%d years", int(d.Hours())/24/365)
|
||||
}
|
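A small illustrative sketch of the thresholds HumanDuration applies (the test file below enumerates them exhaustively); the import path is an assumption, and the expected strings follow from the branches above:

package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/pkg/units"
)

func main() {
	// Each value exercises a different branch of HumanDuration.
	fmt.Println(units.HumanDuration(500 * time.Millisecond)) // Less than a second
	fmt.Println(units.HumanDuration(47 * time.Second))       // 47 seconds
	fmt.Println(units.HumanDuration(90 * time.Minute))       // About an hour
	fmt.Println(units.HumanDuration(36 * time.Hour))         // 36 hours (days only start at 48 hours)
	fmt.Println(units.HumanDuration(10 * 24 * time.Hour))    // 10 days
}
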
46
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,46 @@
|
|||
package units
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestHumanDuration(t *testing.T) {
|
||||
// Useful duration abstractions
|
||||
day := 24 * time.Hour
|
||||
week := 7 * day
|
||||
month := 30 * day
|
||||
year := 365 * day
|
||||
|
||||
assertEquals(t, "Less than a second", HumanDuration(450*time.Millisecond))
|
||||
assertEquals(t, "47 seconds", HumanDuration(47*time.Second))
|
||||
assertEquals(t, "About a minute", HumanDuration(1*time.Minute))
|
||||
assertEquals(t, "3 minutes", HumanDuration(3*time.Minute))
|
||||
assertEquals(t, "35 minutes", HumanDuration(35*time.Minute))
|
||||
assertEquals(t, "35 minutes", HumanDuration(35*time.Minute+40*time.Second))
|
||||
assertEquals(t, "About an hour", HumanDuration(1*time.Hour))
|
||||
assertEquals(t, "About an hour", HumanDuration(1*time.Hour+45*time.Minute))
|
||||
assertEquals(t, "3 hours", HumanDuration(3*time.Hour))
|
||||
assertEquals(t, "3 hours", HumanDuration(3*time.Hour+59*time.Minute))
|
||||
assertEquals(t, "4 hours", HumanDuration(3*time.Hour+60*time.Minute))
|
||||
assertEquals(t, "24 hours", HumanDuration(24*time.Hour))
|
||||
assertEquals(t, "36 hours", HumanDuration(1*day+12*time.Hour))
|
||||
assertEquals(t, "2 days", HumanDuration(2*day))
|
||||
assertEquals(t, "7 days", HumanDuration(7*day))
|
||||
assertEquals(t, "13 days", HumanDuration(13*day+5*time.Hour))
|
||||
assertEquals(t, "2 weeks", HumanDuration(2*week))
|
||||
assertEquals(t, "2 weeks", HumanDuration(2*week+4*day))
|
||||
assertEquals(t, "3 weeks", HumanDuration(3*week))
|
||||
assertEquals(t, "4 weeks", HumanDuration(4*week))
|
||||
assertEquals(t, "4 weeks", HumanDuration(4*week+3*day))
|
||||
assertEquals(t, "4 weeks", HumanDuration(1*month))
|
||||
assertEquals(t, "6 weeks", HumanDuration(1*month+2*week))
|
||||
assertEquals(t, "8 weeks", HumanDuration(2*month))
|
||||
assertEquals(t, "3 months", HumanDuration(3*month+1*week))
|
||||
assertEquals(t, "5 months", HumanDuration(5*month+2*week))
|
||||
assertEquals(t, "13 months", HumanDuration(13*month))
|
||||
assertEquals(t, "23 months", HumanDuration(23*month))
|
||||
assertEquals(t, "24 months", HumanDuration(24*month))
|
||||
assertEquals(t, "2 years", HumanDuration(24*month+2*week))
|
||||
assertEquals(t, "3 years", HumanDuration(3*year+2*month))
|
||||
}
|
93
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size.go
generated
vendored
Normal file
|
@@ -0,0 +1,93 @@
|
|||
package units
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// See: http://en.wikipedia.org/wiki/Binary_prefix
|
||||
const (
|
||||
// Decimal
|
||||
|
||||
KB = 1000
|
||||
MB = 1000 * KB
|
||||
GB = 1000 * MB
|
||||
TB = 1000 * GB
|
||||
PB = 1000 * TB
|
||||
|
||||
// Binary
|
||||
|
||||
KiB = 1024
|
||||
MiB = 1024 * KiB
|
||||
GiB = 1024 * MiB
|
||||
TiB = 1024 * GiB
|
||||
PiB = 1024 * TiB
|
||||
)
|
||||
|
||||
type unitMap map[string]int64
|
||||
|
||||
var (
|
||||
decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB}
|
||||
binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB}
|
||||
sizeRegex = regexp.MustCompile(`^(\d+)([kKmMgGtTpP])?[bB]?$`)
|
||||
)
|
||||
|
||||
var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
|
||||
var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
|
||||
|
||||
// CustomSize returns a human-readable approximation of a size
|
||||
// using a custom format
|
||||
func CustomSize(format string, size float64, base float64, _map []string) string {
|
||||
i := 0
|
||||
for size >= base {
|
||||
size = size / base
|
||||
i++
|
||||
}
|
||||
return fmt.Sprintf(format, size, _map[i])
|
||||
}
|
||||
|
||||
// HumanSize returns a human-readable approximation of a size
|
||||
// using SI standard (eg. "44kB", "17MB")
|
||||
func HumanSize(size float64) string {
|
||||
return CustomSize("%.4g %s", float64(size), 1000.0, decimapAbbrs)
|
||||
}
|
||||
|
||||
// BytesSize returns a human-readable size in binary format (eg. "44 KiB", "17 MiB")
func BytesSize(size float64) string {
|
||||
return CustomSize("%.4g %s", size, 1024.0, binaryAbbrs)
|
||||
}
|
||||
|
||||
// FromHumanSize returns an integer from a human-readable specification of a
|
||||
// size using SI standard (eg. "44kB", "17MB")
|
||||
func FromHumanSize(size string) (int64, error) {
|
||||
return parseSize(size, decimalMap)
|
||||
}
|
||||
|
||||
// RAMInBytes parses a human-readable string representing an amount of RAM
|
||||
// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and
|
||||
// returns the number of bytes, or -1 if the string is unparseable.
|
||||
// Units are case-insensitive, and the 'b' suffix is optional.
|
||||
func RAMInBytes(size string) (int64, error) {
|
||||
return parseSize(size, binaryMap)
|
||||
}
|
||||
|
||||
// Parses the human-readable size string into the amount it represents
|
||||
func parseSize(sizeStr string, uMap unitMap) (int64, error) {
|
||||
matches := sizeRegex.FindStringSubmatch(sizeStr)
|
||||
if len(matches) != 3 {
|
||||
return -1, fmt.Errorf("invalid size: '%s'", sizeStr)
|
||||
}
|
||||
|
||||
size, err := strconv.ParseInt(matches[1], 10, 0)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
unitPrefix := strings.ToLower(matches[2])
|
||||
if mul, ok := uMap[unitPrefix]; ok {
|
||||
size *= mul
|
||||
}
|
||||
|
||||
return size, nil
|
||||
}
|
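A short sketch contrasting the decimal and binary helpers defined above; the import path is an assumption, and the expected values follow from the unit maps and the %.4g format (they match the assertions in the test file below):

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/units"
)

func main() {
	// The same byte count rendered with SI (decimal) and binary prefixes.
	fmt.Println(units.HumanSize(1048576)) // 1.049 MB
	fmt.Println(units.BytesSize(1048576)) // 1 MiB

	// Parsing goes the other way: FromHumanSize applies decimal multipliers,
	// RAMInBytes applies binary ones; the trailing "b"/"B" is optional.
	n, err := units.FromHumanSize("32kb")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(n) // 32000

	m, err := units.RAMInBytes("32kb")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(m) // 32768
}
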
108
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,108 @@
|
|||
package units
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestBytesSize(t *testing.T) {
|
||||
assertEquals(t, "1 KiB", BytesSize(1024))
|
||||
assertEquals(t, "1 MiB", BytesSize(1024*1024))
|
||||
assertEquals(t, "1 MiB", BytesSize(1048576))
|
||||
assertEquals(t, "2 MiB", BytesSize(2*MiB))
|
||||
assertEquals(t, "3.42 GiB", BytesSize(3.42*GiB))
|
||||
assertEquals(t, "5.372 TiB", BytesSize(5.372*TiB))
|
||||
assertEquals(t, "2.22 PiB", BytesSize(2.22*PiB))
|
||||
}
|
||||
|
||||
func TestHumanSize(t *testing.T) {
|
||||
assertEquals(t, "1 kB", HumanSize(1000))
|
||||
assertEquals(t, "1.024 kB", HumanSize(1024))
|
||||
assertEquals(t, "1 MB", HumanSize(1000000))
|
||||
assertEquals(t, "1.049 MB", HumanSize(1048576))
|
||||
assertEquals(t, "2 MB", HumanSize(2*MB))
|
||||
assertEquals(t, "3.42 GB", HumanSize(float64(3.42*GB)))
|
||||
assertEquals(t, "5.372 TB", HumanSize(float64(5.372*TB)))
|
||||
assertEquals(t, "2.22 PB", HumanSize(float64(2.22*PB)))
|
||||
}
|
||||
|
||||
func TestFromHumanSize(t *testing.T) {
|
||||
assertSuccessEquals(t, 32, FromHumanSize, "32")
|
||||
assertSuccessEquals(t, 32, FromHumanSize, "32b")
|
||||
assertSuccessEquals(t, 32, FromHumanSize, "32B")
|
||||
assertSuccessEquals(t, 32*KB, FromHumanSize, "32k")
|
||||
assertSuccessEquals(t, 32*KB, FromHumanSize, "32K")
|
||||
assertSuccessEquals(t, 32*KB, FromHumanSize, "32kb")
|
||||
assertSuccessEquals(t, 32*KB, FromHumanSize, "32Kb")
|
||||
assertSuccessEquals(t, 32*MB, FromHumanSize, "32Mb")
|
||||
assertSuccessEquals(t, 32*GB, FromHumanSize, "32Gb")
|
||||
assertSuccessEquals(t, 32*TB, FromHumanSize, "32Tb")
|
||||
assertSuccessEquals(t, 32*PB, FromHumanSize, "32Pb")
|
||||
|
||||
assertError(t, FromHumanSize, "")
|
||||
assertError(t, FromHumanSize, "hello")
|
||||
assertError(t, FromHumanSize, "-32")
|
||||
assertError(t, FromHumanSize, "32.3")
|
||||
assertError(t, FromHumanSize, " 32 ")
|
||||
assertError(t, FromHumanSize, "32.3Kb")
|
||||
assertError(t, FromHumanSize, "32 mb")
|
||||
assertError(t, FromHumanSize, "32m b")
|
||||
assertError(t, FromHumanSize, "32bm")
|
||||
}
|
||||
|
||||
func TestRAMInBytes(t *testing.T) {
|
||||
assertSuccessEquals(t, 32, RAMInBytes, "32")
|
||||
assertSuccessEquals(t, 32, RAMInBytes, "32b")
|
||||
assertSuccessEquals(t, 32, RAMInBytes, "32B")
|
||||
assertSuccessEquals(t, 32*KiB, RAMInBytes, "32k")
|
||||
assertSuccessEquals(t, 32*KiB, RAMInBytes, "32K")
|
||||
assertSuccessEquals(t, 32*KiB, RAMInBytes, "32kb")
|
||||
assertSuccessEquals(t, 32*KiB, RAMInBytes, "32Kb")
|
||||
assertSuccessEquals(t, 32*MiB, RAMInBytes, "32Mb")
|
||||
assertSuccessEquals(t, 32*GiB, RAMInBytes, "32Gb")
|
||||
assertSuccessEquals(t, 32*TiB, RAMInBytes, "32Tb")
|
||||
assertSuccessEquals(t, 32*PiB, RAMInBytes, "32Pb")
|
||||
assertSuccessEquals(t, 32*PiB, RAMInBytes, "32PB")
|
||||
assertSuccessEquals(t, 32*PiB, RAMInBytes, "32P")
|
||||
|
||||
assertError(t, RAMInBytes, "")
|
||||
assertError(t, RAMInBytes, "hello")
|
||||
assertError(t, RAMInBytes, "-32")
|
||||
assertError(t, RAMInBytes, "32.3")
|
||||
assertError(t, RAMInBytes, " 32 ")
|
||||
assertError(t, RAMInBytes, "32.3Kb")
|
||||
assertError(t, RAMInBytes, "32 mb")
|
||||
assertError(t, RAMInBytes, "32m b")
|
||||
assertError(t, RAMInBytes, "32bm")
|
||||
}
|
||||
|
||||
func assertEquals(t *testing.T, expected, actual interface{}) {
|
||||
if expected != actual {
|
||||
t.Errorf("Expected '%v' but got '%v'", expected, actual)
|
||||
}
|
||||
}
|
||||
|
||||
// parseFn is a func type matching the parse function signatures, used as a testing abstraction
|
||||
type parseFn func(string) (int64, error)
|
||||
|
||||
// Define 'String()' for pretty-print
|
||||
func (fn parseFn) String() string {
|
||||
fnName := runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name()
|
||||
return fnName[strings.LastIndex(fnName, ".")+1:]
|
||||
}
|
||||
|
||||
func assertSuccessEquals(t *testing.T, expected int64, fn parseFn, arg string) {
|
||||
res, err := fn(arg)
|
||||
if err != nil || res != expected {
|
||||
t.Errorf("%s(\"%s\") -> expected '%d' but got '%d' with error '%v'", fn, arg, expected, res, err)
|
||||
}
|
||||
}
|
||||
|
||||
func assertError(t *testing.T, fn parseFn, arg string) {
|
||||
res, err := fn(arg)
|
||||
if err == nil && res != -1 {
|
||||
t.Errorf("%s(\"%s\") -> expected error but got '%d'", fn, arg, res)
|
||||
}
|
||||
}
|
43
libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/Makefile
generated
vendored
Normal file
|
@@ -0,0 +1,43 @@
|
|||
# Go support for Protocol Buffers - Google's data interchange format
|
||||
#
|
||||
# Copyright 2010 The Go Authors. All rights reserved.
|
||||
# https://github.com/golang/protobuf
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
install:
|
||||
go install
|
||||
|
||||
test: install generate-test-pbs
|
||||
go test
|
||||
|
||||
|
||||
generate-test-pbs:
|
||||
make install
|
||||
make -C testdata
|
||||
make -C proto3_proto
|
||||
make
|
2060
libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/all_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
197
libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone.go
generated
vendored
Normal file
|
@@ -0,0 +1,197 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Protocol buffer deep copy and merge.
|
||||
// TODO: MessageSet and RawMessage.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"log"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Clone returns a deep copy of a protocol buffer.
|
||||
func Clone(pb Message) Message {
|
||||
in := reflect.ValueOf(pb)
|
||||
if in.IsNil() {
|
||||
return pb
|
||||
}
|
||||
|
||||
out := reflect.New(in.Type().Elem())
|
||||
// out is empty so a merge is a deep copy.
|
||||
mergeStruct(out.Elem(), in.Elem())
|
||||
return out.Interface().(Message)
|
||||
}
|
||||
|
||||
// Merge merges src into dst.
|
||||
// Required and optional fields that are set in src will be set to that value in dst.
|
||||
// Elements of repeated fields will be appended.
|
||||
// Merge panics if src and dst are not the same type, or if dst is nil.
|
||||
func Merge(dst, src Message) {
|
||||
in := reflect.ValueOf(src)
|
||||
out := reflect.ValueOf(dst)
|
||||
if out.IsNil() {
|
||||
panic("proto: nil destination")
|
||||
}
|
||||
if in.Type() != out.Type() {
|
||||
// Explicit test prior to mergeStruct so that mistyped nils will fail
|
||||
panic("proto: type mismatch")
|
||||
}
|
||||
if in.IsNil() {
|
||||
// Merging nil into non-nil is a quiet no-op
|
||||
return
|
||||
}
|
||||
mergeStruct(out.Elem(), in.Elem())
|
||||
}
|
||||
|
||||
func mergeStruct(out, in reflect.Value) {
|
||||
for i := 0; i < in.NumField(); i++ {
|
||||
f := in.Type().Field(i)
|
||||
if strings.HasPrefix(f.Name, "XXX_") {
|
||||
continue
|
||||
}
|
||||
mergeAny(out.Field(i), in.Field(i))
|
||||
}
|
||||
|
||||
if emIn, ok := in.Addr().Interface().(extendableProto); ok {
|
||||
emOut := out.Addr().Interface().(extendableProto)
|
||||
mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap())
|
||||
}
|
||||
|
||||
uf := in.FieldByName("XXX_unrecognized")
|
||||
if !uf.IsValid() {
|
||||
return
|
||||
}
|
||||
uin := uf.Bytes()
|
||||
if len(uin) > 0 {
|
||||
out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
|
||||
}
|
||||
}
|
||||
|
||||
func mergeAny(out, in reflect.Value) {
|
||||
if in.Type() == protoMessageType {
|
||||
if !in.IsNil() {
|
||||
if out.IsNil() {
|
||||
out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
|
||||
} else {
|
||||
Merge(out.Interface().(Message), in.Interface().(Message))
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
switch in.Kind() {
|
||||
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
|
||||
reflect.String, reflect.Uint32, reflect.Uint64:
|
||||
out.Set(in)
|
||||
case reflect.Map:
|
||||
if in.Len() == 0 {
|
||||
return
|
||||
}
|
||||
if out.IsNil() {
|
||||
out.Set(reflect.MakeMap(in.Type()))
|
||||
}
|
||||
// For maps with value types of *T or []byte we need to deep copy each value.
|
||||
elemKind := in.Type().Elem().Kind()
|
||||
for _, key := range in.MapKeys() {
|
||||
var val reflect.Value
|
||||
switch elemKind {
|
||||
case reflect.Ptr:
|
||||
val = reflect.New(in.Type().Elem().Elem())
|
||||
mergeAny(val, in.MapIndex(key))
|
||||
case reflect.Slice:
|
||||
val = in.MapIndex(key)
|
||||
val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
|
||||
default:
|
||||
val = in.MapIndex(key)
|
||||
}
|
||||
out.SetMapIndex(key, val)
|
||||
}
|
||||
case reflect.Ptr:
|
||||
if in.IsNil() {
|
||||
return
|
||||
}
|
||||
if out.IsNil() {
|
||||
out.Set(reflect.New(in.Elem().Type()))
|
||||
}
|
||||
mergeAny(out.Elem(), in.Elem())
|
||||
case reflect.Slice:
|
||||
if in.IsNil() {
|
||||
return
|
||||
}
|
||||
if in.Type().Elem().Kind() == reflect.Uint8 {
|
||||
// []byte is a scalar bytes field, not a repeated field.
|
||||
// Make a deep copy.
|
||||
// Append to []byte{} instead of []byte(nil) so that we never end up
|
||||
// with a nil result.
|
||||
out.SetBytes(append([]byte{}, in.Bytes()...))
|
||||
return
|
||||
}
|
||||
n := in.Len()
|
||||
if out.IsNil() {
|
||||
out.Set(reflect.MakeSlice(in.Type(), 0, n))
|
||||
}
|
||||
switch in.Type().Elem().Kind() {
|
||||
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
|
||||
reflect.String, reflect.Uint32, reflect.Uint64:
|
||||
out.Set(reflect.AppendSlice(out, in))
|
||||
default:
|
||||
for i := 0; i < n; i++ {
|
||||
x := reflect.Indirect(reflect.New(in.Type().Elem()))
|
||||
mergeAny(x, in.Index(i))
|
||||
out.Set(reflect.Append(out, x))
|
||||
}
|
||||
}
|
||||
case reflect.Struct:
|
||||
mergeStruct(out, in)
|
||||
default:
|
||||
// unknown type, so not a protocol buffer
|
||||
log.Printf("proto: don't know how to copy %v", in)
|
||||
}
|
||||
}
|
||||
|
||||
func mergeExtension(out, in map[int32]Extension) {
|
||||
for extNum, eIn := range in {
|
||||
eOut := Extension{desc: eIn.desc}
|
||||
if eIn.value != nil {
|
||||
v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
|
||||
mergeAny(v, reflect.ValueOf(eIn.value))
|
||||
eOut.value = v.Interface()
|
||||
}
|
||||
if eIn.enc != nil {
|
||||
eOut.enc = make([]byte, len(eIn.enc))
|
||||
copy(eOut.enc, eIn.enc)
|
||||
}
|
||||
|
||||
out[extNum] = eOut
|
||||
}
|
||||
}
|
227 libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone_test.go generated vendored Normal file
@@ -0,0 +1,227 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
|
||||
pb "./testdata"
|
||||
)
|
||||
|
||||
var cloneTestMessage = &pb.MyMessage{
|
||||
Count: proto.Int32(42),
|
||||
Name: proto.String("Dave"),
|
||||
Pet: []string{"bunny", "kitty", "horsey"},
|
||||
Inner: &pb.InnerMessage{
|
||||
Host: proto.String("niles"),
|
||||
Port: proto.Int32(9099),
|
||||
Connected: proto.Bool(true),
|
||||
},
|
||||
Others: []*pb.OtherMessage{
|
||||
{
|
||||
Value: []byte("some bytes"),
|
||||
},
|
||||
},
|
||||
Somegroup: &pb.MyMessage_SomeGroup{
|
||||
GroupField: proto.Int32(6),
|
||||
},
|
||||
RepBytes: [][]byte{[]byte("sham"), []byte("wow")},
|
||||
}
|
||||
|
||||
func init() {
|
||||
ext := &pb.Ext{
|
||||
Data: proto.String("extension"),
|
||||
}
|
||||
if err := proto.SetExtension(cloneTestMessage, pb.E_Ext_More, ext); err != nil {
|
||||
panic("SetExtension: " + err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func TestClone(t *testing.T) {
|
||||
m := proto.Clone(cloneTestMessage).(*pb.MyMessage)
|
||||
if !proto.Equal(m, cloneTestMessage) {
|
||||
t.Errorf("Clone(%v) = %v", cloneTestMessage, m)
|
||||
}
|
||||
|
||||
// Verify it was a deep copy.
|
||||
*m.Inner.Port++
|
||||
if proto.Equal(m, cloneTestMessage) {
|
||||
t.Error("Mutating clone changed the original")
|
||||
}
|
||||
// Byte fields and repeated fields should be copied.
|
||||
if &m.Pet[0] == &cloneTestMessage.Pet[0] {
|
||||
t.Error("Pet: repeated field not copied")
|
||||
}
|
||||
if &m.Others[0] == &cloneTestMessage.Others[0] {
|
||||
t.Error("Others: repeated field not copied")
|
||||
}
|
||||
if &m.Others[0].Value[0] == &cloneTestMessage.Others[0].Value[0] {
|
||||
t.Error("Others[0].Value: bytes field not copied")
|
||||
}
|
||||
if &m.RepBytes[0] == &cloneTestMessage.RepBytes[0] {
|
||||
t.Error("RepBytes: repeated field not copied")
|
||||
}
|
||||
if &m.RepBytes[0][0] == &cloneTestMessage.RepBytes[0][0] {
|
||||
t.Error("RepBytes[0]: bytes field not copied")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCloneNil(t *testing.T) {
|
||||
var m *pb.MyMessage
|
||||
if c := proto.Clone(m); !proto.Equal(m, c) {
|
||||
t.Errorf("Clone(%v) = %v", m, c)
|
||||
}
|
||||
}
|
||||
|
||||
var mergeTests = []struct {
|
||||
src, dst, want proto.Message
|
||||
}{
|
||||
{
|
||||
src: &pb.MyMessage{
|
||||
Count: proto.Int32(42),
|
||||
},
|
||||
dst: &pb.MyMessage{
|
||||
Name: proto.String("Dave"),
|
||||
},
|
||||
want: &pb.MyMessage{
|
||||
Count: proto.Int32(42),
|
||||
Name: proto.String("Dave"),
|
||||
},
|
||||
},
|
||||
{
|
||||
src: &pb.MyMessage{
|
||||
Inner: &pb.InnerMessage{
|
||||
Host: proto.String("hey"),
|
||||
Connected: proto.Bool(true),
|
||||
},
|
||||
Pet: []string{"horsey"},
|
||||
Others: []*pb.OtherMessage{
|
||||
{
|
||||
Value: []byte("some bytes"),
|
||||
},
|
||||
},
|
||||
},
|
||||
dst: &pb.MyMessage{
|
||||
Inner: &pb.InnerMessage{
|
||||
Host: proto.String("niles"),
|
||||
Port: proto.Int32(9099),
|
||||
},
|
||||
Pet: []string{"bunny", "kitty"},
|
||||
Others: []*pb.OtherMessage{
|
||||
{
|
||||
Key: proto.Int64(31415926535),
|
||||
},
|
||||
{
|
||||
// Explicitly test a src=nil field
|
||||
Inner: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
want: &pb.MyMessage{
|
||||
Inner: &pb.InnerMessage{
|
||||
Host: proto.String("hey"),
|
||||
Connected: proto.Bool(true),
|
||||
Port: proto.Int32(9099),
|
||||
},
|
||||
Pet: []string{"bunny", "kitty", "horsey"},
|
||||
Others: []*pb.OtherMessage{
|
||||
{
|
||||
Key: proto.Int64(31415926535),
|
||||
},
|
||||
{},
|
||||
{
|
||||
Value: []byte("some bytes"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
src: &pb.MyMessage{
|
||||
RepBytes: [][]byte{[]byte("wow")},
|
||||
},
|
||||
dst: &pb.MyMessage{
|
||||
Somegroup: &pb.MyMessage_SomeGroup{
|
||||
GroupField: proto.Int32(6),
|
||||
},
|
||||
RepBytes: [][]byte{[]byte("sham")},
|
||||
},
|
||||
want: &pb.MyMessage{
|
||||
Somegroup: &pb.MyMessage_SomeGroup{
|
||||
GroupField: proto.Int32(6),
|
||||
},
|
||||
RepBytes: [][]byte{[]byte("sham"), []byte("wow")},
|
||||
},
|
||||
},
|
||||
// Check that a scalar bytes field replaces rather than appends.
|
||||
{
|
||||
src: &pb.OtherMessage{Value: []byte("foo")},
|
||||
dst: &pb.OtherMessage{Value: []byte("bar")},
|
||||
want: &pb.OtherMessage{Value: []byte("foo")},
|
||||
},
|
||||
{
|
||||
src: &pb.MessageWithMap{
|
||||
NameMapping: map[int32]string{6: "Nigel"},
|
||||
MsgMapping: map[int64]*pb.FloatingPoint{
|
||||
0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)},
|
||||
},
|
||||
ByteMapping: map[bool][]byte{true: []byte("wowsa")},
|
||||
},
|
||||
dst: &pb.MessageWithMap{
|
||||
NameMapping: map[int32]string{
|
||||
6: "Bruce", // should be overwritten
|
||||
7: "Andrew",
|
||||
},
|
||||
},
|
||||
want: &pb.MessageWithMap{
|
||||
NameMapping: map[int32]string{
|
||||
6: "Nigel",
|
||||
7: "Andrew",
|
||||
},
|
||||
MsgMapping: map[int64]*pb.FloatingPoint{
|
||||
0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)},
|
||||
},
|
||||
ByteMapping: map[bool][]byte{true: []byte("wowsa")},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func TestMerge(t *testing.T) {
|
||||
for _, m := range mergeTests {
|
||||
got := proto.Clone(m.dst)
|
||||
proto.Merge(got, m.src)
|
||||
if !proto.Equal(got, m.want) {
|
||||
t.Errorf("Merge(%v, %v)\n got %v\nwant %v\n", m.dst, m.src, got, m.want)
|
||||
}
|
||||
}
|
||||
}
|
821 libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/decode.go generated vendored Normal file
@@ -0,0 +1,821 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Routines for decoding protocol buffer data to construct in-memory representations.
|
||||
*/
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// errOverflow is returned when an integer is too large to be represented.
|
||||
var errOverflow = errors.New("proto: integer overflow")
|
||||
|
||||
// The fundamental decoders that interpret bytes on the wire.
|
||||
// Those that take integer types all return uint64 and are
|
||||
// therefore of type valueDecoder.
|
||||
|
||||
// DecodeVarint reads a varint-encoded integer from the slice.
|
||||
// It returns the integer and the number of bytes consumed, or
|
||||
// zero if there is not enough.
|
||||
// This is the format for the
|
||||
// int32, int64, uint32, uint64, bool, and enum
|
||||
// protocol buffer types.
|
||||
func DecodeVarint(buf []byte) (x uint64, n int) {
|
||||
// x, n already 0
|
||||
for shift := uint(0); shift < 64; shift += 7 {
|
||||
if n >= len(buf) {
|
||||
return 0, 0
|
||||
}
|
||||
b := uint64(buf[n])
|
||||
n++
|
||||
x |= (b & 0x7F) << shift
|
||||
if (b & 0x80) == 0 {
|
||||
return x, n
|
||||
}
|
||||
}
|
||||
|
||||
// The number is too large to represent in a 64-bit value.
|
||||
return 0, 0
|
||||
}
|
||||
|
||||
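For a concrete feel of the wire format described above, here is a short standalone sketch using only the exported DecodeVarint defined here; the two bytes are the standard varint encoding of 300.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// 300 = 0b1_0010_1100: the low 7 bits (0x2C) come first with the
	// continuation bit set (0xAC); the remaining bits (0x02) follow.
	x, n := proto.DecodeVarint([]byte{0xAC, 0x02})
	fmt.Println(x, n) // 300 2
}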
// DecodeVarint reads a varint-encoded integer from the Buffer.
|
||||
// This is the format for the
|
||||
// int32, int64, uint32, uint64, bool, and enum
|
||||
// protocol buffer types.
|
||||
func (p *Buffer) DecodeVarint() (x uint64, err error) {
|
||||
// x, err already 0
|
||||
|
||||
i := p.index
|
||||
l := len(p.buf)
|
||||
|
||||
for shift := uint(0); shift < 64; shift += 7 {
|
||||
if i >= l {
|
||||
err = io.ErrUnexpectedEOF
|
||||
return
|
||||
}
|
||||
b := p.buf[i]
|
||||
i++
|
||||
x |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
p.index = i
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// The number is too large to represent in a 64-bit value.
|
||||
err = errOverflow
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeFixed64 reads a 64-bit integer from the Buffer.
|
||||
// This is the format for the
|
||||
// fixed64, sfixed64, and double protocol buffer types.
|
||||
func (p *Buffer) DecodeFixed64() (x uint64, err error) {
|
||||
// x, err already 0
|
||||
i := p.index + 8
|
||||
if i < 0 || i > len(p.buf) {
|
||||
err = io.ErrUnexpectedEOF
|
||||
return
|
||||
}
|
||||
p.index = i
|
||||
|
||||
x = uint64(p.buf[i-8])
|
||||
x |= uint64(p.buf[i-7]) << 8
|
||||
x |= uint64(p.buf[i-6]) << 16
|
||||
x |= uint64(p.buf[i-5]) << 24
|
||||
x |= uint64(p.buf[i-4]) << 32
|
||||
x |= uint64(p.buf[i-3]) << 40
|
||||
x |= uint64(p.buf[i-2]) << 48
|
||||
x |= uint64(p.buf[i-1]) << 56
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeFixed32 reads a 32-bit integer from the Buffer.
|
||||
// This is the format for the
|
||||
// fixed32, sfixed32, and float protocol buffer types.
|
||||
func (p *Buffer) DecodeFixed32() (x uint64, err error) {
|
||||
// x, err already 0
|
||||
i := p.index + 4
|
||||
if i < 0 || i > len(p.buf) {
|
||||
err = io.ErrUnexpectedEOF
|
||||
return
|
||||
}
|
||||
p.index = i
|
||||
|
||||
x = uint64(p.buf[i-4])
|
||||
x |= uint64(p.buf[i-3]) << 8
|
||||
x |= uint64(p.buf[i-2]) << 16
|
||||
x |= uint64(p.buf[i-1]) << 24
|
||||
return
|
||||
}
|
||||
|
||||
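A similar sketch for the fixed-width decoders just above: fixed32 and fixed64 values are stored little-endian, so 42 occupies only the first byte.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	p := proto.NewBuffer([]byte{0x2A, 0x00, 0x00, 0x00}) // little-endian fixed32 for 42
	x, err := p.DecodeFixed32()
	fmt.Println(x, err) // 42 <nil>
}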
// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
|
||||
// from the Buffer.
|
||||
// This is the format used for the sint64 protocol buffer type.
|
||||
func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
|
||||
x, err = p.DecodeVarint()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
|
||||
// from the Buffer.
|
||||
// This is the format used for the sint32 protocol buffer type.
|
||||
func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
|
||||
x, err = p.DecodeVarint()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
|
||||
return
|
||||
}
|
||||
|
||||
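The zigzag functions above undo the mapping used for sint32/sint64, where the signed values 0, -1, 1, -2, ... travel as the unsigned values 0, 1, 2, 3, ... A minimal sketch, assuming only the Buffer API defined in this file:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	p := proto.NewBuffer([]byte{0x05}) // varint 5 is the zigzag encoding of -3
	u, err := p.DecodeZigzag64()
	fmt.Println(int64(u), err) // -3 <nil>
}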
// These are not ValueDecoders: they produce an array of bytes or a string.
|
||||
// bytes, embedded messages
|
||||
|
||||
// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
|
||||
// This is the format used for the bytes protocol buffer
|
||||
// type and for embedded messages.
|
||||
func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
|
||||
n, err := p.DecodeVarint()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
nb := int(n)
|
||||
if nb < 0 {
|
||||
return nil, fmt.Errorf("proto: bad byte length %d", nb)
|
||||
}
|
||||
end := p.index + nb
|
||||
if end < p.index || end > len(p.buf) {
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
if !alloc {
|
||||
// todo: check if can get more uses of alloc=false
|
||||
buf = p.buf[p.index:end]
|
||||
p.index += nb
|
||||
return
|
||||
}
|
||||
|
||||
buf = make([]byte, nb)
|
||||
copy(buf, p.buf[p.index:])
|
||||
p.index += nb
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeStringBytes reads an encoded string from the Buffer.
|
||||
// This is the format used for the proto2 string type.
|
||||
func (p *Buffer) DecodeStringBytes() (s string, err error) {
|
||||
buf, err := p.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
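Both functions above consume the length-delimited form: a varint byte count followed by that many bytes. A tiny sketch:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	p := proto.NewBuffer([]byte{0x03, 'f', 'o', 'o'}) // length 3, then the payload
	s, err := p.DecodeStringBytes()
	fmt.Println(s, err) // foo <nil>
}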
// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
|
||||
// If the protocol buffer has extensions, and the field matches, add it as an extension.
|
||||
// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
|
||||
func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
|
||||
oi := o.index
|
||||
|
||||
err := o.skip(t, tag, wire)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !unrecField.IsValid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
ptr := structPointer_Bytes(base, unrecField)
|
||||
|
||||
// Add the skipped field to struct field
|
||||
obuf := o.buf
|
||||
|
||||
o.buf = *ptr
|
||||
o.EncodeVarint(uint64(tag<<3 | wire))
|
||||
*ptr = append(o.buf, obuf[oi:o.index]...)
|
||||
|
||||
o.buf = obuf
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
|
||||
func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
|
||||
|
||||
var u uint64
|
||||
var err error
|
||||
|
||||
switch wire {
|
||||
case WireVarint:
|
||||
_, err = o.DecodeVarint()
|
||||
case WireFixed64:
|
||||
_, err = o.DecodeFixed64()
|
||||
case WireBytes:
|
||||
_, err = o.DecodeRawBytes(false)
|
||||
case WireFixed32:
|
||||
_, err = o.DecodeFixed32()
|
||||
case WireStartGroup:
|
||||
for {
|
||||
u, err = o.DecodeVarint()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
fwire := int(u & 0x7)
|
||||
if fwire == WireEndGroup {
|
||||
break
|
||||
}
|
||||
ftag := int(u >> 3)
|
||||
err = o.skip(t, ftag, fwire)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Unmarshaler is the interface representing objects that can
|
||||
// unmarshal themselves. The method should reset the receiver before
|
||||
// decoding starts. The argument points to data that may be
|
||||
// overwritten, so implementations should not keep references to the
|
||||
// buffer.
|
||||
type Unmarshaler interface {
|
||||
Unmarshal([]byte) error
|
||||
}
|
||||
|
||||
// Unmarshal parses the protocol buffer representation in buf and places the
|
||||
// decoded result in pb. If the struct underlying pb does not match
|
||||
// the data in buf, the results can be unpredictable.
|
||||
//
|
||||
// Unmarshal resets pb before starting to unmarshal, so any
|
||||
// existing data in pb is always removed. Use UnmarshalMerge
|
||||
// to preserve and append to existing data.
|
||||
func Unmarshal(buf []byte, pb Message) error {
|
||||
pb.Reset()
|
||||
return UnmarshalMerge(buf, pb)
|
||||
}
|
||||
|
||||
// UnmarshalMerge parses the protocol buffer representation in buf and
|
||||
// writes the decoded result to pb. If the struct underlying pb does not match
|
||||
// the data in buf, the results can be unpredictable.
|
||||
//
|
||||
// UnmarshalMerge merges into existing data in pb.
|
||||
// Most code should use Unmarshal instead.
|
||||
func UnmarshalMerge(buf []byte, pb Message) error {
|
||||
// If the object can unmarshal itself, let it.
|
||||
if u, ok := pb.(Unmarshaler); ok {
|
||||
return u.Unmarshal(buf)
|
||||
}
|
||||
return NewBuffer(buf).Unmarshal(pb)
|
||||
}
|
||||
|
||||
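To make the reset-versus-merge distinction concrete, a hedged round-trip sketch follows. It assumes proto.Marshal from encode.go (whose diff is suppressed later in this commit) and the pb.MyMessage test type from clone_test.go, so those names are assumptions here.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	pb "./testdata" // test-only package used by the *_test.go files in this commit
)

func main() {
	buf, err := proto.Marshal(&pb.MyMessage{Count: proto.Int32(7), Pet: []string{"bunny"}})
	if err != nil {
		panic(err)
	}

	out := &pb.MyMessage{Pet: []string{"kitty"}}
	// Unmarshal resets out first, so Pet ends up as ["bunny"];
	// UnmarshalMerge would keep "kitty" and append "bunny" after it.
	if err := proto.Unmarshal(buf, out); err != nil {
		panic(err)
	}
	fmt.Println(*out.Count, out.Pet) // 7 [bunny]
}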
// Unmarshal parses the protocol buffer representation in the
|
||||
// Buffer and places the decoded result in pb. If the struct
|
||||
// underlying pb does not match the data in the buffer, the results can be
|
||||
// unpredictable.
|
||||
func (p *Buffer) Unmarshal(pb Message) error {
|
||||
// If the object can unmarshal itself, let it.
|
||||
if u, ok := pb.(Unmarshaler); ok {
|
||||
err := u.Unmarshal(p.buf[p.index:])
|
||||
p.index = len(p.buf)
|
||||
return err
|
||||
}
|
||||
|
||||
typ, base, err := getbase(pb)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
|
||||
|
||||
if collectStats {
|
||||
stats.Decode++
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// unmarshalType does the work of unmarshaling a structure.
|
||||
func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
|
||||
var state errorState
|
||||
required, reqFields := prop.reqCount, uint64(0)
|
||||
|
||||
var err error
|
||||
for err == nil && o.index < len(o.buf) {
|
||||
oi := o.index
|
||||
var u uint64
|
||||
u, err = o.DecodeVarint()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
wire := int(u & 0x7)
|
||||
if wire == WireEndGroup {
|
||||
if is_group {
|
||||
return nil // input is satisfied
|
||||
}
|
||||
return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
|
||||
}
|
||||
tag := int(u >> 3)
|
||||
if tag <= 0 {
|
||||
return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
|
||||
}
|
||||
fieldnum, ok := prop.decoderTags.get(tag)
|
||||
if !ok {
|
||||
// Maybe it's an extension?
|
||||
if prop.extendable {
|
||||
if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) {
|
||||
if err = o.skip(st, tag, wire); err == nil {
|
||||
ext := e.ExtensionMap()[int32(tag)] // may be missing
|
||||
ext.enc = append(ext.enc, o.buf[oi:o.index]...)
|
||||
e.ExtensionMap()[int32(tag)] = ext
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
|
||||
continue
|
||||
}
|
||||
p := prop.Prop[fieldnum]
|
||||
|
||||
if p.dec == nil {
|
||||
fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
|
||||
continue
|
||||
}
|
||||
dec := p.dec
|
||||
if wire != WireStartGroup && wire != p.WireType {
|
||||
if wire == WireBytes && p.packedDec != nil {
|
||||
// a packable field
|
||||
dec = p.packedDec
|
||||
} else {
|
||||
err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
|
||||
continue
|
||||
}
|
||||
}
|
||||
decErr := dec(o, p, base)
|
||||
if decErr != nil && !state.shouldContinue(decErr, p) {
|
||||
err = decErr
|
||||
}
|
||||
if err == nil && p.Required {
|
||||
// Successfully decoded a required field.
|
||||
if tag <= 64 {
|
||||
// use bitmap for fields 1-64 to catch field reuse.
|
||||
var mask uint64 = 1 << uint64(tag-1)
|
||||
if reqFields&mask == 0 {
|
||||
// new required field
|
||||
reqFields |= mask
|
||||
required--
|
||||
}
|
||||
} else {
|
||||
// This is imprecise. It can be fooled by a required field
|
||||
// with a tag > 64 that is encoded twice; that's very rare.
|
||||
// A fully correct implementation would require allocating
|
||||
// a data structure, which we would like to avoid.
|
||||
required--
|
||||
}
|
||||
}
|
||||
}
|
||||
if err == nil {
|
||||
if is_group {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if state.err != nil {
|
||||
return state.err
|
||||
}
|
||||
if required > 0 {
|
||||
// Not enough information to determine the exact field. If we use extra
|
||||
// CPU, we could determine the field only if the missing required field
|
||||
// has a tag <= 64 and we check reqFields.
|
||||
return &RequiredNotSetError{"{Unknown}"}
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Individual type decoders
|
||||
// For each,
|
||||
// u is the decoded value,
|
||||
// v is a pointer to the field (pointer) in the struct
|
||||
|
||||
// Sizes of the pools to allocate inside the Buffer.
|
||||
// The goal is modest amortization and allocation
|
||||
// on at least 16-byte boundaries.
|
||||
const (
|
||||
boolPoolSize = 16
|
||||
uint32PoolSize = 8
|
||||
uint64PoolSize = 4
|
||||
)
|
||||
|
||||
// Decode a bool.
|
||||
func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(o.bools) == 0 {
|
||||
o.bools = make([]bool, boolPoolSize)
|
||||
}
|
||||
o.bools[0] = u != 0
|
||||
*structPointer_Bool(base, p.field) = &o.bools[0]
|
||||
o.bools = o.bools[1:]
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*structPointer_BoolVal(base, p.field) = u != 0
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode an int32.
|
||||
func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode an int64.
|
||||
func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
word64_Set(structPointer_Word64(base, p.field), o, u)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a string.
|
||||
func (o *Buffer) dec_string(p *Properties, base structPointer) error {
|
||||
s, err := o.DecodeStringBytes()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*structPointer_String(base, p.field) = &s
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
|
||||
s, err := o.DecodeStringBytes()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*structPointer_StringVal(base, p.field) = s
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of bytes ([]byte).
|
||||
func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
|
||||
b, err := o.DecodeRawBytes(true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*structPointer_Bytes(base, p.field) = b
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of bools ([]bool).
|
||||
func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v := structPointer_BoolSlice(base, p.field)
|
||||
*v = append(*v, u != 0)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of bools ([]bool) in packed format.
|
||||
func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
|
||||
v := structPointer_BoolSlice(base, p.field)
|
||||
|
||||
nn, err := o.DecodeVarint()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nb := int(nn) // number of bytes of encoded bools
|
||||
|
||||
y := *v
|
||||
for i := 0; i < nb; i++ {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
y = append(y, u != 0)
|
||||
}
|
||||
|
||||
*v = y
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of int32s ([]int32).
|
||||
func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
structPointer_Word32Slice(base, p.field).Append(uint32(u))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of int32s ([]int32) in packed format.
|
||||
func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
|
||||
v := structPointer_Word32Slice(base, p.field)
|
||||
|
||||
nn, err := o.DecodeVarint()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nb := int(nn) // number of bytes of encoded int32s
|
||||
|
||||
fin := o.index + nb
|
||||
if fin < o.index {
|
||||
return errOverflow
|
||||
}
|
||||
for o.index < fin {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v.Append(uint32(u))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of int64s ([]int64).
|
||||
func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
structPointer_Word64Slice(base, p.field).Append(u)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of int64s ([]int64) in packed format.
|
||||
func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
|
||||
v := structPointer_Word64Slice(base, p.field)
|
||||
|
||||
nn, err := o.DecodeVarint()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nb := int(nn) // number of bytes of encoded int64s
|
||||
|
||||
fin := o.index + nb
|
||||
if fin < o.index {
|
||||
return errOverflow
|
||||
}
|
||||
for o.index < fin {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v.Append(u)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of strings ([]string).
|
||||
func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
|
||||
s, err := o.DecodeStringBytes()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v := structPointer_StringSlice(base, p.field)
|
||||
*v = append(*v, s)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of slice of bytes ([][]byte).
|
||||
func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
|
||||
b, err := o.DecodeRawBytes(true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v := structPointer_BytesSlice(base, p.field)
|
||||
*v = append(*v, b)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a map field.
|
||||
func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
|
||||
raw, err := o.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
oi := o.index // index at the end of this map entry
|
||||
o.index -= len(raw) // move buffer back to start of map entry
|
||||
|
||||
mptr := structPointer_Map(base, p.field, p.mtype) // *map[K]V
|
||||
if mptr.Elem().IsNil() {
|
||||
mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
|
||||
}
|
||||
v := mptr.Elem() // map[K]V
|
||||
|
||||
// Prepare addressable doubly-indirect placeholders for the key and value types.
|
||||
// See enc_new_map for why.
|
||||
keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
|
||||
keybase := toStructPointer(keyptr.Addr()) // **K
|
||||
|
||||
var valbase structPointer
|
||||
var valptr reflect.Value
|
||||
switch p.mtype.Elem().Kind() {
|
||||
case reflect.Slice:
|
||||
// []byte
|
||||
var dummy []byte
|
||||
valptr = reflect.ValueOf(&dummy) // *[]byte
|
||||
valbase = toStructPointer(valptr) // *[]byte
|
||||
case reflect.Ptr:
|
||||
// message; valptr is **Msg; need to allocate the intermediate pointer
|
||||
valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
|
||||
valptr.Set(reflect.New(valptr.Type().Elem()))
|
||||
valbase = toStructPointer(valptr)
|
||||
default:
|
||||
// everything else
|
||||
valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
|
||||
valbase = toStructPointer(valptr.Addr()) // **V
|
||||
}
|
||||
|
||||
// Decode.
|
||||
// This parses a restricted wire format, namely the encoding of a message
|
||||
// with two fields. See enc_new_map for the format.
|
||||
for o.index < oi {
|
||||
// tagcode for key and value properties are always a single byte
|
||||
// because they have tags 1 and 2.
|
||||
tagcode := o.buf[o.index]
|
||||
o.index++
|
||||
switch tagcode {
|
||||
case p.mkeyprop.tagcode[0]:
|
||||
if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
|
||||
return err
|
||||
}
|
||||
case p.mvalprop.tagcode[0]:
|
||||
if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
// TODO: Should we silently skip this instead?
|
||||
return fmt.Errorf("proto: bad map data tag %d", raw[0])
|
||||
}
|
||||
}
|
||||
|
||||
v.SetMapIndex(keyptr.Elem(), valptr.Elem())
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a group.
|
||||
func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
|
||||
bas := structPointer_GetStructPointer(base, p.field)
|
||||
if structPointer_IsNil(bas) {
|
||||
// allocate new nested message
|
||||
bas = toStructPointer(reflect.New(p.stype))
|
||||
structPointer_SetStructPointer(base, p.field, bas)
|
||||
}
|
||||
return o.unmarshalType(p.stype, p.sprop, true, bas)
|
||||
}
|
||||
|
||||
// Decode an embedded message.
|
||||
func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
|
||||
raw, e := o.DecodeRawBytes(false)
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
|
||||
bas := structPointer_GetStructPointer(base, p.field)
|
||||
if structPointer_IsNil(bas) {
|
||||
// allocate new nested message
|
||||
bas = toStructPointer(reflect.New(p.stype))
|
||||
structPointer_SetStructPointer(base, p.field, bas)
|
||||
}
|
||||
|
||||
// If the object can unmarshal itself, let it.
|
||||
if p.isUnmarshaler {
|
||||
iv := structPointer_Interface(bas, p.stype)
|
||||
return iv.(Unmarshaler).Unmarshal(raw)
|
||||
}
|
||||
|
||||
obuf := o.buf
|
||||
oi := o.index
|
||||
o.buf = raw
|
||||
o.index = 0
|
||||
|
||||
err = o.unmarshalType(p.stype, p.sprop, false, bas)
|
||||
o.buf = obuf
|
||||
o.index = oi
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Decode a slice of embedded messages.
|
||||
func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
|
||||
return o.dec_slice_struct(p, false, base)
|
||||
}
|
||||
|
||||
// Decode a slice of embedded groups.
|
||||
func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
|
||||
return o.dec_slice_struct(p, true, base)
|
||||
}
|
||||
|
||||
// Decode a slice of structs ([]*struct).
|
||||
func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
|
||||
v := reflect.New(p.stype)
|
||||
bas := toStructPointer(v)
|
||||
structPointer_StructPointerSlice(base, p.field).Append(bas)
|
||||
|
||||
if is_group {
|
||||
err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
|
||||
return err
|
||||
}
|
||||
|
||||
raw, err := o.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If the object can unmarshal itself, let it.
|
||||
if p.isUnmarshaler {
|
||||
iv := v.Interface()
|
||||
return iv.(Unmarshaler).Unmarshal(raw)
|
||||
}
|
||||
|
||||
obuf := o.buf
|
||||
oi := o.index
|
||||
o.buf = raw
|
||||
o.index = 0
|
||||
|
||||
err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
|
||||
|
||||
o.buf = obuf
|
||||
o.index = oi
|
||||
|
||||
return err
|
||||
}
|
1286 libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/encode.go generated vendored Normal file (file diff suppressed because it is too large)
256 libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal.go generated vendored Normal file
@@ -0,0 +1,256 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Protocol buffer comparison.
|
||||
// TODO: MessageSet.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"log"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
/*
|
||||
Equal returns true iff protocol buffers a and b are equal.
|
||||
The arguments must both be pointers to protocol buffer structs.
|
||||
|
||||
Equality is defined in this way:
|
||||
- Two messages are equal iff they are the same type,
|
||||
corresponding fields are equal, unknown field sets
|
||||
are equal, and extensions sets are equal.
|
||||
- Two set scalar fields are equal iff their values are equal.
|
||||
If the fields are of a floating-point type, remember that
|
||||
NaN != x for all x, including NaN.
|
||||
- Two repeated fields are equal iff their lengths are the same,
|
||||
and their corresponding elements are equal (a "bytes" field,
|
||||
although represented by []byte, is not a repeated field)
|
||||
- Two unset fields are equal.
|
||||
- Two unknown field sets are equal if their current
|
||||
encoded state is equal.
|
||||
- Two extension sets are equal iff they have corresponding
|
||||
elements that are pairwise equal.
|
||||
- Every other combination of things are not equal.
|
||||
|
||||
The return value is undefined if a and b are not protocol buffers.
|
||||
*/
|
||||
func Equal(a, b Message) bool {
|
||||
if a == nil || b == nil {
|
||||
return a == b
|
||||
}
|
||||
v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
|
||||
if v1.Type() != v2.Type() {
|
||||
return false
|
||||
}
|
||||
if v1.Kind() == reflect.Ptr {
|
||||
if v1.IsNil() {
|
||||
return v2.IsNil()
|
||||
}
|
||||
if v2.IsNil() {
|
||||
return false
|
||||
}
|
||||
v1, v2 = v1.Elem(), v2.Elem()
|
||||
}
|
||||
if v1.Kind() != reflect.Struct {
|
||||
return false
|
||||
}
|
||||
return equalStruct(v1, v2)
|
||||
}
|
||||
|
||||
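One of the subtler rules in the comment above is that an unset field and a set-but-empty bytes field are not equal. A small sketch using the pb.OtherMessage test type (a name borrowed from equal_test.go below, so an assumption here):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	pb "./testdata" // test-only package used by equal_test.go below
)

func main() {
	set := &pb.OtherMessage{Value: []byte{}} // field present but empty
	unset := &pb.OtherMessage{Value: nil}    // field not set at all

	fmt.Println(proto.Equal(set, set))   // true
	fmt.Println(proto.Equal(set, unset)) // false: set/unset mismatch on a bytes field
}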
// v1 and v2 are known to have the same type.
|
||||
func equalStruct(v1, v2 reflect.Value) bool {
|
||||
for i := 0; i < v1.NumField(); i++ {
|
||||
f := v1.Type().Field(i)
|
||||
if strings.HasPrefix(f.Name, "XXX_") {
|
||||
continue
|
||||
}
|
||||
f1, f2 := v1.Field(i), v2.Field(i)
|
||||
if f.Type.Kind() == reflect.Ptr {
|
||||
if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
|
||||
// both unset
|
||||
continue
|
||||
} else if n1 != n2 {
|
||||
// set/unset mismatch
|
||||
return false
|
||||
}
|
||||
b1, ok := f1.Interface().(raw)
|
||||
if ok {
|
||||
b2 := f2.Interface().(raw)
|
||||
// RawMessage
|
||||
if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
|
||||
return false
|
||||
}
|
||||
continue
|
||||
}
|
||||
f1, f2 = f1.Elem(), f2.Elem()
|
||||
}
|
||||
if !equalAny(f1, f2) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
|
||||
em2 := v2.FieldByName("XXX_extensions")
|
||||
if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
uf := v1.FieldByName("XXX_unrecognized")
|
||||
if !uf.IsValid() {
|
||||
return true
|
||||
}
|
||||
|
||||
u1 := uf.Bytes()
|
||||
u2 := v2.FieldByName("XXX_unrecognized").Bytes()
|
||||
if !bytes.Equal(u1, u2) {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// v1 and v2 are known to have the same type.
|
||||
func equalAny(v1, v2 reflect.Value) bool {
|
||||
if v1.Type() == protoMessageType {
|
||||
m1, _ := v1.Interface().(Message)
|
||||
m2, _ := v2.Interface().(Message)
|
||||
return Equal(m1, m2)
|
||||
}
|
||||
switch v1.Kind() {
|
||||
case reflect.Bool:
|
||||
return v1.Bool() == v2.Bool()
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v1.Float() == v2.Float()
|
||||
case reflect.Int32, reflect.Int64:
|
||||
return v1.Int() == v2.Int()
|
||||
case reflect.Map:
|
||||
if v1.Len() != v2.Len() {
|
||||
return false
|
||||
}
|
||||
for _, key := range v1.MapKeys() {
|
||||
val2 := v2.MapIndex(key)
|
||||
if !val2.IsValid() {
|
||||
// This key was not found in the second map.
|
||||
return false
|
||||
}
|
||||
if !equalAny(v1.MapIndex(key), val2) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case reflect.Ptr:
|
||||
return equalAny(v1.Elem(), v2.Elem())
|
||||
case reflect.Slice:
|
||||
if v1.Type().Elem().Kind() == reflect.Uint8 {
|
||||
// short circuit: []byte
|
||||
if v1.IsNil() != v2.IsNil() {
|
||||
return false
|
||||
}
|
||||
return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
|
||||
}
|
||||
|
||||
if v1.Len() != v2.Len() {
|
||||
return false
|
||||
}
|
||||
for i := 0; i < v1.Len(); i++ {
|
||||
if !equalAny(v1.Index(i), v2.Index(i)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case reflect.String:
|
||||
return v1.Interface().(string) == v2.Interface().(string)
|
||||
case reflect.Struct:
|
||||
return equalStruct(v1, v2)
|
||||
case reflect.Uint32, reflect.Uint64:
|
||||
return v1.Uint() == v2.Uint()
|
||||
}
|
||||
|
||||
// unknown type, so not a protocol buffer
|
||||
log.Printf("proto: don't know how to compare %v", v1)
|
||||
return false
|
||||
}
|
||||
|
||||
// base is the struct type that the extensions are based on.
|
||||
// em1 and em2 are extension maps.
|
||||
func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool {
|
||||
if len(em1) != len(em2) {
|
||||
return false
|
||||
}
|
||||
|
||||
for extNum, e1 := range em1 {
|
||||
e2, ok := em2[extNum]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
m1, m2 := e1.value, e2.value
|
||||
|
||||
if m1 != nil && m2 != nil {
|
||||
// Both are unencoded.
|
||||
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) {
|
||||
return false
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// At least one is encoded. To do a semantically correct comparison
|
||||
// we need to unmarshal them first.
|
||||
var desc *ExtensionDesc
|
||||
if m := extensionMaps[base]; m != nil {
|
||||
desc = m[extNum]
|
||||
}
|
||||
if desc == nil {
|
||||
log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
|
||||
continue
|
||||
}
|
||||
var err error
|
||||
if m1 == nil {
|
||||
m1, err = decodeExtension(e1.enc, desc)
|
||||
}
|
||||
if m2 == nil && err == nil {
|
||||
m2, err = decodeExtension(e2.enc, desc)
|
||||
}
|
||||
if err != nil {
|
||||
// The encoded form is invalid.
|
||||
log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
|
||||
return false
|
||||
}
|
||||
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
191 libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal_test.go generated vendored Normal file
@@ -0,0 +1,191 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
pb "./testdata"
|
||||
. "github.com/golang/protobuf/proto"
|
||||
)
|
||||
|
||||
// Four identical base messages.
|
||||
// The init function adds extensions to some of them.
|
||||
var messageWithoutExtension = &pb.MyMessage{Count: Int32(7)}
|
||||
var messageWithExtension1a = &pb.MyMessage{Count: Int32(7)}
|
||||
var messageWithExtension1b = &pb.MyMessage{Count: Int32(7)}
|
||||
var messageWithExtension2 = &pb.MyMessage{Count: Int32(7)}
|
||||
|
||||
// Two messages with non-message extensions.
|
||||
var messageWithInt32Extension1 = &pb.MyMessage{Count: Int32(8)}
|
||||
var messageWithInt32Extension2 = &pb.MyMessage{Count: Int32(8)}
|
||||
|
||||
func init() {
|
||||
ext1 := &pb.Ext{Data: String("Kirk")}
|
||||
ext2 := &pb.Ext{Data: String("Picard")}
|
||||
|
||||
// messageWithExtension1a has ext1, but never marshals it.
|
||||
if err := SetExtension(messageWithExtension1a, pb.E_Ext_More, ext1); err != nil {
|
||||
panic("SetExtension on 1a failed: " + err.Error())
|
||||
}
|
||||
|
||||
// messageWithExtension1b is the unmarshaled form of messageWithExtension1a.
|
||||
if err := SetExtension(messageWithExtension1b, pb.E_Ext_More, ext1); err != nil {
|
||||
panic("SetExtension on 1b failed: " + err.Error())
|
||||
}
|
||||
buf, err := Marshal(messageWithExtension1b)
|
||||
if err != nil {
|
||||
panic("Marshal of 1b failed: " + err.Error())
|
||||
}
|
||||
messageWithExtension1b.Reset()
|
||||
if err := Unmarshal(buf, messageWithExtension1b); err != nil {
|
||||
panic("Unmarshal of 1b failed: " + err.Error())
|
||||
}
|
||||
|
||||
// messageWithExtension2 has ext2.
|
||||
if err := SetExtension(messageWithExtension2, pb.E_Ext_More, ext2); err != nil {
|
||||
panic("SetExtension on 2 failed: " + err.Error())
|
||||
}
|
||||
|
||||
if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(23)); err != nil {
|
||||
panic("SetExtension on Int32-1 failed: " + err.Error())
|
||||
}
|
||||
if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(24)); err != nil {
|
||||
panic("SetExtension on Int32-2 failed: " + err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
var EqualTests = []struct {
|
||||
desc string
|
||||
a, b Message
|
||||
exp bool
|
||||
}{
|
||||
{"different types", &pb.GoEnum{}, &pb.GoTestField{}, false},
|
||||
{"equal empty", &pb.GoEnum{}, &pb.GoEnum{}, true},
|
||||
{"nil vs nil", nil, nil, true},
|
||||
{"typed nil vs typed nil", (*pb.GoEnum)(nil), (*pb.GoEnum)(nil), true},
|
||||
{"typed nil vs empty", (*pb.GoEnum)(nil), &pb.GoEnum{}, false},
|
||||
{"different typed nil", (*pb.GoEnum)(nil), (*pb.GoTestField)(nil), false},
|
||||
|
||||
{"one set field, one unset field", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{}, false},
|
||||
{"one set field zero, one unset field", &pb.GoTest{Param: Int32(0)}, &pb.GoTest{}, false},
|
||||
{"different set fields", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("bar")}, false},
|
||||
{"equal set", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("foo")}, true},
|
||||
|
||||
{"repeated, one set", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{}, false},
|
||||
{"repeated, different length", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{F_Int32Repeated: []int32{2}}, false},
|
||||
{"repeated, different value", &pb.GoTest{F_Int32Repeated: []int32{2}}, &pb.GoTest{F_Int32Repeated: []int32{3}}, false},
|
||||
{"repeated, equal", &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, true},
|
||||
{"repeated, nil equal nil", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: nil}, true},
|
||||
{"repeated, nil equal empty", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: []int32{}}, true},
|
||||
{"repeated, empty equal nil", &pb.GoTest{F_Int32Repeated: []int32{}}, &pb.GoTest{F_Int32Repeated: nil}, true},
|
||||
|
||||
{
|
||||
"nested, different",
|
||||
&pb.GoTest{RequiredField: &pb.GoTestField{Label: String("foo")}},
|
||||
&pb.GoTest{RequiredField: &pb.GoTestField{Label: String("bar")}},
|
||||
false,
|
||||
},
|
||||
{
|
||||
"nested, equal",
|
||||
&pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}},
|
||||
&pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}},
|
||||
true,
|
||||
},
|
||||
|
||||
{"bytes", &pb.OtherMessage{Value: []byte("foo")}, &pb.OtherMessage{Value: []byte("foo")}, true},
|
||||
{"bytes, empty", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: []byte{}}, true},
|
||||
{"bytes, empty vs nil", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: nil}, false},
|
||||
{
|
||||
"repeated bytes",
|
||||
&pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}},
|
||||
&pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}},
|
||||
true,
|
||||
},
|
||||
|
||||
{"extension vs. no extension", messageWithoutExtension, messageWithExtension1a, false},
|
||||
{"extension vs. same extension", messageWithExtension1a, messageWithExtension1b, true},
|
||||
{"extension vs. different extension", messageWithExtension1a, messageWithExtension2, false},
|
||||
|
||||
{"int32 extension vs. itself", messageWithInt32Extension1, messageWithInt32Extension1, true},
|
||||
{"int32 extension vs. a different int32", messageWithInt32Extension1, messageWithInt32Extension2, false},
|
||||
|
||||
{
|
||||
"message with group",
|
||||
&pb.MyMessage{
|
||||
Count: Int32(1),
|
||||
Somegroup: &pb.MyMessage_SomeGroup{
|
||||
GroupField: Int32(5),
|
||||
},
|
||||
},
|
||||
&pb.MyMessage{
|
||||
Count: Int32(1),
|
||||
Somegroup: &pb.MyMessage_SomeGroup{
|
||||
GroupField: Int32(5),
|
||||
},
|
||||
},
|
||||
true,
|
||||
},
|
||||
|
||||
{
|
||||
"map same",
|
||||
&pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
|
||||
&pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
|
||||
true,
|
||||
},
|
||||
{
|
||||
"map different entry",
|
||||
&pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
|
||||
&pb.MessageWithMap{NameMapping: map[int32]string{2: "Rob"}},
|
||||
false,
|
||||
},
|
||||
{
|
||||
"map different key only",
|
||||
&pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
|
||||
&pb.MessageWithMap{NameMapping: map[int32]string{2: "Ken"}},
|
||||
false,
|
||||
},
|
||||
{
|
||||
"map different value only",
|
||||
&pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
|
||||
&pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob"}},
|
||||
false,
|
||||
},
|
||||
}
|
||||
|
||||
func TestEqual(t *testing.T) {
|
||||
for _, tc := range EqualTests {
|
||||
if res := Equal(tc.a, tc.b); res != tc.exp {
|
||||
t.Errorf("%v: Equal(%v, %v) = %v, want %v", tc.desc, tc.a, tc.b, res, tc.exp)
|
||||
}
|
||||
}
|
||||
}
|
362 libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions.go generated vendored Normal file
@@ -0,0 +1,362 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Types and routines for supporting protocol buffer extensions.
|
||||
*/
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
|
||||
var ErrMissingExtension = errors.New("proto: missing extension")
|
||||
|
||||
// ExtensionRange represents a range of message extensions for a protocol buffer.
|
||||
// Used in code generated by the protocol compiler.
|
||||
type ExtensionRange struct {
|
||||
Start, End int32 // both inclusive
|
||||
}
|
||||
|
||||
// extendableProto is an interface implemented by any protocol buffer that may be extended.
|
||||
type extendableProto interface {
|
||||
Message
|
||||
ExtensionRangeArray() []ExtensionRange
|
||||
ExtensionMap() map[int32]Extension
|
||||
}
|
||||
|
||||
var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
|
||||
|
||||
// ExtensionDesc represents an extension specification.
|
||||
// Used in generated code from the protocol compiler.
|
||||
type ExtensionDesc struct {
|
||||
ExtendedType Message // nil pointer to the type that is being extended
|
||||
ExtensionType interface{} // nil pointer to the extension type
|
||||
Field int32 // field number
|
||||
Name string // fully-qualified name of extension, for text formatting
|
||||
Tag string // protobuf tag style
|
||||
}
|
||||
|
||||
func (ed *ExtensionDesc) repeated() bool {
|
||||
t := reflect.TypeOf(ed.ExtensionType)
|
||||
return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
|
||||
}
|
||||
|
||||
// Extension represents an extension in a message.
|
||||
type Extension struct {
|
||||
// When an extension is stored in a message using SetExtension
|
||||
// only desc and value are set. When the message is marshaled
|
||||
// enc will be set to the encoded form of the message.
|
||||
//
|
||||
// When a message is unmarshaled and contains extensions, each
|
||||
// extension will have only enc set. When such an extension is
|
||||
// accessed using GetExtension (or GetExtensions) desc and value
|
||||
// will be set.
|
||||
desc *ExtensionDesc
|
||||
value interface{}
|
||||
enc []byte
|
||||
}
|
||||
|
||||
// SetRawExtension is for testing only.
|
||||
func SetRawExtension(base extendableProto, id int32, b []byte) {
|
||||
base.ExtensionMap()[id] = Extension{enc: b}
|
||||
}
|
||||
|
||||
// isExtensionField returns true iff the given field number is in an extension range.
|
||||
func isExtensionField(pb extendableProto, field int32) bool {
|
||||
for _, er := range pb.ExtensionRangeArray() {
|
||||
if er.Start <= field && field <= er.End {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// checkExtensionTypes checks that the given extension is valid for pb.
|
||||
func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
|
||||
// Check the extended type.
|
||||
if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b {
|
||||
return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
|
||||
}
|
||||
// Check the range.
|
||||
if !isExtensionField(pb, extension.Field) {
|
||||
return errors.New("proto: bad extension number; not in declared ranges")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// extPropKey is sufficient to uniquely identify an extension.
|
||||
type extPropKey struct {
|
||||
base reflect.Type
|
||||
field int32
|
||||
}
|
||||
|
||||
var extProp = struct {
|
||||
sync.RWMutex
|
||||
m map[extPropKey]*Properties
|
||||
}{
|
||||
m: make(map[extPropKey]*Properties),
|
||||
}
|
||||
|
||||
func extensionProperties(ed *ExtensionDesc) *Properties {
|
||||
key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
|
||||
|
||||
extProp.RLock()
|
||||
if prop, ok := extProp.m[key]; ok {
|
||||
extProp.RUnlock()
|
||||
return prop
|
||||
}
|
||||
extProp.RUnlock()
|
||||
|
||||
extProp.Lock()
|
||||
defer extProp.Unlock()
|
||||
// Check again.
|
||||
if prop, ok := extProp.m[key]; ok {
|
||||
return prop
|
||||
}
|
||||
|
||||
prop := new(Properties)
|
||||
prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
|
||||
extProp.m[key] = prop
|
||||
return prop
|
||||
}
|
||||
|
||||
// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m.
|
||||
func encodeExtensionMap(m map[int32]Extension) error {
|
||||
for k, e := range m {
|
||||
if e.value == nil || e.desc == nil {
|
||||
// Extension is only in its encoded form.
|
||||
continue
|
||||
}
|
||||
|
||||
// We don't skip extensions that have an encoded form set,
|
||||
// because the extension value may have been mutated after
|
||||
// the last time this function was called.
|
||||
|
||||
et := reflect.TypeOf(e.desc.ExtensionType)
|
||||
props := extensionProperties(e.desc)
|
||||
|
||||
p := NewBuffer(nil)
|
||||
// If e.value has type T, the encoder expects a *struct{ X T }.
|
||||
// Pass a *T with a zero field and hope it all works out.
|
||||
x := reflect.New(et)
|
||||
x.Elem().Set(reflect.ValueOf(e.value))
|
||||
if err := props.enc(p, props, toStructPointer(x)); err != nil {
|
||||
return err
|
||||
}
|
||||
e.enc = p.buf
|
||||
m[k] = e
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func sizeExtensionMap(m map[int32]Extension) (n int) {
|
||||
for _, e := range m {
|
||||
if e.value == nil || e.desc == nil {
|
||||
// Extension is only in its encoded form.
|
||||
n += len(e.enc)
|
||||
continue
|
||||
}
|
||||
|
||||
// We don't skip extensions that have an encoded form set,
|
||||
// because the extension value may have been mutated after
|
||||
// the last time this function was called.
|
||||
|
||||
et := reflect.TypeOf(e.desc.ExtensionType)
|
||||
props := extensionProperties(e.desc)
|
||||
|
||||
// If e.value has type T, the encoder expects a *struct{ X T }.
|
||||
// Pass a *T with a zero field and hope it all works out.
|
||||
x := reflect.New(et)
|
||||
x.Elem().Set(reflect.ValueOf(e.value))
|
||||
n += props.size(props, toStructPointer(x))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// HasExtension returns whether the given extension is present in pb.
|
||||
func HasExtension(pb extendableProto, extension *ExtensionDesc) bool {
|
||||
// TODO: Check types, field numbers, etc.?
|
||||
_, ok := pb.ExtensionMap()[extension.Field]
|
||||
return ok
|
||||
}
|
||||
|
||||
// ClearExtension removes the given extension from pb.
|
||||
func ClearExtension(pb extendableProto, extension *ExtensionDesc) {
|
||||
// TODO: Check types, field numbers, etc.?
|
||||
delete(pb.ExtensionMap(), extension.Field)
|
||||
}
|
||||
|
||||
// GetExtension parses and returns the given extension of pb.
|
||||
// If the extension is not present it returns ErrMissingExtension.
|
||||
func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) {
|
||||
if err := checkExtensionTypes(pb, extension); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
emap := pb.ExtensionMap()
|
||||
e, ok := emap[extension.Field]
|
||||
if !ok {
|
||||
return nil, ErrMissingExtension
|
||||
}
|
||||
if e.value != nil {
|
||||
// Already decoded. Check the descriptor, though.
|
||||
if e.desc != extension {
|
||||
// This shouldn't happen. If it does, it means that
|
||||
// GetExtension was called twice with two different
|
||||
// descriptors with the same field number.
|
||||
return nil, errors.New("proto: descriptor conflict")
|
||||
}
|
||||
return e.value, nil
|
||||
}
|
||||
|
||||
v, err := decodeExtension(e.enc, extension)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Remember the decoded version and drop the encoded version.
|
||||
// That way it is safe to mutate what we return.
|
||||
e.value = v
|
||||
e.desc = extension
|
||||
e.enc = nil
|
||||
emap[extension.Field] = e
|
||||
return e.value, nil
|
||||
}
|
||||
|
||||
// decodeExtension decodes an extension encoded in b.
|
||||
func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
|
||||
o := NewBuffer(b)
|
||||
|
||||
t := reflect.TypeOf(extension.ExtensionType)
|
||||
rep := extension.repeated()
|
||||
|
||||
props := extensionProperties(extension)
|
||||
|
||||
// t is a pointer to a struct, pointer to basic type or a slice.
|
||||
// Allocate a "field" to store the pointer/slice itself; the
|
||||
// pointer/slice will be stored here. We pass
|
||||
// the address of this field to props.dec.
|
||||
// This passes a zero field and a *t and lets props.dec
|
||||
// interpret it as a *struct{ x t }.
|
||||
value := reflect.New(t).Elem()
|
||||
|
||||
for {
|
||||
// Discard wire type and field number varint. It isn't needed.
|
||||
if _, err := o.DecodeVarint(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !rep || o.index >= len(o.buf) {
|
||||
break
|
||||
}
|
||||
}
|
||||
return value.Interface(), nil
|
||||
}
|
||||
|
||||
// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
|
||||
// The returned slice has the same length as es; missing extensions will appear as nil elements.
|
||||
func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
|
||||
epb, ok := pb.(extendableProto)
|
||||
if !ok {
|
||||
err = errors.New("proto: not an extendable proto")
|
||||
return
|
||||
}
|
||||
extensions = make([]interface{}, len(es))
|
||||
for i, e := range es {
|
||||
extensions[i], err = GetExtension(epb, e)
|
||||
if err == ErrMissingExtension {
|
||||
err = nil
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SetExtension sets the specified extension of pb to the specified value.
|
||||
func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error {
|
||||
if err := checkExtensionTypes(pb, extension); err != nil {
|
||||
return err
|
||||
}
|
||||
typ := reflect.TypeOf(extension.ExtensionType)
|
||||
if typ != reflect.TypeOf(value) {
|
||||
return errors.New("proto: bad extension value type")
|
||||
}
|
||||
// nil extension values need to be caught early, because the
|
||||
// encoder can't distinguish an ErrNil due to a nil extension
|
||||
// from an ErrNil due to a missing field. Extensions are
|
||||
// always optional, so the encoder would just swallow the error
|
||||
// and drop all the extensions from the encoded message.
|
||||
if reflect.ValueOf(value).IsNil() {
|
||||
return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
|
||||
}
|
||||
|
||||
pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value}
|
||||
return nil
|
||||
}
|
||||
|
||||
// A global registry of extensions.
|
||||
// The generated code will register the generated descriptors by calling RegisterExtension.
|
||||
|
||||
var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
|
||||
|
||||
// RegisterExtension is called from the generated code.
|
||||
func RegisterExtension(desc *ExtensionDesc) {
|
||||
st := reflect.TypeOf(desc.ExtendedType).Elem()
|
||||
m := extensionMaps[st]
|
||||
if m == nil {
|
||||
m = make(map[int32]*ExtensionDesc)
|
||||
extensionMaps[st] = m
|
||||
}
|
||||
if _, ok := m[desc.Field]; ok {
|
||||
panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
|
||||
}
|
||||
m[desc.Field] = desc
|
||||
}
|
||||
|
||||
// RegisteredExtensions returns a map of the registered extensions of a
|
||||
// protocol buffer struct, indexed by the extension number.
|
||||
// The argument pb should be a nil pointer to the struct type.
|
||||
func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
|
||||
return extensionMaps[reflect.TypeOf(pb).Elem()]
|
||||
}
|
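As a condensed sketch of the extension API defined above (SetExtension, HasExtension, GetExtension, ClearExtension). The identifiers pb.MyMessage, pb.Ext and pb.E_Ext_More are assumed to be the generated names from the ./testdata package that the test file below imports; the relative import follows that file's convention.

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	pb "./testdata" // hypothetical generated package, as used by the tests below
)

func main() {
	msg := &pb.MyMessage{Count: proto.Int32(4)}

	// Attach an extension value through its generated descriptor.
	if err := proto.SetExtension(msg, pb.E_Ext_More, &pb.Ext{Data: proto.String("hi")}); err != nil {
		log.Fatal(err)
	}

	// HasExtension only checks presence in the extension map; it never decodes.
	fmt.Println(proto.HasExtension(msg, pb.E_Ext_More)) // true

	// GetExtension returns an interface{}; assert back to the concrete type.
	ext, err := proto.GetExtension(msg, pb.E_Ext_More)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(*ext.(*pb.Ext).Data) // hi

	// ClearExtension removes the value again.
	proto.ClearExtension(msg, pb.E_Ext_More)
	fmt.Println(proto.HasExtension(msg, pb.E_Ext_More)) // false
}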
153
libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions_test.go
generated
vendored
Normal file
|
@ -0,0 +1,153 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
pb "./testdata"
|
||||
"github.com/golang/protobuf/proto"
|
||||
)
|
||||
|
||||
func TestGetExtensionsWithMissingExtensions(t *testing.T) {
|
||||
msg := &pb.MyMessage{}
|
||||
ext1 := &pb.Ext{}
|
||||
if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil {
|
||||
t.Fatalf("Could not set ext1: %s", ext1)
|
||||
}
|
||||
exts, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{
|
||||
pb.E_Ext_More,
|
||||
pb.E_Ext_Text,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("GetExtensions() failed: %s", err)
|
||||
}
|
||||
if exts[0] != ext1 {
|
||||
t.Errorf("ext1 not in returned extensions: %T %v", exts[0], exts[0])
|
||||
}
|
||||
if exts[1] != nil {
|
||||
t.Errorf("ext2 in returned extensions: %T %v", exts[1], exts[1])
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetExtensionStability(t *testing.T) {
|
||||
check := func(m *pb.MyMessage) bool {
|
||||
ext1, err := proto.GetExtension(m, pb.E_Ext_More)
|
||||
if err != nil {
|
||||
t.Fatalf("GetExtension() failed: %s", err)
|
||||
}
|
||||
ext2, err := proto.GetExtension(m, pb.E_Ext_More)
|
||||
if err != nil {
|
||||
t.Fatalf("GetExtension() failed: %s", err)
|
||||
}
|
||||
return ext1 == ext2
|
||||
}
|
||||
msg := &pb.MyMessage{Count: proto.Int32(4)}
|
||||
ext0 := &pb.Ext{}
|
||||
if err := proto.SetExtension(msg, pb.E_Ext_More, ext0); err != nil {
|
||||
t.Fatalf("Could not set ext1: %s", ext0)
|
||||
}
|
||||
if !check(msg) {
|
||||
t.Errorf("GetExtension() not stable before marshaling")
|
||||
}
|
||||
bb, err := proto.Marshal(msg)
|
||||
if err != nil {
|
||||
t.Fatalf("Marshal() failed: %s", err)
|
||||
}
|
||||
msg1 := &pb.MyMessage{}
|
||||
err = proto.Unmarshal(bb, msg1)
|
||||
if err != nil {
|
||||
t.Fatalf("Unmarshal() failed: %s", err)
|
||||
}
|
||||
if !check(msg1) {
|
||||
t.Errorf("GetExtension() not stable after unmarshaling")
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtensionsRoundTrip(t *testing.T) {
|
||||
msg := &pb.MyMessage{}
|
||||
ext1 := &pb.Ext{
|
||||
Data: proto.String("hi"),
|
||||
}
|
||||
ext2 := &pb.Ext{
|
||||
Data: proto.String("there"),
|
||||
}
|
||||
exists := proto.HasExtension(msg, pb.E_Ext_More)
|
||||
if exists {
|
||||
t.Error("Extension More present unexpectedly")
|
||||
}
|
||||
if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if err := proto.SetExtension(msg, pb.E_Ext_More, ext2); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
e, err := proto.GetExtension(msg, pb.E_Ext_More)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
x, ok := e.(*pb.Ext)
|
||||
if !ok {
|
||||
t.Errorf("e has type %T, expected testdata.Ext", e)
|
||||
} else if *x.Data != "there" {
|
||||
t.Errorf("SetExtension failed to overwrite, got %+v, not 'there'", x)
|
||||
}
|
||||
proto.ClearExtension(msg, pb.E_Ext_More)
|
||||
if _, err = proto.GetExtension(msg, pb.E_Ext_More); err != proto.ErrMissingExtension {
|
||||
t.Errorf("got %v, expected ErrMissingExtension", e)
|
||||
}
|
||||
if _, err := proto.GetExtension(msg, pb.E_X215); err == nil {
|
||||
t.Error("expected bad extension error, got nil")
|
||||
}
|
||||
if err := proto.SetExtension(msg, pb.E_X215, 12); err == nil {
|
||||
t.Error("expected extension err")
|
||||
}
|
||||
if err := proto.SetExtension(msg, pb.E_Ext_More, 12); err == nil {
|
||||
t.Error("expected some sort of type mismatch error, got nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNilExtension(t *testing.T) {
|
||||
msg := &pb.MyMessage{
|
||||
Count: proto.Int32(1),
|
||||
}
|
||||
if err := proto.SetExtension(msg, pb.E_Ext_Text, proto.String("hello")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := proto.SetExtension(msg, pb.E_Ext_More, (*pb.Ext)(nil)); err == nil {
|
||||
t.Error("expected SetExtension to fail due to a nil extension")
|
||||
} else if want := "proto: SetExtension called with nil value of type *testdata.Ext"; err.Error() != want {
|
||||
t.Errorf("expected error %v, got %v", want, err)
|
||||
}
|
||||
// Note: if the behavior of Marshal is ever changed to ignore nil extensions, update
|
||||
// this test to verify that E_Ext_Text is properly propagated through marshal->unmarshal.
|
||||
}
|
759
libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/lib.go
generated
vendored
Normal file
|
@ -0,0 +1,759 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
/*
|
||||
Package proto converts data structures to and from the wire format of
|
||||
protocol buffers. It works in concert with the Go source code generated
|
||||
for .proto files by the protocol compiler.
|
||||
|
||||
A summary of the properties of the protocol buffer interface
|
||||
for a protocol buffer variable v:
|
||||
|
||||
- Names are turned from camel_case to CamelCase for export.
|
||||
- There are no methods on v to set fields; just treat
|
||||
them as structure fields.
|
||||
- There are getters that return a field's value if set,
|
||||
and return the field's default value if unset.
|
||||
The getters work even if the receiver is a nil message.
|
||||
- The zero value for a struct is its correct initialization state.
|
||||
All desired fields must be set before marshaling.
|
||||
- A Reset() method will restore a protobuf struct to its zero state.
|
||||
- Non-repeated fields are pointers to the values; nil means unset.
|
||||
That is, optional or required field int32 f becomes F *int32.
|
||||
- Repeated fields are slices.
|
||||
- Helper functions are available to aid the setting of fields.
|
||||
msg.Foo = proto.String("hello") // set field
|
||||
- Constants are defined to hold the default values of all fields that
|
||||
have them. They have the form Default_StructName_FieldName.
|
||||
Because the getter methods handle defaulted values,
|
||||
direct use of these constants should be rare.
|
||||
- Enums are given type names and maps from names to values.
|
||||
Enum values are prefixed by the enclosing message's name, or by the
|
||||
enum's type name if it is a top-level enum. Enum types have a String
|
||||
method, and an Enum method to assist in message construction.
|
||||
- Nested messages, groups and enums have type names prefixed with the name of
|
||||
the surrounding message type.
|
||||
- Extensions are given descriptor names that start with E_,
|
||||
followed by an underscore-delimited list of the nested messages
|
||||
that contain it (if any) followed by the CamelCased name of the
|
||||
extension field itself. HasExtension, ClearExtension, GetExtension
|
||||
and SetExtension are functions for manipulating extensions.
|
||||
- Marshal and Unmarshal are functions to encode and decode the wire format.
|
||||
|
||||
The simplest way to describe this is to see an example.
|
||||
Given file test.proto, containing
|
||||
|
||||
package example;
|
||||
|
||||
enum FOO { X = 17; }
|
||||
|
||||
message Test {
|
||||
required string label = 1;
|
||||
optional int32 type = 2 [default=77];
|
||||
repeated int64 reps = 3;
|
||||
optional group OptionalGroup = 4 {
|
||||
required string RequiredField = 5;
|
||||
}
|
||||
}
|
||||
|
||||
The resulting file, test.pb.go, is:
|
||||
|
||||
package example
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import math "math"
|
||||
|
||||
type FOO int32
|
||||
const (
|
||||
FOO_X FOO = 17
|
||||
)
|
||||
var FOO_name = map[int32]string{
|
||||
17: "X",
|
||||
}
|
||||
var FOO_value = map[string]int32{
|
||||
"X": 17,
|
||||
}
|
||||
|
||||
func (x FOO) Enum() *FOO {
|
||||
p := new(FOO)
|
||||
*p = x
|
||||
return p
|
||||
}
|
||||
func (x FOO) String() string {
|
||||
return proto.EnumName(FOO_name, int32(x))
|
||||
}
|
||||
func (x *FOO) UnmarshalJSON(data []byte) error {
|
||||
value, err := proto.UnmarshalJSONEnum(FOO_value, data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*x = FOO(value)
|
||||
return nil
|
||||
}
|
||||
|
||||
type Test struct {
|
||||
Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
|
||||
Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
|
||||
Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
|
||||
Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
func (m *Test) Reset() { *m = Test{} }
|
||||
func (m *Test) String() string { return proto.CompactTextString(m) }
|
||||
func (*Test) ProtoMessage() {}
|
||||
const Default_Test_Type int32 = 77
|
||||
|
||||
func (m *Test) GetLabel() string {
|
||||
if m != nil && m.Label != nil {
|
||||
return *m.Label
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Test) GetType() int32 {
|
||||
if m != nil && m.Type != nil {
|
||||
return *m.Type
|
||||
}
|
||||
return Default_Test_Type
|
||||
}
|
||||
|
||||
func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
|
||||
if m != nil {
|
||||
return m.Optionalgroup
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Test_OptionalGroup struct {
|
||||
RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
|
||||
}
|
||||
func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
|
||||
func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
|
||||
|
||||
func (m *Test_OptionalGroup) GetRequiredField() string {
|
||||
if m != nil && m.RequiredField != nil {
|
||||
return *m.RequiredField
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
|
||||
}
|
||||
|
||||
To create and play with a Test object:
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
pb "./example.pb"
|
||||
)
|
||||
|
||||
func main() {
|
||||
test := &pb.Test{
|
||||
Label: proto.String("hello"),
|
||||
Type: proto.Int32(17),
|
||||
Optionalgroup: &pb.Test_OptionalGroup{
|
||||
RequiredField: proto.String("good bye"),
|
||||
},
|
||||
}
|
||||
data, err := proto.Marshal(test)
|
||||
if err != nil {
|
||||
log.Fatal("marshaling error: ", err)
|
||||
}
|
||||
newTest := &pb.Test{}
|
||||
err = proto.Unmarshal(data, newTest)
|
||||
if err != nil {
|
||||
log.Fatal("unmarshaling error: ", err)
|
||||
}
|
||||
// Now test and newTest contain the same data.
|
||||
if test.GetLabel() != newTest.GetLabel() {
|
||||
log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
|
||||
}
|
||||
// etc.
|
||||
}
|
||||
*/
|
||||
package proto
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Message is implemented by generated protocol buffer messages.
|
||||
type Message interface {
|
||||
Reset()
|
||||
String() string
|
||||
ProtoMessage()
|
||||
}
|
||||
|
||||
// Stats records allocation details about the protocol buffer encoders
|
||||
// and decoders. Useful for tuning the library itself.
|
||||
type Stats struct {
|
||||
Emalloc uint64 // mallocs in encode
|
||||
Dmalloc uint64 // mallocs in decode
|
||||
Encode uint64 // number of encodes
|
||||
Decode uint64 // number of decodes
|
||||
Chit uint64 // number of cache hits
|
||||
Cmiss uint64 // number of cache misses
|
||||
Size uint64 // number of sizes
|
||||
}
|
||||
|
||||
// Set to true to enable stats collection.
|
||||
const collectStats = false
|
||||
|
||||
var stats Stats
|
||||
|
||||
// GetStats returns a copy of the global Stats structure.
|
||||
func GetStats() Stats { return stats }
|
||||
|
||||
// A Buffer is a buffer manager for marshaling and unmarshaling
|
||||
// protocol buffers. It may be reused between invocations to
|
||||
// reduce memory usage. It is not necessary to use a Buffer;
|
||||
// the global functions Marshal and Unmarshal create a
|
||||
// temporary Buffer and are fine for most applications.
|
||||
type Buffer struct {
|
||||
buf []byte // encode/decode byte stream
|
||||
index int // write point
|
||||
|
||||
// pools of basic types to amortize allocation.
|
||||
bools []bool
|
||||
uint32s []uint32
|
||||
uint64s []uint64
|
||||
|
||||
// extra pools, only used with pointer_reflect.go
|
||||
int32s []int32
|
||||
int64s []int64
|
||||
float32s []float32
|
||||
float64s []float64
|
||||
}
|
||||
|
||||
// NewBuffer allocates a new Buffer and initializes its internal data to
|
||||
// the contents of the argument slice.
|
||||
func NewBuffer(e []byte) *Buffer {
|
||||
return &Buffer{buf: e}
|
||||
}
|
||||
|
||||
// Reset resets the Buffer, ready for marshaling a new protocol buffer.
|
||||
func (p *Buffer) Reset() {
|
||||
p.buf = p.buf[0:0] // for reading/writing
|
||||
p.index = 0 // for reading
|
||||
}
|
||||
|
||||
// SetBuf replaces the internal buffer with the slice,
|
||||
// ready for unmarshaling the contents of the slice.
|
||||
func (p *Buffer) SetBuf(s []byte) {
|
||||
p.buf = s
|
||||
p.index = 0
|
||||
}
|
||||
|
||||
// Bytes returns the contents of the Buffer.
|
||||
func (p *Buffer) Bytes() []byte { return p.buf }
|
||||
|
||||
/*
|
||||
* Helper routines for simplifying the creation of optional fields of basic type.
|
||||
*/
|
||||
|
||||
// Bool is a helper routine that allocates a new bool value
|
||||
// to store v and returns a pointer to it.
|
||||
func Bool(v bool) *bool {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Int32 is a helper routine that allocates a new int32 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Int32(v int32) *int32 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Int is a helper routine that allocates a new int32 value
|
||||
// to store v and returns a pointer to it, but unlike Int32
|
||||
// its argument value is an int.
|
||||
func Int(v int) *int32 {
|
||||
p := new(int32)
|
||||
*p = int32(v)
|
||||
return p
|
||||
}
|
||||
|
||||
// Int64 is a helper routine that allocates a new int64 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Int64(v int64) *int64 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Float32 is a helper routine that allocates a new float32 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Float32(v float32) *float32 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Float64 is a helper routine that allocates a new float64 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Float64(v float64) *float64 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Uint32 is a helper routine that allocates a new uint32 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Uint32(v uint32) *uint32 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Uint64 is a helper routine that allocates a new uint64 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Uint64(v uint64) *uint64 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// String is a helper routine that allocates a new string value
|
||||
// to store v and returns a pointer to it.
|
||||
func String(v string) *string {
|
||||
return &v
|
||||
}
|
||||
|
||||
// EnumName is a helper function to simplify printing protocol buffer enums
|
||||
// by name. Given an enum map and a value, it returns a useful string.
|
||||
func EnumName(m map[int32]string, v int32) string {
|
||||
s, ok := m[v]
|
||||
if ok {
|
||||
return s
|
||||
}
|
||||
return strconv.Itoa(int(v))
|
||||
}
|
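// An illustrative sketch, assuming the FOO_name map from the package
// documentation example above:
//
//	proto.EnumName(FOO_name, 17) // "X"
//	proto.EnumName(FOO_name, 42) // "42" (values not in the map fall back to decimal)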
||||
|
||||
// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
|
||||
// from their JSON-encoded representation. Given a map from the enum's symbolic
|
||||
// names to its int values, and a byte buffer containing the JSON-encoded
|
||||
// value, it returns an int32 that can be cast to the enum type by the caller.
|
||||
//
|
||||
// The function can deal with both JSON representations, numeric and symbolic.
|
||||
func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
|
||||
if data[0] == '"' {
|
||||
// New style: enums are strings.
|
||||
var repr string
|
||||
if err := json.Unmarshal(data, &repr); err != nil {
|
||||
return -1, err
|
||||
}
|
||||
val, ok := m[repr]
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
// Old style: enums are ints.
|
||||
var val int32
|
||||
if err := json.Unmarshal(data, &val); err != nil {
|
||||
return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
|
||||
// DebugPrint dumps the encoded data in b in a debugging format with a header
|
||||
// including the string s. Used in testing but made available for general debugging.
|
||||
func (o *Buffer) DebugPrint(s string, b []byte) {
|
||||
var u uint64
|
||||
|
||||
obuf := o.buf
|
||||
index := o.index
|
||||
o.buf = b
|
||||
o.index = 0
|
||||
depth := 0
|
||||
|
||||
fmt.Printf("\n--- %s ---\n", s)
|
||||
|
||||
out:
|
||||
for {
|
||||
for i := 0; i < depth; i++ {
|
||||
fmt.Print(" ")
|
||||
}
|
||||
|
||||
index := o.index
|
||||
if index == len(o.buf) {
|
||||
break
|
||||
}
|
||||
|
||||
op, err := o.DecodeVarint()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: fetching op err %v\n", index, err)
|
||||
break out
|
||||
}
|
||||
tag := op >> 3
|
||||
wire := op & 7
|
||||
|
||||
switch wire {
|
||||
default:
|
||||
fmt.Printf("%3d: t=%3d unknown wire=%d\n",
|
||||
index, tag, wire)
|
||||
break out
|
||||
|
||||
case WireBytes:
|
||||
var r []byte
|
||||
|
||||
r, err = o.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
|
||||
if len(r) <= 6 {
|
||||
for i := 0; i < len(r); i++ {
|
||||
fmt.Printf(" %.2x", r[i])
|
||||
}
|
||||
} else {
|
||||
for i := 0; i < 3; i++ {
|
||||
fmt.Printf(" %.2x", r[i])
|
||||
}
|
||||
fmt.Printf(" ..")
|
||||
for i := len(r) - 3; i < len(r); i++ {
|
||||
fmt.Printf(" %.2x", r[i])
|
||||
}
|
||||
}
|
||||
fmt.Printf("\n")
|
||||
|
||||
case WireFixed32:
|
||||
u, err = o.DecodeFixed32()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
|
||||
|
||||
case WireFixed64:
|
||||
u, err = o.DecodeFixed64()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
|
||||
break
|
||||
|
||||
case WireVarint:
|
||||
u, err = o.DecodeVarint()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
|
||||
|
||||
case WireStartGroup:
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: t=%3d start err %v\n", index, tag, err)
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d start\n", index, tag)
|
||||
depth++
|
||||
|
||||
case WireEndGroup:
|
||||
depth--
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: t=%3d end err %v\n", index, tag, err)
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d end\n", index, tag)
|
||||
}
|
||||
}
|
||||
|
||||
if depth != 0 {
|
||||
fmt.Printf("%3d: start-end not balanced %d\n", o.index, depth)
|
||||
}
|
||||
fmt.Printf("\n")
|
||||
|
||||
o.buf = obuf
|
||||
o.index = index
|
||||
}
|
||||
|
||||
// SetDefaults sets unset protocol buffer fields to their default values.
|
||||
// It only modifies fields that are both unset and have defined defaults.
|
||||
// It recursively sets default values in any non-nil sub-messages.
|
||||
func SetDefaults(pb Message) {
|
||||
setDefaults(reflect.ValueOf(pb), true, false)
|
||||
}
|
||||
|
||||
// v is a pointer to a struct.
|
||||
func setDefaults(v reflect.Value, recur, zeros bool) {
|
||||
v = v.Elem()
|
||||
|
||||
defaultMu.RLock()
|
||||
dm, ok := defaults[v.Type()]
|
||||
defaultMu.RUnlock()
|
||||
if !ok {
|
||||
dm = buildDefaultMessage(v.Type())
|
||||
defaultMu.Lock()
|
||||
defaults[v.Type()] = dm
|
||||
defaultMu.Unlock()
|
||||
}
|
||||
|
||||
for _, sf := range dm.scalars {
|
||||
f := v.Field(sf.index)
|
||||
if !f.IsNil() {
|
||||
// field already set
|
||||
continue
|
||||
}
|
||||
dv := sf.value
|
||||
if dv == nil && !zeros {
|
||||
// no explicit default, and don't want to set zeros
|
||||
continue
|
||||
}
|
||||
fptr := f.Addr().Interface() // **T
|
||||
// TODO: Consider batching the allocations we do here.
|
||||
switch sf.kind {
|
||||
case reflect.Bool:
|
||||
b := new(bool)
|
||||
if dv != nil {
|
||||
*b = dv.(bool)
|
||||
}
|
||||
*(fptr.(**bool)) = b
|
||||
case reflect.Float32:
|
||||
f := new(float32)
|
||||
if dv != nil {
|
||||
*f = dv.(float32)
|
||||
}
|
||||
*(fptr.(**float32)) = f
|
||||
case reflect.Float64:
|
||||
f := new(float64)
|
||||
if dv != nil {
|
||||
*f = dv.(float64)
|
||||
}
|
||||
*(fptr.(**float64)) = f
|
||||
case reflect.Int32:
|
||||
// might be an enum
|
||||
if ft := f.Type(); ft != int32PtrType {
|
||||
// enum
|
||||
f.Set(reflect.New(ft.Elem()))
|
||||
if dv != nil {
|
||||
f.Elem().SetInt(int64(dv.(int32)))
|
||||
}
|
||||
} else {
|
||||
// int32 field
|
||||
i := new(int32)
|
||||
if dv != nil {
|
||||
*i = dv.(int32)
|
||||
}
|
||||
*(fptr.(**int32)) = i
|
||||
}
|
||||
case reflect.Int64:
|
||||
i := new(int64)
|
||||
if dv != nil {
|
||||
*i = dv.(int64)
|
||||
}
|
||||
*(fptr.(**int64)) = i
|
||||
case reflect.String:
|
||||
s := new(string)
|
||||
if dv != nil {
|
||||
*s = dv.(string)
|
||||
}
|
||||
*(fptr.(**string)) = s
|
||||
case reflect.Uint8:
|
||||
// exceptional case: []byte
|
||||
var b []byte
|
||||
if dv != nil {
|
||||
db := dv.([]byte)
|
||||
b = make([]byte, len(db))
|
||||
copy(b, db)
|
||||
} else {
|
||||
b = []byte{}
|
||||
}
|
||||
*(fptr.(*[]byte)) = b
|
||||
case reflect.Uint32:
|
||||
u := new(uint32)
|
||||
if dv != nil {
|
||||
*u = dv.(uint32)
|
||||
}
|
||||
*(fptr.(**uint32)) = u
|
||||
case reflect.Uint64:
|
||||
u := new(uint64)
|
||||
if dv != nil {
|
||||
*u = dv.(uint64)
|
||||
}
|
||||
*(fptr.(**uint64)) = u
|
||||
default:
|
||||
log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
|
||||
}
|
||||
}
|
||||
|
||||
for _, ni := range dm.nested {
|
||||
f := v.Field(ni)
|
||||
if f.IsNil() {
|
||||
continue
|
||||
}
|
||||
// f is *T or []*T
|
||||
if f.Kind() == reflect.Ptr {
|
||||
setDefaults(f, recur, zeros)
|
||||
} else {
|
||||
for i := 0; i < f.Len(); i++ {
|
||||
e := f.Index(i)
|
||||
if e.IsNil() {
|
||||
continue
|
||||
}
|
||||
setDefaults(e, recur, zeros)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
// defaults maps a protocol buffer struct type to a slice of the fields,
|
||||
// with its scalar fields set to their proto-declared non-zero default values.
|
||||
defaultMu sync.RWMutex
|
||||
defaults = make(map[reflect.Type]defaultMessage)
|
||||
|
||||
int32PtrType = reflect.TypeOf((*int32)(nil))
|
||||
)
|
||||
|
||||
// defaultMessage represents information about the default values of a message.
|
||||
type defaultMessage struct {
|
||||
scalars []scalarField
|
||||
nested []int // struct field index of nested messages
|
||||
}
|
||||
|
||||
type scalarField struct {
|
||||
index int // struct field index
|
||||
kind reflect.Kind // element type (the T in *T or []T)
|
||||
value interface{} // the proto-declared default value, or nil
|
||||
}
|
||||
|
||||
func ptrToStruct(t reflect.Type) bool {
|
||||
return t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct
|
||||
}
|
||||
|
||||
// t is a struct type.
|
||||
func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
|
||||
sprop := GetProperties(t)
|
||||
for _, prop := range sprop.Prop {
|
||||
fi, ok := sprop.decoderTags.get(prop.Tag)
|
||||
if !ok {
|
||||
// XXX_unrecognized
|
||||
continue
|
||||
}
|
||||
ft := t.Field(fi).Type
|
||||
|
||||
// nested messages
|
||||
if ptrToStruct(ft) || (ft.Kind() == reflect.Slice && ptrToStruct(ft.Elem())) {
|
||||
dm.nested = append(dm.nested, fi)
|
||||
continue
|
||||
}
|
||||
|
||||
sf := scalarField{
|
||||
index: fi,
|
||||
kind: ft.Elem().Kind(),
|
||||
}
|
||||
|
||||
// scalar fields without defaults
|
||||
if !prop.HasDefault {
|
||||
dm.scalars = append(dm.scalars, sf)
|
||||
continue
|
||||
}
|
||||
|
||||
// a scalar field: either *T or []byte
|
||||
switch ft.Elem().Kind() {
|
||||
case reflect.Bool:
|
||||
x, err := strconv.ParseBool(prop.Default)
|
||||
if err != nil {
|
||||
log.Printf("proto: bad default bool %q: %v", prop.Default, err)
|
||||
continue
|
||||
}
|
||||
sf.value = x
|
||||
case reflect.Float32:
|
||||
x, err := strconv.ParseFloat(prop.Default, 32)
|
||||
if err != nil {
|
||||
log.Printf("proto: bad default float32 %q: %v", prop.Default, err)
|
||||
continue
|
||||
}
|
||||
sf.value = float32(x)
|
||||
case reflect.Float64:
|
||||
x, err := strconv.ParseFloat(prop.Default, 64)
|
||||
if err != nil {
|
||||
log.Printf("proto: bad default float64 %q: %v", prop.Default, err)
|
||||
continue
|
||||
}
|
||||
sf.value = x
|
||||
case reflect.Int32:
|
||||
x, err := strconv.ParseInt(prop.Default, 10, 32)
|
||||
if err != nil {
|
||||
log.Printf("proto: bad default int32 %q: %v", prop.Default, err)
|
||||
continue
|
||||
}
|
||||
sf.value = int32(x)
|
||||
case reflect.Int64:
|
||||
x, err := strconv.ParseInt(prop.Default, 10, 64)
|
||||
if err != nil {
|
||||
log.Printf("proto: bad default int64 %q: %v", prop.Default, err)
|
||||
continue
|
||||
}
|
||||
sf.value = x
|
||||
case reflect.String:
|
||||
sf.value = prop.Default
|
||||
case reflect.Uint8:
|
||||
// []byte (not *uint8)
|
||||
sf.value = []byte(prop.Default)
|
||||
case reflect.Uint32:
|
||||
x, err := strconv.ParseUint(prop.Default, 10, 32)
|
||||
if err != nil {
|
||||
log.Printf("proto: bad default uint32 %q: %v", prop.Default, err)
|
||||
continue
|
||||
}
|
||||
sf.value = uint32(x)
|
||||
case reflect.Uint64:
|
||||
x, err := strconv.ParseUint(prop.Default, 10, 64)
|
||||
if err != nil {
|
||||
log.Printf("proto: bad default uint64 %q: %v", prop.Default, err)
|
||||
continue
|
||||
}
|
||||
sf.value = x
|
||||
default:
|
||||
log.Printf("proto: unhandled def kind %v", ft.Elem().Kind())
|
||||
continue
|
||||
}
|
||||
|
||||
dm.scalars = append(dm.scalars, sf)
|
||||
}
|
||||
|
||||
return dm
|
||||
}
|
||||
|
||||
// Map fields may have key types of non-float scalars, strings and enums.
|
||||
// The easiest way to sort them in some deterministic order is to use fmt.
|
||||
// If this turns out to be inefficient we can always consider other options,
|
||||
// such as doing a Schwartzian transform.
|
||||
|
||||
type mapKeys []reflect.Value
|
||||
|
||||
func (s mapKeys) Len() int { return len(s) }
|
||||
func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
func (s mapKeys) Less(i, j int) bool {
|
||||
return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface())
|
||||
}
|
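A minimal sketch of two user-facing entry points from the file above, assuming the hypothetical generated package pb from the package documentation example (pb.Test with a required label field and an optional type field whose default is 77). Buffer's Marshal method is defined in encode.go of this package, not in the file shown here.

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	pb "./example.pb" // hypothetical generated package from the documentation example
)

func main() {
	// Reuse one Buffer across several encodes to amortize allocations.
	buf := proto.NewBuffer(nil)
	for _, label := range []string{"first", "second"} {
		buf.Reset()
		if err := buf.Marshal(&pb.Test{Label: proto.String(label)}); err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%q encodes to %d bytes\n", label, len(buf.Bytes()))
	}

	// SetDefaults fills unset fields that declare a default value.
	t := &pb.Test{Label: proto.String("hello")}
	proto.SetDefaults(t)
	fmt.Println(*t.Type) // 77, the declared default for the type field
}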
287
libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set.go
generated
vendored
Normal file
|
@ -0,0 +1,287 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Support for message sets.
|
||||
*/
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// ErrNoMessageTypeId occurs when a protocol buffer does not have a message type ID.
|
||||
// A message type ID is required for storing a protocol buffer in a message set.
|
||||
var ErrNoMessageTypeId = errors.New("proto does not have a message type ID")
|
||||
|
||||
// The first two types (_MessageSet_Item and MessageSet)
|
||||
// model what the protocol compiler produces for the following protocol message:
|
||||
// message MessageSet {
|
||||
// repeated group Item = 1 {
|
||||
// required int32 type_id = 2;
|
||||
// required string message = 3;
|
||||
// };
|
||||
// }
|
||||
// That is the MessageSet wire format. We can't use a proto to generate these
|
||||
// because that would introduce a circular dependency between it and this package.
|
||||
//
|
||||
// When a proto1 proto has a field that looks like:
|
||||
// optional message<MessageSet> info = 3;
|
||||
// the protocol compiler produces a field in the generated struct that looks like:
|
||||
// Info *_proto_.MessageSet `protobuf:"bytes,3,opt,name=info"`
|
||||
// The package is automatically inserted so there is no need for that proto file to
|
||||
// import this package.
|
||||
|
||||
type _MessageSet_Item struct {
|
||||
TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
|
||||
Message []byte `protobuf:"bytes,3,req,name=message"`
|
||||
}
|
||||
|
||||
type MessageSet struct {
|
||||
Item []*_MessageSet_Item `protobuf:"group,1,rep"`
|
||||
XXX_unrecognized []byte
|
||||
// TODO: caching?
|
||||
}
|
||||
|
||||
// Make sure MessageSet is a Message.
|
||||
var _ Message = (*MessageSet)(nil)
|
||||
|
||||
// messageTypeIder is an interface satisfied by a protocol buffer type
|
||||
// that may be stored in a MessageSet.
|
||||
type messageTypeIder interface {
|
||||
MessageTypeId() int32
|
||||
}
|
||||
|
||||
func (ms *MessageSet) find(pb Message) *_MessageSet_Item {
|
||||
mti, ok := pb.(messageTypeIder)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
id := mti.MessageTypeId()
|
||||
for _, item := range ms.Item {
|
||||
if *item.TypeId == id {
|
||||
return item
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ms *MessageSet) Has(pb Message) bool {
|
||||
if ms.find(pb) != nil {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (ms *MessageSet) Unmarshal(pb Message) error {
|
||||
if item := ms.find(pb); item != nil {
|
||||
return Unmarshal(item.Message, pb)
|
||||
}
|
||||
if _, ok := pb.(messageTypeIder); !ok {
|
||||
return ErrNoMessageTypeId
|
||||
}
|
||||
return nil // TODO: return error instead?
|
||||
}
|
||||
|
||||
func (ms *MessageSet) Marshal(pb Message) error {
|
||||
msg, err := Marshal(pb)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if item := ms.find(pb); item != nil {
|
||||
// reuse existing item
|
||||
item.Message = msg
|
||||
return nil
|
||||
}
|
||||
|
||||
mti, ok := pb.(messageTypeIder)
|
||||
if !ok {
|
||||
return ErrNoMessageTypeId
|
||||
}
|
||||
|
||||
mtid := mti.MessageTypeId()
|
||||
ms.Item = append(ms.Item, &_MessageSet_Item{
|
||||
TypeId: &mtid,
|
||||
Message: msg,
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ms *MessageSet) Reset() { *ms = MessageSet{} }
|
||||
func (ms *MessageSet) String() string { return CompactTextString(ms) }
|
||||
func (*MessageSet) ProtoMessage() {}
|
||||
|
||||
// Support for the message_set_wire_format message option.
|
||||
|
||||
func skipVarint(buf []byte) []byte {
|
||||
i := 0
|
||||
for ; buf[i]&0x80 != 0; i++ {
|
||||
}
|
||||
return buf[i+1:]
|
||||
}
|
||||
|
||||
// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
|
||||
// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func MarshalMessageSet(m map[int32]Extension) ([]byte, error) {
|
||||
if err := encodeExtensionMap(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Sort extension IDs to provide a deterministic encoding.
|
||||
// See also enc_map in encode.go.
|
||||
ids := make([]int, 0, len(m))
|
||||
for id := range m {
|
||||
ids = append(ids, int(id))
|
||||
}
|
||||
sort.Ints(ids)
|
||||
|
||||
ms := &MessageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
|
||||
for _, id := range ids {
|
||||
e := m[int32(id)]
|
||||
// Remove the wire type and field number varint, as well as the length varint.
|
||||
msg := skipVarint(skipVarint(e.enc))
|
||||
|
||||
ms.Item = append(ms.Item, &_MessageSet_Item{
|
||||
TypeId: Int32(int32(id)),
|
||||
Message: msg,
|
||||
})
|
||||
}
|
||||
return Marshal(ms)
|
||||
}
|
||||
|
||||
// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
|
||||
// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error {
|
||||
ms := new(MessageSet)
|
||||
if err := Unmarshal(buf, ms); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, item := range ms.Item {
|
||||
id := *item.TypeId
|
||||
msg := item.Message
|
||||
|
||||
// Restore wire type and field number varint, plus length varint.
|
||||
// Be careful to preserve duplicate items.
|
||||
b := EncodeVarint(uint64(id)<<3 | WireBytes)
|
||||
if ext, ok := m[id]; ok {
|
||||
// Existing data; rip off the tag and length varint
|
||||
// so we join the new data correctly.
|
||||
// We can assume that ext.enc is set because we are unmarshaling.
|
||||
o := ext.enc[len(b):] // skip wire type and field number
|
||||
_, n := DecodeVarint(o) // calculate length of length varint
|
||||
o = o[n:] // skip length varint
|
||||
msg = append(o, msg...) // join old data and new data
|
||||
}
|
||||
b = append(b, EncodeVarint(uint64(len(msg)))...)
|
||||
b = append(b, msg...)
|
||||
|
||||
m[id] = Extension{enc: b}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
|
||||
// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) {
|
||||
var b bytes.Buffer
|
||||
b.WriteByte('{')
|
||||
|
||||
// Process the map in key order for deterministic output.
|
||||
ids := make([]int32, 0, len(m))
|
||||
for id := range m {
|
||||
ids = append(ids, id)
|
||||
}
|
||||
sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
|
||||
|
||||
for i, id := range ids {
|
||||
ext := m[id]
|
||||
if i > 0 {
|
||||
b.WriteByte(',')
|
||||
}
|
||||
|
||||
msd, ok := messageSetMap[id]
|
||||
if !ok {
|
||||
// Unknown type; we can't render it, so skip it.
|
||||
continue
|
||||
}
|
||||
fmt.Fprintf(&b, `"[%s]":`, msd.name)
|
||||
|
||||
x := ext.value
|
||||
if x == nil {
|
||||
x = reflect.New(msd.t.Elem()).Interface()
|
||||
if err := Unmarshal(ext.enc, x.(Message)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
d, err := json.Marshal(x)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b.Write(d)
|
||||
}
|
||||
b.WriteByte('}')
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
|
||||
// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error {
|
||||
// Common-case fast path.
|
||||
if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// This is fairly tricky, and it's not clear that it is needed.
|
||||
return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
|
||||
}
|
||||
|
||||
// A global registry of types that can be used in a MessageSet.
|
||||
|
||||
var messageSetMap = make(map[int32]messageSetDesc)
|
||||
|
||||
type messageSetDesc struct {
|
||||
t reflect.Type // pointer to struct
|
||||
name string
|
||||
}
|
||||
|
||||
// RegisterMessageSetType is called from the generated code.
|
||||
func RegisterMessageSetType(m Message, fieldNum int32, name string) {
|
||||
messageSetMap[fieldNum] = messageSetDesc{
|
||||
t: reflect.TypeOf(m),
|
||||
name: name,
|
||||
}
|
||||
}
|
66
libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set_test.go
generated
vendored
Normal file
|
@ -0,0 +1,66 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestUnmarshalMessageSetWithDuplicate(t *testing.T) {
|
||||
// Check that a repeated message set entry will be concatenated.
|
||||
in := &MessageSet{
|
||||
Item: []*_MessageSet_Item{
|
||||
{TypeId: Int32(12345), Message: []byte("hoo")},
|
||||
{TypeId: Int32(12345), Message: []byte("hah")},
|
||||
},
|
||||
}
|
||||
b, err := Marshal(in)
|
||||
if err != nil {
|
||||
t.Fatalf("Marshal: %v", err)
|
||||
}
|
||||
t.Logf("Marshaled bytes: %q", b)
|
||||
|
||||
m := make(map[int32]Extension)
|
||||
if err := UnmarshalMessageSet(b, m); err != nil {
|
||||
t.Fatalf("UnmarshalMessageSet: %v", err)
|
||||
}
|
||||
ext, ok := m[12345]
|
||||
if !ok {
|
||||
t.Fatalf("Didn't retrieve extension 12345; map is %v", m)
|
||||
}
|
||||
// Skip wire type/field number and length varints.
|
||||
got := skipVarint(skipVarint(ext.enc))
|
||||
if want := []byte("hoohah"); !bytes.Equal(got, want) {
|
||||
t.Errorf("Combined extension is %q, want %q", got, want)
|
||||
}
|
||||
}
|
479
libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_reflect.go
generated
vendored
Normal file
|
@ -0,0 +1,479 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// +build appengine
|
||||
|
||||
// This file contains an implementation of proto field accesses using package reflect.
|
||||
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
|
||||
// be used on App Engine.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"math"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// A structPointer is a pointer to a struct.
|
||||
type structPointer struct {
|
||||
v reflect.Value
|
||||
}
|
||||
|
||||
// toStructPointer returns a structPointer equivalent to the given reflect value.
|
||||
// The reflect value must itself be a pointer to a struct.
|
||||
func toStructPointer(v reflect.Value) structPointer {
|
||||
return structPointer{v}
|
||||
}
|
||||
|
||||
// IsNil reports whether p is nil.
|
||||
func structPointer_IsNil(p structPointer) bool {
|
||||
return p.v.IsNil()
|
||||
}
|
||||
|
||||
// Interface returns the struct pointer as an interface value.
|
||||
func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
|
||||
return p.v.Interface()
|
||||
}
|
||||
|
||||
// A field identifies a field in a struct, accessible from a structPointer.
|
||||
// In this implementation, a field is identified by the sequence of field indices
|
||||
// passed to reflect's FieldByIndex.
|
||||
type field []int
|
||||
|
||||
// toField returns a field equivalent to the given reflect field.
|
||||
func toField(f *reflect.StructField) field {
|
||||
return f.Index
|
||||
}
|
||||
|
||||
// invalidField is an invalid field identifier.
|
||||
var invalidField = field(nil)
|
||||
|
||||
// IsValid reports whether the field identifier is valid.
|
||||
func (f field) IsValid() bool { return f != nil }
|
||||
|
||||
// field returns the given field in the struct as a reflect value.
|
||||
func structPointer_field(p structPointer, f field) reflect.Value {
|
||||
// Special case: an extension map entry with a value of type T
|
||||
// passes a *T to the struct-handling code with a zero field,
|
||||
// expecting that it will be treated as equivalent to *struct{ X T },
|
||||
// which has the same memory layout. We have to handle that case
|
||||
// specially, because reflect will panic if we call FieldByIndex on a
|
||||
// non-struct.
|
||||
if f == nil {
|
||||
return p.v.Elem()
|
||||
}
|
||||
|
||||
return p.v.Elem().FieldByIndex(f)
|
||||
}
|
||||
|
||||
// ifield returns the given field in the struct as an interface value.
|
||||
func structPointer_ifield(p structPointer, f field) interface{} {
|
||||
return structPointer_field(p, f).Addr().Interface()
|
||||
}
|
||||
|
||||
// Bytes returns the address of a []byte field in the struct.
|
||||
func structPointer_Bytes(p structPointer, f field) *[]byte {
|
||||
return structPointer_ifield(p, f).(*[]byte)
|
||||
}
|
||||
|
||||
// BytesSlice returns the address of a [][]byte field in the struct.
|
||||
func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
|
||||
return structPointer_ifield(p, f).(*[][]byte)
|
||||
}
|
||||
|
||||
// Bool returns the address of a *bool field in the struct.
|
||||
func structPointer_Bool(p structPointer, f field) **bool {
|
||||
return structPointer_ifield(p, f).(**bool)
|
||||
}
|
||||
|
||||
// BoolVal returns the address of a bool field in the struct.
|
||||
func structPointer_BoolVal(p structPointer, f field) *bool {
|
||||
return structPointer_ifield(p, f).(*bool)
|
||||
}
|
||||
|
||||
// BoolSlice returns the address of a []bool field in the struct.
|
||||
func structPointer_BoolSlice(p structPointer, f field) *[]bool {
|
||||
return structPointer_ifield(p, f).(*[]bool)
|
||||
}
|
||||
|
||||
// String returns the address of a *string field in the struct.
|
||||
func structPointer_String(p structPointer, f field) **string {
|
||||
return structPointer_ifield(p, f).(**string)
|
||||
}
|
||||
|
||||
// StringVal returns the address of a string field in the struct.
|
||||
func structPointer_StringVal(p structPointer, f field) *string {
|
||||
return structPointer_ifield(p, f).(*string)
|
||||
}
|
||||
|
||||
// StringSlice returns the address of a []string field in the struct.
|
||||
func structPointer_StringSlice(p structPointer, f field) *[]string {
|
||||
return structPointer_ifield(p, f).(*[]string)
|
||||
}
|
||||
|
||||
// ExtMap returns the address of an extension map field in the struct.
|
||||
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
|
||||
return structPointer_ifield(p, f).(*map[int32]Extension)
|
||||
}
|
||||
|
||||
// Map returns the reflect.Value for the address of a map field in the struct.
|
||||
func structPointer_Map(p structPointer, f field, typ reflect.Type) reflect.Value {
|
||||
return structPointer_field(p, f).Addr()
|
||||
}
|
||||
|
||||
// SetStructPointer writes a *struct field in the struct.
|
||||
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
|
||||
structPointer_field(p, f).Set(q.v)
|
||||
}
|
||||
|
||||
// GetStructPointer reads a *struct field in the struct.
|
||||
func structPointer_GetStructPointer(p structPointer, f field) structPointer {
|
||||
return structPointer{structPointer_field(p, f)}
|
||||
}
|
||||
|
||||
// StructPointerSlice returns the address of a []*struct field in the struct.
|
||||
func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
|
||||
return structPointerSlice{structPointer_field(p, f)}
|
||||
}
|
||||
|
||||
// A structPointerSlice represents the address of a slice of pointers to structs
|
||||
// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
|
||||
type structPointerSlice struct {
|
||||
v reflect.Value
|
||||
}
|
||||
|
||||
func (p structPointerSlice) Len() int { return p.v.Len() }
|
||||
func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
|
||||
func (p structPointerSlice) Append(q structPointer) {
|
||||
p.v.Set(reflect.Append(p.v, q.v))
|
||||
}
|
||||
|
||||
var (
|
||||
int32Type = reflect.TypeOf(int32(0))
|
||||
uint32Type = reflect.TypeOf(uint32(0))
|
||||
float32Type = reflect.TypeOf(float32(0))
|
||||
int64Type = reflect.TypeOf(int64(0))
|
||||
uint64Type = reflect.TypeOf(uint64(0))
|
||||
float64Type = reflect.TypeOf(float64(0))
|
||||
)
|
||||
|
||||
// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
|
||||
// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
|
||||
type word32 struct {
|
||||
v reflect.Value
|
||||
}
|
||||
|
||||
// IsNil reports whether p is nil.
|
||||
func word32_IsNil(p word32) bool {
|
||||
return p.v.IsNil()
|
||||
}
|
||||
|
||||
// Set sets p to point at a newly allocated word with bits set to x.
|
||||
func word32_Set(p word32, o *Buffer, x uint32) {
|
||||
t := p.v.Type().Elem()
|
||||
switch t {
|
||||
case int32Type:
|
||||
if len(o.int32s) == 0 {
|
||||
o.int32s = make([]int32, uint32PoolSize)
|
||||
}
|
||||
o.int32s[0] = int32(x)
|
||||
p.v.Set(reflect.ValueOf(&o.int32s[0]))
|
||||
o.int32s = o.int32s[1:]
|
||||
return
|
||||
case uint32Type:
|
||||
if len(o.uint32s) == 0 {
|
||||
o.uint32s = make([]uint32, uint32PoolSize)
|
||||
}
|
||||
o.uint32s[0] = x
|
||||
p.v.Set(reflect.ValueOf(&o.uint32s[0]))
|
||||
o.uint32s = o.uint32s[1:]
|
||||
return
|
||||
case float32Type:
|
||||
if len(o.float32s) == 0 {
|
||||
o.float32s = make([]float32, uint32PoolSize)
|
||||
}
|
||||
o.float32s[0] = math.Float32frombits(x)
|
||||
p.v.Set(reflect.ValueOf(&o.float32s[0]))
|
||||
o.float32s = o.float32s[1:]
|
||||
return
|
||||
}
|
||||
|
||||
// must be enum
|
||||
p.v.Set(reflect.New(t))
|
||||
p.v.Elem().SetInt(int64(int32(x)))
|
||||
}
|
||||
|
||||
// Get gets the bits pointed at by p, as a uint32.
|
||||
func word32_Get(p word32) uint32 {
|
||||
elem := p.v.Elem()
|
||||
switch elem.Kind() {
|
||||
case reflect.Int32:
|
||||
return uint32(elem.Int())
|
||||
case reflect.Uint32:
|
||||
return uint32(elem.Uint())
|
||||
case reflect.Float32:
|
||||
return math.Float32bits(float32(elem.Float()))
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
|
||||
func structPointer_Word32(p structPointer, f field) word32 {
|
||||
return word32{structPointer_field(p, f)}
|
||||
}
|
||||
|
||||
// A word32Val represents a field of type int32, uint32, float32, or enum.
|
||||
// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
|
||||
type word32Val struct {
|
||||
v reflect.Value
|
||||
}
|
||||
|
||||
// Set sets *p to x.
|
||||
func word32Val_Set(p word32Val, x uint32) {
|
||||
switch p.v.Type() {
|
||||
case int32Type:
|
||||
p.v.SetInt(int64(x))
|
||||
return
|
||||
case uint32Type:
|
||||
p.v.SetUint(uint64(x))
|
||||
return
|
||||
case float32Type:
|
||||
p.v.SetFloat(float64(math.Float32frombits(x)))
|
||||
return
|
||||
}
|
||||
|
||||
// must be enum
|
||||
p.v.SetInt(int64(int32(x)))
|
||||
}
|
||||
|
||||
// Get gets the bits pointed at by p, as a uint32.
|
||||
func word32Val_Get(p word32Val) uint32 {
|
||||
elem := p.v
|
||||
switch elem.Kind() {
|
||||
case reflect.Int32:
|
||||
return uint32(elem.Int())
|
||||
case reflect.Uint32:
|
||||
return uint32(elem.Uint())
|
||||
case reflect.Float32:
|
||||
return math.Float32bits(float32(elem.Float()))
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
// Word32Val returns a reference to an int32, uint32, float32, or enum field in the struct.
|
||||
func structPointer_Word32Val(p structPointer, f field) word32Val {
|
||||
return word32Val{structPointer_field(p, f)}
|
||||
}
|
||||
|
||||
// A word32Slice is a slice of 32-bit values.
|
||||
// That is, v.Type() is []int32, []uint32, []float32, or []enum.
|
||||
type word32Slice struct {
|
||||
v reflect.Value
|
||||
}
|
||||
|
||||
func (p word32Slice) Append(x uint32) {
|
||||
n, m := p.v.Len(), p.v.Cap()
|
||||
if n < m {
|
||||
p.v.SetLen(n + 1)
|
||||
} else {
|
||||
t := p.v.Type().Elem()
|
||||
p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
|
||||
}
|
||||
elem := p.v.Index(n)
|
||||
switch elem.Kind() {
|
||||
case reflect.Int32:
|
||||
elem.SetInt(int64(int32(x)))
|
||||
case reflect.Uint32:
|
||||
elem.SetUint(uint64(x))
|
||||
case reflect.Float32:
|
||||
elem.SetFloat(float64(math.Float32frombits(x)))
|
||||
}
|
||||
}
|
||||
|
||||
func (p word32Slice) Len() int {
|
||||
return p.v.Len()
|
||||
}
|
||||
|
||||
func (p word32Slice) Index(i int) uint32 {
|
||||
elem := p.v.Index(i)
|
||||
switch elem.Kind() {
|
||||
case reflect.Int32:
|
||||
return uint32(elem.Int())
|
||||
case reflect.Uint32:
|
||||
return uint32(elem.Uint())
|
||||
case reflect.Float32:
|
||||
return math.Float32bits(float32(elem.Float()))
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
|
||||
func structPointer_Word32Slice(p structPointer, f field) word32Slice {
|
||||
return word32Slice{structPointer_field(p, f)}
|
||||
}
|
||||
|
||||
// word64 is like word32 but for 64-bit values.
|
||||
type word64 struct {
|
||||
v reflect.Value
|
||||
}
|
||||
|
||||
func word64_Set(p word64, o *Buffer, x uint64) {
|
||||
t := p.v.Type().Elem()
|
||||
switch t {
|
||||
case int64Type:
|
||||
if len(o.int64s) == 0 {
|
||||
o.int64s = make([]int64, uint64PoolSize)
|
||||
}
|
||||
o.int64s[0] = int64(x)
|
||||
p.v.Set(reflect.ValueOf(&o.int64s[0]))
|
||||
o.int64s = o.int64s[1:]
|
||||
return
|
||||
case uint64Type:
|
||||
if len(o.uint64s) == 0 {
|
||||
o.uint64s = make([]uint64, uint64PoolSize)
|
||||
}
|
||||
o.uint64s[0] = x
|
||||
p.v.Set(reflect.ValueOf(&o.uint64s[0]))
|
||||
o.uint64s = o.uint64s[1:]
|
||||
return
|
||||
case float64Type:
|
||||
if len(o.float64s) == 0 {
|
||||
o.float64s = make([]float64, uint64PoolSize)
|
||||
}
|
||||
o.float64s[0] = math.Float64frombits(x)
|
||||
p.v.Set(reflect.ValueOf(&o.float64s[0]))
|
||||
o.float64s = o.float64s[1:]
|
||||
return
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func word64_IsNil(p word64) bool {
|
||||
return p.v.IsNil()
|
||||
}
|
||||
|
||||
func word64_Get(p word64) uint64 {
|
||||
elem := p.v.Elem()
|
||||
switch elem.Kind() {
|
||||
case reflect.Int64:
|
||||
return uint64(elem.Int())
|
||||
case reflect.Uint64:
|
||||
return elem.Uint()
|
||||
case reflect.Float64:
|
||||
return math.Float64bits(elem.Float())
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func structPointer_Word64(p structPointer, f field) word64 {
|
||||
return word64{structPointer_field(p, f)}
|
||||
}
|
||||
|
||||
// word64Val is like word32Val but for 64-bit values.
|
||||
type word64Val struct {
|
||||
v reflect.Value
|
||||
}
|
||||
|
||||
func word64Val_Set(p word64Val, o *Buffer, x uint64) {
|
||||
switch p.v.Type() {
|
||||
case int64Type:
|
||||
p.v.SetInt(int64(x))
|
||||
return
|
||||
case uint64Type:
|
||||
p.v.SetUint(x)
|
||||
return
|
||||
case float64Type:
|
||||
p.v.SetFloat(math.Float64frombits(x))
|
||||
return
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func word64Val_Get(p word64Val) uint64 {
|
||||
elem := p.v
|
||||
switch elem.Kind() {
|
||||
case reflect.Int64:
|
||||
return uint64(elem.Int())
|
||||
case reflect.Uint64:
|
||||
return elem.Uint()
|
||||
case reflect.Float64:
|
||||
return math.Float64bits(elem.Float())
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func structPointer_Word64Val(p structPointer, f field) word64Val {
|
||||
return word64Val{structPointer_field(p, f)}
|
||||
}
|
||||
|
||||
type word64Slice struct {
|
||||
v reflect.Value
|
||||
}
|
||||
|
||||
func (p word64Slice) Append(x uint64) {
|
||||
n, m := p.v.Len(), p.v.Cap()
|
||||
if n < m {
|
||||
p.v.SetLen(n + 1)
|
||||
} else {
|
||||
t := p.v.Type().Elem()
|
||||
p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
|
||||
}
|
||||
elem := p.v.Index(n)
|
||||
switch elem.Kind() {
|
||||
case reflect.Int64:
|
||||
elem.SetInt(int64(x))
|
||||
case reflect.Uint64:
|
||||
elem.SetUint(uint64(x))
|
||||
case reflect.Float64:
|
||||
elem.SetFloat(float64(math.Float64frombits(x)))
|
||||
}
|
||||
}
|
||||
|
||||
func (p word64Slice) Len() int {
|
||||
return p.v.Len()
|
||||
}
|
||||
|
||||
func (p word64Slice) Index(i int) uint64 {
|
||||
elem := p.v.Index(i)
|
||||
switch elem.Kind() {
|
||||
case reflect.Int64:
|
||||
return uint64(elem.Int())
|
||||
case reflect.Uint64:
|
||||
return uint64(elem.Uint())
|
||||
case reflect.Float64:
|
||||
return math.Float64bits(float64(elem.Float()))
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func structPointer_Word64Slice(p structPointer, f field) word64Slice {
|
||||
return word64Slice{structPointer_field(p, f)}
|
||||
}
|
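pointer_reflect.go above and pointer_unsafe.go below expose the same structPointer_* accessors under opposite build tags: the reflect variant names a field by its index path and reaches it with FieldByIndex, while the unsafe variant names it by its byte offset and casts the raw address. A minimal stand-alone sketch of that difference (not part of the vendored files; the msg type and its fields are invented for illustration):

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

type msg struct {
	A int32
	B string
}

func main() {
	m := &msg{A: 7, B: "hi"}

	// reflect-style access: the field is identified by an index path
	// and reached via FieldByIndex, as in pointer_reflect.go.
	v := reflect.ValueOf(m).Elem()
	fmt.Println(v.FieldByIndex([]int{1}).Interface()) // "hi"

	// unsafe-style access: the field is identified by its byte offset
	// and the raw address is cast back to *string, as in pointer_unsafe.go.
	off := reflect.TypeOf(msg{}).Field(1).Offset
	fmt.Println(*(*string)(unsafe.Pointer(uintptr(unsafe.Pointer(m)) + off))) // "hi"
}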
266
libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_unsafe.go
generated
vendored
Normal file
|
@ -0,0 +1,266 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// +build !appengine
|
||||
|
||||
// This file contains the implementation of the proto field accesses using package unsafe.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// NOTE: These type_Foo functions would more idiomatically be methods,
|
||||
// but Go does not allow methods on pointer types, and we must preserve
|
||||
// some pointer type for the garbage collector. We use these
|
||||
// funcs with clunky names as our poor approximation to methods.
|
||||
//
|
||||
// An alternative would be
|
||||
// type structPointer struct { p unsafe.Pointer }
|
||||
// but that does not registerize as well.
|
||||
|
||||
// A structPointer is a pointer to a struct.
|
||||
type structPointer unsafe.Pointer
|
||||
|
||||
// toStructPointer returns a structPointer equivalent to the given reflect value.
|
||||
func toStructPointer(v reflect.Value) structPointer {
|
||||
return structPointer(unsafe.Pointer(v.Pointer()))
|
||||
}
|
||||
|
||||
// IsNil reports whether p is nil.
|
||||
func structPointer_IsNil(p structPointer) bool {
|
||||
return p == nil
|
||||
}
|
||||
|
||||
// Interface returns the struct pointer, assumed to have element type t,
|
||||
// as an interface value.
|
||||
func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
|
||||
return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
|
||||
}
|
||||
|
||||
// A field identifies a field in a struct, accessible from a structPointer.
|
||||
// In this implementation, a field is identified by its byte offset from the start of the struct.
|
||||
type field uintptr
|
||||
|
||||
// toField returns a field equivalent to the given reflect field.
|
||||
func toField(f *reflect.StructField) field {
|
||||
return field(f.Offset)
|
||||
}
|
||||
|
||||
// invalidField is an invalid field identifier.
|
||||
const invalidField = ^field(0)
|
||||
|
||||
// IsValid reports whether the field identifier is valid.
|
||||
func (f field) IsValid() bool {
|
||||
return f != ^field(0)
|
||||
}
|
||||
|
||||
// Bytes returns the address of a []byte field in the struct.
|
||||
func structPointer_Bytes(p structPointer, f field) *[]byte {
|
||||
return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// BytesSlice returns the address of a [][]byte field in the struct.
|
||||
func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
|
||||
return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// Bool returns the address of a *bool field in the struct.
|
||||
func structPointer_Bool(p structPointer, f field) **bool {
|
||||
return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// BoolVal returns the address of a bool field in the struct.
|
||||
func structPointer_BoolVal(p structPointer, f field) *bool {
|
||||
return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// BoolSlice returns the address of a []bool field in the struct.
|
||||
func structPointer_BoolSlice(p structPointer, f field) *[]bool {
|
||||
return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// String returns the address of a *string field in the struct.
|
||||
func structPointer_String(p structPointer, f field) **string {
|
||||
return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// StringVal returns the address of a string field in the struct.
|
||||
func structPointer_StringVal(p structPointer, f field) *string {
|
||||
return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// StringSlice returns the address of a []string field in the struct.
|
||||
func structPointer_StringSlice(p structPointer, f field) *[]string {
|
||||
return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// ExtMap returns the address of an extension map field in the struct.
|
||||
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
|
||||
return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// Map returns the reflect.Value for the address of a map field in the struct.
|
||||
func structPointer_Map(p structPointer, f field, typ reflect.Type) reflect.Value {
|
||||
return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
|
||||
}
|
||||
|
||||
// SetStructPointer writes a *struct field in the struct.
|
||||
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
|
||||
*(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
|
||||
}
|
||||
|
||||
// GetStructPointer reads a *struct field in the struct.
|
||||
func structPointer_GetStructPointer(p structPointer, f field) structPointer {
|
||||
return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// StructPointerSlice returns the address of a []*struct field in the struct.
|
||||
func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
|
||||
return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
|
||||
type structPointerSlice []structPointer
|
||||
|
||||
func (v *structPointerSlice) Len() int { return len(*v) }
|
||||
func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
|
||||
func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) }
|
||||
|
||||
// A word32 is the address of a "pointer to 32-bit value" field.
|
||||
type word32 **uint32
|
||||
|
||||
// IsNil reports whether *v is nil.
|
||||
func word32_IsNil(p word32) bool {
|
||||
return *p == nil
|
||||
}
|
||||
|
||||
// Set sets *v to point at a newly allocated word set to x.
|
||||
func word32_Set(p word32, o *Buffer, x uint32) {
|
||||
if len(o.uint32s) == 0 {
|
||||
o.uint32s = make([]uint32, uint32PoolSize)
|
||||
}
|
||||
o.uint32s[0] = x
|
||||
*p = &o.uint32s[0]
|
||||
o.uint32s = o.uint32s[1:]
|
||||
}
|
||||
|
||||
// Get gets the value pointed at by *v.
|
||||
func word32_Get(p word32) uint32 {
|
||||
return **p
|
||||
}
|
||||
|
||||
// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
|
||||
func structPointer_Word32(p structPointer, f field) word32 {
|
||||
return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
|
||||
}
|
||||
|
||||
// A word32Val is the address of a 32-bit value field.
|
||||
type word32Val *uint32
|
||||
|
||||
// Set sets *p to x.
|
||||
func word32Val_Set(p word32Val, x uint32) {
|
||||
*p = x
|
||||
}
|
||||
|
||||
// Get gets the value pointed at by p.
|
||||
func word32Val_Get(p word32Val) uint32 {
|
||||
return *p
|
||||
}
|
||||
|
||||
// Word32Val returns the address of an int32, uint32, float32, or enum field in the struct.
|
||||
func structPointer_Word32Val(p structPointer, f field) word32Val {
|
||||
return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
|
||||
}
|
||||
|
||||
// A word32Slice is a slice of 32-bit values.
|
||||
type word32Slice []uint32
|
||||
|
||||
func (v *word32Slice) Append(x uint32) { *v = append(*v, x) }
|
||||
func (v *word32Slice) Len() int { return len(*v) }
|
||||
func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }
|
||||
|
||||
// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
|
||||
func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
|
||||
return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// word64 is like word32 but for 64-bit values.
|
||||
type word64 **uint64
|
||||
|
||||
func word64_Set(p word64, o *Buffer, x uint64) {
|
||||
if len(o.uint64s) == 0 {
|
||||
o.uint64s = make([]uint64, uint64PoolSize)
|
||||
}
|
||||
o.uint64s[0] = x
|
||||
*p = &o.uint64s[0]
|
||||
o.uint64s = o.uint64s[1:]
|
||||
}
|
||||
|
||||
func word64_IsNil(p word64) bool {
|
||||
return *p == nil
|
||||
}
|
||||
|
||||
func word64_Get(p word64) uint64 {
|
||||
return **p
|
||||
}
|
||||
|
||||
func structPointer_Word64(p structPointer, f field) word64 {
|
||||
return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
|
||||
}
|
||||
|
||||
// word64Val is like word32Val but for 64-bit values.
|
||||
type word64Val *uint64
|
||||
|
||||
func word64Val_Set(p word64Val, o *Buffer, x uint64) {
|
||||
*p = x
|
||||
}
|
||||
|
||||
func word64Val_Get(p word64Val) uint64 {
|
||||
return *p
|
||||
}
|
||||
|
||||
func structPointer_Word64Val(p structPointer, f field) word64Val {
|
||||
return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
|
||||
}
|
||||
|
||||
// word64Slice is like word32Slice but for 64-bit values.
|
||||
type word64Slice []uint64
|
||||
|
||||
func (v *word64Slice) Append(x uint64) { *v = append(*v, x) }
|
||||
func (v *word64Slice) Len() int { return len(*v) }
|
||||
func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }
|
||||
|
||||
func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
|
||||
return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
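Both variants also share the slab-allocation trick visible in word32_Set and word64_Set above: scalar values are carved out of per-Buffer slices (o.uint32s, o.uint64s), so setting many optional fields costs a handful of allocations instead of one per field. A stand-alone sketch of that pattern, with an invented pool type and an assumed slab size of 64 (the vendored code uses uint32PoolSize):

package main

import "fmt"

// pool mimics the Buffer's scalar slabs: pointers are carved from a slice
// that is refilled in bulk when it runs out.
type pool struct {
	uint32s []uint32
}

// newUint32 returns a *uint32 set to x; this mirrors the shape of word32_Set.
func (o *pool) newUint32(x uint32) *uint32 {
	if len(o.uint32s) == 0 {
		o.uint32s = make([]uint32, 64) // assumed slab size
	}
	o.uint32s[0] = x
	p := &o.uint32s[0]
	o.uint32s = o.uint32s[1:]
	return p
}

func main() {
	var o pool
	a, b := o.newUint32(1), o.newUint32(2)
	fmt.Println(*a, *b) // 1 2: distinct pointers, shared backing allocation
}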
737
libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/properties.go
generated
vendored
Normal file
|
@ -0,0 +1,737 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Routines for encoding data into the wire format for protocol buffers.
|
||||
*/
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const debug bool = false
|
||||
|
||||
// Constants that identify the encoding of a value on the wire.
|
||||
const (
|
||||
WireVarint = 0
|
||||
WireFixed64 = 1
|
||||
WireBytes = 2
|
||||
WireStartGroup = 3
|
||||
WireEndGroup = 4
|
||||
WireFixed32 = 5
|
||||
)
|
||||
|
||||
const startSize = 10 // initial slice/string sizes
|
||||
|
||||
// Encoders are defined in encode.go
|
||||
// An encoder outputs the full representation of a field, including its
|
||||
// tag and encoder type.
|
||||
type encoder func(p *Buffer, prop *Properties, base structPointer) error
|
||||
|
||||
// A valueEncoder encodes a single integer in a particular encoding.
|
||||
type valueEncoder func(o *Buffer, x uint64) error
|
||||
|
||||
// Sizers are defined in encode.go
|
||||
// A sizer returns the encoded size of a field, including its tag and encoder
|
||||
// type.
|
||||
type sizer func(prop *Properties, base structPointer) int
|
||||
|
||||
// A valueSizer returns the encoded size of a single integer in a particular
|
||||
// encoding.
|
||||
type valueSizer func(x uint64) int
|
||||
|
||||
// Decoders are defined in decode.go
|
||||
// A decoder creates a value from its wire representation.
|
||||
// Unrecognized subelements are saved in unrec.
|
||||
type decoder func(p *Buffer, prop *Properties, base structPointer) error
|
||||
|
||||
// A valueDecoder decodes a single integer in a particular encoding.
|
||||
type valueDecoder func(o *Buffer) (x uint64, err error)
|
||||
|
||||
// tagMap is an optimization over map[int]int for typical protocol buffer
|
||||
// use-cases. Encoded protocol buffers are often in tag order with small tag
|
||||
// numbers.
|
||||
type tagMap struct {
|
||||
fastTags []int
|
||||
slowTags map[int]int
|
||||
}
|
||||
|
||||
// tagMapFastLimit is the upper bound on the tag number that will be stored in
|
||||
// the tagMap slice rather than its map.
|
||||
const tagMapFastLimit = 1024
|
||||
|
||||
func (p *tagMap) get(t int) (int, bool) {
|
||||
if t > 0 && t < tagMapFastLimit {
|
||||
if t >= len(p.fastTags) {
|
||||
return 0, false
|
||||
}
|
||||
fi := p.fastTags[t]
|
||||
return fi, fi >= 0
|
||||
}
|
||||
fi, ok := p.slowTags[t]
|
||||
return fi, ok
|
||||
}
|
||||
|
||||
func (p *tagMap) put(t int, fi int) {
|
||||
if t > 0 && t < tagMapFastLimit {
|
||||
for len(p.fastTags) < t+1 {
|
||||
p.fastTags = append(p.fastTags, -1)
|
||||
}
|
||||
p.fastTags[t] = fi
|
||||
return
|
||||
}
|
||||
if p.slowTags == nil {
|
||||
p.slowTags = make(map[int]int)
|
||||
}
|
||||
p.slowTags[t] = fi
|
||||
}
|
||||
|
||||
// StructProperties represents properties for all the fields of a struct.
|
||||
// decoderTags and decoderOrigNames should only be used by the decoder.
|
||||
type StructProperties struct {
|
||||
Prop []*Properties // properties for each field
|
||||
reqCount int // required count
|
||||
decoderTags tagMap // map from proto tag to struct field number
|
||||
decoderOrigNames map[string]int // map from original name to struct field number
|
||||
order []int // list of struct field numbers in tag order
|
||||
unrecField field // field id of the XXX_unrecognized []byte field
|
||||
extendable bool // is this an extendable proto
|
||||
}
|
||||
|
||||
// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
|
||||
// See encode.go, (*Buffer).enc_struct.
|
||||
|
||||
func (sp *StructProperties) Len() int { return len(sp.order) }
|
||||
func (sp *StructProperties) Less(i, j int) bool {
|
||||
return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
|
||||
}
|
||||
func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
|
||||
|
||||
// Properties represents the protocol-specific behavior of a single struct field.
|
||||
type Properties struct {
|
||||
Name string // name of the field, for error messages
|
||||
OrigName string // original name before protocol compiler (always set)
|
||||
Wire string
|
||||
WireType int
|
||||
Tag int
|
||||
Required bool
|
||||
Optional bool
|
||||
Repeated bool
|
||||
Packed bool // relevant for repeated primitives only
|
||||
Enum string // set for enum types only
|
||||
proto3 bool // whether this is known to be a proto3 field; set for []byte only
|
||||
|
||||
Default string // default value
|
||||
HasDefault bool // whether an explicit default was provided
|
||||
def_uint64 uint64
|
||||
|
||||
enc encoder
|
||||
valEnc valueEncoder // set for bool and numeric types only
|
||||
field field
|
||||
tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType)
|
||||
tagbuf [8]byte
|
||||
stype reflect.Type // set for struct types only
|
||||
sprop *StructProperties // set for struct types only
|
||||
isMarshaler bool
|
||||
isUnmarshaler bool
|
||||
|
||||
mtype reflect.Type // set for map types only
|
||||
mkeyprop *Properties // set for map types only
|
||||
mvalprop *Properties // set for map types only
|
||||
|
||||
size sizer
|
||||
valSize valueSizer // set for bool and numeric types only
|
||||
|
||||
dec decoder
|
||||
valDec valueDecoder // set for bool and numeric types only
|
||||
|
||||
// If this is a packable field, this will be the decoder for the packed version of the field.
|
||||
packedDec decoder
|
||||
}
|
||||
|
||||
// String formats the properties in the protobuf struct field tag style.
|
||||
func (p *Properties) String() string {
|
||||
s := p.Wire
|
||||
s = ","
|
||||
s += strconv.Itoa(p.Tag)
|
||||
if p.Required {
|
||||
s += ",req"
|
||||
}
|
||||
if p.Optional {
|
||||
s += ",opt"
|
||||
}
|
||||
if p.Repeated {
|
||||
s += ",rep"
|
||||
}
|
||||
if p.Packed {
|
||||
s += ",packed"
|
||||
}
|
||||
if p.OrigName != p.Name {
|
||||
s += ",name=" + p.OrigName
|
||||
}
|
||||
if p.proto3 {
|
||||
s += ",proto3"
|
||||
}
|
||||
if len(p.Enum) > 0 {
|
||||
s += ",enum=" + p.Enum
|
||||
}
|
||||
if p.HasDefault {
|
||||
s += ",def=" + p.Default
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Parse populates p by parsing a string in the protobuf struct field tag style.
|
||||
func (p *Properties) Parse(s string) {
|
||||
// "bytes,49,opt,name=foo,def=hello!"
|
||||
fields := strings.Split(s, ",") // breaks def=, but handled below.
|
||||
if len(fields) < 2 {
|
||||
fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
|
||||
return
|
||||
}
|
||||
|
||||
p.Wire = fields[0]
|
||||
switch p.Wire {
|
||||
case "varint":
|
||||
p.WireType = WireVarint
|
||||
p.valEnc = (*Buffer).EncodeVarint
|
||||
p.valDec = (*Buffer).DecodeVarint
|
||||
p.valSize = sizeVarint
|
||||
case "fixed32":
|
||||
p.WireType = WireFixed32
|
||||
p.valEnc = (*Buffer).EncodeFixed32
|
||||
p.valDec = (*Buffer).DecodeFixed32
|
||||
p.valSize = sizeFixed32
|
||||
case "fixed64":
|
||||
p.WireType = WireFixed64
|
||||
p.valEnc = (*Buffer).EncodeFixed64
|
||||
p.valDec = (*Buffer).DecodeFixed64
|
||||
p.valSize = sizeFixed64
|
||||
case "zigzag32":
|
||||
p.WireType = WireVarint
|
||||
p.valEnc = (*Buffer).EncodeZigzag32
|
||||
p.valDec = (*Buffer).DecodeZigzag32
|
||||
p.valSize = sizeZigzag32
|
||||
case "zigzag64":
|
||||
p.WireType = WireVarint
|
||||
p.valEnc = (*Buffer).EncodeZigzag64
|
||||
p.valDec = (*Buffer).DecodeZigzag64
|
||||
p.valSize = sizeZigzag64
|
||||
case "bytes", "group":
|
||||
p.WireType = WireBytes
|
||||
// no numeric converter for non-numeric types
|
||||
default:
|
||||
fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
|
||||
return
|
||||
}
|
||||
|
||||
var err error
|
||||
p.Tag, err = strconv.Atoi(fields[1])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for i := 2; i < len(fields); i++ {
|
||||
f := fields[i]
|
||||
switch {
|
||||
case f == "req":
|
||||
p.Required = true
|
||||
case f == "opt":
|
||||
p.Optional = true
|
||||
case f == "rep":
|
||||
p.Repeated = true
|
||||
case f == "packed":
|
||||
p.Packed = true
|
||||
case strings.HasPrefix(f, "name="):
|
||||
p.OrigName = f[5:]
|
||||
case strings.HasPrefix(f, "enum="):
|
||||
p.Enum = f[5:]
|
||||
case f == "proto3":
|
||||
p.proto3 = true
|
||||
case strings.HasPrefix(f, "def="):
|
||||
p.HasDefault = true
|
||||
p.Default = f[4:] // rest of string
|
||||
if i+1 < len(fields) {
|
||||
// Commas aren't escaped, and def is always last.
|
||||
p.Default += "," + strings.Join(fields[i+1:], ",")
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func logNoSliceEnc(t1, t2 reflect.Type) {
|
||||
fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2)
|
||||
}
|
||||
|
||||
var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
|
||||
|
||||
// Initialize the fields for encoding and decoding.
|
||||
func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
|
||||
p.enc = nil
|
||||
p.dec = nil
|
||||
p.size = nil
|
||||
|
||||
switch t1 := typ; t1.Kind() {
|
||||
default:
|
||||
fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
|
||||
|
||||
// proto3 scalar types
|
||||
|
||||
case reflect.Bool:
|
||||
p.enc = (*Buffer).enc_proto3_bool
|
||||
p.dec = (*Buffer).dec_proto3_bool
|
||||
p.size = size_proto3_bool
|
||||
case reflect.Int32:
|
||||
p.enc = (*Buffer).enc_proto3_int32
|
||||
p.dec = (*Buffer).dec_proto3_int32
|
||||
p.size = size_proto3_int32
|
||||
case reflect.Uint32:
|
||||
p.enc = (*Buffer).enc_proto3_uint32
|
||||
p.dec = (*Buffer).dec_proto3_int32 // can reuse
|
||||
p.size = size_proto3_uint32
|
||||
case reflect.Int64, reflect.Uint64:
|
||||
p.enc = (*Buffer).enc_proto3_int64
|
||||
p.dec = (*Buffer).dec_proto3_int64
|
||||
p.size = size_proto3_int64
|
||||
case reflect.Float32:
|
||||
p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
|
||||
p.dec = (*Buffer).dec_proto3_int32
|
||||
p.size = size_proto3_uint32
|
||||
case reflect.Float64:
|
||||
p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
|
||||
p.dec = (*Buffer).dec_proto3_int64
|
||||
p.size = size_proto3_int64
|
||||
case reflect.String:
|
||||
p.enc = (*Buffer).enc_proto3_string
|
||||
p.dec = (*Buffer).dec_proto3_string
|
||||
p.size = size_proto3_string
|
||||
|
||||
case reflect.Ptr:
|
||||
switch t2 := t1.Elem(); t2.Kind() {
|
||||
default:
|
||||
fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
|
||||
break
|
||||
case reflect.Bool:
|
||||
p.enc = (*Buffer).enc_bool
|
||||
p.dec = (*Buffer).dec_bool
|
||||
p.size = size_bool
|
||||
case reflect.Int32:
|
||||
p.enc = (*Buffer).enc_int32
|
||||
p.dec = (*Buffer).dec_int32
|
||||
p.size = size_int32
|
||||
case reflect.Uint32:
|
||||
p.enc = (*Buffer).enc_uint32
|
||||
p.dec = (*Buffer).dec_int32 // can reuse
|
||||
p.size = size_uint32
|
||||
case reflect.Int64, reflect.Uint64:
|
||||
p.enc = (*Buffer).enc_int64
|
||||
p.dec = (*Buffer).dec_int64
|
||||
p.size = size_int64
|
||||
case reflect.Float32:
|
||||
p.enc = (*Buffer).enc_uint32 // can just treat them as bits
|
||||
p.dec = (*Buffer).dec_int32
|
||||
p.size = size_uint32
|
||||
case reflect.Float64:
|
||||
p.enc = (*Buffer).enc_int64 // can just treat them as bits
|
||||
p.dec = (*Buffer).dec_int64
|
||||
p.size = size_int64
|
||||
case reflect.String:
|
||||
p.enc = (*Buffer).enc_string
|
||||
p.dec = (*Buffer).dec_string
|
||||
p.size = size_string
|
||||
case reflect.Struct:
|
||||
p.stype = t1.Elem()
|
||||
p.isMarshaler = isMarshaler(t1)
|
||||
p.isUnmarshaler = isUnmarshaler(t1)
|
||||
if p.Wire == "bytes" {
|
||||
p.enc = (*Buffer).enc_struct_message
|
||||
p.dec = (*Buffer).dec_struct_message
|
||||
p.size = size_struct_message
|
||||
} else {
|
||||
p.enc = (*Buffer).enc_struct_group
|
||||
p.dec = (*Buffer).dec_struct_group
|
||||
p.size = size_struct_group
|
||||
}
|
||||
}
|
||||
|
||||
case reflect.Slice:
|
||||
switch t2 := t1.Elem(); t2.Kind() {
|
||||
default:
|
||||
logNoSliceEnc(t1, t2)
|
||||
break
|
||||
case reflect.Bool:
|
||||
if p.Packed {
|
||||
p.enc = (*Buffer).enc_slice_packed_bool
|
||||
p.size = size_slice_packed_bool
|
||||
} else {
|
||||
p.enc = (*Buffer).enc_slice_bool
|
||||
p.size = size_slice_bool
|
||||
}
|
||||
p.dec = (*Buffer).dec_slice_bool
|
||||
p.packedDec = (*Buffer).dec_slice_packed_bool
|
||||
case reflect.Int32:
|
||||
if p.Packed {
|
||||
p.enc = (*Buffer).enc_slice_packed_int32
|
||||
p.size = size_slice_packed_int32
|
||||
} else {
|
||||
p.enc = (*Buffer).enc_slice_int32
|
||||
p.size = size_slice_int32
|
||||
}
|
||||
p.dec = (*Buffer).dec_slice_int32
|
||||
p.packedDec = (*Buffer).dec_slice_packed_int32
|
||||
case reflect.Uint32:
|
||||
if p.Packed {
|
||||
p.enc = (*Buffer).enc_slice_packed_uint32
|
||||
p.size = size_slice_packed_uint32
|
||||
} else {
|
||||
p.enc = (*Buffer).enc_slice_uint32
|
||||
p.size = size_slice_uint32
|
||||
}
|
||||
p.dec = (*Buffer).dec_slice_int32
|
||||
p.packedDec = (*Buffer).dec_slice_packed_int32
|
||||
case reflect.Int64, reflect.Uint64:
|
||||
if p.Packed {
|
||||
p.enc = (*Buffer).enc_slice_packed_int64
|
||||
p.size = size_slice_packed_int64
|
||||
} else {
|
||||
p.enc = (*Buffer).enc_slice_int64
|
||||
p.size = size_slice_int64
|
||||
}
|
||||
p.dec = (*Buffer).dec_slice_int64
|
||||
p.packedDec = (*Buffer).dec_slice_packed_int64
|
||||
case reflect.Uint8:
|
||||
p.enc = (*Buffer).enc_slice_byte
|
||||
p.dec = (*Buffer).dec_slice_byte
|
||||
p.size = size_slice_byte
|
||||
if p.proto3 {
|
||||
p.enc = (*Buffer).enc_proto3_slice_byte
|
||||
p.size = size_proto3_slice_byte
|
||||
}
|
||||
case reflect.Float32, reflect.Float64:
|
||||
switch t2.Bits() {
|
||||
case 32:
|
||||
// can just treat them as bits
|
||||
if p.Packed {
|
||||
p.enc = (*Buffer).enc_slice_packed_uint32
|
||||
p.size = size_slice_packed_uint32
|
||||
} else {
|
||||
p.enc = (*Buffer).enc_slice_uint32
|
||||
p.size = size_slice_uint32
|
||||
}
|
||||
p.dec = (*Buffer).dec_slice_int32
|
||||
p.packedDec = (*Buffer).dec_slice_packed_int32
|
||||
case 64:
|
||||
// can just treat them as bits
|
||||
if p.Packed {
|
||||
p.enc = (*Buffer).enc_slice_packed_int64
|
||||
p.size = size_slice_packed_int64
|
||||
} else {
|
||||
p.enc = (*Buffer).enc_slice_int64
|
||||
p.size = size_slice_int64
|
||||
}
|
||||
p.dec = (*Buffer).dec_slice_int64
|
||||
p.packedDec = (*Buffer).dec_slice_packed_int64
|
||||
default:
|
||||
logNoSliceEnc(t1, t2)
|
||||
break
|
||||
}
|
||||
case reflect.String:
|
||||
p.enc = (*Buffer).enc_slice_string
|
||||
p.dec = (*Buffer).dec_slice_string
|
||||
p.size = size_slice_string
|
||||
case reflect.Ptr:
|
||||
switch t3 := t2.Elem(); t3.Kind() {
|
||||
default:
|
||||
fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3)
|
||||
break
|
||||
case reflect.Struct:
|
||||
p.stype = t2.Elem()
|
||||
p.isMarshaler = isMarshaler(t2)
|
||||
p.isUnmarshaler = isUnmarshaler(t2)
|
||||
if p.Wire == "bytes" {
|
||||
p.enc = (*Buffer).enc_slice_struct_message
|
||||
p.dec = (*Buffer).dec_slice_struct_message
|
||||
p.size = size_slice_struct_message
|
||||
} else {
|
||||
p.enc = (*Buffer).enc_slice_struct_group
|
||||
p.dec = (*Buffer).dec_slice_struct_group
|
||||
p.size = size_slice_struct_group
|
||||
}
|
||||
}
|
||||
case reflect.Slice:
|
||||
switch t2.Elem().Kind() {
|
||||
default:
|
||||
fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem())
|
||||
break
|
||||
case reflect.Uint8:
|
||||
p.enc = (*Buffer).enc_slice_slice_byte
|
||||
p.dec = (*Buffer).dec_slice_slice_byte
|
||||
p.size = size_slice_slice_byte
|
||||
}
|
||||
}
|
||||
|
||||
case reflect.Map:
|
||||
p.enc = (*Buffer).enc_new_map
|
||||
p.dec = (*Buffer).dec_new_map
|
||||
p.size = size_new_map
|
||||
|
||||
p.mtype = t1
|
||||
p.mkeyprop = &Properties{}
|
||||
p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
|
||||
p.mvalprop = &Properties{}
|
||||
vtype := p.mtype.Elem()
|
||||
if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
|
||||
// The value type is not a message (*T) or bytes ([]byte),
|
||||
// so we need encoders for the pointer to this type.
|
||||
vtype = reflect.PtrTo(vtype)
|
||||
}
|
||||
p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
|
||||
}
|
||||
|
||||
// precalculate tag code
|
||||
wire := p.WireType
|
||||
if p.Packed {
|
||||
wire = WireBytes
|
||||
}
|
||||
x := uint32(p.Tag)<<3 | uint32(wire)
|
||||
i := 0
|
||||
for i = 0; x > 127; i++ {
|
||||
p.tagbuf[i] = 0x80 | uint8(x&0x7F)
|
||||
x >>= 7
|
||||
}
|
||||
p.tagbuf[i] = uint8(x)
|
||||
p.tagcode = p.tagbuf[0 : i+1]
|
||||
|
||||
if p.stype != nil {
|
||||
if lockGetProp {
|
||||
p.sprop = GetProperties(p.stype)
|
||||
} else {
|
||||
p.sprop = getPropertiesLocked(p.stype)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
|
||||
unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
|
||||
)
|
||||
|
||||
// isMarshaler reports whether type t implements Marshaler.
|
||||
func isMarshaler(t reflect.Type) bool {
|
||||
// We're checking for (likely) pointer-receiver methods
|
||||
// so if t is not a pointer, something is very wrong.
|
||||
// The calls above only invoke isMarshaler on pointer types.
|
||||
if t.Kind() != reflect.Ptr {
|
||||
panic("proto: misuse of isMarshaler")
|
||||
}
|
||||
return t.Implements(marshalerType)
|
||||
}
|
||||
|
||||
// isUnmarshaler reports whether type t implements Unmarshaler.
|
||||
func isUnmarshaler(t reflect.Type) bool {
|
||||
// We're checking for (likely) pointer-receiver methods
|
||||
// so if t is not a pointer, something is very wrong.
|
||||
// The calls above only invoke isUnmarshaler on pointer types.
|
||||
if t.Kind() != reflect.Ptr {
|
||||
panic("proto: misuse of isUnmarshaler")
|
||||
}
|
||||
return t.Implements(unmarshalerType)
|
||||
}
|
||||
|
||||
// Init populates the properties from a protocol buffer struct tag.
|
||||
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
|
||||
p.init(typ, name, tag, f, true)
|
||||
}
|
||||
|
||||
func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
|
||||
// "bytes,49,opt,def=hello!"
|
||||
p.Name = name
|
||||
p.OrigName = name
|
||||
if f != nil {
|
||||
p.field = toField(f)
|
||||
}
|
||||
if tag == "" {
|
||||
return
|
||||
}
|
||||
p.Parse(tag)
|
||||
p.setEncAndDec(typ, f, lockGetProp)
|
||||
}
|
||||
|
||||
var (
|
||||
propertiesMu sync.RWMutex
|
||||
propertiesMap = make(map[reflect.Type]*StructProperties)
|
||||
)
|
||||
|
||||
// GetProperties returns the list of properties for the type represented by t.
|
||||
// t must represent a generated struct type of a protocol message.
|
||||
func GetProperties(t reflect.Type) *StructProperties {
|
||||
if t.Kind() != reflect.Struct {
|
||||
panic("proto: type must have kind struct")
|
||||
}
|
||||
|
||||
// Most calls to GetProperties in a long-running program will be
|
||||
// retrieving details for types we have seen before.
|
||||
propertiesMu.RLock()
|
||||
sprop, ok := propertiesMap[t]
|
||||
propertiesMu.RUnlock()
|
||||
if ok {
|
||||
if collectStats {
|
||||
stats.Chit++
|
||||
}
|
||||
return sprop
|
||||
}
|
||||
|
||||
propertiesMu.Lock()
|
||||
sprop = getPropertiesLocked(t)
|
||||
propertiesMu.Unlock()
|
||||
return sprop
|
||||
}
|
||||
|
||||
// getPropertiesLocked requires that propertiesMu is held.
|
||||
func getPropertiesLocked(t reflect.Type) *StructProperties {
|
||||
if prop, ok := propertiesMap[t]; ok {
|
||||
if collectStats {
|
||||
stats.Chit++
|
||||
}
|
||||
return prop
|
||||
}
|
||||
if collectStats {
|
||||
stats.Cmiss++
|
||||
}
|
||||
|
||||
prop := new(StructProperties)
|
||||
// in case of recursive protos, fill this in now.
|
||||
propertiesMap[t] = prop
|
||||
|
||||
// build properties
|
||||
prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType)
|
||||
prop.unrecField = invalidField
|
||||
prop.Prop = make([]*Properties, t.NumField())
|
||||
prop.order = make([]int, t.NumField())
|
||||
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
f := t.Field(i)
|
||||
p := new(Properties)
|
||||
name := f.Name
|
||||
p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
|
||||
|
||||
if f.Name == "XXX_extensions" { // special case
|
||||
p.enc = (*Buffer).enc_map
|
||||
p.dec = nil // not needed
|
||||
p.size = size_map
|
||||
}
|
||||
if f.Name == "XXX_unrecognized" { // special case
|
||||
prop.unrecField = toField(&f)
|
||||
}
|
||||
prop.Prop[i] = p
|
||||
prop.order[i] = i
|
||||
if debug {
|
||||
print(i, " ", f.Name, " ", t.String(), " ")
|
||||
if p.Tag > 0 {
|
||||
print(p.String())
|
||||
}
|
||||
print("\n")
|
||||
}
|
||||
if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") {
|
||||
fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
|
||||
}
|
||||
}
|
||||
|
||||
// Re-order prop.order.
|
||||
sort.Sort(prop)
|
||||
|
||||
// build required counts
|
||||
// build tags
|
||||
reqCount := 0
|
||||
prop.decoderOrigNames = make(map[string]int)
|
||||
for i, p := range prop.Prop {
|
||||
if strings.HasPrefix(p.Name, "XXX_") {
|
||||
// Internal fields should not appear in tags/origNames maps.
|
||||
// They are handled specially when encoding and decoding.
|
||||
continue
|
||||
}
|
||||
if p.Required {
|
||||
reqCount++
|
||||
}
|
||||
prop.decoderTags.put(p.Tag, i)
|
||||
prop.decoderOrigNames[p.OrigName] = i
|
||||
}
|
||||
prop.reqCount = reqCount
|
||||
|
||||
return prop
|
||||
}
|
||||
|
||||
// Return the Properties object for the x[0]'th field of the structure.
|
||||
func propByIndex(t reflect.Type, x []int) *Properties {
|
||||
if len(x) != 1 {
|
||||
fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
|
||||
return nil
|
||||
}
|
||||
prop := GetProperties(t)
|
||||
return prop.Prop[x[0]]
|
||||
}
|
||||
|
||||
// Get the address and type of a pointer to a struct from an interface.
|
||||
func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
|
||||
if pb == nil {
|
||||
err = ErrNil
|
||||
return
|
||||
}
|
||||
// get the reflect type of the pointer to the struct.
|
||||
t = reflect.TypeOf(pb)
|
||||
// get the address of the struct.
|
||||
value := reflect.ValueOf(pb)
|
||||
b = toStructPointer(value)
|
||||
return
|
||||
}
|
||||
|
||||
// A global registry of enum types.
|
||||
// The generated code will register the generated maps by calling RegisterEnum.
|
||||
|
||||
var enumValueMaps = make(map[string]map[string]int32)
|
||||
|
||||
// RegisterEnum is called from the generated code to install the enum descriptor
|
||||
// maps into the global table to aid parsing text format protocol buffers.
|
||||
func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
|
||||
if _, ok := enumValueMaps[typeName]; ok {
|
||||
panic("proto: duplicate enum registered: " + typeName)
|
||||
}
|
||||
enumValueMaps[typeName] = valueMap
|
||||
}
|
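properties.go is where the protobuf:"wire,tag,..." struct tags on generated types are parsed (Properties.Parse), bound to encoder/decoder functions (setEncAndDec), and cached per type (GetProperties). A stand-alone sketch of inspecting those parsed records (not part of the vendored files; Example is a hand-written stand-in for a generated message, and the canonical import path used by the test files further below is assumed):

package main

import (
	"fmt"
	"reflect"

	"github.com/golang/protobuf/proto"
)

// Example stands in for a protoc-generated struct; the tags use the
// "wire,tag,opt|req,name=..." format that Properties.Parse expects.
type Example struct {
	Name  *string `protobuf:"bytes,1,opt,name=name"`
	Count *int32  `protobuf:"varint,2,req,name=count"`
}

func main() {
	sp := proto.GetProperties(reflect.TypeOf(Example{}))
	for _, p := range sp.Prop {
		fmt.Printf("%s: tag=%d wire=%s required=%v\n", p.Name, p.Tag, p.Wire, p.Required)
	}
	// Name: tag=1 wire=bytes required=false
	// Count: tag=2 wire=varint required=true
}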
44
libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/Makefile
generated
vendored
Normal file
|
@ -0,0 +1,44 @@
|
|||
# Go support for Protocol Buffers - Google's data interchange format
|
||||
#
|
||||
# Copyright 2014 The Go Authors. All rights reserved.
|
||||
# https://github.com/golang/protobuf
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
|
||||
include ../../Make.protobuf
|
||||
|
||||
all: regenerate
|
||||
|
||||
regenerate:
|
||||
rm -f proto3.pb.go
|
||||
make proto3.pb.go
|
||||
|
||||
# The following rules are just aids to development. Not needed for typical testing.
|
||||
|
||||
diff: regenerate
|
||||
git diff proto3.pb.go
|
58
libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto
generated
vendored
Normal file
|
@ -0,0 +1,58 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package proto3_proto;
|
||||
|
||||
message Message {
|
||||
enum Humour {
|
||||
UNKNOWN = 0;
|
||||
PUNS = 1;
|
||||
SLAPSTICK = 2;
|
||||
BILL_BAILEY = 3;
|
||||
}
|
||||
|
||||
string name = 1;
|
||||
Humour hilarity = 2;
|
||||
uint32 height_in_cm = 3;
|
||||
bytes data = 4;
|
||||
int64 result_count = 7;
|
||||
bool true_scotsman = 8;
|
||||
float score = 9;
|
||||
|
||||
repeated uint64 key = 5;
|
||||
Nested nested = 6;
|
||||
}
|
||||
|
||||
message Nested {
|
||||
string bunny = 1;
|
||||
}
|
93
libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_test.go
generated
vendored
Normal file
|
@ -0,0 +1,93 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
pb "./proto3_proto"
|
||||
"github.com/golang/protobuf/proto"
|
||||
)
|
||||
|
||||
func TestProto3ZeroValues(t *testing.T) {
|
||||
tests := []struct {
|
||||
desc string
|
||||
m proto.Message
|
||||
}{
|
||||
{"zero message", &pb.Message{}},
|
||||
{"empty bytes field", &pb.Message{Data: []byte{}}},
|
||||
}
|
||||
for _, test := range tests {
|
||||
b, err := proto.Marshal(test.m)
|
||||
if err != nil {
|
||||
t.Errorf("%s: proto.Marshal: %v", test.desc, err)
|
||||
continue
|
||||
}
|
||||
if len(b) > 0 {
|
||||
t.Errorf("%s: Encoding is non-empty: %q", test.desc, b)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRoundTripProto3(t *testing.T) {
|
||||
m := &pb.Message{
|
||||
Name: "David", // (2 | 1<<3): 0x0a 0x05 "David"
|
||||
Hilarity: pb.Message_PUNS, // (0 | 2<<3): 0x10 0x01
|
||||
HeightInCm: 178, // (0 | 3<<3): 0x18 0xb2 0x01
|
||||
Data: []byte("roboto"), // (2 | 4<<3): 0x20 0x06 "roboto"
|
||||
ResultCount: 47, // (0 | 7<<3): 0x38 0x2f
|
||||
TrueScotsman: true, // (0 | 8<<3): 0x40 0x01
|
||||
Score: 8.1, // (5 | 9<<3): 0x4d <8.1>
|
||||
|
||||
Key: []uint64{1, 0xdeadbeef},
|
||||
Nested: &pb.Nested{
|
||||
Bunny: "Monty",
|
||||
},
|
||||
}
|
||||
t.Logf(" m: %v", m)
|
||||
|
||||
b, err := proto.Marshal(m)
|
||||
if err != nil {
|
||||
t.Fatalf("proto.Marshal: %v", err)
|
||||
}
|
||||
t.Logf(" b: %q", b)
|
||||
|
||||
m2 := new(pb.Message)
|
||||
if err := proto.Unmarshal(b, m2); err != nil {
|
||||
t.Fatalf("proto.Unmarshal: %v", err)
|
||||
}
|
||||
t.Logf("m2: %v", m2)
|
||||
|
||||
if !proto.Equal(m, m2) {
|
||||
t.Errorf("proto.Equal returned false:\n m: %v\nm2: %v", m, m2)
|
||||
}
|
||||
}
|
63
libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/size2_test.go
generated
vendored
Normal file
@ -0,0 +1,63 @@
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
// This is a separate file and package from size_test.go because that one uses
|
||||
// generated messages and thus may not be in package proto without having a circular
|
||||
// dependency, whereas this file tests unexported details of size.go.

func TestVarintSize(t *testing.T) {
	// Check the edge cases carefully.
	testCases := []struct {
		n    uint64
		size int
	}{
		{0, 1},
		{1, 1},
		{127, 1},
		{128, 2},
		{16383, 2},
		{16384, 3},
		{1<<63 - 1, 9},
		{1 << 63, 10},
	}
	for _, tc := range testCases {
		size := sizeVarint(tc.n)
		if size != tc.size {
			t.Errorf("sizeVarint(%d) = %d, want %d", tc.n, size, tc.size)
		}
	}
}
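The edge cases above pin down how long a proto varint encoding is: seven payload bits per byte, so the boundaries fall at 127, 16383, and so on. As a standalone sketch (the helper name is illustrative, not the library's unexported sizeVarint), the length can be computed like this:

package main

import "fmt"

// varintSize returns the number of bytes the proto wire format needs to
// encode n as a varint: one byte per seven bits of payload.
func varintSize(n uint64) int {
	size := 1
	for n >= 1<<7 {
		n >>= 7
		size++
	}
	return size
}

func main() {
	for _, n := range []uint64{0, 127, 128, 16383, 16384, 1<<63 - 1, 1 << 63} {
		fmt.Printf("varintSize(%d) = %d\n", n, varintSize(n)) // matches the table in the test
	}
}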
135
libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/size_test.go
generated
vendored
Normal file
@ -0,0 +1,135 @@
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto_test
|
||||
|
||||
import (
|
||||
"log"
|
||||
"testing"
|
||||
|
||||
proto3pb "./proto3_proto"
|
||||
pb "./testdata"
|
||||
. "github.com/golang/protobuf/proto"
|
||||
)
|
||||
|
||||
var messageWithExtension1 = &pb.MyMessage{Count: Int32(7)}
|
||||
|
||||
// messageWithExtension2 is in equal_test.go.
|
||||
var messageWithExtension3 = &pb.MyMessage{Count: Int32(8)}
|
||||
|
||||
func init() {
|
||||
if err := SetExtension(messageWithExtension1, pb.E_Ext_More, &pb.Ext{Data: String("Abbott")}); err != nil {
|
||||
log.Panicf("SetExtension: %v", err)
|
||||
}
|
||||
if err := SetExtension(messageWithExtension3, pb.E_Ext_More, &pb.Ext{Data: String("Costello")}); err != nil {
|
||||
log.Panicf("SetExtension: %v", err)
|
||||
}
|
||||
|
||||
// Force messageWithExtension3 to have the extension encoded.
|
||||
Marshal(messageWithExtension3)
|
||||
|
||||
}
|
||||
|
||||
var SizeTests = []struct {
|
||||
desc string
|
||||
pb Message
|
||||
}{
|
||||
{"empty", &pb.OtherMessage{}},
|
||||
// Basic types.
|
||||
{"bool", &pb.Defaults{F_Bool: Bool(true)}},
|
||||
{"int32", &pb.Defaults{F_Int32: Int32(12)}},
|
||||
{"negative int32", &pb.Defaults{F_Int32: Int32(-1)}},
|
||||
{"small int64", &pb.Defaults{F_Int64: Int64(1)}},
|
||||
{"big int64", &pb.Defaults{F_Int64: Int64(1 << 20)}},
|
||||
{"negative int64", &pb.Defaults{F_Int64: Int64(-1)}},
|
||||
{"fixed32", &pb.Defaults{F_Fixed32: Uint32(71)}},
|
||||
{"fixed64", &pb.Defaults{F_Fixed64: Uint64(72)}},
|
||||
{"uint32", &pb.Defaults{F_Uint32: Uint32(123)}},
|
||||
{"uint64", &pb.Defaults{F_Uint64: Uint64(124)}},
|
||||
{"float", &pb.Defaults{F_Float: Float32(12.6)}},
|
||||
{"double", &pb.Defaults{F_Double: Float64(13.9)}},
|
||||
{"string", &pb.Defaults{F_String: String("niles")}},
|
||||
{"bytes", &pb.Defaults{F_Bytes: []byte("wowsa")}},
|
||||
{"bytes, empty", &pb.Defaults{F_Bytes: []byte{}}},
|
||||
{"sint32", &pb.Defaults{F_Sint32: Int32(65)}},
|
||||
{"sint64", &pb.Defaults{F_Sint64: Int64(67)}},
|
||||
{"enum", &pb.Defaults{F_Enum: pb.Defaults_BLUE.Enum()}},
|
||||
// Repeated.
|
||||
{"empty repeated bool", &pb.MoreRepeated{Bools: []bool{}}},
|
||||
{"repeated bool", &pb.MoreRepeated{Bools: []bool{false, true, true, false}}},
|
||||
{"packed repeated bool", &pb.MoreRepeated{BoolsPacked: []bool{false, true, true, false, true, true, true}}},
|
||||
{"repeated int32", &pb.MoreRepeated{Ints: []int32{1, 12203, 1729, -1}}},
|
||||
{"repeated int32 packed", &pb.MoreRepeated{IntsPacked: []int32{1, 12203, 1729}}},
|
||||
{"repeated int64 packed", &pb.MoreRepeated{Int64SPacked: []int64{
|
||||
// Need enough large numbers to verify that the header is counting the number of bytes
|
||||
// for the field, not the number of elements.
|
||||
1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62,
|
||||
1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62,
|
||||
}}},
|
||||
{"repeated string", &pb.MoreRepeated{Strings: []string{"r", "ken", "gri"}}},
|
||||
{"repeated fixed", &pb.MoreRepeated{Fixeds: []uint32{1, 2, 3, 4}}},
|
||||
// Nested.
|
||||
{"nested", &pb.OldMessage{Nested: &pb.OldMessage_Nested{Name: String("whatever")}}},
|
||||
{"group", &pb.GroupOld{G: &pb.GroupOld_G{X: Int32(12345)}}},
|
||||
// Other things.
|
||||
{"unrecognized", &pb.MoreRepeated{XXX_unrecognized: []byte{13<<3 | 0, 4}}},
|
||||
{"extension (unencoded)", messageWithExtension1},
|
||||
{"extension (encoded)", messageWithExtension3},
|
||||
// proto3 message
|
||||
{"proto3 empty", &proto3pb.Message{}},
|
||||
{"proto3 bool", &proto3pb.Message{TrueScotsman: true}},
|
||||
{"proto3 int64", &proto3pb.Message{ResultCount: 1}},
|
||||
{"proto3 uint32", &proto3pb.Message{HeightInCm: 123}},
|
||||
{"proto3 float", &proto3pb.Message{Score: 12.6}},
|
||||
{"proto3 string", &proto3pb.Message{Name: "Snezana"}},
|
||||
{"proto3 bytes", &proto3pb.Message{Data: []byte("wowsa")}},
|
||||
{"proto3 bytes, empty", &proto3pb.Message{Data: []byte{}}},
|
||||
{"proto3 enum", &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}},
|
||||
|
||||
{"map field", &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob", 7: "Andrew"}}},
|
||||
{"map field with message", &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{0x7001: &pb.FloatingPoint{F: Float64(2.0)}}}},
|
||||
{"map field with bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte("this time for sure")}}},
|
||||
}
|
||||
|
||||
func TestSize(t *testing.T) {
|
||||
for _, tc := range SizeTests {
|
||||
size := Size(tc.pb)
|
||||
b, err := Marshal(tc.pb)
|
||||
if err != nil {
|
||||
t.Errorf("%v: Marshal failed: %v", tc.desc, err)
|
||||
continue
|
||||
}
|
||||
if size != len(b) {
|
||||
t.Errorf("%v: Size(%v) = %d, want %d", tc.desc, tc.pb, size, len(b))
|
||||
t.Logf("%v: bytes: %#v", tc.desc, b)
|
||||
}
|
||||
}
|
||||
}
|
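The table above exercises a single invariant: proto.Size must agree with the length of the bytes that proto.Marshal produces. A minimal usage sketch of that invariant, assuming the vendored import path added in this commit for the proto3 test message:

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	pb "github.com/golang/protobuf/proto/proto3_proto" // assumed import path
)

func main() {
	m := &pb.Message{Name: "Snezana", HeightInCm: 123}

	b, err := proto.Marshal(m)
	if err != nil {
		log.Fatalf("proto.Marshal: %v", err)
	}
	// Size computes the encoded length without allocating the encoding.
	fmt.Println(proto.Size(m) == len(b)) // expected: true
}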
50
libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/Makefile
generated
vendored
Normal file
@ -0,0 +1,50 @@
# Go support for Protocol Buffers - Google's data interchange format
|
||||
#
|
||||
# Copyright 2010 The Go Authors. All rights reserved.
|
||||
# https://github.com/golang/protobuf
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

include ../../Make.protobuf

all: regenerate

regenerate:
	rm -f test.pb.go
	make test.pb.go

# The following rules are just aids to development. Not needed for typical testing.

diff: regenerate
	git diff test.pb.go

restore:
	cp test.pb.go.golden test.pb.go

preserve:
	cp test.pb.go test.pb.go.golden
86
libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/golden_test.go
generated
vendored
Normal file
@ -0,0 +1,86 @@
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Verify that the compiler output for test.proto is unchanged.
|
||||
|
||||
package testdata
|
||||
|
||||
import (
|
||||
"crypto/sha1"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// sum returns in string form (for easy comparison) the SHA-1 hash of the named file.
|
||||
func sum(t *testing.T, name string) string {
|
||||
data, err := ioutil.ReadFile(name)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Logf("sum(%q): length is %d", name, len(data))
|
||||
hash := sha1.New()
|
||||
_, err = hash.Write(data)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return fmt.Sprintf("% x", hash.Sum(nil))
|
||||
}
|
||||
|
||||
func run(t *testing.T, name string, args ...string) {
|
||||
cmd := exec.Command(name, args...)
|
||||
cmd.Stdin = os.Stdin
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
err := cmd.Run()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGolden(t *testing.T) {
|
||||
// Compute the original checksum.
|
||||
goldenSum := sum(t, "test.pb.go")
|
||||
// Run the proto compiler.
|
||||
run(t, "protoc", "--go_out="+os.TempDir(), "test.proto")
|
||||
newFile := filepath.Join(os.TempDir(), "test.pb.go")
|
||||
defer os.Remove(newFile)
|
||||
// Compute the new checksum.
|
||||
newSum := sum(t, newFile)
|
||||
// Verify
|
||||
if newSum != goldenSum {
|
||||
run(t, "diff", "-u", "test.pb.go", newFile)
|
||||
t.Fatal("Code generated by protoc-gen-go has changed; update test.pb.go")
|
||||
}
|
||||
}
|
2389
libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large
434
libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.proto
generated
vendored
Normal file
@ -0,0 +1,434 @@
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// A feature-rich test file for the protocol compiler and libraries.
|
||||
|
||||
syntax = "proto2";
|
||||
|
||||
package test_proto;
|
||||
|
||||
enum FOO { FOO1 = 1; };
|
||||
|
||||
message GoEnum {
|
||||
required FOO foo = 1;
|
||||
}
|
||||
|
||||
message GoTestField {
|
||||
required string Label = 1;
|
||||
required string Type = 2;
|
||||
}
|
||||
|
||||
message GoTest {
|
||||
// An enum, for completeness.
|
||||
enum KIND {
|
||||
VOID = 0;
|
||||
|
||||
// Basic types
|
||||
BOOL = 1;
|
||||
BYTES = 2;
|
||||
FINGERPRINT = 3;
|
||||
FLOAT = 4;
|
||||
INT = 5;
|
||||
STRING = 6;
|
||||
TIME = 7;
|
||||
|
||||
// Groupings
|
||||
TUPLE = 8;
|
||||
ARRAY = 9;
|
||||
MAP = 10;
|
||||
|
||||
// Table types
|
||||
TABLE = 11;
|
||||
|
||||
// Functions
|
||||
FUNCTION = 12; // last tag
|
||||
};
|
||||
|
||||
// Some typical parameters
|
||||
required KIND Kind = 1;
|
||||
optional string Table = 2;
|
||||
optional int32 Param = 3;
|
||||
|
||||
// Required, repeated and optional foreign fields.
|
||||
required GoTestField RequiredField = 4;
|
||||
repeated GoTestField RepeatedField = 5;
|
||||
optional GoTestField OptionalField = 6;
|
||||
|
||||
// Required fields of all basic types
|
||||
required bool F_Bool_required = 10;
|
||||
required int32 F_Int32_required = 11;
|
||||
required int64 F_Int64_required = 12;
|
||||
required fixed32 F_Fixed32_required = 13;
|
||||
required fixed64 F_Fixed64_required = 14;
|
||||
required uint32 F_Uint32_required = 15;
|
||||
required uint64 F_Uint64_required = 16;
|
||||
required float F_Float_required = 17;
|
||||
required double F_Double_required = 18;
|
||||
required string F_String_required = 19;
|
||||
required bytes F_Bytes_required = 101;
|
||||
required sint32 F_Sint32_required = 102;
|
||||
required sint64 F_Sint64_required = 103;
|
||||
|
||||
// Repeated fields of all basic types
|
||||
repeated bool F_Bool_repeated = 20;
|
||||
repeated int32 F_Int32_repeated = 21;
|
||||
repeated int64 F_Int64_repeated = 22;
|
||||
repeated fixed32 F_Fixed32_repeated = 23;
|
||||
repeated fixed64 F_Fixed64_repeated = 24;
|
||||
repeated uint32 F_Uint32_repeated = 25;
|
||||
repeated uint64 F_Uint64_repeated = 26;
|
||||
repeated float F_Float_repeated = 27;
|
||||
repeated double F_Double_repeated = 28;
|
||||
repeated string F_String_repeated = 29;
|
||||
repeated bytes F_Bytes_repeated = 201;
|
||||
repeated sint32 F_Sint32_repeated = 202;
|
||||
repeated sint64 F_Sint64_repeated = 203;
|
||||
|
||||
// Optional fields of all basic types
|
||||
optional bool F_Bool_optional = 30;
|
||||
optional int32 F_Int32_optional = 31;
|
||||
optional int64 F_Int64_optional = 32;
|
||||
optional fixed32 F_Fixed32_optional = 33;
|
||||
optional fixed64 F_Fixed64_optional = 34;
|
||||
optional uint32 F_Uint32_optional = 35;
|
||||
optional uint64 F_Uint64_optional = 36;
|
||||
optional float F_Float_optional = 37;
|
||||
optional double F_Double_optional = 38;
|
||||
optional string F_String_optional = 39;
|
||||
optional bytes F_Bytes_optional = 301;
|
||||
optional sint32 F_Sint32_optional = 302;
|
||||
optional sint64 F_Sint64_optional = 303;
|
||||
|
||||
// Default-valued fields of all basic types
|
||||
optional bool F_Bool_defaulted = 40 [default=true];
|
||||
optional int32 F_Int32_defaulted = 41 [default=32];
|
||||
optional int64 F_Int64_defaulted = 42 [default=64];
|
||||
optional fixed32 F_Fixed32_defaulted = 43 [default=320];
|
||||
optional fixed64 F_Fixed64_defaulted = 44 [default=640];
|
||||
optional uint32 F_Uint32_defaulted = 45 [default=3200];
|
||||
optional uint64 F_Uint64_defaulted = 46 [default=6400];
|
||||
optional float F_Float_defaulted = 47 [default=314159.];
|
||||
optional double F_Double_defaulted = 48 [default=271828.];
|
||||
optional string F_String_defaulted = 49 [default="hello, \"world!\"\n"];
|
||||
optional bytes F_Bytes_defaulted = 401 [default="Bignose"];
|
||||
optional sint32 F_Sint32_defaulted = 402 [default = -32];
|
||||
optional sint64 F_Sint64_defaulted = 403 [default = -64];
|
||||
|
||||
// Packed repeated fields (no string or bytes).
|
||||
repeated bool F_Bool_repeated_packed = 50 [packed=true];
|
||||
repeated int32 F_Int32_repeated_packed = 51 [packed=true];
|
||||
repeated int64 F_Int64_repeated_packed = 52 [packed=true];
|
||||
repeated fixed32 F_Fixed32_repeated_packed = 53 [packed=true];
|
||||
repeated fixed64 F_Fixed64_repeated_packed = 54 [packed=true];
|
||||
repeated uint32 F_Uint32_repeated_packed = 55 [packed=true];
|
||||
repeated uint64 F_Uint64_repeated_packed = 56 [packed=true];
|
||||
repeated float F_Float_repeated_packed = 57 [packed=true];
|
||||
repeated double F_Double_repeated_packed = 58 [packed=true];
|
||||
repeated sint32 F_Sint32_repeated_packed = 502 [packed=true];
|
||||
repeated sint64 F_Sint64_repeated_packed = 503 [packed=true];
|
||||
|
||||
// Required, repeated, and optional groups.
|
||||
required group RequiredGroup = 70 {
|
||||
required string RequiredField = 71;
|
||||
};
|
||||
|
||||
repeated group RepeatedGroup = 80 {
|
||||
required string RequiredField = 81;
|
||||
};
|
||||
|
||||
optional group OptionalGroup = 90 {
|
||||
required string RequiredField = 91;
|
||||
};
|
||||
}
|
||||
|
||||
// For testing skipping of unrecognized fields.
|
||||
// Numbers are all big, larger than tag numbers in GoTestField,
|
||||
// the message used in the corresponding test.
|
||||
message GoSkipTest {
|
||||
required int32 skip_int32 = 11;
|
||||
required fixed32 skip_fixed32 = 12;
|
||||
required fixed64 skip_fixed64 = 13;
|
||||
required string skip_string = 14;
|
||||
required group SkipGroup = 15 {
|
||||
required int32 group_int32 = 16;
|
||||
required string group_string = 17;
|
||||
}
|
||||
}
|
||||
|
||||
// For testing packed/non-packed decoder switching.
|
||||
// A serialized instance of one should be deserializable as the other.
|
||||
message NonPackedTest {
|
||||
repeated int32 a = 1;
|
||||
}
|
||||
|
||||
message PackedTest {
|
||||
repeated int32 b = 1 [packed=true];
|
||||
}
|
||||
|
||||
message MaxTag {
|
||||
// Maximum possible tag number.
|
||||
optional string last_field = 536870911;
|
||||
}
|
||||
|
||||
message OldMessage {
|
||||
message Nested {
|
||||
optional string name = 1;
|
||||
}
|
||||
optional Nested nested = 1;
|
||||
|
||||
optional int32 num = 2;
|
||||
}
|
||||
|
||||
// NewMessage is wire compatible with OldMessage;
|
||||
// imagine it as a future version.
|
||||
message NewMessage {
|
||||
message Nested {
|
||||
optional string name = 1;
|
||||
optional string food_group = 2;
|
||||
}
|
||||
optional Nested nested = 1;
|
||||
|
||||
// This is an int32 in OldMessage.
|
||||
optional int64 num = 2;
|
||||
}
|
||||
|
||||
// Smaller tests for ASCII formatting.
|
||||
|
||||
message InnerMessage {
|
||||
required string host = 1;
|
||||
optional int32 port = 2 [default=4000];
|
||||
optional bool connected = 3;
|
||||
}
|
||||
|
||||
message OtherMessage {
|
||||
optional int64 key = 1;
|
||||
optional bytes value = 2;
|
||||
optional float weight = 3;
|
||||
optional InnerMessage inner = 4;
|
||||
}
|
||||
|
||||
message MyMessage {
|
||||
required int32 count = 1;
|
||||
optional string name = 2;
|
||||
optional string quote = 3;
|
||||
repeated string pet = 4;
|
||||
optional InnerMessage inner = 5;
|
||||
repeated OtherMessage others = 6;
|
||||
repeated InnerMessage rep_inner = 12;
|
||||
|
||||
enum Color {
|
||||
RED = 0;
|
||||
GREEN = 1;
|
||||
BLUE = 2;
|
||||
};
|
||||
optional Color bikeshed = 7;
|
||||
|
||||
optional group SomeGroup = 8 {
|
||||
optional int32 group_field = 9;
|
||||
}
|
||||
|
||||
// This field becomes [][]byte in the generated code.
|
||||
repeated bytes rep_bytes = 10;
|
||||
|
||||
optional double bigfloat = 11;
|
||||
|
||||
extensions 100 to max;
|
||||
}
|
||||
|
||||
message Ext {
|
||||
extend MyMessage {
|
||||
optional Ext more = 103;
|
||||
optional string text = 104;
|
||||
optional int32 number = 105;
|
||||
}
|
||||
|
||||
optional string data = 1;
|
||||
}
|
||||
|
||||
extend MyMessage {
|
||||
repeated string greeting = 106;
|
||||
}
|
||||
|
||||
message MyMessageSet {
|
||||
option message_set_wire_format = true;
|
||||
extensions 100 to max;
|
||||
}
|
||||
|
||||
message Empty {
|
||||
}
|
||||
|
||||
extend MyMessageSet {
|
||||
optional Empty x201 = 201;
|
||||
optional Empty x202 = 202;
|
||||
optional Empty x203 = 203;
|
||||
optional Empty x204 = 204;
|
||||
optional Empty x205 = 205;
|
||||
optional Empty x206 = 206;
|
||||
optional Empty x207 = 207;
|
||||
optional Empty x208 = 208;
|
||||
optional Empty x209 = 209;
|
||||
optional Empty x210 = 210;
|
||||
optional Empty x211 = 211;
|
||||
optional Empty x212 = 212;
|
||||
optional Empty x213 = 213;
|
||||
optional Empty x214 = 214;
|
||||
optional Empty x215 = 215;
|
||||
optional Empty x216 = 216;
|
||||
optional Empty x217 = 217;
|
||||
optional Empty x218 = 218;
|
||||
optional Empty x219 = 219;
|
||||
optional Empty x220 = 220;
|
||||
optional Empty x221 = 221;
|
||||
optional Empty x222 = 222;
|
||||
optional Empty x223 = 223;
|
||||
optional Empty x224 = 224;
|
||||
optional Empty x225 = 225;
|
||||
optional Empty x226 = 226;
|
||||
optional Empty x227 = 227;
|
||||
optional Empty x228 = 228;
|
||||
optional Empty x229 = 229;
|
||||
optional Empty x230 = 230;
|
||||
optional Empty x231 = 231;
|
||||
optional Empty x232 = 232;
|
||||
optional Empty x233 = 233;
|
||||
optional Empty x234 = 234;
|
||||
optional Empty x235 = 235;
|
||||
optional Empty x236 = 236;
|
||||
optional Empty x237 = 237;
|
||||
optional Empty x238 = 238;
|
||||
optional Empty x239 = 239;
|
||||
optional Empty x240 = 240;
|
||||
optional Empty x241 = 241;
|
||||
optional Empty x242 = 242;
|
||||
optional Empty x243 = 243;
|
||||
optional Empty x244 = 244;
|
||||
optional Empty x245 = 245;
|
||||
optional Empty x246 = 246;
|
||||
optional Empty x247 = 247;
|
||||
optional Empty x248 = 248;
|
||||
optional Empty x249 = 249;
|
||||
optional Empty x250 = 250;
|
||||
}
|
||||
|
||||
message MessageList {
|
||||
repeated group Message = 1 {
|
||||
required string name = 2;
|
||||
required int32 count = 3;
|
||||
}
|
||||
}
|
||||
|
||||
message Strings {
|
||||
optional string string_field = 1;
|
||||
optional bytes bytes_field = 2;
|
||||
}
|
||||
|
||||
message Defaults {
|
||||
enum Color {
|
||||
RED = 0;
|
||||
GREEN = 1;
|
||||
BLUE = 2;
|
||||
}
|
||||
|
||||
// Default-valued fields of all basic types.
|
||||
// Same as GoTest, but copied here to make testing easier.
|
||||
optional bool F_Bool = 1 [default=true];
|
||||
optional int32 F_Int32 = 2 [default=32];
|
||||
optional int64 F_Int64 = 3 [default=64];
|
||||
optional fixed32 F_Fixed32 = 4 [default=320];
|
||||
optional fixed64 F_Fixed64 = 5 [default=640];
|
||||
optional uint32 F_Uint32 = 6 [default=3200];
|
||||
optional uint64 F_Uint64 = 7 [default=6400];
|
||||
optional float F_Float = 8 [default=314159.];
|
||||
optional double F_Double = 9 [default=271828.];
|
||||
optional string F_String = 10 [default="hello, \"world!\"\n"];
|
||||
optional bytes F_Bytes = 11 [default="Bignose"];
|
||||
optional sint32 F_Sint32 = 12 [default=-32];
|
||||
optional sint64 F_Sint64 = 13 [default=-64];
|
||||
optional Color F_Enum = 14 [default=GREEN];
|
||||
|
||||
// More fields with crazy defaults.
|
||||
optional float F_Pinf = 15 [default=inf];
|
||||
optional float F_Ninf = 16 [default=-inf];
|
||||
optional float F_Nan = 17 [default=nan];
|
||||
|
||||
// Sub-message.
|
||||
optional SubDefaults sub = 18;
|
||||
|
||||
// Redundant but explicit defaults.
|
||||
optional string str_zero = 19 [default=""];
|
||||
}
|
||||
|
||||
message SubDefaults {
|
||||
optional int64 n = 1 [default=7];
|
||||
}
|
||||
|
||||
message RepeatedEnum {
|
||||
enum Color {
|
||||
RED = 1;
|
||||
}
|
||||
repeated Color color = 1;
|
||||
}
|
||||
|
||||
message MoreRepeated {
|
||||
repeated bool bools = 1;
|
||||
repeated bool bools_packed = 2 [packed=true];
|
||||
repeated int32 ints = 3;
|
||||
repeated int32 ints_packed = 4 [packed=true];
|
||||
repeated int64 int64s_packed = 7 [packed=true];
|
||||
repeated string strings = 5;
|
||||
repeated fixed32 fixeds = 6;
|
||||
}
|
||||
|
||||
// GroupOld and GroupNew have the same wire format.
|
||||
// GroupNew has a new field inside a group.
|
||||
|
||||
message GroupOld {
|
||||
optional group G = 101 {
|
||||
optional int32 x = 2;
|
||||
}
|
||||
}
|
||||
|
||||
message GroupNew {
|
||||
optional group G = 101 {
|
||||
optional int32 x = 2;
|
||||
optional int32 y = 3;
|
||||
}
|
||||
}
|
||||
|
||||
message FloatingPoint {
|
||||
required double f = 1;
|
||||
}
|
||||
|
||||
message MessageWithMap {
|
||||
map<int32, string> name_mapping = 1;
|
||||
map<sint64, FloatingPoint> msg_mapping = 2;
|
||||
map<bool, bytes> byte_mapping = 3;
|
||||
}
|
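test.proto above declares extension ranges on MyMessage together with the Ext extension. As a small sketch of how those extensions are set and read through the proto package, mirroring the SetExtension calls in size_test.go (the import path and the GetData getter are assumptions about the generated testdata package, not taken from this diff):

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	pb "github.com/golang/protobuf/proto/testdata" // assumed import path
)

func main() {
	m := &pb.MyMessage{Count: proto.Int32(7)}

	// Attach the Ext.more extension declared in test.proto.
	if err := proto.SetExtension(m, pb.E_Ext_More, &pb.Ext{Data: proto.String("Abbott")}); err != nil {
		log.Fatalf("SetExtension: %v", err)
	}

	// Read it back; GetExtension returns an interface{} holding a *pb.Ext.
	ext, err := proto.GetExtension(m, pb.E_Ext_More)
	if err != nil {
		log.Fatalf("GetExtension: %v", err)
	}
	fmt.Println(ext.(*pb.Ext).GetData())
}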
789
libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/text.go
generated
vendored
Normal file
@ -0,0 +1,789 @@
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
// Functions for writing the text protocol buffer format.
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"math"
|
||||
"os"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
newline = []byte("\n")
|
||||
spaces = []byte(" ")
|
||||
gtNewline = []byte(">\n")
|
||||
endBraceNewline = []byte("}\n")
|
||||
backslashN = []byte{'\\', 'n'}
|
||||
backslashR = []byte{'\\', 'r'}
|
||||
backslashT = []byte{'\\', 't'}
|
||||
backslashDQ = []byte{'\\', '"'}
|
||||
backslashBS = []byte{'\\', '\\'}
|
||||
posInf = []byte("inf")
|
||||
negInf = []byte("-inf")
|
||||
nan = []byte("nan")
|
||||
)
|
||||
|
||||
type writer interface {
|
||||
io.Writer
|
||||
WriteByte(byte) error
|
||||
}
|
||||
|
||||
// textWriter is an io.Writer that tracks its indentation level.
|
||||
type textWriter struct {
|
||||
ind int
|
||||
complete bool // if the current position is a complete line
|
||||
compact bool // whether to write out as a one-liner
|
||||
w writer
|
||||
}
|
||||
|
||||
func (w *textWriter) WriteString(s string) (n int, err error) {
|
||||
if !strings.Contains(s, "\n") {
|
||||
if !w.compact && w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
w.complete = false
|
||||
return io.WriteString(w.w, s)
|
||||
}
|
||||
// WriteString is typically called without newlines, so this
|
||||
// codepath and its copy are rare. We copy to avoid
|
||||
// duplicating all of Write's logic here.
|
||||
return w.Write([]byte(s))
|
||||
}
|
||||
|
||||
func (w *textWriter) Write(p []byte) (n int, err error) {
|
||||
newlines := bytes.Count(p, newline)
|
||||
if newlines == 0 {
|
||||
if !w.compact && w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
n, err = w.w.Write(p)
|
||||
w.complete = false
|
||||
return n, err
|
||||
}
|
||||
|
||||
frags := bytes.SplitN(p, newline, newlines+1)
|
||||
if w.compact {
|
||||
for i, frag := range frags {
|
||||
if i > 0 {
|
||||
if err := w.w.WriteByte(' '); err != nil {
|
||||
return n, err
|
||||
}
|
||||
n++
|
||||
}
|
||||
nn, err := w.w.Write(frag)
|
||||
n += nn
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
for i, frag := range frags {
|
||||
if w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
nn, err := w.w.Write(frag)
|
||||
n += nn
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
if i+1 < len(frags) {
|
||||
if err := w.w.WriteByte('\n'); err != nil {
|
||||
return n, err
|
||||
}
|
||||
n++
|
||||
}
|
||||
}
|
||||
w.complete = len(frags[len(frags)-1]) == 0
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (w *textWriter) WriteByte(c byte) error {
|
||||
if w.compact && c == '\n' {
|
||||
c = ' '
|
||||
}
|
||||
if !w.compact && w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
err := w.w.WriteByte(c)
|
||||
w.complete = c == '\n'
|
||||
return err
|
||||
}
|
||||
|
||||
func (w *textWriter) indent() { w.ind++ }
|
||||
|
||||
func (w *textWriter) unindent() {
|
||||
if w.ind == 0 {
|
||||
log.Printf("proto: textWriter unindented too far")
|
||||
return
|
||||
}
|
||||
w.ind--
|
||||
}
|
||||
|
||||
func writeName(w *textWriter, props *Properties) error {
|
||||
if _, err := w.WriteString(props.OrigName); err != nil {
|
||||
return err
|
||||
}
|
||||
if props.Wire != "group" {
|
||||
return w.WriteByte(':')
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
messageSetType = reflect.TypeOf((*MessageSet)(nil)).Elem()
|
||||
)
|
||||
|
||||
// raw is the interface satisfied by RawMessage.
|
||||
type raw interface {
|
||||
Bytes() []byte
|
||||
}
|
||||
|
||||
func writeStruct(w *textWriter, sv reflect.Value) error {
|
||||
if sv.Type() == messageSetType {
|
||||
return writeMessageSet(w, sv.Addr().Interface().(*MessageSet))
|
||||
}
|
||||
|
||||
st := sv.Type()
|
||||
sprops := GetProperties(st)
|
||||
for i := 0; i < sv.NumField(); i++ {
|
||||
fv := sv.Field(i)
|
||||
props := sprops.Prop[i]
|
||||
name := st.Field(i).Name
|
||||
|
||||
if strings.HasPrefix(name, "XXX_") {
|
||||
// There are two XXX_ fields:
|
||||
// XXX_unrecognized []byte
|
||||
// XXX_extensions map[int32]proto.Extension
|
||||
// The first is handled here;
|
||||
// the second is handled at the bottom of this function.
|
||||
if name == "XXX_unrecognized" && !fv.IsNil() {
|
||||
if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
if fv.Kind() == reflect.Ptr && fv.IsNil() {
|
||||
// Field not filled in. This could be an optional field or
|
||||
// a required field that wasn't filled in. Either way, there
|
||||
// isn't anything we can show for it.
|
||||
continue
|
||||
}
|
||||
if fv.Kind() == reflect.Slice && fv.IsNil() {
|
||||
// Repeated field that is empty, or a bytes field that is unused.
|
||||
continue
|
||||
}
|
||||
|
||||
if props.Repeated && fv.Kind() == reflect.Slice {
|
||||
// Repeated field.
|
||||
for j := 0; j < fv.Len(); j++ {
|
||||
if err := writeName(w, props); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
v := fv.Index(j)
|
||||
if v.Kind() == reflect.Ptr && v.IsNil() {
|
||||
// A nil message in a repeated field is not valid,
|
||||
// but we can handle that more gracefully than panicking.
|
||||
if _, err := w.Write([]byte("<nil>\n")); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
if err := writeAny(w, v, props); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
if fv.Kind() == reflect.Map {
|
||||
// Map fields are rendered as a repeated struct with key/value fields.
|
||||
keys := fv.MapKeys() // TODO: should we sort these for deterministic output?
|
||||
sort.Sort(mapKeys(keys))
|
||||
for _, key := range keys {
|
||||
val := fv.MapIndex(key)
|
||||
if err := writeName(w, props); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// open struct
|
||||
if err := w.WriteByte('<'); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.indent()
|
||||
// key
|
||||
if _, err := w.WriteString("key:"); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := writeAny(w, key, props.mkeyprop); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
// value
|
||||
if _, err := w.WriteString("value:"); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := writeAny(w, val, props.mvalprop); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
// close struct
|
||||
w.unindent()
|
||||
if err := w.WriteByte('>'); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
|
||||
// empty bytes field
|
||||
continue
|
||||
}
|
||||
if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
|
||||
// proto3 non-repeated scalar field; skip if zero value
|
||||
switch fv.Kind() {
|
||||
case reflect.Bool:
|
||||
if !fv.Bool() {
|
||||
continue
|
||||
}
|
||||
case reflect.Int32, reflect.Int64:
|
||||
if fv.Int() == 0 {
|
||||
continue
|
||||
}
|
||||
case reflect.Uint32, reflect.Uint64:
|
||||
if fv.Uint() == 0 {
|
||||
continue
|
||||
}
|
||||
case reflect.Float32, reflect.Float64:
|
||||
if fv.Float() == 0 {
|
||||
continue
|
||||
}
|
||||
case reflect.String:
|
||||
if fv.String() == "" {
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := writeName(w, props); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if b, ok := fv.Interface().(raw); ok {
|
||||
if err := writeRaw(w, b.Bytes()); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Enums have a String method, so writeAny will work fine.
|
||||
if err := writeAny(w, fv, props); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Extensions (the XXX_extensions field).
|
||||
pv := sv.Addr()
|
||||
if pv.Type().Implements(extendableProtoType) {
|
||||
if err := writeExtensions(w, pv); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeRaw writes an uninterpreted raw message.
|
||||
func writeRaw(w *textWriter, b []byte) error {
|
||||
if err := w.WriteByte('<'); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.indent()
|
||||
if err := writeUnknownStruct(w, b); err != nil {
|
||||
return err
|
||||
}
|
||||
w.unindent()
|
||||
if err := w.WriteByte('>'); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeAny writes an arbitrary field.
|
||||
func writeAny(w *textWriter, v reflect.Value, props *Properties) error {
|
||||
v = reflect.Indirect(v)
|
||||
|
||||
// Floats have special cases.
|
||||
if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
|
||||
x := v.Float()
|
||||
var b []byte
|
||||
switch {
|
||||
case math.IsInf(x, 1):
|
||||
b = posInf
|
||||
case math.IsInf(x, -1):
|
||||
b = negInf
|
||||
case math.IsNaN(x):
|
||||
b = nan
|
||||
}
|
||||
if b != nil {
|
||||
_, err := w.Write(b)
|
||||
return err
|
||||
}
|
||||
// Other values are handled below.
|
||||
}
|
||||
|
||||
// We don't attempt to serialise every possible value type; only those
|
||||
// that can occur in protocol buffers.
|
||||
switch v.Kind() {
|
||||
case reflect.Slice:
|
||||
// Should only be a []byte; repeated fields are handled in writeStruct.
|
||||
if err := writeString(w, string(v.Interface().([]byte))); err != nil {
|
||||
return err
|
||||
}
|
||||
case reflect.String:
|
||||
if err := writeString(w, v.String()); err != nil {
|
||||
return err
|
||||
}
|
||||
case reflect.Struct:
|
||||
// Required/optional group/message.
|
||||
var bra, ket byte = '<', '>'
|
||||
if props != nil && props.Wire == "group" {
|
||||
bra, ket = '{', '}'
|
||||
}
|
||||
if err := w.WriteByte(bra); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.indent()
|
||||
if tm, ok := v.Interface().(encoding.TextMarshaler); ok {
|
||||
text, err := tm.MarshalText()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err = w.Write(text); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if err := writeStruct(w, v); err != nil {
|
||||
return err
|
||||
}
|
||||
w.unindent()
|
||||
if err := w.WriteByte(ket); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
_, err := fmt.Fprint(w, v.Interface())
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// equivalent to C's isprint.
|
||||
func isprint(c byte) bool {
|
||||
return c >= 0x20 && c < 0x7f
|
||||
}
|
||||
|
||||
// writeString writes a string in the protocol buffer text format.
|
||||
// It is similar to strconv.Quote except we don't use Go escape sequences,
|
||||
// we treat the string as a byte sequence, and we use octal escapes.
|
||||
// These differences are to maintain interoperability with the other
|
||||
// languages' implementations of the text format.
|
||||
func writeString(w *textWriter, s string) error {
|
||||
// use WriteByte here to get any needed indent
|
||||
if err := w.WriteByte('"'); err != nil {
|
||||
return err
|
||||
}
|
||||
// Loop over the bytes, not the runes.
|
||||
for i := 0; i < len(s); i++ {
|
||||
var err error
|
||||
// Divergence from C++: we don't escape apostrophes.
|
||||
// There's no need to escape them, and the C++ parser
|
||||
// copes with a naked apostrophe.
|
||||
switch c := s[i]; c {
|
||||
case '\n':
|
||||
_, err = w.w.Write(backslashN)
|
||||
case '\r':
|
||||
_, err = w.w.Write(backslashR)
|
||||
case '\t':
|
||||
_, err = w.w.Write(backslashT)
|
||||
case '"':
|
||||
_, err = w.w.Write(backslashDQ)
|
||||
case '\\':
|
||||
_, err = w.w.Write(backslashBS)
|
||||
default:
|
||||
if isprint(c) {
|
||||
err = w.w.WriteByte(c)
|
||||
} else {
|
||||
_, err = fmt.Fprintf(w.w, "\\%03o", c)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return w.WriteByte('"')
|
||||
}
|
||||
|
||||
func writeMessageSet(w *textWriter, ms *MessageSet) error {
|
||||
for _, item := range ms.Item {
|
||||
id := *item.TypeId
|
||||
if msd, ok := messageSetMap[id]; ok {
|
||||
// Known message set type.
|
||||
if _, err := fmt.Fprintf(w, "[%s]: <\n", msd.name); err != nil {
|
||||
return err
|
||||
}
|
||||
w.indent()
|
||||
|
||||
pb := reflect.New(msd.t.Elem())
|
||||
if err := Unmarshal(item.Message, pb.Interface().(Message)); err != nil {
|
||||
if _, err := fmt.Fprintf(w, "/* bad message: %v */\n", err); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := writeStruct(w, pb.Elem()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Unknown type.
|
||||
if _, err := fmt.Fprintf(w, "[%d]: <\n", id); err != nil {
|
||||
return err
|
||||
}
|
||||
w.indent()
|
||||
if err := writeUnknownStruct(w, item.Message); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.unindent()
|
||||
if _, err := w.Write(gtNewline); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeUnknownStruct(w *textWriter, data []byte) (err error) {
|
||||
if !w.compact {
|
||||
if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
b := NewBuffer(data)
|
||||
for b.index < len(b.buf) {
|
||||
x, err := b.DecodeVarint()
|
||||
if err != nil {
|
||||
_, err := fmt.Fprintf(w, "/* %v */\n", err)
|
||||
return err
|
||||
}
|
||||
wire, tag := x&7, x>>3
|
||||
if wire == WireEndGroup {
|
||||
w.unindent()
|
||||
if _, err := w.Write(endBraceNewline); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
if _, err := fmt.Fprint(w, tag); err != nil {
|
||||
return err
|
||||
}
|
||||
if wire != WireStartGroup {
|
||||
if err := w.WriteByte(':'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if !w.compact || wire == WireStartGroup {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
switch wire {
|
||||
case WireBytes:
|
||||
buf, e := b.DecodeRawBytes(false)
|
||||
if e == nil {
|
||||
_, err = fmt.Fprintf(w, "%q", buf)
|
||||
} else {
|
||||
_, err = fmt.Fprintf(w, "/* %v */", e)
|
||||
}
|
||||
case WireFixed32:
|
||||
x, err = b.DecodeFixed32()
|
||||
err = writeUnknownInt(w, x, err)
|
||||
case WireFixed64:
|
||||
x, err = b.DecodeFixed64()
|
||||
err = writeUnknownInt(w, x, err)
|
||||
case WireStartGroup:
|
||||
err = w.WriteByte('{')
|
||||
w.indent()
|
||||
case WireVarint:
|
||||
x, err = b.DecodeVarint()
|
||||
err = writeUnknownInt(w, x, err)
|
||||
default:
|
||||
_, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeUnknownInt(w *textWriter, x uint64, err error) error {
|
||||
if err == nil {
|
||||
_, err = fmt.Fprint(w, x)
|
||||
} else {
|
||||
_, err = fmt.Fprintf(w, "/* %v */", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
type int32Slice []int32
|
||||
|
||||
func (s int32Slice) Len() int { return len(s) }
|
||||
func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
|
||||
func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
|
||||
// writeExtensions writes all the extensions in pv.
|
||||
// pv is assumed to be a pointer to a protocol message struct that is extendable.
|
||||
func writeExtensions(w *textWriter, pv reflect.Value) error {
|
||||
emap := extensionMaps[pv.Type().Elem()]
|
||||
ep := pv.Interface().(extendableProto)
|
||||
|
||||
// Order the extensions by ID.
|
||||
// This isn't strictly necessary, but it will give us
|
||||
// canonical output, which will also make testing easier.
|
||||
m := ep.ExtensionMap()
|
||||
ids := make([]int32, 0, len(m))
|
||||
for id := range m {
|
||||
ids = append(ids, id)
|
||||
}
|
||||
sort.Sort(int32Slice(ids))
|
||||
|
||||
for _, extNum := range ids {
|
||||
ext := m[extNum]
|
||||
var desc *ExtensionDesc
|
||||
if emap != nil {
|
||||
desc = emap[extNum]
|
||||
}
|
||||
if desc == nil {
|
||||
// Unknown extension.
|
||||
if err := writeUnknownStruct(w, ext.enc); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
pb, err := GetExtension(ep, desc)
|
||||
if err != nil {
|
||||
if _, err := fmt.Fprintln(os.Stderr, "proto: failed getting extension: ", err); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Repeated extensions will appear as a slice.
|
||||
if !desc.repeated() {
|
||||
if err := writeExtension(w, desc.Name, pb); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
v := reflect.ValueOf(pb)
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeExtension(w *textWriter, name string, pb interface{}) error {
|
||||
if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *textWriter) writeIndent() {
|
||||
if !w.complete {
|
||||
return
|
||||
}
|
||||
remain := w.ind * 2
|
||||
for remain > 0 {
|
||||
n := remain
|
||||
if n > len(spaces) {
|
||||
n = len(spaces)
|
||||
}
|
||||
w.w.Write(spaces[:n])
|
||||
remain -= n
|
||||
}
|
||||
w.complete = false
|
||||
}
|
||||
|
||||
func marshalText(w io.Writer, pb Message, compact bool) error {
|
||||
val := reflect.ValueOf(pb)
|
||||
if pb == nil || val.IsNil() {
|
||||
w.Write([]byte("<nil>"))
|
||||
return nil
|
||||
}
|
||||
var bw *bufio.Writer
|
||||
ww, ok := w.(writer)
|
||||
if !ok {
|
||||
bw = bufio.NewWriter(w)
|
||||
ww = bw
|
||||
}
|
||||
aw := &textWriter{
|
||||
w: ww,
|
||||
complete: true,
|
||||
compact: compact,
|
||||
}
|
||||
|
||||
if tm, ok := pb.(encoding.TextMarshaler); ok {
|
||||
text, err := tm.MarshalText()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err = aw.Write(text); err != nil {
|
||||
return err
|
||||
}
|
||||
if bw != nil {
|
||||
return bw.Flush()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// Dereference the received pointer so we don't have outer < and >.
|
||||
v := reflect.Indirect(val)
|
||||
if err := writeStruct(aw, v); err != nil {
|
||||
return err
|
||||
}
|
||||
if bw != nil {
|
||||
return bw.Flush()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalText writes a given protocol buffer in text format.
|
||||
// The only errors returned are from w.
|
||||
func MarshalText(w io.Writer, pb Message) error {
|
||||
return marshalText(w, pb, false)
|
||||
}
|
||||
|
||||
// MarshalTextString is the same as MarshalText, but returns the string directly.
|
||||
func MarshalTextString(pb Message) string {
|
||||
var buf bytes.Buffer
|
||||
marshalText(&buf, pb, false)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// CompactText writes a given protocol buffer in compact text format (one line).
|
||||
func CompactText(w io.Writer, pb Message) error { return marshalText(w, pb, true) }
|
||||
|
||||
// CompactTextString is the same as CompactText, but returns the string directly.
|
||||
func CompactTextString(pb Message) string {
|
||||
var buf bytes.Buffer
|
||||
marshalText(&buf, pb, true)
|
||||
return buf.String()
|
||||
}
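For context, the text-format entry points above (`MarshalText`, `MarshalTextString`, `CompactTextString`) pair with `UnmarshalText` from text_parser.go below. A minimal round-trip sketch, using the package's own test message type (`pb` stands for the testdata package used in the tests further down):

```go
msg := &pb.MyMessage{Count: proto.Int32(42), Name: proto.String("Dave")}

// Render the message in the text format produced by this file.
text := proto.MarshalTextString(msg)

// Parse the text back into a fresh message and confirm the round trip.
out := new(pb.MyMessage)
if err := proto.UnmarshalText(text, out); err != nil {
	log.Fatal(err)
}
fmt.Println(proto.Equal(msg, out)) // true
```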
|
757
libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser.go
generated
vendored
Normal file
|
@ -0,0 +1,757 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
// Functions for parsing the Text protocol buffer format.
|
||||
// TODO: message sets.
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
type ParseError struct {
|
||||
Message string
|
||||
Line int // 1-based line number
|
||||
Offset int // 0-based byte offset from start of input
|
||||
}
|
||||
|
||||
func (p *ParseError) Error() string {
|
||||
if p.Line == 1 {
|
||||
// show offset only for first line
|
||||
return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
|
||||
}
|
||||
return fmt.Sprintf("line %d: %v", p.Line, p.Message)
|
||||
}
|
||||
|
||||
type token struct {
|
||||
value string
|
||||
err *ParseError
|
||||
line int // line number
|
||||
offset int // byte number from start of input, not start of line
|
||||
unquoted string // the unquoted version of value, if it was a quoted string
|
||||
}
|
||||
|
||||
func (t *token) String() string {
|
||||
if t.err == nil {
|
||||
return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
|
||||
}
|
||||
return fmt.Sprintf("parse error: %v", t.err)
|
||||
}
|
||||
|
||||
type textParser struct {
|
||||
s string // remaining input
|
||||
done bool // whether the parsing is finished (success or error)
|
||||
backed bool // whether back() was called
|
||||
offset, line int
|
||||
cur token
|
||||
}
|
||||
|
||||
func newTextParser(s string) *textParser {
|
||||
p := new(textParser)
|
||||
p.s = s
|
||||
p.line = 1
|
||||
p.cur.line = 1
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
|
||||
pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
|
||||
p.cur.err = pe
|
||||
p.done = true
|
||||
return pe
|
||||
}
|
||||
|
||||
// Numbers and identifiers are matched by [-+._A-Za-z0-9]
|
||||
func isIdentOrNumberChar(c byte) bool {
|
||||
switch {
|
||||
case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
|
||||
return true
|
||||
case '0' <= c && c <= '9':
|
||||
return true
|
||||
}
|
||||
switch c {
|
||||
case '-', '+', '.', '_':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isWhitespace(c byte) bool {
|
||||
switch c {
|
||||
case ' ', '\t', '\n', '\r':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (p *textParser) skipWhitespace() {
|
||||
i := 0
|
||||
for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
|
||||
if p.s[i] == '#' {
|
||||
// comment; skip to end of line or input
|
||||
for i < len(p.s) && p.s[i] != '\n' {
|
||||
i++
|
||||
}
|
||||
if i == len(p.s) {
|
||||
break
|
||||
}
|
||||
}
|
||||
if p.s[i] == '\n' {
|
||||
p.line++
|
||||
}
|
||||
i++
|
||||
}
|
||||
p.offset += i
|
||||
p.s = p.s[i:len(p.s)]
|
||||
if len(p.s) == 0 {
|
||||
p.done = true
|
||||
}
|
||||
}
|
||||
|
||||
func (p *textParser) advance() {
|
||||
// Skip whitespace
|
||||
p.skipWhitespace()
|
||||
if p.done {
|
||||
return
|
||||
}
|
||||
|
||||
// Start of non-whitespace
|
||||
p.cur.err = nil
|
||||
p.cur.offset, p.cur.line = p.offset, p.line
|
||||
p.cur.unquoted = ""
|
||||
switch p.s[0] {
|
||||
case '<', '>', '{', '}', ':', '[', ']', ';', ',':
|
||||
// Single symbol
|
||||
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
|
||||
case '"', '\'':
|
||||
// Quoted string
|
||||
i := 1
|
||||
for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
|
||||
if p.s[i] == '\\' && i+1 < len(p.s) {
|
||||
// skip escaped char
|
||||
i++
|
||||
}
|
||||
i++
|
||||
}
|
||||
if i >= len(p.s) || p.s[i] != p.s[0] {
|
||||
p.errorf("unmatched quote")
|
||||
return
|
||||
}
|
||||
unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
|
||||
if err != nil {
|
||||
p.errorf("invalid quoted string %v", p.s[0:i+1])
|
||||
return
|
||||
}
|
||||
p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
|
||||
p.cur.unquoted = unq
|
||||
default:
|
||||
i := 0
|
||||
for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
|
||||
i++
|
||||
}
|
||||
if i == 0 {
|
||||
p.errorf("unexpected byte %#x", p.s[0])
|
||||
return
|
||||
}
|
||||
p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
|
||||
}
|
||||
p.offset += len(p.cur.value)
|
||||
}
|
||||
|
||||
var (
|
||||
errBadUTF8 = errors.New("proto: bad UTF-8")
|
||||
errBadHex = errors.New("proto: bad hexadecimal")
|
||||
)
|
||||
|
||||
func unquoteC(s string, quote rune) (string, error) {
|
||||
// This is based on C++'s tokenizer.cc.
|
||||
// Despite its name, this is *not* parsing C syntax.
|
||||
// For instance, "\0" is an invalid quoted string.
|
||||
|
||||
// Avoid allocation in trivial cases.
|
||||
simple := true
|
||||
for _, r := range s {
|
||||
if r == '\\' || r == quote {
|
||||
simple = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if simple {
|
||||
return s, nil
|
||||
}
|
||||
|
||||
buf := make([]byte, 0, 3*len(s)/2)
|
||||
for len(s) > 0 {
|
||||
r, n := utf8.DecodeRuneInString(s)
|
||||
if r == utf8.RuneError && n == 1 {
|
||||
return "", errBadUTF8
|
||||
}
|
||||
s = s[n:]
|
||||
if r != '\\' {
|
||||
if r < utf8.RuneSelf {
|
||||
buf = append(buf, byte(r))
|
||||
} else {
|
||||
buf = append(buf, string(r)...)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
ch, tail, err := unescape(s)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
buf = append(buf, ch...)
|
||||
s = tail
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
func unescape(s string) (ch string, tail string, err error) {
|
||||
r, n := utf8.DecodeRuneInString(s)
|
||||
if r == utf8.RuneError && n == 1 {
|
||||
return "", "", errBadUTF8
|
||||
}
|
||||
s = s[n:]
|
||||
switch r {
|
||||
case 'a':
|
||||
return "\a", s, nil
|
||||
case 'b':
|
||||
return "\b", s, nil
|
||||
case 'f':
|
||||
return "\f", s, nil
|
||||
case 'n':
|
||||
return "\n", s, nil
|
||||
case 'r':
|
||||
return "\r", s, nil
|
||||
case 't':
|
||||
return "\t", s, nil
|
||||
case 'v':
|
||||
return "\v", s, nil
|
||||
case '?':
|
||||
return "?", s, nil // trigraph workaround
|
||||
case '\'', '"', '\\':
|
||||
return string(r), s, nil
|
||||
case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':
|
||||
if len(s) < 2 {
|
||||
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
|
||||
}
|
||||
base := 8
|
||||
ss := s[:2]
|
||||
s = s[2:]
|
||||
if r == 'x' || r == 'X' {
|
||||
base = 16
|
||||
} else {
|
||||
ss = string(r) + ss
|
||||
}
|
||||
i, err := strconv.ParseUint(ss, base, 8)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
return string([]byte{byte(i)}), s, nil
|
||||
case 'u', 'U':
|
||||
n := 4
|
||||
if r == 'U' {
|
||||
n = 8
|
||||
}
|
||||
if len(s) < n {
|
||||
return "", "", fmt.Errorf(`\%c requires %d digits`, r, n)
|
||||
}
|
||||
|
||||
bs := make([]byte, n/2)
|
||||
for i := 0; i < n; i += 2 {
|
||||
a, ok1 := unhex(s[i])
|
||||
b, ok2 := unhex(s[i+1])
|
||||
if !ok1 || !ok2 {
|
||||
return "", "", errBadHex
|
||||
}
|
||||
bs[i/2] = a<<4 | b
|
||||
}
|
||||
s = s[n:]
|
||||
return string(bs), s, nil
|
||||
}
|
||||
return "", "", fmt.Errorf(`unknown escape \%c`, r)
|
||||
}
|
||||
|
||||
// Adapted from src/pkg/strconv/quote.go.
|
||||
func unhex(b byte) (v byte, ok bool) {
|
||||
switch {
|
||||
case '0' <= b && b <= '9':
|
||||
return b - '0', true
|
||||
case 'a' <= b && b <= 'f':
|
||||
return b - 'a' + 10, true
|
||||
case 'A' <= b && b <= 'F':
|
||||
return b - 'A' + 10, true
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// Back off the parser by one token. Can only be done between calls to next().
|
||||
// It makes the next advance() a no-op.
|
||||
func (p *textParser) back() { p.backed = true }
|
||||
|
||||
// Advances the parser and returns the new current token.
|
||||
func (p *textParser) next() *token {
|
||||
if p.backed || p.done {
|
||||
p.backed = false
|
||||
return &p.cur
|
||||
}
|
||||
p.advance()
|
||||
if p.done {
|
||||
p.cur.value = ""
|
||||
} else if len(p.cur.value) > 0 && p.cur.value[0] == '"' {
|
||||
// Look for multiple quoted strings separated by whitespace,
|
||||
// and concatenate them.
|
||||
cat := p.cur
|
||||
for {
|
||||
p.skipWhitespace()
|
||||
if p.done || p.s[0] != '"' {
|
||||
break
|
||||
}
|
||||
p.advance()
|
||||
if p.cur.err != nil {
|
||||
return &p.cur
|
||||
}
|
||||
cat.value += " " + p.cur.value
|
||||
cat.unquoted += p.cur.unquoted
|
||||
}
|
||||
p.done = false // parser may have seen EOF, but we want to return cat
|
||||
p.cur = cat
|
||||
}
|
||||
return &p.cur
|
||||
}
|
||||
|
||||
func (p *textParser) consumeToken(s string) error {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != s {
|
||||
p.back()
|
||||
return p.errorf("expected %q, found %q", s, tok.value)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Return a RequiredNotSetError indicating which required field was not set.
|
||||
func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
|
||||
st := sv.Type()
|
||||
sprops := GetProperties(st)
|
||||
for i := 0; i < st.NumField(); i++ {
|
||||
if !isNil(sv.Field(i)) {
|
||||
continue
|
||||
}
|
||||
|
||||
props := sprops.Prop[i]
|
||||
if props.Required {
|
||||
return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
|
||||
}
|
||||
}
|
||||
return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
|
||||
}
|
||||
|
||||
// Returns the index in the struct for the named field, as well as the parsed tag properties.
|
||||
func structFieldByName(st reflect.Type, name string) (int, *Properties, bool) {
|
||||
sprops := GetProperties(st)
|
||||
i, ok := sprops.decoderOrigNames[name]
|
||||
if ok {
|
||||
return i, sprops.Prop[i], true
|
||||
}
|
||||
return -1, nil, false
|
||||
}
|
||||
|
||||
// Consume a ':' from the input stream (if the next token is a colon),
|
||||
// returning an error if a colon is needed but not present.
|
||||
func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != ":" {
|
||||
// Colon is optional when the field is a group or message.
|
||||
needColon := true
|
||||
switch props.Wire {
|
||||
case "group":
|
||||
needColon = false
|
||||
case "bytes":
|
||||
// A "bytes" field is either a message, a string, or a repeated field;
|
||||
// those three become *T, *string and []T respectively, so we can check for
|
||||
// this field being a pointer to a non-string.
|
||||
if typ.Kind() == reflect.Ptr {
|
||||
// *T or *string
|
||||
if typ.Elem().Kind() == reflect.String {
|
||||
break
|
||||
}
|
||||
} else if typ.Kind() == reflect.Slice {
|
||||
// []T or []*T
|
||||
if typ.Elem().Kind() != reflect.Ptr {
|
||||
break
|
||||
}
|
||||
} else if typ.Kind() == reflect.String {
|
||||
// The proto3 exception is for a string field,
|
||||
// which requires a colon.
|
||||
break
|
||||
}
|
||||
needColon = false
|
||||
}
|
||||
if needColon {
|
||||
return p.errorf("expected ':', found %q", tok.value)
|
||||
}
|
||||
p.back()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
||||
st := sv.Type()
|
||||
reqCount := GetProperties(st).reqCount
|
||||
var reqFieldErr error
|
||||
fieldSet := make(map[string]bool)
|
||||
// A struct is a sequence of "name: value", terminated by one of
|
||||
// '>' or '}', or the end of the input. A name may also be
|
||||
// "[extension]".
|
||||
for {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value == terminator {
|
||||
break
|
||||
}
|
||||
if tok.value == "[" {
|
||||
// Looks like an extension.
|
||||
//
|
||||
// TODO: Check whether we need to handle
|
||||
// namespace rooted names (e.g. ".something.Foo").
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
var desc *ExtensionDesc
|
||||
// This could be faster, but it's functional.
|
||||
// TODO: Do something smarter than a linear scan.
|
||||
for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
|
||||
if d.Name == tok.value {
|
||||
desc = d
|
||||
break
|
||||
}
|
||||
}
|
||||
if desc == nil {
|
||||
return p.errorf("unrecognized extension %q", tok.value)
|
||||
}
|
||||
// Check the extension terminator.
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != "]" {
|
||||
return p.errorf("unrecognized extension terminator %q", tok.value)
|
||||
}
|
||||
|
||||
props := &Properties{}
|
||||
props.Parse(desc.Tag)
|
||||
|
||||
typ := reflect.TypeOf(desc.ExtensionType)
|
||||
if err := p.checkForColon(props, typ); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rep := desc.repeated()
|
||||
|
||||
// Read the extension structure, and set it in
|
||||
// the value we're constructing.
|
||||
var ext reflect.Value
|
||||
if !rep {
|
||||
ext = reflect.New(typ).Elem()
|
||||
} else {
|
||||
ext = reflect.New(typ.Elem()).Elem()
|
||||
}
|
||||
if err := p.readAny(ext, props); err != nil {
|
||||
if _, ok := err.(*RequiredNotSetError); !ok {
|
||||
return err
|
||||
}
|
||||
reqFieldErr = err
|
||||
}
|
||||
ep := sv.Addr().Interface().(extendableProto)
|
||||
if !rep {
|
||||
SetExtension(ep, desc, ext.Interface())
|
||||
} else {
|
||||
old, err := GetExtension(ep, desc)
|
||||
var sl reflect.Value
|
||||
if err == nil {
|
||||
sl = reflect.ValueOf(old) // existing slice
|
||||
} else {
|
||||
sl = reflect.MakeSlice(typ, 0, 1)
|
||||
}
|
||||
sl = reflect.Append(sl, ext)
|
||||
SetExtension(ep, desc, sl.Interface())
|
||||
}
|
||||
} else {
|
||||
// This is a normal, non-extension field.
|
||||
name := tok.value
|
||||
fi, props, ok := structFieldByName(st, name)
|
||||
if !ok {
|
||||
return p.errorf("unknown field name %q in %v", name, st)
|
||||
}
|
||||
|
||||
dst := sv.Field(fi)
|
||||
|
||||
if dst.Kind() == reflect.Map {
|
||||
// Consume any colon.
|
||||
if err := p.checkForColon(props, dst.Type()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Construct the map if it doesn't already exist.
|
||||
if dst.IsNil() {
|
||||
dst.Set(reflect.MakeMap(dst.Type()))
|
||||
}
|
||||
key := reflect.New(dst.Type().Key()).Elem()
|
||||
val := reflect.New(dst.Type().Elem()).Elem()
|
||||
|
||||
// The map entry should be this sequence of tokens:
|
||||
// < key : KEY value : VALUE >
|
||||
// Technically the "key" and "value" could come in any order,
|
||||
// but in practice they won't.
|
||||
|
||||
tok := p.next()
|
||||
var terminator string
|
||||
switch tok.value {
|
||||
case "<":
|
||||
terminator = ">"
|
||||
case "{":
|
||||
terminator = "}"
|
||||
default:
|
||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
||||
}
|
||||
if err := p.consumeToken("key"); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.consumeToken(":"); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.readAny(key, props.mkeyprop); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.consumeToken("value"); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.readAny(val, props.mvalprop); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.consumeToken(terminator); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dst.SetMapIndex(key, val)
|
||||
continue
|
||||
}
|
||||
|
||||
// Check that it's not already set if it's not a repeated field.
|
||||
if !props.Repeated && fieldSet[name] {
|
||||
return p.errorf("non-repeated field %q was repeated", name)
|
||||
}
|
||||
|
||||
if err := p.checkForColon(props, st.Field(fi).Type); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Parse into the field.
|
||||
fieldSet[name] = true
|
||||
if err := p.readAny(dst, props); err != nil {
|
||||
if _, ok := err.(*RequiredNotSetError); !ok {
|
||||
return err
|
||||
}
|
||||
reqFieldErr = err
|
||||
} else if props.Required {
|
||||
reqCount--
|
||||
}
|
||||
}
|
||||
|
||||
// For backward compatibility, permit a semicolon or comma after a field.
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != ";" && tok.value != "," {
|
||||
p.back()
|
||||
}
|
||||
}
|
||||
|
||||
if reqCount > 0 {
|
||||
return p.missingRequiredFieldError(sv)
|
||||
}
|
||||
return reqFieldErr
|
||||
}
|
||||
|
||||
func (p *textParser) readAny(v reflect.Value, props *Properties) error {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value == "" {
|
||||
return p.errorf("unexpected EOF")
|
||||
}
|
||||
|
||||
switch fv := v; fv.Kind() {
|
||||
case reflect.Slice:
|
||||
at := v.Type()
|
||||
if at.Elem().Kind() == reflect.Uint8 {
|
||||
// Special case for []byte
|
||||
if tok.value[0] != '"' && tok.value[0] != '\'' {
|
||||
// Deliberately written out here, as the error after
|
||||
// this switch statement would write "invalid []byte: ...",
|
||||
// which is not as user-friendly.
|
||||
return p.errorf("invalid string: %v", tok.value)
|
||||
}
|
||||
bytes := []byte(tok.unquoted)
|
||||
fv.Set(reflect.ValueOf(bytes))
|
||||
return nil
|
||||
}
|
||||
// Repeated field. May already exist.
|
||||
flen := fv.Len()
|
||||
if flen == fv.Cap() {
|
||||
nav := reflect.MakeSlice(at, flen, 2*flen+1)
|
||||
reflect.Copy(nav, fv)
|
||||
fv.Set(nav)
|
||||
}
|
||||
fv.SetLen(flen + 1)
|
||||
|
||||
// Read one.
|
||||
p.back()
|
||||
return p.readAny(fv.Index(flen), props)
|
||||
case reflect.Bool:
|
||||
// Either "true", "false", 1 or 0.
|
||||
switch tok.value {
|
||||
case "true", "1":
|
||||
fv.SetBool(true)
|
||||
return nil
|
||||
case "false", "0":
|
||||
fv.SetBool(false)
|
||||
return nil
|
||||
}
|
||||
case reflect.Float32, reflect.Float64:
|
||||
v := tok.value
|
||||
// Ignore 'f' for compatibility with output generated by C++, but don't
|
||||
// remove 'f' when the value is "-inf" or "inf".
|
||||
if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
|
||||
v = v[:len(v)-1]
|
||||
}
|
||||
if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
|
||||
fv.SetFloat(f)
|
||||
return nil
|
||||
}
|
||||
case reflect.Int32:
|
||||
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
|
||||
fv.SetInt(x)
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(props.Enum) == 0 {
|
||||
break
|
||||
}
|
||||
m, ok := enumValueMaps[props.Enum]
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
x, ok := m[tok.value]
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
fv.SetInt(int64(x))
|
||||
return nil
|
||||
case reflect.Int64:
|
||||
if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
|
||||
fv.SetInt(x)
|
||||
return nil
|
||||
}
|
||||
|
||||
case reflect.Ptr:
|
||||
// A basic field (indirected through pointer), or a repeated message/group
|
||||
p.back()
|
||||
fv.Set(reflect.New(fv.Type().Elem()))
|
||||
return p.readAny(fv.Elem(), props)
|
||||
case reflect.String:
|
||||
if tok.value[0] == '"' || tok.value[0] == '\'' {
|
||||
fv.SetString(tok.unquoted)
|
||||
return nil
|
||||
}
|
||||
case reflect.Struct:
|
||||
var terminator string
|
||||
switch tok.value {
|
||||
case "{":
|
||||
terminator = "}"
|
||||
case "<":
|
||||
terminator = ">"
|
||||
default:
|
||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
||||
}
|
||||
// TODO: Handle nested messages which implement encoding.TextUnmarshaler.
|
||||
return p.readStruct(fv, terminator)
|
||||
case reflect.Uint32:
|
||||
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
|
||||
fv.SetUint(uint64(x))
|
||||
return nil
|
||||
}
|
||||
case reflect.Uint64:
|
||||
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
|
||||
fv.SetUint(x)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return p.errorf("invalid %v: %v", v.Type(), tok.value)
|
||||
}
|
||||
|
||||
// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
|
||||
// before starting to unmarshal, so any existing data in pb is always removed.
|
||||
// If a required field is not set and no other error occurs,
|
||||
// UnmarshalText returns *RequiredNotSetError.
|
||||
func UnmarshalText(s string, pb Message) error {
|
||||
if um, ok := pb.(encoding.TextUnmarshaler); ok {
|
||||
err := um.UnmarshalText([]byte(s))
|
||||
return err
|
||||
}
|
||||
pb.Reset()
|
||||
v := reflect.ValueOf(pb)
|
||||
if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {
|
||||
return pe
|
||||
}
|
||||
return nil
|
||||
}
|
511
libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser_test.go
generated
vendored
Normal file
|
@ -0,0 +1,511 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto_test
|
||||
|
||||
import (
|
||||
"math"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
proto3pb "./proto3_proto"
|
||||
. "./testdata"
|
||||
. "github.com/golang/protobuf/proto"
|
||||
)
|
||||
|
||||
type UnmarshalTextTest struct {
|
||||
in string
|
||||
err string // if "", no error expected
|
||||
out *MyMessage
|
||||
}
|
||||
|
||||
func buildExtStructTest(text string) UnmarshalTextTest {
|
||||
msg := &MyMessage{
|
||||
Count: Int32(42),
|
||||
}
|
||||
SetExtension(msg, E_Ext_More, &Ext{
|
||||
Data: String("Hello, world!"),
|
||||
})
|
||||
return UnmarshalTextTest{in: text, out: msg}
|
||||
}
|
||||
|
||||
func buildExtDataTest(text string) UnmarshalTextTest {
|
||||
msg := &MyMessage{
|
||||
Count: Int32(42),
|
||||
}
|
||||
SetExtension(msg, E_Ext_Text, String("Hello, world!"))
|
||||
SetExtension(msg, E_Ext_Number, Int32(1729))
|
||||
return UnmarshalTextTest{in: text, out: msg}
|
||||
}
|
||||
|
||||
func buildExtRepStringTest(text string) UnmarshalTextTest {
|
||||
msg := &MyMessage{
|
||||
Count: Int32(42),
|
||||
}
|
||||
if err := SetExtension(msg, E_Greeting, []string{"bula", "hola"}); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return UnmarshalTextTest{in: text, out: msg}
|
||||
}
|
||||
|
||||
var unMarshalTextTests = []UnmarshalTextTest{
|
||||
// Basic
|
||||
{
|
||||
in: " count:42\n name:\"Dave\" ",
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Name: String("Dave"),
|
||||
},
|
||||
},
|
||||
|
||||
// Empty quoted string
|
||||
{
|
||||
in: `count:42 name:""`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Name: String(""),
|
||||
},
|
||||
},
|
||||
|
||||
// Quoted string concatenation
|
||||
{
|
||||
in: `count:42 name: "My name is "` + "\n" + `"elsewhere"`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Name: String("My name is elsewhere"),
|
||||
},
|
||||
},
|
||||
|
||||
// Quoted string with escaped apostrophe
|
||||
{
|
||||
in: `count:42 name: "HOLIDAY - New Year\'s Day"`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Name: String("HOLIDAY - New Year's Day"),
|
||||
},
|
||||
},
|
||||
|
||||
// Quoted string with single quote
|
||||
{
|
||||
in: `count:42 name: 'Roger "The Ramster" Ramjet'`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Name: String(`Roger "The Ramster" Ramjet`),
|
||||
},
|
||||
},
|
||||
|
||||
// Quoted string with all the accepted special characters from the C++ test
|
||||
{
|
||||
in: `count:42 name: ` + "\"\\\"A string with \\' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"",
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Name: String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces"),
|
||||
},
|
||||
},
|
||||
|
||||
// Quoted string with quoted backslash
|
||||
{
|
||||
in: `count:42 name: "\\'xyz"`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Name: String(`\'xyz`),
|
||||
},
|
||||
},
|
||||
|
||||
// Quoted string with UTF-8 bytes.
|
||||
{
|
||||
in: "count:42 name: '\303\277\302\201\xAB'",
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Name: String("\303\277\302\201\xAB"),
|
||||
},
|
||||
},
|
||||
|
||||
// Bad quoted string
|
||||
{
|
||||
in: `inner: < host: "\0" >` + "\n",
|
||||
err: `line 1.15: invalid quoted string "\0"`,
|
||||
},
|
||||
|
||||
// Number too large for int64
|
||||
{
|
||||
in: "count: 1 others { key: 123456789012345678901 }",
|
||||
err: "line 1.23: invalid int64: 123456789012345678901",
|
||||
},
|
||||
|
||||
// Number too large for int32
|
||||
{
|
||||
in: "count: 1234567890123",
|
||||
err: "line 1.7: invalid int32: 1234567890123",
|
||||
},
|
||||
|
||||
// Number in hexadecimal
|
||||
{
|
||||
in: "count: 0x2beef",
|
||||
out: &MyMessage{
|
||||
Count: Int32(0x2beef),
|
||||
},
|
||||
},
|
||||
|
||||
// Number in octal
|
||||
{
|
||||
in: "count: 024601",
|
||||
out: &MyMessage{
|
||||
Count: Int32(024601),
|
||||
},
|
||||
},
|
||||
|
||||
// Floating point number with "f" suffix
|
||||
{
|
||||
in: "count: 4 others:< weight: 17.0f >",
|
||||
out: &MyMessage{
|
||||
Count: Int32(4),
|
||||
Others: []*OtherMessage{
|
||||
{
|
||||
Weight: Float32(17),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
// Floating point positive infinity
|
||||
{
|
||||
in: "count: 4 bigfloat: inf",
|
||||
out: &MyMessage{
|
||||
Count: Int32(4),
|
||||
Bigfloat: Float64(math.Inf(1)),
|
||||
},
|
||||
},
|
||||
|
||||
// Floating point negative infinity
|
||||
{
|
||||
in: "count: 4 bigfloat: -inf",
|
||||
out: &MyMessage{
|
||||
Count: Int32(4),
|
||||
Bigfloat: Float64(math.Inf(-1)),
|
||||
},
|
||||
},
|
||||
|
||||
// Number too large for float32
|
||||
{
|
||||
in: "others:< weight: 12345678901234567890123456789012345678901234567890 >",
|
||||
err: "line 1.17: invalid float32: 12345678901234567890123456789012345678901234567890",
|
||||
},
|
||||
|
||||
// Number posing as a quoted string
|
||||
{
|
||||
in: `inner: < host: 12 >` + "\n",
|
||||
err: `line 1.15: invalid string: 12`,
|
||||
},
|
||||
|
||||
// Quoted string posing as int32
|
||||
{
|
||||
in: `count: "12"`,
|
||||
err: `line 1.7: invalid int32: "12"`,
|
||||
},
|
||||
|
||||
// Quoted string posing a float32
|
||||
{
|
||||
in: `others:< weight: "17.4" >`,
|
||||
err: `line 1.17: invalid float32: "17.4"`,
|
||||
},
|
||||
|
||||
// Enum
|
||||
{
|
||||
in: `count:42 bikeshed: BLUE`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Bikeshed: MyMessage_BLUE.Enum(),
|
||||
},
|
||||
},
|
||||
|
||||
// Repeated field
|
||||
{
|
||||
in: `count:42 pet: "horsey" pet:"bunny"`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Pet: []string{"horsey", "bunny"},
|
||||
},
|
||||
},
|
||||
|
||||
// Repeated message with/without colon and <>/{}
|
||||
{
|
||||
in: `count:42 others:{} others{} others:<> others:{}`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Others: []*OtherMessage{
|
||||
{},
|
||||
{},
|
||||
{},
|
||||
{},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
// Missing colon for inner message
|
||||
{
|
||||
in: `count:42 inner < host: "cauchy.syd" >`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Inner: &InnerMessage{
|
||||
Host: String("cauchy.syd"),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
// Missing colon for string field
|
||||
{
|
||||
in: `name "Dave"`,
|
||||
err: `line 1.5: expected ':', found "\"Dave\""`,
|
||||
},
|
||||
|
||||
// Missing colon for int32 field
|
||||
{
|
||||
in: `count 42`,
|
||||
err: `line 1.6: expected ':', found "42"`,
|
||||
},
|
||||
|
||||
// Missing required field
|
||||
{
|
||||
in: `name: "Pawel"`,
|
||||
err: `proto: required field "testdata.MyMessage.count" not set`,
|
||||
out: &MyMessage{
|
||||
Name: String("Pawel"),
|
||||
},
|
||||
},
|
||||
|
||||
// Repeated non-repeated field
|
||||
{
|
||||
in: `name: "Rob" name: "Russ"`,
|
||||
err: `line 1.12: non-repeated field "name" was repeated`,
|
||||
},
|
||||
|
||||
// Group
|
||||
{
|
||||
in: `count: 17 SomeGroup { group_field: 12 }`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(17),
|
||||
Somegroup: &MyMessage_SomeGroup{
|
||||
GroupField: Int32(12),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
// Semicolon between fields
|
||||
{
|
||||
in: `count:3;name:"Calvin"`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(3),
|
||||
Name: String("Calvin"),
|
||||
},
|
||||
},
|
||||
// Comma between fields
|
||||
{
|
||||
in: `count:4,name:"Ezekiel"`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(4),
|
||||
Name: String("Ezekiel"),
|
||||
},
|
||||
},
|
||||
|
||||
// Extension
|
||||
buildExtStructTest(`count: 42 [testdata.Ext.more]:<data:"Hello, world!" >`),
|
||||
buildExtStructTest(`count: 42 [testdata.Ext.more] {data:"Hello, world!"}`),
|
||||
buildExtDataTest(`count: 42 [testdata.Ext.text]:"Hello, world!" [testdata.Ext.number]:1729`),
|
||||
buildExtRepStringTest(`count: 42 [testdata.greeting]:"bula" [testdata.greeting]:"hola"`),
|
||||
|
||||
// Big all-in-one
|
||||
{
|
||||
in: "count:42 # Meaning\n" +
|
||||
`name:"Dave" ` +
|
||||
`quote:"\"I didn't want to go.\"" ` +
|
||||
`pet:"bunny" ` +
|
||||
`pet:"kitty" ` +
|
||||
`pet:"horsey" ` +
|
||||
`inner:<` +
|
||||
` host:"footrest.syd" ` +
|
||||
` port:7001 ` +
|
||||
` connected:true ` +
|
||||
`> ` +
|
||||
`others:<` +
|
||||
` key:3735928559 ` +
|
||||
` value:"\x01A\a\f" ` +
|
||||
`> ` +
|
||||
`others:<` +
|
||||
" weight:58.9 # Atomic weight of Co\n" +
|
||||
` inner:<` +
|
||||
` host:"lesha.mtv" ` +
|
||||
` port:8002 ` +
|
||||
` >` +
|
||||
`>`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Name: String("Dave"),
|
||||
Quote: String(`"I didn't want to go."`),
|
||||
Pet: []string{"bunny", "kitty", "horsey"},
|
||||
Inner: &InnerMessage{
|
||||
Host: String("footrest.syd"),
|
||||
Port: Int32(7001),
|
||||
Connected: Bool(true),
|
||||
},
|
||||
Others: []*OtherMessage{
|
||||
{
|
||||
Key: Int64(3735928559),
|
||||
Value: []byte{0x1, 'A', '\a', '\f'},
|
||||
},
|
||||
{
|
||||
Weight: Float32(58.9),
|
||||
Inner: &InnerMessage{
|
||||
Host: String("lesha.mtv"),
|
||||
Port: Int32(8002),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func TestUnmarshalText(t *testing.T) {
|
||||
for i, test := range unMarshalTextTests {
|
||||
pb := new(MyMessage)
|
||||
err := UnmarshalText(test.in, pb)
|
||||
if test.err == "" {
|
||||
// We don't expect failure.
|
||||
if err != nil {
|
||||
t.Errorf("Test %d: Unexpected error: %v", i, err)
|
||||
} else if !reflect.DeepEqual(pb, test.out) {
|
||||
t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v",
|
||||
i, pb, test.out)
|
||||
}
|
||||
} else {
|
||||
// We do expect failure.
|
||||
if err == nil {
|
||||
t.Errorf("Test %d: Didn't get expected error: %v", i, test.err)
|
||||
} else if err.Error() != test.err {
|
||||
t.Errorf("Test %d: Incorrect error.\nHave: %v\nWant: %v",
|
||||
i, err.Error(), test.err)
|
||||
} else if _, ok := err.(*RequiredNotSetError); ok && test.out != nil && !reflect.DeepEqual(pb, test.out) {
|
||||
t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v",
|
||||
i, pb, test.out)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnmarshalTextCustomMessage(t *testing.T) {
|
||||
msg := &textMessage{}
|
||||
if err := UnmarshalText("custom", msg); err != nil {
|
||||
t.Errorf("Unexpected error from custom unmarshal: %v", err)
|
||||
}
|
||||
if UnmarshalText("not custom", msg) == nil {
|
||||
t.Errorf("Didn't get expected error from custom unmarshal")
|
||||
}
|
||||
}
|
||||
|
||||
// Regression test; this caused a panic.
|
||||
func TestRepeatedEnum(t *testing.T) {
|
||||
pb := new(RepeatedEnum)
|
||||
if err := UnmarshalText("color: RED", pb); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
exp := &RepeatedEnum{
|
||||
Color: []RepeatedEnum_Color{RepeatedEnum_RED},
|
||||
}
|
||||
if !Equal(pb, exp) {
|
||||
t.Errorf("Incorrect populated \nHave: %v\nWant: %v", pb, exp)
|
||||
}
|
||||
}
|
||||
|
||||
func TestProto3TextParsing(t *testing.T) {
|
||||
m := new(proto3pb.Message)
|
||||
const in = `name: "Wallace" true_scotsman: true`
|
||||
want := &proto3pb.Message{
|
||||
Name: "Wallace",
|
||||
TrueScotsman: true,
|
||||
}
|
||||
if err := UnmarshalText(in, m); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !Equal(m, want) {
|
||||
t.Errorf("\n got %v\nwant %v", m, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMapParsing(t *testing.T) {
|
||||
m := new(MessageWithMap)
|
||||
const in = `name_mapping:<key:1234 value:"Feist"> name_mapping:<key:1 value:"Beatles">` +
|
||||
`msg_mapping:<key:-4 value:<f: 2.0>>` +
|
||||
`msg_mapping<key:-2 value<f: 4.0>>` + // no colon after "value"
|
||||
`byte_mapping:<key:true value:"so be it">`
|
||||
want := &MessageWithMap{
|
||||
NameMapping: map[int32]string{
|
||||
1: "Beatles",
|
||||
1234: "Feist",
|
||||
},
|
||||
MsgMapping: map[int64]*FloatingPoint{
|
||||
-4: {F: Float64(2.0)},
|
||||
-2: {F: Float64(4.0)},
|
||||
},
|
||||
ByteMapping: map[bool][]byte{
|
||||
true: []byte("so be it"),
|
||||
},
|
||||
}
|
||||
if err := UnmarshalText(in, m); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !Equal(m, want) {
|
||||
t.Errorf("\n got %v\nwant %v", m, want)
|
||||
}
|
||||
}
|
||||
|
||||
var benchInput string
|
||||
|
||||
func init() {
|
||||
benchInput = "count: 4\n"
|
||||
for i := 0; i < 1000; i++ {
|
||||
benchInput += "pet: \"fido\"\n"
|
||||
}
|
||||
|
||||
// Check it is valid input.
|
||||
pb := new(MyMessage)
|
||||
err := UnmarshalText(benchInput, pb)
|
||||
if err != nil {
|
||||
panic("Bad benchmark input: " + err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkUnmarshalText(b *testing.B) {
|
||||
pb := new(MyMessage)
|
||||
for i := 0; i < b.N; i++ {
|
||||
UnmarshalText(benchInput, pb)
|
||||
}
|
||||
b.SetBytes(int64(len(benchInput)))
|
||||
}
|
436
libnetwork/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_test.go
generated
vendored
Normal file
|
@ -0,0 +1,436 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
|
||||
proto3pb "./proto3_proto"
|
||||
pb "./testdata"
|
||||
)
|
||||
|
||||
// textMessage implements the methods that allow it to marshal and unmarshal
|
||||
// itself as text.
|
||||
type textMessage struct {
|
||||
}
|
||||
|
||||
func (*textMessage) MarshalText() ([]byte, error) {
|
||||
return []byte("custom"), nil
|
||||
}
|
||||
|
||||
func (*textMessage) UnmarshalText(bytes []byte) error {
|
||||
if string(bytes) != "custom" {
|
||||
return errors.New("expected 'custom'")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*textMessage) Reset() {}
|
||||
func (*textMessage) String() string { return "" }
|
||||
func (*textMessage) ProtoMessage() {}
|
||||
|
||||
func newTestMessage() *pb.MyMessage {
|
||||
msg := &pb.MyMessage{
|
||||
Count: proto.Int32(42),
|
||||
Name: proto.String("Dave"),
|
||||
Quote: proto.String(`"I didn't want to go."`),
|
||||
Pet: []string{"bunny", "kitty", "horsey"},
|
||||
Inner: &pb.InnerMessage{
|
||||
Host: proto.String("footrest.syd"),
|
||||
Port: proto.Int32(7001),
|
||||
Connected: proto.Bool(true),
|
||||
},
|
||||
Others: []*pb.OtherMessage{
|
||||
{
|
||||
Key: proto.Int64(0xdeadbeef),
|
||||
Value: []byte{1, 65, 7, 12},
|
||||
},
|
||||
{
|
||||
Weight: proto.Float32(6.022),
|
||||
Inner: &pb.InnerMessage{
|
||||
Host: proto.String("lesha.mtv"),
|
||||
Port: proto.Int32(8002),
|
||||
},
|
||||
},
|
||||
},
|
||||
Bikeshed: pb.MyMessage_BLUE.Enum(),
|
||||
Somegroup: &pb.MyMessage_SomeGroup{
|
||||
GroupField: proto.Int32(8),
|
||||
},
|
||||
// One normally wouldn't do this.
|
||||
// This is an undeclared tag 13, as a varint (wire type 0) with value 4.
|
||||
XXX_unrecognized: []byte{13<<3 | 0, 4},
|
||||
}
|
||||
ext := &pb.Ext{
|
||||
Data: proto.String("Big gobs for big rats"),
|
||||
}
|
||||
if err := proto.SetExtension(msg, pb.E_Ext_More, ext); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
greetings := []string{"adg", "easy", "cow"}
|
||||
if err := proto.SetExtension(msg, pb.E_Greeting, greetings); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Add an unknown extension. We marshal a pb.Ext, and fake the ID.
|
||||
b, err := proto.Marshal(&pb.Ext{Data: proto.String("3G skiing")})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
b = append(proto.EncodeVarint(201<<3|proto.WireBytes), b...)
|
||||
proto.SetRawExtension(msg, 201, b)
|
||||
|
||||
// Extensions can be plain fields, too, so let's test that.
|
||||
b = append(proto.EncodeVarint(202<<3|proto.WireVarint), 19)
|
||||
proto.SetRawExtension(msg, 202, b)
|
||||
|
||||
return msg
|
||||
}
|
||||
|
||||
const text = `count: 42
|
||||
name: "Dave"
|
||||
quote: "\"I didn't want to go.\""
|
||||
pet: "bunny"
|
||||
pet: "kitty"
|
||||
pet: "horsey"
|
||||
inner: <
|
||||
host: "footrest.syd"
|
||||
port: 7001
|
||||
connected: true
|
||||
>
|
||||
others: <
|
||||
key: 3735928559
|
||||
value: "\001A\007\014"
|
||||
>
|
||||
others: <
|
||||
weight: 6.022
|
||||
inner: <
|
||||
host: "lesha.mtv"
|
||||
port: 8002
|
||||
>
|
||||
>
|
||||
bikeshed: BLUE
|
||||
SomeGroup {
|
||||
group_field: 8
|
||||
}
|
||||
/* 2 unknown bytes */
|
||||
13: 4
|
||||
[testdata.Ext.more]: <
|
||||
data: "Big gobs for big rats"
|
||||
>
|
||||
[testdata.greeting]: "adg"
|
||||
[testdata.greeting]: "easy"
|
||||
[testdata.greeting]: "cow"
|
||||
/* 13 unknown bytes */
|
||||
201: "\t3G skiing"
|
||||
/* 3 unknown bytes */
|
||||
202: 19
|
||||
`
|
||||
|
||||
func TestMarshalText(t *testing.T) {
|
||||
buf := new(bytes.Buffer)
|
||||
if err := proto.MarshalText(buf, newTestMessage()); err != nil {
|
||||
t.Fatalf("proto.MarshalText: %v", err)
|
||||
}
|
||||
s := buf.String()
|
||||
if s != text {
|
||||
t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, text)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshalTextCustomMessage(t *testing.T) {
|
||||
buf := new(bytes.Buffer)
|
||||
if err := proto.MarshalText(buf, &textMessage{}); err != nil {
|
||||
t.Fatalf("proto.MarshalText: %v", err)
|
||||
}
|
||||
s := buf.String()
|
||||
if s != "custom" {
|
||||
t.Errorf("Got %q, expected %q", s, "custom")
|
||||
}
|
||||
}
|
||||
func TestMarshalTextNil(t *testing.T) {
|
||||
want := "<nil>"
|
||||
tests := []proto.Message{nil, (*pb.MyMessage)(nil)}
|
||||
for i, test := range tests {
|
||||
buf := new(bytes.Buffer)
|
||||
if err := proto.MarshalText(buf, test); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if got := buf.String(); got != want {
|
||||
t.Errorf("%d: got %q want %q", i, got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshalTextUnknownEnum(t *testing.T) {
|
||||
// The Color enum only specifies values 0-2.
|
||||
m := &pb.MyMessage{Bikeshed: pb.MyMessage_Color(3).Enum()}
|
||||
got := m.String()
|
||||
const want = `bikeshed:3 `
|
||||
if got != want {
|
||||
t.Errorf("\n got %q\nwant %q", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMarshalTextBuffered(b *testing.B) {
|
||||
buf := new(bytes.Buffer)
|
||||
m := newTestMessage()
|
||||
for i := 0; i < b.N; i++ {
|
||||
buf.Reset()
|
||||
proto.MarshalText(buf, m)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMarshalTextUnbuffered(b *testing.B) {
|
||||
w := ioutil.Discard
|
||||
m := newTestMessage()
|
||||
for i := 0; i < b.N; i++ {
|
||||
proto.MarshalText(w, m)
|
||||
}
|
||||
}
|
||||
|
||||
func compact(src string) string {
|
||||
// s/[ \n]+/ /g; s/ $//;
|
||||
dst := make([]byte, len(src))
|
||||
space, comment := false, false
|
||||
j := 0
|
||||
for i := 0; i < len(src); i++ {
|
||||
if strings.HasPrefix(src[i:], "/*") {
|
||||
comment = true
|
||||
i++
|
||||
continue
|
||||
}
|
||||
if comment && strings.HasPrefix(src[i:], "*/") {
|
||||
comment = false
|
||||
i++
|
||||
continue
|
||||
}
|
||||
if comment {
|
||||
continue
|
||||
}
|
||||
c := src[i]
|
||||
if c == ' ' || c == '\n' {
|
||||
space = true
|
||||
continue
|
||||
}
|
||||
if j > 0 && (dst[j-1] == ':' || dst[j-1] == '<' || dst[j-1] == '{') {
|
||||
space = false
|
||||
}
|
||||
if c == '{' {
|
||||
space = false
|
||||
}
|
||||
if space {
|
||||
dst[j] = ' '
|
||||
j++
|
||||
space = false
|
||||
}
|
||||
dst[j] = c
|
||||
j++
|
||||
}
|
||||
if space {
|
||||
dst[j] = ' '
|
||||
j++
|
||||
}
|
||||
return string(dst[0:j])
|
||||
}
|
||||
|
||||
var compactText = compact(text)
|
||||
|
||||
func TestCompactText(t *testing.T) {
|
||||
s := proto.CompactTextString(newTestMessage())
|
||||
if s != compactText {
|
||||
t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v\n===\n", s, compactText)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStringEscaping(t *testing.T) {
|
||||
testCases := []struct {
|
||||
in *pb.Strings
|
||||
out string
|
||||
}{
|
||||
{
|
||||
// Test data from C++ test (TextFormatTest.StringEscape).
|
||||
// Single divergence: we don't escape apostrophes.
|
||||
&pb.Strings{StringField: proto.String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces")},
|
||||
"string_field: \"\\\"A string with ' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"\n",
|
||||
},
|
||||
{
|
||||
// Test data from the same C++ test.
|
||||
&pb.Strings{StringField: proto.String("\350\260\267\346\255\214")},
|
||||
"string_field: \"\\350\\260\\267\\346\\255\\214\"\n",
|
||||
},
|
||||
{
|
||||
// Some UTF-8.
|
||||
&pb.Strings{StringField: proto.String("\x00\x01\xff\x81")},
|
||||
`string_field: "\000\001\377\201"` + "\n",
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
var buf bytes.Buffer
|
||||
if err := proto.MarshalText(&buf, tc.in); err != nil {
|
||||
t.Errorf("proto.MarsalText: %v", err)
|
||||
continue
|
||||
}
|
||||
s := buf.String()
|
||||
if s != tc.out {
|
||||
t.Errorf("#%d: Got:\n%s\nExpected:\n%s\n", i, s, tc.out)
|
||||
continue
|
||||
}
|
||||
|
||||
// Check round-trip.
|
||||
pb := new(pb.Strings)
|
||||
if err := proto.UnmarshalText(s, pb); err != nil {
|
||||
t.Errorf("#%d: UnmarshalText: %v", i, err)
|
||||
continue
|
||||
}
|
||||
if !proto.Equal(pb, tc.in) {
|
||||
t.Errorf("#%d: Round-trip failed:\nstart: %v\n end: %v", i, tc.in, pb)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// A limitedWriter accepts some output before it fails.
|
||||
// This is a proxy for something like a nearly-full or imminently-failing disk,
|
||||
// or a network connection that is about to die.
|
||||
type limitedWriter struct {
|
||||
b bytes.Buffer
|
||||
limit int
|
||||
}
|
||||
|
||||
var outOfSpace = errors.New("proto: insufficient space")
|
||||
|
||||
func (w *limitedWriter) Write(p []byte) (n int, err error) {
|
||||
var avail = w.limit - w.b.Len()
|
||||
if avail <= 0 {
|
||||
return 0, outOfSpace
|
||||
}
|
||||
if len(p) <= avail {
|
||||
return w.b.Write(p)
|
||||
}
|
||||
n, _ = w.b.Write(p[:avail])
|
||||
return n, outOfSpace
|
||||
}
|
||||
|
||||
func TestMarshalTextFailing(t *testing.T) {
|
||||
// Try lots of different sizes to exercise more error code-paths.
|
||||
for lim := 0; lim < len(text); lim++ {
|
||||
buf := new(limitedWriter)
|
||||
buf.limit = lim
|
||||
err := proto.MarshalText(buf, newTestMessage())
|
||||
// We expect a certain error, but also some partial results in the buffer.
|
||||
if err != outOfSpace {
|
||||
t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", err, outOfSpace)
|
||||
}
|
||||
s := buf.b.String()
|
||||
x := text[:buf.limit]
|
||||
if s != x {
|
||||
t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, x)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFloats(t *testing.T) {
|
||||
tests := []struct {
|
||||
f float64
|
||||
want string
|
||||
}{
|
||||
{0, "0"},
|
||||
{4.7, "4.7"},
|
||||
{math.Inf(1), "inf"},
|
||||
{math.Inf(-1), "-inf"},
|
||||
{math.NaN(), "nan"},
|
||||
}
|
||||
for _, test := range tests {
|
||||
msg := &pb.FloatingPoint{F: &test.f}
|
||||
got := strings.TrimSpace(msg.String())
|
||||
want := `f:` + test.want
|
||||
if got != want {
|
||||
t.Errorf("f=%f: got %q, want %q", test.f, got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRepeatedNilText(t *testing.T) {
|
||||
m := &pb.MessageList{
|
||||
Message: []*pb.MessageList_Message{
|
||||
nil,
|
||||
&pb.MessageList_Message{
|
||||
Name: proto.String("Horse"),
|
||||
},
|
||||
nil,
|
||||
},
|
||||
}
|
||||
want := `Message <nil>
|
||||
Message {
|
||||
name: "Horse"
|
||||
}
|
||||
Message <nil>
|
||||
`
|
||||
if s := proto.MarshalTextString(m); s != want {
|
||||
t.Errorf(" got: %s\nwant: %s", s, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestProto3Text(t *testing.T) {
|
||||
tests := []struct {
|
||||
m proto.Message
|
||||
want string
|
||||
}{
|
||||
// zero message
|
||||
{&proto3pb.Message{}, ``},
|
||||
// zero message except for an empty byte slice
|
||||
{&proto3pb.Message{Data: []byte{}}, ``},
|
||||
// trivial case
|
||||
{&proto3pb.Message{Name: "Rob", HeightInCm: 175}, `name:"Rob" height_in_cm:175`},
|
||||
// empty map
|
||||
{&pb.MessageWithMap{}, ``},
|
||||
// non-empty map; current map format is the same as a repeated struct
|
||||
{
|
||||
&pb.MessageWithMap{NameMapping: map[int32]string{1234: "Feist"}},
|
||||
`name_mapping:<key:1234 value:"Feist" >`,
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
got := strings.TrimSpace(test.m.String())
|
||||
if got != test.want {
|
||||
t.Errorf("\n got %s\nwant %s", got, test.want)
|
||||
}
|
||||
}
|
||||
}
|
152
libnetwork/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/README.md
generated
vendored
Normal file
|
@ -0,0 +1,152 @@
|
|||
Libcontainer provides a native Go implementation for creating containers
|
||||
with namespaces, cgroups, capabilities, and filesystem access controls.
|
||||
It allows you to manage the lifecycle of the container performing additional operations
|
||||
after the container is created.
|
||||
|
||||
|
||||
#### Container
|
||||
A container is a self-contained execution environment that shares the kernel of the
|
||||
host system and which is (optionally) isolated from other containers in the system.
|
||||
|
||||
#### Using libcontainer
|
||||
|
||||
To create a container you first have to initialize an instance of a factory
|
||||
that will handle the creation and initialization for a container.
|
||||
|
||||
Because containers are spawned in a two-step process, you will need to provide
|
||||
arguments to a binary that will be executed as the init process for the container.
|
||||
To use the current binary that is spawning the containers and acting as the parent
|
||||
you can use `os.Args[0]`, with a command called `init` set up to act as that init process.
|
||||
|
||||
```go
|
||||
root, err := libcontainer.New("/var/lib/container", libcontainer.InitArgs(os.Args[0], "init"))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
```
|
||||
|
||||
Once you have an instance of the factory created we can create a configuration
|
||||
struct describing how the container is to be created. A sample would look similar to this:
|
||||
|
||||
```go
|
||||
config := &configs.Config{
|
||||
Rootfs: rootfs,
|
||||
Capabilities: []string{
|
||||
"CHOWN",
|
||||
"DAC_OVERRIDE",
|
||||
"FSETID",
|
||||
"FOWNER",
|
||||
"MKNOD",
|
||||
"NET_RAW",
|
||||
"SETGID",
|
||||
"SETUID",
|
||||
"SETFCAP",
|
||||
"SETPCAP",
|
||||
"NET_BIND_SERVICE",
|
||||
"SYS_CHROOT",
|
||||
"KILL",
|
||||
"AUDIT_WRITE",
|
||||
},
|
||||
Namespaces: configs.Namespaces([]configs.Namespace{
|
||||
{Type: configs.NEWNS},
|
||||
{Type: configs.NEWUTS},
|
||||
{Type: configs.NEWIPC},
|
||||
{Type: configs.NEWPID},
|
||||
{Type: configs.NEWNET},
|
||||
}),
|
||||
Cgroups: &configs.Cgroup{
|
||||
Name: "test-container",
|
||||
Parent: "system",
|
||||
AllowAllDevices: false,
|
||||
AllowedDevices: configs.DefaultAllowedDevices,
|
||||
},
|
||||
|
||||
Devices: configs.DefaultAutoCreatedDevices,
|
||||
Hostname: "testing",
|
||||
Networks: []*configs.Network{
|
||||
{
|
||||
Type: "loopback",
|
||||
Address: "127.0.0.1/0",
|
||||
Gateway: "localhost",
|
||||
},
|
||||
},
|
||||
Rlimits: []configs.Rlimit{
|
||||
{
|
||||
Type: syscall.RLIMIT_NOFILE,
|
||||
Hard: uint64(1024),
|
||||
Soft: uint64(1024),
|
||||
},
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
Once you have the configuration populated you can create a container:
|
||||
|
||||
```go
|
||||
container, err := root.Create("container-id", config)
|
||||
```
|
||||
|
||||
To spawn bash as the initial process inside the container and have the
|
||||
process's pid returned in order to wait for, signal, or kill the process:
|
||||
|
||||
```go
|
||||
process := &libcontainer.Process{
|
||||
Args: []string{"/bin/bash"},
|
||||
Env: []string{"PATH=/bin"},
|
||||
User: "daemon",
|
||||
Stdin: os.Stdin,
|
||||
Stdout: os.Stdout,
|
||||
Stderr: os.Stderr,
|
||||
}
|
||||
|
||||
err := container.Start(process)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// wait for the process to finish.
|
||||
status, err := process.Wait()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// destroy the container.
|
||||
container.Destroy()
|
||||
```
|
||||
|
||||
Additional ways to interact with a running container are:
|
||||
|
||||
```go
|
||||
// return all the pids for all processes running inside the container.
|
||||
processes, err := container.Processes()
|
||||
|
||||
// get detailed cpu, memory, io, and network statistics for the container and
|
||||
// its processes.
|
||||
stats, err := container.Stats()
|
||||
|
||||
|
||||
// pause all processes inside the container.
|
||||
container.Pause()
|
||||
|
||||
// resume all paused processes.
|
||||
container.Resume()
|
||||
```
|
||||
|
||||
|
||||
#### Checkpoint & Restore
|
||||
|
||||
libcontainer now integrates [CRIU](http://criu.org/) for checkpointing and restoring containers.
|
||||
This lets you save the state of a process running inside a container to disk, and then restore
|
||||
that state into a new process, on the same machine or on another machine.
|
||||
|
||||
`criu` version 1.5.2 or higher is required to use checkpoint and restore.
|
||||
If you don't already have `criu` installed, you can build it from source, following the
|
||||
[online instructions](http://criu.org/Installation). `criu` is also installed in the docker image
|
||||
generated when building libcontainer with docker.
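
Checkpoint and restore are exposed on the container object. Here is a minimal sketch, assuming
the `Checkpoint`/`Restore` methods and the `CriuOpts` struct in this libcontainer revision (the
`ImagesDirectory` field and the paths used here are illustrative), reusing the `container` and
`process` values from the examples above:

```go
opts := &libcontainer.CriuOpts{
	ImagesDirectory: "/var/lib/container/checkpoint",
}

// write the container's state to disk
if err := container.Checkpoint(opts); err != nil {
	log.Fatal(err)
}

// later, restore that state into a new process
if err := container.Restore(process, opts); err != nil {
	log.Fatal(err)
}
```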
|
||||
|
||||
|
||||
## Copyright and license
|
||||
|
||||
Code and documentation copyright 2014 Docker, Inc. Code released under the Apache 2.0 license.
Docs released under Creative Commons.
|
||||
|
334 libnetwork/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/SPEC.md generated vendored Normal file
@@ -0,0 +1,334 @@
## Container Specification - v1
|
||||
|
||||
This is the standard configuration for version 1 containers. It includes
|
||||
namespaces, standard filesystem setup, a default Linux capability set, and
|
||||
information about resource reservations. It also has information about any
|
||||
populated environment settings for the processes running inside a container.
|
||||
|
||||
Along with the configuration of how a container is created, the standard also
discusses actions that can be performed on a container to manage and inspect
information about the processes running inside.
|
||||
|
||||
The v1 profile is meant to be able to accommodate the majority of applications
|
||||
with a strong security configuration.
|
||||
|
||||
### System Requirements and Compatibility
|
||||
|
||||
Minimum requirements:
|
||||
* Kernel version - 3.10 recommended, 2.6.2x minimum (with backported patches)
|
||||
* Mounted cgroups with each subsystem in its own hierarchy
|
||||
|
||||
|
||||
### Namespaces
|
||||
|
||||
| Flag | Enabled |
|
||||
| ------------ | ------- |
|
||||
| CLONE_NEWPID | 1 |
|
||||
| CLONE_NEWUTS | 1 |
|
||||
| CLONE_NEWIPC | 1 |
|
||||
| CLONE_NEWNET | 1 |
|
||||
| CLONE_NEWNS | 1 |
|
||||
| CLONE_NEWUSER | 1 |
|
||||
|
||||
Namespaces are created for the container via the `clone` syscall.
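
For illustration, the same set of namespaces can be requested from Go by setting clone flags on
an exec'd child. This is a sketch using `os/exec` and `syscall`, not libcontainer's own
implementation; `CLONE_NEWUSER` is omitted because it also requires uid/gid mappings:

```go
package main

import (
	"log"
	"os"
	"os/exec"
	"syscall"
)

func main() {
	// Re-exec ourselves with an "init" argument inside fresh namespaces.
	cmd := exec.Command("/proc/self/exe", "init")
	cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
	cmd.SysProcAttr = &syscall.SysProcAttr{
		Cloneflags: syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWIPC |
			syscall.CLONE_NEWPID | syscall.CLONE_NEWNET,
	}
	if err := cmd.Run(); err != nil {
		log.Fatal(err)
	}
}
```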
|
||||
|
||||
|
||||
### Filesystem
|
||||
|
||||
A root filesystem must be provided to a container for execution. The container
|
||||
will use this root filesystem (rootfs) to jail and spawn processes inside where
|
||||
the binaries and system libraries are local to that directory. Any binaries
|
||||
to be executed must be contained within this rootfs.
|
||||
|
||||
Mounts that happen inside the container are automatically cleaned up when the
container exits, as the mount namespace is destroyed and the kernel will
unmount all the mounts that were set up within that namespace.
|
||||
|
||||
For a container to execute properly, certain filesystems are required
to be mounted within the rootfs; the runtime will set these up.
|
||||
|
||||
| Path | Type | Flags | Data |
|
||||
| ----------- | ------ | -------------------------------------- | ---------------------------------------- |
|
||||
| /proc | proc | MS_NOEXEC,MS_NOSUID,MS_NODEV | |
|
||||
| /dev | tmpfs | MS_NOEXEC,MS_STRICTATIME | mode=755 |
|
||||
| /dev/shm | tmpfs | MS_NOEXEC,MS_NOSUID,MS_NODEV | mode=1777,size=65536k |
|
||||
| /dev/mqueue | mqueue | MS_NOEXEC,MS_NOSUID,MS_NODEV | |
|
||||
| /dev/pts | devpts | MS_NOEXEC,MS_NOSUID | newinstance,ptmxmode=0666,mode=620,gid=5 |
|
||||
| /sys | sysfs | MS_NOEXEC,MS_NOSUID,MS_NODEV,MS_RDONLY | |
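
Each row maps directly onto a `mount(2)` call. For example, the `/proc` row expressed with Go's
`syscall` package looks like the sketch below; `rootfs` is an assumed variable and the usual
`os`, `path/filepath`, `syscall`, and `log` imports are implied. This is illustrative, not the
runtime's actual code:

```go
// Mount proc inside the new rootfs with the flags from the table above.
procPath := filepath.Join(rootfs, "proc")
if err := os.MkdirAll(procPath, 0755); err != nil {
	log.Fatal(err)
}
flags := uintptr(syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV)
if err := syscall.Mount("proc", procPath, "proc", flags, ""); err != nil {
	log.Fatal(err)
}
```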
|
||||
|
||||
|
||||
After a container's filesystems are mounted within the newly created
mount namespace, `/dev` will need to be populated with a set of device nodes.
A rootfs is not expected to have any device nodes specified
for `/dev` within the rootfs, as the container will set up the correct devices
that are required for executing a container's process.
|
||||
|
||||
| Path | Mode | Access |
|
||||
| ------------ | ---- | ---------- |
|
||||
| /dev/null | 0666 | rwm |
|
||||
| /dev/zero | 0666 | rwm |
|
||||
| /dev/full | 0666 | rwm |
|
||||
| /dev/tty | 0666 | rwm |
|
||||
| /dev/random | 0666 | rwm |
|
||||
| /dev/urandom | 0666 | rwm |
|
||||
| /dev/fuse | 0666 | rwm |
|
||||
|
||||
|
||||
**ptmx**
|
||||
`/dev/ptmx` will need to be a symlink to the host's `/dev/ptmx` within
|
||||
the container.
|
||||
|
||||
The use of a pseudo TTY is optional within a container and it should support both.
If a pseudo TTY is provided to the container, `/dev/console` will need to be
set up by binding the console in `/dev/` after it has been populated and mounted
in tmpfs.
|
||||
|
||||
| Source | Destination | UID GID | Mode | Type |
|
||||
| --------------- | ------------ | ------- | ---- | ---- |
|
||||
| *pty host path* | /dev/console | 0 0 | 0600 | bind |
|
||||
|
||||
|
||||
After `/dev/null` has been set up, we check for any external links between
the container's io, STDIN, STDOUT, STDERR. If the container's io is pointing
to `/dev/null` outside the container, we close and `dup2` the `/dev/null`
that is local to the container's rootfs.
|
||||
|
||||
|
||||
After the container has `/proc` mounted, a few standard symlinks are set up
within `/dev/` for the io.
|
||||
|
||||
| Source | Destination |
|
||||
| ------------ | ----------- |
|
||||
| /proc/1/fd | /dev/fd |
|
||||
| /proc/1/fd/0 | /dev/stdin |
|
||||
| /proc/1/fd/1 | /dev/stdout |
|
||||
| /proc/1/fd/2 | /dev/stderr |
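
These are plain symlinks, so the setup amounts to a handful of `os.Symlink` calls made after
`/proc` is mounted. A sketch, assuming the standard `os` and `log` imports:

```go
// Create the io symlinks listed above; the container's init is pid 1,
// so /proc/1/fd refers to its own file descriptors.
links := map[string]string{
	"/proc/1/fd":   "/dev/fd",
	"/proc/1/fd/0": "/dev/stdin",
	"/proc/1/fd/1": "/dev/stdout",
	"/proc/1/fd/2": "/dev/stderr",
}
for src, dst := range links {
	if err := os.Symlink(src, dst); err != nil {
		log.Fatal(err)
	}
}
```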
|
||||
|
||||
A `pivot_root` is used to change the root for the process, effectively
|
||||
jailing the process inside the rootfs.
|
||||
|
||||
```c
|
||||
put_old = mkdir(...);
|
||||
pivot_root(rootfs, put_old);
|
||||
chdir("/");
|
||||
unmount(put_old, MS_DETACH);
|
||||
rmdir(put_old);
|
||||
```
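
The same sequence can be written with Go's `syscall` package. This is a sketch rather than the
library's implementation; `rootfs` is an assumed, already-mounted path, and the usual `os`,
`path/filepath`, `syscall`, and `log` imports are implied:

```go
// Pivot into the new rootfs, then detach and remove the old root.
putOld := filepath.Join(rootfs, ".pivot_root")
if err := os.MkdirAll(putOld, 0700); err != nil {
	log.Fatal(err)
}
if err := syscall.PivotRoot(rootfs, putOld); err != nil {
	log.Fatal(err)
}
if err := syscall.Chdir("/"); err != nil {
	log.Fatal(err)
}
// The old root is now visible at /.pivot_root inside the new root.
if err := syscall.Unmount("/.pivot_root", syscall.MNT_DETACH); err != nil {
	log.Fatal(err)
}
if err := os.Remove("/.pivot_root"); err != nil {
	log.Fatal(err)
}
```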
|
||||
|
||||
For containers running with a rootfs inside `ramfs`, a `MS_MOVE` combined
with a `chroot` is required, as `pivot_root` is not supported in `ramfs`.
|
||||
|
||||
```c
|
||||
mount(rootfs, "/", NULL, MS_MOVE, NULL);
|
||||
chroot(".");
|
||||
chdir("/");
|
||||
```
|
||||
|
||||
The `umask` is set back to `0022` after the filesystem setup has been completed.
|
||||
|
||||
### Resources
|
||||
|
||||
Cgroups are used to handle resource allocation for containers. This includes
|
||||
system resources like cpu, memory, and device access.
|
||||
|
||||
| Subsystem | Enabled |
|
||||
| ---------- | ------- |
|
||||
| devices | 1 |
|
||||
| memory | 1 |
|
||||
| cpu | 1 |
|
||||
| cpuacct | 1 |
|
||||
| cpuset | 1 |
|
||||
| blkio | 1 |
|
||||
| perf_event | 1 |
|
||||
| freezer | 1 |
|
||||
| hugetlb | 1 |
|
||||
|
||||
|
||||
All cgroup subsystems are joined so that statistics can be collected from
each of the subsystems. Freezer does not expose any stats but is joined
so that containers can be paused and resumed.
|
||||
|
||||
The parent process of the container's init must place the init pid inside
the correct cgroups before the initialization begins. This is done so
that no processes or threads escape the cgroups. This sync is
done via a pipe (specified in the runtime section below) on which the container's
init process will block, waiting for the parent to finish setup.
|
||||
|
||||
### Security
|
||||
|
||||
The standard set of Linux capabilities that are set in a container
|
||||
provide a good default for security and flexibility for the applications.
|
||||
|
||||
|
||||
| Capability | Enabled |
|
||||
| -------------------- | ------- |
|
||||
| CAP_NET_RAW | 1 |
|
||||
| CAP_NET_BIND_SERVICE | 1 |
|
||||
| CAP_AUDIT_READ | 1 |
|
||||
| CAP_AUDIT_WRITE | 1 |
|
||||
| CAP_DAC_OVERRIDE | 1 |
|
||||
| CAP_SETFCAP | 1 |
|
||||
| CAP_SETPCAP | 1 |
|
||||
| CAP_SETGID | 1 |
|
||||
| CAP_SETUID | 1 |
|
||||
| CAP_MKNOD | 1 |
|
||||
| CAP_CHOWN | 1 |
|
||||
| CAP_FOWNER | 1 |
|
||||
| CAP_FSETID | 1 |
|
||||
| CAP_KILL | 1 |
|
||||
| CAP_SYS_CHROOT | 1 |
|
||||
| CAP_NET_BROADCAST | 0 |
|
||||
| CAP_SYS_MODULE | 0 |
|
||||
| CAP_SYS_RAWIO | 0 |
|
||||
| CAP_SYS_PACCT | 0 |
|
||||
| CAP_SYS_ADMIN | 0 |
|
||||
| CAP_SYS_NICE | 0 |
|
||||
| CAP_SYS_RESOURCE | 0 |
|
||||
| CAP_SYS_TIME | 0 |
|
||||
| CAP_SYS_TTY_CONFIG | 0 |
|
||||
| CAP_AUDIT_CONTROL | 0 |
|
||||
| CAP_MAC_OVERRIDE | 0 |
|
||||
| CAP_MAC_ADMIN | 0 |
|
||||
| CAP_NET_ADMIN | 0 |
|
||||
| CAP_SYSLOG | 0 |
|
||||
| CAP_DAC_READ_SEARCH | 0 |
|
||||
| CAP_LINUX_IMMUTABLE | 0 |
|
||||
| CAP_IPC_LOCK | 0 |
|
||||
| CAP_IPC_OWNER | 0 |
|
||||
| CAP_SYS_PTRACE | 0 |
|
||||
| CAP_SYS_BOOT | 0 |
|
||||
| CAP_LEASE | 0 |
|
||||
| CAP_WAKE_ALARM | 0 |
|
||||
| CAP_BLOCK_SUSPEND    | 0       |
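
The vendored `gocapability` package (used by `capabilities_linux.go` elsewhere in this tree) can
express this whitelist directly. A shortened sketch that keeps only a few of the enabled
capabilities for the current process, assuming the
`github.com/syndtr/gocapability/capability`, `os`, and `log` imports:

```go
// Keep a small whitelist and drop everything else, including the bounding set.
caps, err := capability.NewPid(os.Getpid())
if err != nil {
	log.Fatal(err)
}
keep := []capability.Cap{
	capability.CAP_CHOWN,
	capability.CAP_KILL,
	capability.CAP_NET_BIND_SERVICE,
	// ...remaining capabilities marked enabled in the table above
}
all := capability.CAPS | capability.BOUNDS
caps.Clear(all)
caps.Set(all, keep...)
if err := caps.Apply(all); err != nil {
	log.Fatal(err)
}
```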
|
||||
|
||||
|
||||
Additional security layers like [apparmor](https://wiki.ubuntu.com/AppArmor)
|
||||
and [selinux](http://selinuxproject.org/page/Main_Page) can be used with
|
||||
the containers. A container should support setting an apparmor profile or
|
||||
selinux process and mount labels if provided in the configuration.
|
||||
|
||||
Standard apparmor profile:
|
||||
```c
|
||||
#include <tunables/global>
|
||||
profile <profile_name> flags=(attach_disconnected,mediate_deleted) {
|
||||
#include <abstractions/base>
|
||||
network,
|
||||
capability,
|
||||
file,
|
||||
umount,
|
||||
|
||||
deny @{PROC}/sys/fs/** wklx,
|
||||
deny @{PROC}/sysrq-trigger rwklx,
|
||||
deny @{PROC}/mem rwklx,
|
||||
deny @{PROC}/kmem rwklx,
|
||||
deny @{PROC}/sys/kernel/[^s][^h][^m]* wklx,
|
||||
deny @{PROC}/sys/kernel/*/** wklx,
|
||||
|
||||
deny mount,
|
||||
|
||||
deny /sys/[^f]*/** wklx,
|
||||
deny /sys/f[^s]*/** wklx,
|
||||
deny /sys/fs/[^c]*/** wklx,
|
||||
deny /sys/fs/c[^g]*/** wklx,
|
||||
deny /sys/fs/cg[^r]*/** wklx,
|
||||
deny /sys/firmware/efi/efivars/** rwklx,
|
||||
deny /sys/kernel/security/** rwklx,
|
||||
}
|
||||
```
|
||||
|
||||
*TODO: seccomp work is being done to find a good default config*
|
||||
|
||||
### Runtime and Init Process
|
||||
|
||||
During container creation the parent process needs to talk to the container's init
|
||||
process and have a form of synchronization. This is accomplished by creating
|
||||
a pipe that is passed to the container's init. When the init process first spawns
|
||||
it will block on its side of the pipe until the parent closes its side. This
|
||||
allows the parent to have time to set the new process inside a cgroup hierarchy
|
||||
and/or write any uid/gid mappings required for user namespaces.
|
||||
The pipe is passed to the init process via FD 3.
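
A sketch of that handshake from the parent's side, using `os/exec` to hand the child the read end
of the pipe as FD 3. This is illustrative only, not libcontainer's internals:

```go
package main

import (
	"log"
	"os"
	"os/exec"
)

func main() {
	// The child's init blocks reading FD 3 until the parent closes the write end.
	r, w, err := os.Pipe()
	if err != nil {
		log.Fatal(err)
	}
	cmd := exec.Command("/proc/self/exe", "init")
	cmd.ExtraFiles = []*os.File{r} // the first extra file becomes FD 3 in the child
	if err := cmd.Start(); err != nil {
		log.Fatal(err)
	}
	r.Close() // the parent no longer needs the read end

	// ...place cmd.Process.Pid into cgroups and write uid/gid mappings here...

	w.Close() // closing the write end unblocks the child's init
	if err := cmd.Wait(); err != nil {
		log.Fatal(err)
	}
}
```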
|
||||
|
||||
The application consuming libcontainer should be compiled statically. libcontainer
does not define any init process, and the arguments provided are used to `exec` the
process inside the application. There should be no long-running init within the
container spec.
|
||||
|
||||
If a pseudo TTY is provided to a container, it will open and `dup2` the console
as the container's STDIN, STDOUT, STDERR, as well as mounting the console
as `/dev/console`.
|
||||
|
||||
An extra set of mounts is provided to a container and set up for use. A container's
rootfs can contain some non-portable files inside that can cause side effects during
execution of a process. These files are usually created and populated with the
container-specific information via the runtime.
|
||||
|
||||
**Extra runtime files:**
|
||||
* /etc/hosts
|
||||
* /etc/resolv.conf
|
||||
* /etc/hostname
|
||||
* /etc/localtime
|
||||
|
||||
|
||||
#### Defaults
|
||||
|
||||
There are a few defaults that can be overridden by users; when omitted,
these apply to processes within a container.
|
||||
|
||||
| Type | Value |
|
||||
| ------------------- | ------------------------------ |
|
||||
| Parent Death Signal | SIGKILL |
|
||||
| UID | 0 |
|
||||
| GID | 0 |
|
||||
| GROUPS | 0, NULL |
|
||||
| CWD | "/" |
|
||||
| $HOME | Current user's home dir or "/" |
|
||||
| Readonly rootfs | false |
|
||||
| Pseudo TTY | false |
|
||||
|
||||
|
||||
## Actions
|
||||
|
||||
After a container is created there is a standard set of actions that can
|
||||
be done to the container. These actions are part of the public API for
|
||||
a container.
|
||||
|
||||
| Action | Description |
|
||||
| -------------- | ------------------------------------------------------------------ |
|
||||
| Get processes | Return all the pids for processes running inside a container |
|
||||
| Get Stats | Return resource statistics for the container as a whole |
|
||||
| Wait           | Wait on the container's init process (pid 1)                       |
|
||||
| Wait Process | Wait on any of the container's processes returning the exit status |
|
||||
| Destroy | Kill the container's init process and remove any filesystem state |
|
||||
| Signal | Send a signal to the container's init process |
|
||||
| Signal Process | Send a signal to any of the container's processes |
|
||||
| Pause | Pause all processes inside the container |
|
||||
| Resume | Resume all processes inside the container if paused |
|
||||
| Exec           | Execute a new process inside of the container (requires setns)     |
|
||||
| Set            | Set up configs of the container after it's created                 |
|
||||
|
||||
### Execute a new process inside a running container
|
||||
|
||||
A user can execute a new process inside a running container. Any binaries to be
executed must be accessible within the container's rootfs.
|
||||
|
||||
The started process will run inside the container's rootfs. Any changes
made by the process to the container's filesystem will persist after the
process finishes executing.
|
||||
|
||||
The started process will join all the container's existing namespaces. When the
|
||||
container is paused, the process will also be paused and will resume when
|
||||
the container is unpaused. The started process will only run when the container's
|
||||
primary process (PID 1) is running, and will not be restarted when the container
|
||||
is restarted.
|
||||
|
||||
#### Planned additions
|
||||
|
||||
The started process will have its own cgroups nested inside the container's
cgroups. This is used for process tracking and optionally resource allocation
handling for the new process. The freezer cgroup is required; the rest of the cgroups
are optional. The process executor must place its pid inside the correct
cgroups before starting the process. This is done so that no child processes or
threads can escape the cgroups.
|
||||
|
||||
When the process is stopped, the process executor will try (in a best-effort way)
|
||||
to stop all its children and remove the sub-cgroups.
|
38 libnetwork/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/apparmor/apparmor.go generated vendored Normal file
@@ -0,0 +1,38 @@
// +build apparmor,linux
|
||||
|
||||
package apparmor
|
||||
|
||||
// #cgo LDFLAGS: -lapparmor
|
||||
// #include <sys/apparmor.h>
|
||||
// #include <stdlib.h>
|
||||
import "C"
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// IsEnabled returns true if apparmor is enabled for the host.
|
||||
func IsEnabled() bool {
|
||||
if _, err := os.Stat("/sys/kernel/security/apparmor"); err == nil && os.Getenv("container") == "" {
|
||||
if _, err = os.Stat("/sbin/apparmor_parser"); err == nil {
|
||||
buf, err := ioutil.ReadFile("/sys/module/apparmor/parameters/enabled")
|
||||
return err == nil && len(buf) > 1 && buf[0] == 'Y'
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ApplyProfile will apply the profile with the specified name to the process after
|
||||
// the next exec.
|
||||
func ApplyProfile(name string) error {
|
||||
if name == "" {
|
||||
return nil
|
||||
}
|
||||
cName := C.CString(name)
|
||||
defer C.free(unsafe.Pointer(cName))
|
||||
if _, err := C.aa_change_onexec(cName); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
11 libnetwork/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_disabled.go generated vendored Normal file
@@ -0,0 +1,11 @@
// +build !apparmor !linux
|
||||
|
||||
package apparmor
|
||||
|
||||
func IsEnabled() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func ApplyProfile(name string) error {
|
||||
return nil
|
||||
}
|
91 libnetwork/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/capabilities_linux.go generated vendored Normal file
@@ -0,0 +1,91 @@
// +build linux
|
||||
|
||||
package libcontainer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/syndtr/gocapability/capability"
|
||||
)
|
||||
|
||||
const allCapabilityTypes = capability.CAPS | capability.BOUNDS
|
||||
|
||||
var capabilityList = map[string]capability.Cap{
|
||||
"SETPCAP": capability.CAP_SETPCAP,
|
||||
"SYS_MODULE": capability.CAP_SYS_MODULE,
|
||||
"SYS_RAWIO": capability.CAP_SYS_RAWIO,
|
||||
"SYS_PACCT": capability.CAP_SYS_PACCT,
|
||||
"SYS_ADMIN": capability.CAP_SYS_ADMIN,
|
||||
"SYS_NICE": capability.CAP_SYS_NICE,
|
||||
"SYS_RESOURCE": capability.CAP_SYS_RESOURCE,
|
||||
"SYS_TIME": capability.CAP_SYS_TIME,
|
||||
"SYS_TTY_CONFIG": capability.CAP_SYS_TTY_CONFIG,
|
||||
"MKNOD": capability.CAP_MKNOD,
|
||||
"AUDIT_WRITE": capability.CAP_AUDIT_WRITE,
|
||||
"AUDIT_CONTROL": capability.CAP_AUDIT_CONTROL,
|
||||
"MAC_OVERRIDE": capability.CAP_MAC_OVERRIDE,
|
||||
"MAC_ADMIN": capability.CAP_MAC_ADMIN,
|
||||
"NET_ADMIN": capability.CAP_NET_ADMIN,
|
||||
"SYSLOG": capability.CAP_SYSLOG,
|
||||
"CHOWN": capability.CAP_CHOWN,
|
||||
"NET_RAW": capability.CAP_NET_RAW,
|
||||
"DAC_OVERRIDE": capability.CAP_DAC_OVERRIDE,
|
||||
"FOWNER": capability.CAP_FOWNER,
|
||||
"DAC_READ_SEARCH": capability.CAP_DAC_READ_SEARCH,
|
||||
"FSETID": capability.CAP_FSETID,
|
||||
"KILL": capability.CAP_KILL,
|
||||
"SETGID": capability.CAP_SETGID,
|
||||
"SETUID": capability.CAP_SETUID,
|
||||
"LINUX_IMMUTABLE": capability.CAP_LINUX_IMMUTABLE,
|
||||
"NET_BIND_SERVICE": capability.CAP_NET_BIND_SERVICE,
|
||||
"NET_BROADCAST": capability.CAP_NET_BROADCAST,
|
||||
"IPC_LOCK": capability.CAP_IPC_LOCK,
|
||||
"IPC_OWNER": capability.CAP_IPC_OWNER,
|
||||
"SYS_CHROOT": capability.CAP_SYS_CHROOT,
|
||||
"SYS_PTRACE": capability.CAP_SYS_PTRACE,
|
||||
"SYS_BOOT": capability.CAP_SYS_BOOT,
|
||||
"LEASE": capability.CAP_LEASE,
|
||||
"SETFCAP": capability.CAP_SETFCAP,
|
||||
"WAKE_ALARM": capability.CAP_WAKE_ALARM,
|
||||
"BLOCK_SUSPEND": capability.CAP_BLOCK_SUSPEND,
|
||||
"AUDIT_READ": capability.CAP_AUDIT_READ,
|
||||
}
|
||||
|
||||
func newCapWhitelist(caps []string) (*whitelist, error) {
|
||||
l := []capability.Cap{}
|
||||
for _, c := range caps {
|
||||
v, ok := capabilityList[c]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unknown capability %q", c)
|
||||
}
|
||||
l = append(l, v)
|
||||
}
|
||||
pid, err := capability.NewPid(os.Getpid())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &whitelist{
|
||||
keep: l,
|
||||
pid: pid,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type whitelist struct {
|
||||
pid capability.Capabilities
|
||||
keep []capability.Cap
|
||||
}
|
||||
|
||||
// dropBoundingSet drops the capability bounding set to those specified in the whitelist.
|
||||
func (w *whitelist) dropBoundingSet() error {
|
||||
w.pid.Clear(capability.BOUNDS)
|
||||
w.pid.Set(capability.BOUNDS, w.keep...)
|
||||
return w.pid.Apply(capability.BOUNDS)
|
||||
}
|
||||
|
||||
// drop drops all capabilities for the current process except those specified in the whitelist.
|
||||
func (w *whitelist) drop() error {
|
||||
w.pid.Clear(allCapabilityTypes)
|
||||
w.pid.Set(allCapabilityTypes, w.keep...)
|
||||
return w.pid.Apply(allCapabilityTypes)
|
||||
}
|
61 libnetwork/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go generated vendored Normal file
@@ -0,0 +1,61 @@
// +build linux
|
||||
|
||||
package cgroups
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/configs"
|
||||
)
|
||||
|
||||
type Manager interface {
|
||||
// Apply cgroup configuration to the process with the specified pid
|
||||
Apply(pid int) error
|
||||
|
||||
// Returns the PIDs inside the cgroup set
|
||||
GetPids() ([]int, error)
|
||||
|
||||
// Returns statistics for the cgroup set
|
||||
GetStats() (*Stats, error)
|
||||
|
||||
// Toggles the freezer cgroup according with specified state
|
||||
Freeze(state configs.FreezerState) error
|
||||
|
||||
// Destroys the cgroup set
|
||||
Destroy() error
|
||||
|
||||
// NewCgroupManager() and LoadCgroupManager() require following attributes:
|
||||
// Paths map[string]string
|
||||
// Cgroups *cgroups.Cgroup
|
||||
// Paths maps cgroup subsystem to path at which it is mounted.
|
||||
// Cgroups specifies specific cgroup settings for the various subsystems
|
||||
|
||||
// Returns cgroup paths to save in a state file and to be able to
|
||||
// restore the object later.
|
||||
GetPaths() map[string]string
|
||||
|
||||
// Set the cgroup as configured.
|
||||
Set(container *configs.Config) error
|
||||
}
|
||||
|
||||
type NotFoundError struct {
|
||||
Subsystem string
|
||||
}
|
||||
|
||||
func (e *NotFoundError) Error() string {
|
||||
return fmt.Sprintf("mountpoint for %s not found", e.Subsystem)
|
||||
}
|
||||
|
||||
func NewNotFoundError(sub string) error {
|
||||
return &NotFoundError{
|
||||
Subsystem: sub,
|
||||
}
|
||||
}
|
||||
|
||||
func IsNotFound(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
_, ok := err.(*NotFoundError)
|
||||
return ok
|
||||
}
|
29 libnetwork/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/cgroups_test.go generated vendored Normal file
@@ -0,0 +1,29 @@
// +build linux
|
||||
|
||||
package cgroups
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
)
|
||||
|
||||
const (
|
||||
cgroupsContents = `11:hugetlb:/
|
||||
10:perf_event:/
|
||||
9:blkio:/
|
||||
8:net_cls:/
|
||||
7:freezer:/
|
||||
6:devices:/
|
||||
5:memory:/
|
||||
4:cpuacct,cpu:/
|
||||
3:cpuset:/
|
||||
2:name=systemd:/user.slice/user-1000.slice/session-16.scope`
|
||||
)
|
||||
|
||||
func TestParseCgroups(t *testing.T) {
|
||||
r := bytes.NewBuffer([]byte(cgroupsContents))
|
||||
_, err := ParseCgroupFile("blkio", r)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
3 libnetwork/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/cgroups_unsupported.go generated vendored Normal file
@@ -0,0 +1,3 @@
// +build !linux
|
||||
|
||||
package cgroups
|
333 libnetwork/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go generated vendored Normal file
@@ -0,0 +1,333 @@
// +build linux
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups"
|
||||
"github.com/opencontainers/runc/libcontainer/configs"
|
||||
)
|
||||
|
||||
var (
|
||||
subsystems = map[string]subsystem{
|
||||
"devices": &DevicesGroup{},
|
||||
"memory": &MemoryGroup{},
|
||||
"cpu": &CpuGroup{},
|
||||
"cpuset": &CpusetGroup{},
|
||||
"cpuacct": &CpuacctGroup{},
|
||||
"blkio": &BlkioGroup{},
|
||||
"hugetlb": &HugetlbGroup{},
|
||||
"net_cls": &NetClsGroup{},
|
||||
"net_prio": &NetPrioGroup{},
|
||||
"perf_event": &PerfEventGroup{},
|
||||
"freezer": &FreezerGroup{},
|
||||
}
|
||||
CgroupProcesses = "cgroup.procs"
|
||||
HugePageSizes, _ = cgroups.GetHugePageSize()
|
||||
)
|
||||
|
||||
type subsystem interface {
|
||||
// Returns the stats, as 'stats', corresponding to the cgroup under 'path'.
|
||||
GetStats(path string, stats *cgroups.Stats) error
|
||||
// Removes the cgroup represented by 'data'.
|
||||
Remove(*data) error
|
||||
// Creates and joins the cgroup represented by data.
|
||||
Apply(*data) error
|
||||
// Set the cgroup represented by cgroup.
|
||||
Set(path string, cgroup *configs.Cgroup) error
|
||||
}
|
||||
|
||||
type Manager struct {
|
||||
mu sync.Mutex
|
||||
Cgroups *configs.Cgroup
|
||||
Paths map[string]string
|
||||
}
|
||||
|
||||
// The absolute path to the root of the cgroup hierarchies.
|
||||
var cgroupRootLock sync.Mutex
|
||||
var cgroupRoot string
|
||||
|
||||
// Gets the cgroupRoot.
|
||||
func getCgroupRoot() (string, error) {
|
||||
cgroupRootLock.Lock()
|
||||
defer cgroupRootLock.Unlock()
|
||||
|
||||
if cgroupRoot != "" {
|
||||
return cgroupRoot, nil
|
||||
}
|
||||
|
||||
root, err := cgroups.FindCgroupMountpointDir()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if _, err := os.Stat(root); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
cgroupRoot = root
|
||||
return cgroupRoot, nil
|
||||
}
|
||||
|
||||
type data struct {
|
||||
root string
|
||||
cgroup string
|
||||
c *configs.Cgroup
|
||||
pid int
|
||||
}
|
||||
|
||||
func (m *Manager) Apply(pid int) error {
|
||||
if m.Cgroups == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
var c = m.Cgroups
|
||||
|
||||
d, err := getCgroupData(m.Cgroups, pid)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
paths := make(map[string]string)
|
||||
defer func() {
|
||||
if err != nil {
|
||||
cgroups.RemovePaths(paths)
|
||||
}
|
||||
}()
|
||||
for name, sys := range subsystems {
|
||||
if err := sys.Apply(d); err != nil {
|
||||
return err
|
||||
}
|
||||
// TODO: Apply should, ideally, be reentrant or be broken up into a separate
|
||||
// create and join phase so that the cgroup hierarchy for a container can be
|
||||
// created then join consists of writing the process pids to cgroup.procs
|
||||
p, err := d.path(name)
|
||||
if err != nil {
|
||||
if cgroups.IsNotFound(err) {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
paths[name] = p
|
||||
}
|
||||
m.Paths = paths
|
||||
|
||||
if paths["cpu"] != "" {
|
||||
if err := CheckCpushares(paths["cpu"], c.CpuShares); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Manager) Destroy() error {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
if err := cgroups.RemovePaths(m.Paths); err != nil {
|
||||
return err
|
||||
}
|
||||
m.Paths = make(map[string]string)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Manager) GetPaths() map[string]string {
|
||||
m.mu.Lock()
|
||||
paths := m.Paths
|
||||
m.mu.Unlock()
|
||||
return paths
|
||||
}
|
||||
|
||||
func (m *Manager) GetStats() (*cgroups.Stats, error) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
stats := cgroups.NewStats()
|
||||
for name, path := range m.Paths {
|
||||
sys, ok := subsystems[name]
|
||||
if !ok || !cgroups.PathExists(path) {
|
||||
continue
|
||||
}
|
||||
if err := sys.GetStats(path, stats); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return stats, nil
|
||||
}
|
||||
|
||||
func (m *Manager) Set(container *configs.Config) error {
|
||||
for name, path := range m.Paths {
|
||||
sys, ok := subsystems[name]
|
||||
if !ok || !cgroups.PathExists(path) {
|
||||
continue
|
||||
}
|
||||
if err := sys.Set(path, container.Cgroups); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Freeze toggles the container's freezer cgroup depending on the state
|
||||
// provided
|
||||
func (m *Manager) Freeze(state configs.FreezerState) error {
|
||||
d, err := getCgroupData(m.Cgroups, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dir, err := d.path("freezer")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
prevState := m.Cgroups.Freezer
|
||||
m.Cgroups.Freezer = state
|
||||
|
||||
freezer := subsystems["freezer"]
|
||||
err = freezer.Set(dir, m.Cgroups)
|
||||
if err != nil {
|
||||
m.Cgroups.Freezer = prevState
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Manager) GetPids() ([]int, error) {
|
||||
d, err := getCgroupData(m.Cgroups, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dir, err := d.path("devices")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return cgroups.ReadProcsFile(dir)
|
||||
}
|
||||
|
||||
func getCgroupData(c *configs.Cgroup, pid int) (*data, error) {
|
||||
root, err := getCgroupRoot()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cgroup := c.Name
|
||||
if c.Parent != "" {
|
||||
cgroup = filepath.Join(c.Parent, cgroup)
|
||||
}
|
||||
|
||||
return &data{
|
||||
root: root,
|
||||
cgroup: cgroup,
|
||||
c: c,
|
||||
pid: pid,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (raw *data) parent(subsystem, mountpoint, src string) (string, error) {
|
||||
initPath, err := cgroups.GetInitCgroupDir(subsystem)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
relDir, err := filepath.Rel(src, initPath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return filepath.Join(mountpoint, relDir), nil
|
||||
}
|
||||
|
||||
func (raw *data) path(subsystem string) (string, error) {
|
||||
mnt, src, err := cgroups.FindCgroupMountpointAndSource(subsystem)
|
||||
// If we didn't mount the subsystem, there is no point we make the path.
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// If the cgroup name/path is absolute do not look relative to the cgroup of the init process.
|
||||
if filepath.IsAbs(raw.cgroup) {
|
||||
return filepath.Join(raw.root, filepath.Base(mnt), raw.cgroup), nil
|
||||
}
|
||||
|
||||
parent, err := raw.parent(subsystem, mnt, src)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return filepath.Join(parent, raw.cgroup), nil
|
||||
}
|
||||
|
||||
func (raw *data) join(subsystem string) (string, error) {
|
||||
path, err := raw.path(subsystem)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if err := os.MkdirAll(path, 0755); err != nil {
|
||||
return "", err
|
||||
}
|
||||
if err := writeFile(path, CgroupProcesses, strconv.Itoa(raw.pid)); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return path, nil
|
||||
}
|
||||
|
||||
func writeFile(dir, file, data string) error {
|
||||
// Normally dir should not be empty, one case is that cgroup subsystem
|
||||
// is not mounted, we will get empty dir, and we want it fail here.
|
||||
if dir == "" {
|
||||
return fmt.Errorf("no such directory for %s.", file)
|
||||
}
|
||||
return ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700)
|
||||
}
|
||||
|
||||
func readFile(dir, file string) (string, error) {
|
||||
data, err := ioutil.ReadFile(filepath.Join(dir, file))
|
||||
return string(data), err
|
||||
}
|
||||
|
||||
func removePath(p string, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if p != "" {
|
||||
return os.RemoveAll(p)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func CheckCpushares(path string, c int64) error {
|
||||
var cpuShares int64
|
||||
|
||||
if c == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
fd, err := os.Open(filepath.Join(path, "cpu.shares"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer fd.Close()
|
||||
|
||||
_, err = fmt.Fscanf(fd, "%d", &cpuShares)
|
||||
if err != nil && err != io.EOF {
|
||||
return err
|
||||
}
|
||||
|
||||
if c > cpuShares {
|
||||
return fmt.Errorf("The maximum allowed cpu-shares is %d", cpuShares)
|
||||
} else if c < cpuShares {
|
||||
return fmt.Errorf("The minimum allowed cpu-shares is %d", cpuShares)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
230 libnetwork/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/blkio.go generated vendored Normal file
@@ -0,0 +1,230 @@
// +build linux
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups"
|
||||
"github.com/opencontainers/runc/libcontainer/configs"
|
||||
)
|
||||
|
||||
type BlkioGroup struct {
|
||||
}
|
||||
|
||||
func (s *BlkioGroup) Apply(d *data) error {
|
||||
dir, err := d.join("blkio")
|
||||
if err != nil && !cgroups.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.Set(dir, d.c); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *BlkioGroup) Set(path string, cgroup *configs.Cgroup) error {
|
||||
if cgroup.BlkioWeight != 0 {
|
||||
if err := writeFile(path, "blkio.weight", strconv.FormatInt(cgroup.BlkioWeight, 10)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if cgroup.BlkioWeightDevice != "" {
|
||||
if err := writeFile(path, "blkio.weight_device", cgroup.BlkioWeightDevice); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if cgroup.BlkioThrottleReadBpsDevice != "" {
|
||||
if err := writeFile(path, "blkio.throttle.read_bps_device", cgroup.BlkioThrottleReadBpsDevice); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if cgroup.BlkioThrottleWriteBpsDevice != "" {
|
||||
if err := writeFile(path, "blkio.throttle.write_bps_device", cgroup.BlkioThrottleWriteBpsDevice); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if cgroup.BlkioThrottleReadIOpsDevice != "" {
|
||||
if err := writeFile(path, "blkio.throttle.read_iops_device", cgroup.BlkioThrottleReadIOpsDevice); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if cgroup.BlkioThrottleWriteIOpsDevice != "" {
|
||||
if err := writeFile(path, "blkio.throttle.write_iops_device", cgroup.BlkioThrottleWriteIOpsDevice); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *BlkioGroup) Remove(d *data) error {
|
||||
return removePath(d.path("blkio"))
|
||||
}
|
||||
|
||||
/*
|
||||
examples:
|
||||
|
||||
blkio.sectors
|
||||
8:0 6792
|
||||
|
||||
blkio.io_service_bytes
|
||||
8:0 Read 1282048
|
||||
8:0 Write 2195456
|
||||
8:0 Sync 2195456
|
||||
8:0 Async 1282048
|
||||
8:0 Total 3477504
|
||||
Total 3477504
|
||||
|
||||
blkio.io_serviced
|
||||
8:0 Read 124
|
||||
8:0 Write 104
|
||||
8:0 Sync 104
|
||||
8:0 Async 124
|
||||
8:0 Total 228
|
||||
Total 228
|
||||
|
||||
blkio.io_queued
|
||||
8:0 Read 0
|
||||
8:0 Write 0
|
||||
8:0 Sync 0
|
||||
8:0 Async 0
|
||||
8:0 Total 0
|
||||
Total 0
|
||||
*/
|
||||
|
||||
func splitBlkioStatLine(r rune) bool {
|
||||
return r == ' ' || r == ':'
|
||||
}
|
||||
|
||||
func getBlkioStat(path string) ([]cgroups.BlkioStatEntry, error) {
|
||||
var blkioStats []cgroups.BlkioStatEntry
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return blkioStats, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
sc := bufio.NewScanner(f)
|
||||
for sc.Scan() {
|
||||
// format: dev type amount
|
||||
fields := strings.FieldsFunc(sc.Text(), splitBlkioStatLine)
|
||||
if len(fields) < 3 {
|
||||
if len(fields) == 2 && fields[0] == "Total" {
|
||||
// skip total line
|
||||
continue
|
||||
} else {
|
||||
return nil, fmt.Errorf("Invalid line found while parsing %s: %s", path, sc.Text())
|
||||
}
|
||||
}
|
||||
|
||||
v, err := strconv.ParseUint(fields[0], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
major := v
|
||||
|
||||
v, err = strconv.ParseUint(fields[1], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
minor := v
|
||||
|
||||
op := ""
|
||||
valueField := 2
|
||||
if len(fields) == 4 {
|
||||
op = fields[2]
|
||||
valueField = 3
|
||||
}
|
||||
v, err = strconv.ParseUint(fields[valueField], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
blkioStats = append(blkioStats, cgroups.BlkioStatEntry{Major: major, Minor: minor, Op: op, Value: v})
|
||||
}
|
||||
|
||||
return blkioStats, nil
|
||||
}
|
||||
|
||||
func (s *BlkioGroup) GetStats(path string, stats *cgroups.Stats) error {
|
||||
// Try to read CFQ stats available on all CFQ enabled kernels first
|
||||
if blkioStats, err := getBlkioStat(filepath.Join(path, "blkio.io_serviced_recursive")); err == nil && blkioStats != nil {
|
||||
return getCFQStats(path, stats)
|
||||
}
|
||||
return getStats(path, stats) // Use generic stats as fallback
|
||||
}
|
||||
|
||||
func getCFQStats(path string, stats *cgroups.Stats) error {
|
||||
var blkioStats []cgroups.BlkioStatEntry
|
||||
var err error
|
||||
|
||||
if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.sectors_recursive")); err != nil {
|
||||
return err
|
||||
}
|
||||
stats.BlkioStats.SectorsRecursive = blkioStats
|
||||
|
||||
if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_service_bytes_recursive")); err != nil {
|
||||
return err
|
||||
}
|
||||
stats.BlkioStats.IoServiceBytesRecursive = blkioStats
|
||||
|
||||
if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_serviced_recursive")); err != nil {
|
||||
return err
|
||||
}
|
||||
stats.BlkioStats.IoServicedRecursive = blkioStats
|
||||
|
||||
if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_queued_recursive")); err != nil {
|
||||
return err
|
||||
}
|
||||
stats.BlkioStats.IoQueuedRecursive = blkioStats
|
||||
|
||||
if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_service_time_recursive")); err != nil {
|
||||
return err
|
||||
}
|
||||
stats.BlkioStats.IoServiceTimeRecursive = blkioStats
|
||||
|
||||
if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_wait_time_recursive")); err != nil {
|
||||
return err
|
||||
}
|
||||
stats.BlkioStats.IoWaitTimeRecursive = blkioStats
|
||||
|
||||
if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_merged_recursive")); err != nil {
|
||||
return err
|
||||
}
|
||||
stats.BlkioStats.IoMergedRecursive = blkioStats
|
||||
|
||||
if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.time_recursive")); err != nil {
|
||||
return err
|
||||
}
|
||||
stats.BlkioStats.IoTimeRecursive = blkioStats
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getStats(path string, stats *cgroups.Stats) error {
|
||||
var blkioStats []cgroups.BlkioStatEntry
|
||||
var err error
|
||||
|
||||
if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.throttle.io_service_bytes")); err != nil {
|
||||
return err
|
||||
}
|
||||
stats.BlkioStats.IoServiceBytesRecursive = blkioStats
|
||||
|
||||
if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.throttle.io_serviced")); err != nil {
|
||||
return err
|
||||
}
|
||||
stats.BlkioStats.IoServicedRecursive = blkioStats
|
||||
|
||||
return nil
|
||||
}
|
570 libnetwork/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/blkio_test.go generated vendored Normal file
@@ -0,0 +1,570 @@
// +build linux
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups"
|
||||
)
|
||||
|
||||
const (
|
||||
sectorsRecursiveContents = `8:0 1024`
|
||||
serviceBytesRecursiveContents = `8:0 Read 100
|
||||
8:0 Write 200
|
||||
8:0 Sync 300
|
||||
8:0 Async 500
|
||||
8:0 Total 500
|
||||
Total 500`
|
||||
servicedRecursiveContents = `8:0 Read 10
|
||||
8:0 Write 40
|
||||
8:0 Sync 20
|
||||
8:0 Async 30
|
||||
8:0 Total 50
|
||||
Total 50`
|
||||
queuedRecursiveContents = `8:0 Read 1
|
||||
8:0 Write 4
|
||||
8:0 Sync 2
|
||||
8:0 Async 3
|
||||
8:0 Total 5
|
||||
Total 5`
|
||||
serviceTimeRecursiveContents = `8:0 Read 173959
|
||||
8:0 Write 0
|
||||
8:0 Sync 0
|
||||
8:0 Async 173959
|
||||
8:0 Total 17395
|
||||
Total 17395`
|
||||
waitTimeRecursiveContents = `8:0 Read 15571
|
||||
8:0 Write 0
|
||||
8:0 Sync 0
|
||||
8:0 Async 15571
|
||||
8:0 Total 15571`
|
||||
mergedRecursiveContents = `8:0 Read 5
|
||||
8:0 Write 10
|
||||
8:0 Sync 0
|
||||
8:0 Async 0
|
||||
8:0 Total 15
|
||||
Total 15`
|
||||
timeRecursiveContents = `8:0 8`
|
||||
throttleServiceBytes = `8:0 Read 11030528
|
||||
8:0 Write 23
|
||||
8:0 Sync 42
|
||||
8:0 Async 11030528
|
||||
8:0 Total 11030528
|
||||
252:0 Read 11030528
|
||||
252:0 Write 23
|
||||
252:0 Sync 42
|
||||
252:0 Async 11030528
|
||||
252:0 Total 11030528
|
||||
Total 22061056`
|
||||
throttleServiced = `8:0 Read 164
|
||||
8:0 Write 23
|
||||
8:0 Sync 42
|
||||
8:0 Async 164
|
||||
8:0 Total 164
|
||||
252:0 Read 164
|
||||
252:0 Write 23
|
||||
252:0 Sync 42
|
||||
252:0 Async 164
|
||||
252:0 Total 164
|
||||
Total 328`
|
||||
throttleBefore = `8:0 1024`
|
||||
throttleAfter = `8:0 2048`
|
||||
)
|
||||
|
||||
func appendBlkioStatEntry(blkioStatEntries *[]cgroups.BlkioStatEntry, major, minor, value uint64, op string) {
|
||||
*blkioStatEntries = append(*blkioStatEntries, cgroups.BlkioStatEntry{Major: major, Minor: minor, Value: value, Op: op})
|
||||
}
|
||||
|
||||
func TestBlkioSetWeight(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("blkio", t)
|
||||
defer helper.cleanup()
|
||||
|
||||
const (
|
||||
weightBefore = 100
|
||||
weightAfter = 200
|
||||
)
|
||||
|
||||
helper.writeFileContents(map[string]string{
|
||||
"blkio.weight": strconv.Itoa(weightBefore),
|
||||
})
|
||||
|
||||
helper.CgroupData.c.BlkioWeight = weightAfter
|
||||
blkio := &BlkioGroup{}
|
||||
if err := blkio.Set(helper.CgroupPath, helper.CgroupData.c); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
value, err := getCgroupParamUint(helper.CgroupPath, "blkio.weight")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to parse blkio.weight - %s", err)
|
||||
}
|
||||
|
||||
if value != weightAfter {
|
||||
t.Fatal("Got the wrong value, set blkio.weight failed.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlkioSetWeightDevice(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("blkio", t)
|
||||
defer helper.cleanup()
|
||||
|
||||
const (
|
||||
weightDeviceBefore = "8:0 400"
|
||||
weightDeviceAfter = "8:0 500"
|
||||
)
|
||||
|
||||
helper.writeFileContents(map[string]string{
|
||||
"blkio.weight_device": weightDeviceBefore,
|
||||
})
|
||||
|
||||
helper.CgroupData.c.BlkioWeightDevice = weightDeviceAfter
|
||||
blkio := &BlkioGroup{}
|
||||
if err := blkio.Set(helper.CgroupPath, helper.CgroupData.c); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
value, err := getCgroupParamString(helper.CgroupPath, "blkio.weight_device")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to parse blkio.weight_device - %s", err)
|
||||
}
|
||||
|
||||
if value != weightDeviceAfter {
|
||||
t.Fatal("Got the wrong value, set blkio.weight_device failed.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlkioStats(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("blkio", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
|
||||
"blkio.io_serviced_recursive": servicedRecursiveContents,
|
||||
"blkio.io_queued_recursive": queuedRecursiveContents,
|
||||
"blkio.io_service_time_recursive": serviceTimeRecursiveContents,
|
||||
"blkio.io_wait_time_recursive": waitTimeRecursiveContents,
|
||||
"blkio.io_merged_recursive": mergedRecursiveContents,
|
||||
"blkio.time_recursive": timeRecursiveContents,
|
||||
"blkio.sectors_recursive": sectorsRecursiveContents,
|
||||
})
|
||||
|
||||
blkio := &BlkioGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := blkio.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Verify expected stats.
|
||||
expectedStats := cgroups.BlkioStats{}
|
||||
appendBlkioStatEntry(&expectedStats.SectorsRecursive, 8, 0, 1024, "")
|
||||
|
||||
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 100, "Read")
|
||||
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 200, "Write")
|
||||
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 300, "Sync")
|
||||
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 500, "Async")
|
||||
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 500, "Total")
|
||||
|
||||
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 10, "Read")
|
||||
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 40, "Write")
|
||||
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 20, "Sync")
|
||||
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 30, "Async")
|
||||
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 50, "Total")
|
||||
|
||||
appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 1, "Read")
|
||||
appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 4, "Write")
|
||||
appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 2, "Sync")
|
||||
appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 3, "Async")
|
||||
appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 5, "Total")
|
||||
|
||||
appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 173959, "Read")
|
||||
appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 0, "Write")
|
||||
appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 0, "Sync")
|
||||
appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 173959, "Async")
|
||||
appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 17395, "Total")
|
||||
|
||||
appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 15571, "Read")
|
||||
appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 0, "Write")
|
||||
appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 0, "Sync")
|
||||
appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 15571, "Async")
|
||||
appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 15571, "Total")
|
||||
|
||||
appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 5, "Read")
|
||||
appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 10, "Write")
|
||||
appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 0, "Sync")
|
||||
appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 0, "Async")
|
||||
appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 15, "Total")
|
||||
|
||||
appendBlkioStatEntry(&expectedStats.IoTimeRecursive, 8, 0, 8, "")
|
||||
|
||||
expectBlkioStatsEquals(t, expectedStats, actualStats.BlkioStats)
|
||||
}
|
||||
|
||||
func TestBlkioStatsNoSectorsFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("blkio", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
|
||||
"blkio.io_serviced_recursive": servicedRecursiveContents,
|
||||
"blkio.io_queued_recursive": queuedRecursiveContents,
|
||||
"blkio.io_service_time_recursive": serviceTimeRecursiveContents,
|
||||
"blkio.io_wait_time_recursive": waitTimeRecursiveContents,
|
||||
"blkio.io_merged_recursive": mergedRecursiveContents,
|
||||
"blkio.time_recursive": timeRecursiveContents,
|
||||
})
|
||||
|
||||
blkio := &BlkioGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := blkio.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed unexpectedly: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlkioStatsNoServiceBytesFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("blkio", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"blkio.io_serviced_recursive": servicedRecursiveContents,
|
||||
"blkio.io_queued_recursive": queuedRecursiveContents,
|
||||
"blkio.sectors_recursive": sectorsRecursiveContents,
|
||||
"blkio.io_service_time_recursive": serviceTimeRecursiveContents,
|
||||
"blkio.io_wait_time_recursive": waitTimeRecursiveContents,
|
||||
"blkio.io_merged_recursive": mergedRecursiveContents,
|
||||
"blkio.time_recursive": timeRecursiveContents,
|
||||
})
|
||||
|
||||
blkio := &BlkioGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := blkio.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed unexpectedly: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlkioStatsNoServicedFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("blkio", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
|
||||
"blkio.io_queued_recursive": queuedRecursiveContents,
|
||||
"blkio.sectors_recursive": sectorsRecursiveContents,
|
||||
"blkio.io_service_time_recursive": serviceTimeRecursiveContents,
|
||||
"blkio.io_wait_time_recursive": waitTimeRecursiveContents,
|
||||
"blkio.io_merged_recursive": mergedRecursiveContents,
|
||||
"blkio.time_recursive": timeRecursiveContents,
|
||||
})
|
||||
|
||||
blkio := &BlkioGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := blkio.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed unexpectedly: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlkioStatsNoQueuedFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("blkio", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
|
||||
"blkio.io_serviced_recursive": servicedRecursiveContents,
|
||||
"blkio.sectors_recursive": sectorsRecursiveContents,
|
||||
"blkio.io_service_time_recursive": serviceTimeRecursiveContents,
|
||||
"blkio.io_wait_time_recursive": waitTimeRecursiveContents,
|
||||
"blkio.io_merged_recursive": mergedRecursiveContents,
|
||||
"blkio.time_recursive": timeRecursiveContents,
|
||||
})
|
||||
|
||||
blkio := &BlkioGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := blkio.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed unexpectedly: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlkioStatsNoServiceTimeFile(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping test in short mode.")
|
||||
}
|
||||
helper := NewCgroupTestUtil("blkio", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
|
||||
"blkio.io_serviced_recursive": servicedRecursiveContents,
|
||||
"blkio.io_queued_recursive": queuedRecursiveContents,
|
||||
"blkio.io_wait_time_recursive": waitTimeRecursiveContents,
|
||||
"blkio.io_merged_recursive": mergedRecursiveContents,
|
||||
"blkio.time_recursive": timeRecursiveContents,
|
||||
"blkio.sectors_recursive": sectorsRecursiveContents,
|
||||
})
|
||||
|
||||
blkio := &BlkioGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := blkio.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed unexpectedly: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlkioStatsNoWaitTimeFile(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping test in short mode.")
|
||||
}
|
||||
helper := NewCgroupTestUtil("blkio", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
|
||||
"blkio.io_serviced_recursive": servicedRecursiveContents,
|
||||
"blkio.io_queued_recursive": queuedRecursiveContents,
|
||||
"blkio.io_service_time_recursive": serviceTimeRecursiveContents,
|
||||
"blkio.io_merged_recursive": mergedRecursiveContents,
|
||||
"blkio.time_recursive": timeRecursiveContents,
|
||||
"blkio.sectors_recursive": sectorsRecursiveContents,
|
||||
})
|
||||
|
||||
blkio := &BlkioGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := blkio.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed unexpectedly: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlkioStatsNoMergedFile(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping test in short mode.")
|
||||
}
|
||||
helper := NewCgroupTestUtil("blkio", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
|
||||
"blkio.io_serviced_recursive": servicedRecursiveContents,
|
||||
"blkio.io_queued_recursive": queuedRecursiveContents,
|
||||
"blkio.io_service_time_recursive": serviceTimeRecursiveContents,
|
||||
"blkio.io_wait_time_recursive": waitTimeRecursiveContents,
|
||||
"blkio.time_recursive": timeRecursiveContents,
|
||||
"blkio.sectors_recursive": sectorsRecursiveContents,
|
||||
})
|
||||
|
||||
blkio := &BlkioGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := blkio.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed unexpectedly: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlkioStatsNoTimeFile(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping test in short mode.")
|
||||
}
|
||||
helper := NewCgroupTestUtil("blkio", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
|
||||
"blkio.io_serviced_recursive": servicedRecursiveContents,
|
||||
"blkio.io_queued_recursive": queuedRecursiveContents,
|
||||
"blkio.io_service_time_recursive": serviceTimeRecursiveContents,
|
||||
"blkio.io_wait_time_recursive": waitTimeRecursiveContents,
|
||||
"blkio.io_merged_recursive": mergedRecursiveContents,
|
||||
"blkio.sectors_recursive": sectorsRecursiveContents,
|
||||
})
|
||||
|
||||
blkio := &BlkioGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := blkio.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed unexpectedly: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlkioStatsUnexpectedNumberOfFields(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("blkio", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"blkio.io_service_bytes_recursive": "8:0 Read 100 100",
|
||||
"blkio.io_serviced_recursive": servicedRecursiveContents,
|
||||
"blkio.io_queued_recursive": queuedRecursiveContents,
|
||||
"blkio.sectors_recursive": sectorsRecursiveContents,
|
||||
"blkio.io_service_time_recursive": serviceTimeRecursiveContents,
|
||||
"blkio.io_wait_time_recursive": waitTimeRecursiveContents,
|
||||
"blkio.io_merged_recursive": mergedRecursiveContents,
|
||||
"blkio.time_recursive": timeRecursiveContents,
|
||||
})
|
||||
|
||||
blkio := &BlkioGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := blkio.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err == nil {
|
||||
t.Fatal("Expected to fail, but did not")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlkioStatsUnexpectedFieldType(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("blkio", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"blkio.io_service_bytes_recursive": "8:0 Read Write",
|
||||
"blkio.io_serviced_recursive": servicedRecursiveContents,
|
||||
"blkio.io_queued_recursive": queuedRecursiveContents,
|
||||
"blkio.sectors_recursive": sectorsRecursiveContents,
|
||||
"blkio.io_service_time_recursive": serviceTimeRecursiveContents,
|
||||
"blkio.io_wait_time_recursive": waitTimeRecursiveContents,
|
||||
"blkio.io_merged_recursive": mergedRecursiveContents,
|
||||
"blkio.time_recursive": timeRecursiveContents,
|
||||
})
|
||||
|
||||
blkio := &BlkioGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := blkio.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err == nil {
|
||||
t.Fatal("Expected to fail, but did not")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNonCFQBlkioStats(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("blkio", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"blkio.io_service_bytes_recursive": "",
|
||||
"blkio.io_serviced_recursive": "",
|
||||
"blkio.io_queued_recursive": "",
|
||||
"blkio.sectors_recursive": "",
|
||||
"blkio.io_service_time_recursive": "",
|
||||
"blkio.io_wait_time_recursive": "",
|
||||
"blkio.io_merged_recursive": "",
|
||||
"blkio.time_recursive": "",
|
||||
"blkio.throttle.io_service_bytes": throttleServiceBytes,
|
||||
"blkio.throttle.io_serviced": throttleServiced,
|
||||
})
|
||||
|
||||
blkio := &BlkioGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := blkio.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Verify expected stats.
|
||||
expectedStats := cgroups.BlkioStats{}
|
||||
|
||||
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 11030528, "Read")
|
||||
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 23, "Write")
|
||||
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 42, "Sync")
|
||||
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 11030528, "Async")
|
||||
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 11030528, "Total")
|
||||
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 11030528, "Read")
|
||||
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 23, "Write")
|
||||
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 42, "Sync")
|
||||
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 11030528, "Async")
|
||||
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 11030528, "Total")
|
||||
|
||||
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 164, "Read")
|
||||
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 23, "Write")
|
||||
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 42, "Sync")
|
||||
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 164, "Async")
|
||||
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 164, "Total")
|
||||
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 164, "Read")
|
||||
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 23, "Write")
|
||||
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 42, "Sync")
|
||||
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 164, "Async")
|
||||
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 164, "Total")
|
||||
|
||||
expectBlkioStatsEquals(t, expectedStats, actualStats.BlkioStats)
|
||||
}
|
||||
|
||||
func TestBlkioSetThrottleReadBpsDevice(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("blkio", t)
|
||||
defer helper.cleanup()
|
||||
|
||||
helper.writeFileContents(map[string]string{
|
||||
"blkio.throttle.read_bps_device": throttleBefore,
|
||||
})
|
||||
|
||||
helper.CgroupData.c.BlkioThrottleReadBpsDevice = throttleAfter
|
||||
blkio := &BlkioGroup{}
|
||||
if err := blkio.Set(helper.CgroupPath, helper.CgroupData.c); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
value, err := getCgroupParamString(helper.CgroupPath, "blkio.throttle.read_bps_device")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to parse blkio.throttle.read_bps_device - %s", err)
|
||||
}
|
||||
|
||||
if value != throttleAfter {
|
||||
t.Fatal("Got the wrong value, set blkio.throttle.read_bps_device failed.")
|
||||
}
|
||||
}
|
||||
func TestBlkioSetThrottleWriteBpsDevice(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("blkio", t)
|
||||
defer helper.cleanup()
|
||||
|
||||
helper.writeFileContents(map[string]string{
|
||||
"blkio.throttle.write_bps_device": throttleBefore,
|
||||
})
|
||||
|
||||
helper.CgroupData.c.BlkioThrottleWriteBpsDevice = throttleAfter
|
||||
blkio := &BlkioGroup{}
|
||||
if err := blkio.Set(helper.CgroupPath, helper.CgroupData.c); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
value, err := getCgroupParamString(helper.CgroupPath, "blkio.throttle.write_bps_device")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to parse blkio.throttle.write_bps_device - %s", err)
|
||||
}
|
||||
|
||||
if value != throttleAfter {
|
||||
t.Fatal("Got the wrong value, set blkio.throttle.write_bps_device failed.")
|
||||
}
|
||||
}
|
||||
func TestBlkioSetThrottleReadIOpsDevice(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("blkio", t)
|
||||
defer helper.cleanup()
|
||||
|
||||
helper.writeFileContents(map[string]string{
|
||||
"blkio.throttle.read_iops_device": throttleBefore,
|
||||
})
|
||||
|
||||
helper.CgroupData.c.BlkioThrottleReadIOpsDevice = throttleAfter
|
||||
blkio := &BlkioGroup{}
|
||||
if err := blkio.Set(helper.CgroupPath, helper.CgroupData.c); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
value, err := getCgroupParamString(helper.CgroupPath, "blkio.throttle.read_iops_device")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to parse blkio.throttle.read_iops_device - %s", err)
|
||||
}
|
||||
|
||||
if value != throttleAfter {
|
||||
t.Fatal("Got the wrong value, set blkio.throttle.read_iops_device failed.")
|
||||
}
|
||||
}
|
||||
func TestBlkioSetThrottleWriteIOpsDevice(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("blkio", t)
|
||||
defer helper.cleanup()
|
||||
|
||||
helper.writeFileContents(map[string]string{
|
||||
"blkio.throttle.write_iops_device": throttleBefore,
|
||||
})
|
||||
|
||||
helper.CgroupData.c.BlkioThrottleWriteIOpsDevice = throttleAfter
|
||||
blkio := &BlkioGroup{}
|
||||
if err := blkio.Set(helper.CgroupPath, helper.CgroupData.c); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
value, err := getCgroupParamString(helper.CgroupPath, "blkio.throttle.write_iops_device")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to parse blkio.throttle.write_iops_device - %s", err)
|
||||
}
|
||||
|
||||
if value != throttleAfter {
|
||||
t.Fatal("Got the wrong value, set blkio.throttle.write_iops_device failed.")
|
||||
}
|
||||
}
|
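Note on the four throttle tests above: each one seeds the relevant blkio.throttle.* control file, points the matching BlkioThrottle*Device field at a new rule, calls BlkioGroup.Set, and reads the file back. The throttleBefore/throttleAfter fixtures are defined earlier in this test file and are not part of this hunk; the sketch below is only an illustration of the kernel's `major:minor value` rule shape under that assumption, not the vendored fixture values.

package main

import "fmt"

func main() {
	// Illustration only: a blkio throttle rule that would cap device 8:0
	// (typically sda) at 1 MiB/s; the test fixtures use the same shape.
	major, minor := 8, 0
	bytesPerSecond := 1048576

	rule := fmt.Sprintf("%d:%d %d", major, minor, bytesPerSecond)
	fmt.Println(rule) // "8:0 1048576"
}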
95  libnetwork/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go  generated vendored Normal file
@@ -0,0 +1,95 @@
// +build linux

package fs

import (
	"bufio"
	"os"
	"path/filepath"
	"strconv"

	"github.com/opencontainers/runc/libcontainer/cgroups"
	"github.com/opencontainers/runc/libcontainer/configs"
)

type CpuGroup struct {
}

func (s *CpuGroup) Apply(d *data) error {
	// We always want to join the cpu group, to allow fair cpu scheduling
	// on a container basis
	dir, err := d.join("cpu")
	if err != nil && !cgroups.IsNotFound(err) {
		return err
	}

	if err := s.Set(dir, d.c); err != nil {
		return err
	}

	return nil
}

func (s *CpuGroup) Set(path string, cgroup *configs.Cgroup) error {
	if cgroup.CpuShares != 0 {
		if err := writeFile(path, "cpu.shares", strconv.FormatInt(cgroup.CpuShares, 10)); err != nil {
			return err
		}
	}
	if cgroup.CpuPeriod != 0 {
		if err := writeFile(path, "cpu.cfs_period_us", strconv.FormatInt(cgroup.CpuPeriod, 10)); err != nil {
			return err
		}
	}
	if cgroup.CpuQuota != 0 {
		if err := writeFile(path, "cpu.cfs_quota_us", strconv.FormatInt(cgroup.CpuQuota, 10)); err != nil {
			return err
		}
	}
	if cgroup.CpuRtPeriod != 0 {
		if err := writeFile(path, "cpu.rt_period_us", strconv.FormatInt(cgroup.CpuRtPeriod, 10)); err != nil {
			return err
		}
	}
	if cgroup.CpuRtRuntime != 0 {
		if err := writeFile(path, "cpu.rt_runtime_us", strconv.FormatInt(cgroup.CpuRtRuntime, 10)); err != nil {
			return err
		}
	}

	return nil
}

func (s *CpuGroup) Remove(d *data) error {
	return removePath(d.path("cpu"))
}

func (s *CpuGroup) GetStats(path string, stats *cgroups.Stats) error {
	f, err := os.Open(filepath.Join(path, "cpu.stat"))
	if err != nil {
		if os.IsNotExist(err) {
			return nil
		}
		return err
	}
	defer f.Close()

	sc := bufio.NewScanner(f)
	for sc.Scan() {
		t, v, err := getCgroupParamKeyValue(sc.Text())
		if err != nil {
			return err
		}
		switch t {
		case "nr_periods":
			stats.CpuStats.ThrottlingData.Periods = v

		case "nr_throttled":
			stats.CpuStats.ThrottlingData.ThrottledPeriods = v

		case "throttled_time":
			stats.CpuStats.ThrottlingData.ThrottledTime = v
		}
	}
	return nil
}
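As a quick orientation to the vendored file above: CpuGroup.Set writes each non-zero field of configs.Cgroup into the corresponding cpu.* control file. The following is only a minimal usage sketch; the import path assumes the runc packages are resolvable as vendored in this commit, the cgroup path is an arbitrary example, and the directory is assumed to already exist and be writable (typically requiring root).

package main

import (
	"log"

	"github.com/opencontainers/runc/libcontainer/cgroups/fs"
	"github.com/opencontainers/runc/libcontainer/configs"
)

func main() {
	// Example path only; CpuGroup.Set does not create the directory.
	const path = "/sys/fs/cgroup/cpu/mycontainer"

	cg := &configs.Cgroup{
		CpuShares: 512,    // -> cpu.shares
		CpuPeriod: 100000, // -> cpu.cfs_period_us
		CpuQuota:  50000,  // -> cpu.cfs_quota_us (half a CPU per period)
	}
	if err := (&fs.CpuGroup{}).Set(path, cg); err != nil {
		log.Fatal(err)
	}
}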
163  libnetwork/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu_test.go  generated vendored Normal file
@@ -0,0 +1,163 @@
|
|||
// +build linux
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups"
|
||||
)
|
||||
|
||||
func TestCpuSetShares(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("cpu", t)
|
||||
defer helper.cleanup()
|
||||
|
||||
const (
|
||||
sharesBefore = 1024
|
||||
sharesAfter = 512
|
||||
)
|
||||
|
||||
helper.writeFileContents(map[string]string{
|
||||
"cpu.shares": strconv.Itoa(sharesBefore),
|
||||
})
|
||||
|
||||
helper.CgroupData.c.CpuShares = sharesAfter
|
||||
cpu := &CpuGroup{}
|
||||
if err := cpu.Set(helper.CgroupPath, helper.CgroupData.c); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
value, err := getCgroupParamUint(helper.CgroupPath, "cpu.shares")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to parse cpu.shares - %s", err)
|
||||
}
|
||||
|
||||
if value != sharesAfter {
|
||||
t.Fatal("Got the wrong value, set cpu.shares failed.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCpuSetBandWidth(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("cpu", t)
|
||||
defer helper.cleanup()
|
||||
|
||||
const (
|
||||
quotaBefore = 8000
|
||||
quotaAfter = 5000
|
||||
periodBefore = 10000
|
||||
periodAfter = 7000
|
||||
rtRuntimeBefore = 8000
|
||||
rtRuntimeAfter = 5000
|
||||
rtPeriodBefore = 10000
|
||||
rtPeriodAfter = 7000
|
||||
)
|
||||
|
||||
helper.writeFileContents(map[string]string{
|
||||
"cpu.cfs_quota_us": strconv.Itoa(quotaBefore),
|
||||
"cpu.cfs_period_us": strconv.Itoa(periodBefore),
|
||||
"cpu.rt_runtime_us": strconv.Itoa(rtRuntimeBefore),
|
||||
"cpu.rt_period_us": strconv.Itoa(rtPeriodBefore),
|
||||
})
|
||||
|
||||
helper.CgroupData.c.CpuQuota = quotaAfter
|
||||
helper.CgroupData.c.CpuPeriod = periodAfter
|
||||
helper.CgroupData.c.CpuRtRuntime = rtRuntimeAfter
|
||||
helper.CgroupData.c.CpuRtPeriod = rtPeriodAfter
|
||||
cpu := &CpuGroup{}
|
||||
if err := cpu.Set(helper.CgroupPath, helper.CgroupData.c); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
quota, err := getCgroupParamUint(helper.CgroupPath, "cpu.cfs_quota_us")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to parse cpu.cfs_quota_us - %s", err)
|
||||
}
|
||||
if quota != quotaAfter {
|
||||
t.Fatal("Got the wrong value, set cpu.cfs_quota_us failed.")
|
||||
}
|
||||
|
||||
period, err := getCgroupParamUint(helper.CgroupPath, "cpu.cfs_period_us")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to parse cpu.cfs_period_us - %s", err)
|
||||
}
|
||||
if period != periodAfter {
|
||||
t.Fatal("Got the wrong value, set cpu.cfs_period_us failed.")
|
||||
}
|
||||
rtRuntime, err := getCgroupParamUint(helper.CgroupPath, "cpu.rt_runtime_us")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to parse cpu.rt_runtime_us - %s", err)
|
||||
}
|
||||
if rtRuntime != rtRuntimeAfter {
|
||||
t.Fatal("Got the wrong value, set cpu.rt_runtime_us failed.")
|
||||
}
|
||||
rtPeriod, err := getCgroupParamUint(helper.CgroupPath, "cpu.rt_period_us")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to parse cpu.rt_period_us - %s", err)
|
||||
}
|
||||
if rtPeriod != rtPeriodAfter {
|
||||
t.Fatal("Got the wrong value, set cpu.rt_period_us failed.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCpuStats(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("cpu", t)
|
||||
defer helper.cleanup()
|
||||
|
||||
const (
|
||||
kNrPeriods = 2000
|
||||
kNrThrottled = 200
|
||||
kThrottledTime = uint64(18446744073709551615)
|
||||
)
|
||||
|
||||
cpuStatContent := fmt.Sprintf("nr_periods %d\n nr_throttled %d\n throttled_time %d\n",
|
||||
kNrPeriods, kNrThrottled, kThrottledTime)
|
||||
helper.writeFileContents(map[string]string{
|
||||
"cpu.stat": cpuStatContent,
|
||||
})
|
||||
|
||||
cpu := &CpuGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := cpu.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
expectedStats := cgroups.ThrottlingData{
|
||||
Periods: kNrPeriods,
|
||||
ThrottledPeriods: kNrThrottled,
|
||||
ThrottledTime: kThrottledTime}
|
||||
|
||||
expectThrottlingDataEquals(t, expectedStats, actualStats.CpuStats.ThrottlingData)
|
||||
}
|
||||
|
||||
func TestNoCpuStatFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("cpu", t)
|
||||
defer helper.cleanup()
|
||||
|
||||
cpu := &CpuGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := cpu.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err != nil {
|
||||
t.Fatal("Expected not to fail, but did")
|
||||
}
|
||||
}
|
||||
|
||||
func TestInvalidCpuStat(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("cpu", t)
|
||||
defer helper.cleanup()
|
||||
cpuStatContent := `nr_periods 2000
|
||||
nr_throttled 200
|
||||
throttled_time fortytwo`
|
||||
helper.writeFileContents(map[string]string{
|
||||
"cpu.stat": cpuStatContent,
|
||||
})
|
||||
|
||||
cpu := &CpuGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := cpu.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err == nil {
|
||||
t.Fatal("Expected failed stat parsing.")
|
||||
}
|
||||
}
|
117  libnetwork/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuacct.go  generated vendored Normal file
@@ -0,0 +1,117 @@
|
|||
// +build linux
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups"
|
||||
"github.com/opencontainers/runc/libcontainer/configs"
|
||||
"github.com/opencontainers/runc/libcontainer/system"
|
||||
)
|
||||
|
||||
const (
|
||||
cgroupCpuacctStat = "cpuacct.stat"
|
||||
nanosecondsInSecond = 1000000000
|
||||
)
|
||||
|
||||
var clockTicks = uint64(system.GetClockTicks())
|
||||
|
||||
type CpuacctGroup struct {
|
||||
}
|
||||
|
||||
func (s *CpuacctGroup) Apply(d *data) error {
|
||||
// we just want to join this group even though we don't set anything
|
||||
if _, err := d.join("cpuacct"); err != nil && !cgroups.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *CpuacctGroup) Set(path string, cgroup *configs.Cgroup) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *CpuacctGroup) Remove(d *data) error {
|
||||
return removePath(d.path("cpuacct"))
|
||||
}
|
||||
|
||||
func (s *CpuacctGroup) GetStats(path string, stats *cgroups.Stats) error {
|
||||
userModeUsage, kernelModeUsage, err := getCpuUsageBreakdown(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
totalUsage, err := getCgroupParamUint(path, "cpuacct.usage")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
percpuUsage, err := getPercpuUsage(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
stats.CpuStats.CpuUsage.TotalUsage = totalUsage
|
||||
stats.CpuStats.CpuUsage.PercpuUsage = percpuUsage
|
||||
stats.CpuStats.CpuUsage.UsageInUsermode = userModeUsage
|
||||
stats.CpuStats.CpuUsage.UsageInKernelmode = kernelModeUsage
|
||||
return nil
|
||||
}
|
||||
|
||||
// Returns user and kernel usage breakdown in nanoseconds.
|
||||
func getCpuUsageBreakdown(path string) (uint64, uint64, error) {
|
||||
userModeUsage := uint64(0)
|
||||
kernelModeUsage := uint64(0)
|
||||
const (
|
||||
userField = "user"
|
||||
systemField = "system"
|
||||
)
|
||||
|
||||
// Expected format:
|
||||
// user <usage in ticks>
|
||||
// system <usage in ticks>
|
||||
data, err := ioutil.ReadFile(filepath.Join(path, cgroupCpuacctStat))
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
fields := strings.Fields(string(data))
|
||||
if len(fields) != 4 {
|
||||
return 0, 0, fmt.Errorf("failure - %s is expected to have 4 fields", filepath.Join(path, cgroupCpuacctStat))
|
||||
}
|
||||
if fields[0] != userField {
|
||||
return 0, 0, fmt.Errorf("unexpected field %q in %q, expected %q", fields[0], cgroupCpuacctStat, userField)
|
||||
}
|
||||
if fields[2] != systemField {
|
||||
return 0, 0, fmt.Errorf("unexpected field %q in %q, expected %q", fields[2], cgroupCpuacctStat, systemField)
|
||||
}
|
||||
if userModeUsage, err = strconv.ParseUint(fields[1], 10, 64); err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
if kernelModeUsage, err = strconv.ParseUint(fields[3], 10, 64); err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
|
||||
return (userModeUsage * nanosecondsInSecond) / clockTicks, (kernelModeUsage * nanosecondsInSecond) / clockTicks, nil
|
||||
}
|
||||
|
||||
func getPercpuUsage(path string) ([]uint64, error) {
|
||||
percpuUsage := []uint64{}
|
||||
data, err := ioutil.ReadFile(filepath.Join(path, "cpuacct.usage_percpu"))
|
||||
if err != nil {
|
||||
return percpuUsage, err
|
||||
}
|
||||
for _, value := range strings.Fields(string(data)) {
|
||||
value, err := strconv.ParseUint(value, 10, 64)
|
||||
if err != nil {
|
||||
return percpuUsage, fmt.Errorf("Unable to convert param value to uint64: %s", err)
|
||||
}
|
||||
percpuUsage = append(percpuUsage, value)
|
||||
}
|
||||
return percpuUsage, nil
|
||||
}
|
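The one piece of arithmetic worth calling out in cpuacct.go above is the tick-to-nanosecond conversion applied to the user/system counters from cpuacct.stat. A standalone sketch of that formula follows, assuming the common USER_HZ value of 100 that system.GetClockTicks() reports on most Linux systems; the tick count is an invented example value.

package main

import "fmt"

func main() {
	const nanosecondsInSecond = 1000000000
	clockTicks := uint64(100) // assumed USER_HZ; the vendored code reads it via system.GetClockTicks()

	userTicks := uint64(250) // example value from the "user" line of cpuacct.stat
	userNs := (userTicks * nanosecondsInSecond) / clockTicks

	fmt.Println(userNs) // 2500000000 ns, i.e. 2.5s of user-mode CPU time
}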
135  libnetwork/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go  generated vendored Normal file
@@ -0,0 +1,135 @@
|
|||
// +build linux
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups"
|
||||
"github.com/opencontainers/runc/libcontainer/configs"
|
||||
)
|
||||
|
||||
type CpusetGroup struct {
|
||||
}
|
||||
|
||||
func (s *CpusetGroup) Apply(d *data) error {
|
||||
dir, err := d.path("cpuset")
|
||||
if err != nil && !cgroups.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
return s.ApplyDir(dir, d.c, d.pid)
|
||||
}
|
||||
|
||||
func (s *CpusetGroup) Set(path string, cgroup *configs.Cgroup) error {
|
||||
if cgroup.CpusetCpus != "" {
|
||||
if err := writeFile(path, "cpuset.cpus", cgroup.CpusetCpus); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if cgroup.CpusetMems != "" {
|
||||
if err := writeFile(path, "cpuset.mems", cgroup.CpusetMems); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *CpusetGroup) Remove(d *data) error {
|
||||
return removePath(d.path("cpuset"))
|
||||
}
|
||||
|
||||
func (s *CpusetGroup) GetStats(path string, stats *cgroups.Stats) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *CpusetGroup) ApplyDir(dir string, cgroup *configs.Cgroup, pid int) error {
|
||||
// This might happen if we have no cpuset cgroup mounted.
|
||||
// Just do nothing and don't fail.
|
||||
if dir == "" {
|
||||
return nil
|
||||
}
|
||||
root, err := getCgroupRoot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.ensureParent(dir, root); err != nil {
|
||||
return err
|
||||
}
|
||||
// because we are not using d.join we need to place the pid into the procs file
|
||||
// unlike the other subsystems
|
||||
if err := writeFile(dir, "cgroup.procs", strconv.Itoa(pid)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// the default values inherit from parent cgroup are already set in
|
||||
// s.ensureParent, cover these if we have our own
|
||||
if err := s.Set(dir, cgroup); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *CpusetGroup) getSubsystemSettings(parent string) (cpus []byte, mems []byte, err error) {
|
||||
if cpus, err = ioutil.ReadFile(filepath.Join(parent, "cpuset.cpus")); err != nil {
|
||||
return
|
||||
}
|
||||
if mems, err = ioutil.ReadFile(filepath.Join(parent, "cpuset.mems")); err != nil {
|
||||
return
|
||||
}
|
||||
return cpus, mems, nil
|
||||
}
|
||||
|
||||
// ensureParent makes sure that the parent directory of current is created
|
||||
// and populated with the proper cpus and mems files copied from
|
||||
// its parent.
|
||||
func (s *CpusetGroup) ensureParent(current, root string) error {
|
||||
parent := filepath.Dir(current)
|
||||
if filepath.Clean(parent) == root {
|
||||
return nil
|
||||
}
|
||||
if err := s.ensureParent(parent, root); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := os.MkdirAll(current, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
return s.copyIfNeeded(current, parent)
|
||||
}
|
||||
|
||||
// copyIfNeeded copies the cpuset.cpus and cpuset.mems from the parent
|
||||
// directory to the current directory if the file's contents are 0
|
||||
func (s *CpusetGroup) copyIfNeeded(current, parent string) error {
|
||||
var (
|
||||
err error
|
||||
currentCpus, currentMems []byte
|
||||
parentCpus, parentMems []byte
|
||||
)
|
||||
|
||||
if currentCpus, currentMems, err = s.getSubsystemSettings(current); err != nil {
|
||||
return err
|
||||
}
|
||||
if parentCpus, parentMems, err = s.getSubsystemSettings(parent); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if s.isEmpty(currentCpus) {
|
||||
if err := writeFile(current, "cpuset.cpus", string(parentCpus)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if s.isEmpty(currentMems) {
|
||||
if err := writeFile(current, "cpuset.mems", string(parentMems)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *CpusetGroup) isEmpty(b []byte) bool {
|
||||
return len(bytes.Trim(b, "\n")) == 0
|
||||
}
|
65  libnetwork/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset_test.go  generated vendored Normal file
@@ -0,0 +1,65 @@
|
|||
// +build linux
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestCpusetSetCpus(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("cpuset", t)
|
||||
defer helper.cleanup()
|
||||
|
||||
const (
|
||||
cpusBefore = "0"
|
||||
cpusAfter = "1-3"
|
||||
)
|
||||
|
||||
helper.writeFileContents(map[string]string{
|
||||
"cpuset.cpus": cpusBefore,
|
||||
})
|
||||
|
||||
helper.CgroupData.c.CpusetCpus = cpusAfter
|
||||
cpuset := &CpusetGroup{}
|
||||
if err := cpuset.Set(helper.CgroupPath, helper.CgroupData.c); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
value, err := getCgroupParamString(helper.CgroupPath, "cpuset.cpus")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to parse cpuset.cpus - %s", err)
|
||||
}
|
||||
|
||||
if value != cpusAfter {
|
||||
t.Fatal("Got the wrong value, set cpuset.cpus failed.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCpusetSetMems(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("cpuset", t)
|
||||
defer helper.cleanup()
|
||||
|
||||
const (
|
||||
memsBefore = "0"
|
||||
memsAfter = "1"
|
||||
)
|
||||
|
||||
helper.writeFileContents(map[string]string{
|
||||
"cpuset.mems": memsBefore,
|
||||
})
|
||||
|
||||
helper.CgroupData.c.CpusetMems = memsAfter
|
||||
cpuset := &CpusetGroup{}
|
||||
if err := cpuset.Set(helper.CgroupPath, helper.CgroupData.c); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
value, err := getCgroupParamString(helper.CgroupPath, "cpuset.mems")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to parse cpuset.mems - %s", err)
|
||||
}
|
||||
|
||||
if value != memsAfter {
|
||||
t.Fatal("Got the wrong value, set cpuset.mems failed.")
|
||||
}
|
||||
}
|
61  libnetwork/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices.go  generated vendored Normal file
@@ -0,0 +1,61 @@
// +build linux

package fs

import (
	"github.com/opencontainers/runc/libcontainer/cgroups"
	"github.com/opencontainers/runc/libcontainer/configs"
)

type DevicesGroup struct {
}

func (s *DevicesGroup) Apply(d *data) error {
	dir, err := d.join("devices")
	if err != nil {
		// We will return error even it's `not found` error, devices
		// cgroup is hard requirement for container's security.
		return err
	}

	if err := s.Set(dir, d.c); err != nil {
		return err
	}

	return nil
}

func (s *DevicesGroup) Set(path string, cgroup *configs.Cgroup) error {
	if !cgroup.AllowAllDevices {
		if err := writeFile(path, "devices.deny", "a"); err != nil {
			return err
		}

		for _, dev := range cgroup.AllowedDevices {
			if err := writeFile(path, "devices.allow", dev.CgroupString()); err != nil {
				return err
			}
		}
		return nil
	}

	if err := writeFile(path, "devices.allow", "a"); err != nil {
		return err
	}

	for _, dev := range cgroup.DeniedDevices {
		if err := writeFile(path, "devices.deny", dev.CgroupString()); err != nil {
			return err
		}
	}

	return nil
}

func (s *DevicesGroup) Remove(d *data) error {
	return removePath(d.path("devices"))
}

func (s *DevicesGroup) GetStats(path string, stats *cgroups.Stats) error {
	return nil
}
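The rules written to devices.allow and devices.deny above come from configs.Device.CgroupString(). Judging from the allowedList/deniedList fixtures in devices_test.go further down, the shape is `<type> <major>:<minor> <permissions>`; the following is only an illustrative reconstruction of that string, not the vendored implementation.

package main

import "fmt"

func main() {
	// Mirrors the allowedList fixture in devices_test.go: character device
	// 1:5 (/dev/zero) with read, write and mknod permissions.
	devType := 'c'
	major, minor := 1, 5
	permissions := "rwm"

	rule := fmt.Sprintf("%c %d:%d %s", devType, major, minor, permissions)
	fmt.Println(rule) // "c 1:5 rwm"
}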
84  libnetwork/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices_test.go  generated vendored Normal file
@@ -0,0 +1,84 @@
|
|||
// +build linux
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/configs"
|
||||
)
|
||||
|
||||
var (
|
||||
allowedDevices = []*configs.Device{
|
||||
{
|
||||
Path: "/dev/zero",
|
||||
Type: 'c',
|
||||
Major: 1,
|
||||
Minor: 5,
|
||||
Permissions: "rwm",
|
||||
FileMode: 0666,
|
||||
},
|
||||
}
|
||||
allowedList = "c 1:5 rwm"
|
||||
deniedDevices = []*configs.Device{
|
||||
{
|
||||
Path: "/dev/null",
|
||||
Type: 'c',
|
||||
Major: 1,
|
||||
Minor: 3,
|
||||
Permissions: "rwm",
|
||||
FileMode: 0666,
|
||||
},
|
||||
}
|
||||
deniedList = "c 1:3 rwm"
|
||||
)
|
||||
|
||||
func TestDevicesSetAllow(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("devices", t)
|
||||
defer helper.cleanup()
|
||||
|
||||
helper.writeFileContents(map[string]string{
|
||||
"devices.deny": "a",
|
||||
})
|
||||
|
||||
helper.CgroupData.c.AllowAllDevices = false
|
||||
helper.CgroupData.c.AllowedDevices = allowedDevices
|
||||
devices := &DevicesGroup{}
|
||||
if err := devices.Set(helper.CgroupPath, helper.CgroupData.c); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
value, err := getCgroupParamString(helper.CgroupPath, "devices.allow")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to parse devices.allow - %s", err)
|
||||
}
|
||||
|
||||
if value != allowedList {
|
||||
t.Fatal("Got the wrong value, set devices.allow failed.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDevicesSetDeny(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("devices", t)
|
||||
defer helper.cleanup()
|
||||
|
||||
helper.writeFileContents(map[string]string{
|
||||
"devices.allow": "a",
|
||||
})
|
||||
|
||||
helper.CgroupData.c.AllowAllDevices = true
|
||||
helper.CgroupData.c.DeniedDevices = deniedDevices
|
||||
devices := &DevicesGroup{}
|
||||
if err := devices.Set(helper.CgroupPath, helper.CgroupData.c); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
value, err := getCgroupParamString(helper.CgroupPath, "devices.deny")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to parse devices.deny - %s", err)
|
||||
}
|
||||
|
||||
if value != deniedList {
|
||||
t.Fatal("Got the wrong value, set devices.deny failed.")
|
||||
}
|
||||
}
|
62  libnetwork/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go  generated vendored Normal file
@@ -0,0 +1,62 @@
// +build linux

package fs

import (
	"fmt"
	"strings"
	"time"

	"github.com/opencontainers/runc/libcontainer/cgroups"
	"github.com/opencontainers/runc/libcontainer/configs"
)

type FreezerGroup struct {
}

func (s *FreezerGroup) Apply(d *data) error {
	dir, err := d.join("freezer")
	if err != nil && !cgroups.IsNotFound(err) {
		return err
	}

	if err := s.Set(dir, d.c); err != nil {
		return err
	}

	return nil
}

func (s *FreezerGroup) Set(path string, cgroup *configs.Cgroup) error {
	switch cgroup.Freezer {
	case configs.Frozen, configs.Thawed:
		if err := writeFile(path, "freezer.state", string(cgroup.Freezer)); err != nil {
			return err
		}

		for {
			state, err := readFile(path, "freezer.state")
			if err != nil {
				return err
			}
			if strings.TrimSpace(state) == string(cgroup.Freezer) {
				break
			}
			time.Sleep(1 * time.Millisecond)
		}
	case configs.Undefined:
		return nil
	default:
		return fmt.Errorf("Invalid argument '%s' to freezer.state", string(cgroup.Freezer))
	}

	return nil
}

func (s *FreezerGroup) Remove(d *data) error {
	return removePath(d.path("freezer"))
}

func (s *FreezerGroup) GetStats(path string, stats *cgroups.Stats) error {
	return nil
}
45  libnetwork/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer_test.go  generated vendored Normal file
@@ -0,0 +1,45 @@
package fs

import (
	"testing"

	"github.com/opencontainers/runc/libcontainer/configs"
)

func TestFreezerSetState(t *testing.T) {
	helper := NewCgroupTestUtil("freezer", t)
	defer helper.cleanup()

	helper.writeFileContents(map[string]string{
		"freezer.state": string(configs.Frozen),
	})

	helper.CgroupData.c.Freezer = configs.Thawed
	freezer := &FreezerGroup{}
	if err := freezer.Set(helper.CgroupPath, helper.CgroupData.c); err != nil {
		t.Fatal(err)
	}

	value, err := getCgroupParamString(helper.CgroupPath, "freezer.state")
	if err != nil {
		t.Fatalf("Failed to parse freezer.state - %s", err)
	}
	if value != string(configs.Thawed) {
		t.Fatal("Got the wrong value, set freezer.state failed.")
	}
}

func TestFreezerSetInvalidState(t *testing.T) {
	helper := NewCgroupTestUtil("freezer", t)
	defer helper.cleanup()

	const (
		invalidArg configs.FreezerState = "Invalid"
	)

	helper.CgroupData.c.Freezer = invalidArg
	freezer := &FreezerGroup{}
	if err := freezer.Set(helper.CgroupPath, helper.CgroupData.c); err == nil {
		t.Fatal("Failed to return invalid argument error")
	}
}
3  libnetwork/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs_unsupported.go  generated vendored Normal file
@@ -0,0 +1,3 @@
// +build !linux

package fs
72  libnetwork/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb.go  generated vendored Normal file
@@ -0,0 +1,72 @@
|
|||
// +build linux
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups"
|
||||
"github.com/opencontainers/runc/libcontainer/configs"
|
||||
)
|
||||
|
||||
type HugetlbGroup struct {
|
||||
}
|
||||
|
||||
func (s *HugetlbGroup) Apply(d *data) error {
|
||||
dir, err := d.join("hugetlb")
|
||||
if err != nil && !cgroups.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.Set(dir, d.c); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *HugetlbGroup) Set(path string, cgroup *configs.Cgroup) error {
|
||||
for _, hugetlb := range cgroup.HugetlbLimit {
|
||||
if err := writeFile(path, strings.Join([]string{"hugetlb", hugetlb.Pagesize, "limit_in_bytes"}, "."), strconv.Itoa(hugetlb.Limit)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *HugetlbGroup) Remove(d *data) error {
|
||||
return removePath(d.path("hugetlb"))
|
||||
}
|
||||
|
||||
func (s *HugetlbGroup) GetStats(path string, stats *cgroups.Stats) error {
|
||||
hugetlbStats := cgroups.HugetlbStats{}
|
||||
for _, pageSize := range HugePageSizes {
|
||||
usage := strings.Join([]string{"hugetlb", pageSize, "usage_in_bytes"}, ".")
|
||||
value, err := getCgroupParamUint(path, usage)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse %s - %v", usage, err)
|
||||
}
|
||||
hugetlbStats.Usage = value
|
||||
|
||||
maxUsage := strings.Join([]string{"hugetlb", pageSize, "max_usage_in_bytes"}, ".")
|
||||
value, err = getCgroupParamUint(path, maxUsage)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse %s - %v", maxUsage, err)
|
||||
}
|
||||
hugetlbStats.MaxUsage = value
|
||||
|
||||
failcnt := strings.Join([]string{"hugetlb", pageSize, "failcnt"}, ".")
|
||||
value, err = getCgroupParamUint(path, failcnt)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse %s - %v", failcnt, err)
|
||||
}
|
||||
hugetlbStats.Failcnt = value
|
||||
|
||||
stats.HugetlbStats[pageSize] = hugetlbStats
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
138  libnetwork/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb_test.go  generated vendored Normal file
@@ -0,0 +1,138 @@
|
|||
package fs
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups"
|
||||
"github.com/opencontainers/runc/libcontainer/configs"
|
||||
)
|
||||
|
||||
const (
|
||||
hugetlbUsageContents = "128\n"
|
||||
hugetlbMaxUsageContents = "256\n"
|
||||
hugetlbFailcnt = "100\n"
|
||||
)
|
||||
|
||||
var (
|
||||
hugePageSize, _ = cgroups.GetHugePageSize()
|
||||
usage = strings.Join([]string{"hugetlb", hugePageSize[0], "usage_in_bytes"}, ".")
|
||||
limit = strings.Join([]string{"hugetlb", hugePageSize[0], "limit_in_bytes"}, ".")
|
||||
maxUsage = strings.Join([]string{"hugetlb", hugePageSize[0], "max_usage_in_bytes"}, ".")
|
||||
failcnt = strings.Join([]string{"hugetlb", hugePageSize[0], "failcnt"}, ".")
|
||||
)
|
||||
|
||||
func TestHugetlbSetHugetlb(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("hugetlb", t)
|
||||
defer helper.cleanup()
|
||||
|
||||
const (
|
||||
hugetlbBefore = 256
|
||||
hugetlbAfter = 512
|
||||
)
|
||||
|
||||
helper.writeFileContents(map[string]string{
|
||||
limit: strconv.Itoa(hugetlbBefore),
|
||||
})
|
||||
|
||||
helper.CgroupData.c.HugetlbLimit = []*configs.HugepageLimit{
|
||||
{
|
||||
Pagesize: hugePageSize[0],
|
||||
Limit: hugetlbAfter,
|
||||
},
|
||||
}
|
||||
hugetlb := &HugetlbGroup{}
|
||||
if err := hugetlb.Set(helper.CgroupPath, helper.CgroupData.c); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
value, err := getCgroupParamUint(helper.CgroupPath, limit)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to parse %s - %s", limit, err)
|
||||
}
|
||||
if value != hugetlbAfter {
|
||||
t.Fatalf("Set hugetlb.limit_in_bytes failed. Expected: %v, Got: %v", hugetlbAfter, value)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHugetlbStats(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("hugetlb", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
usage: hugetlbUsageContents,
|
||||
maxUsage: hugetlbMaxUsageContents,
|
||||
failcnt: hugetlbFailcnt,
|
||||
})
|
||||
|
||||
hugetlb := &HugetlbGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := hugetlb.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expectedStats := cgroups.HugetlbStats{Usage: 128, MaxUsage: 256, Failcnt: 100}
|
||||
expectHugetlbStatEquals(t, expectedStats, actualStats.HugetlbStats[hugePageSize[0]])
|
||||
}
|
||||
|
||||
func TestHugetlbStatsNoUsageFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("hugetlb", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
maxUsage: hugetlbMaxUsageContents,
|
||||
})
|
||||
|
||||
hugetlb := &HugetlbGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := hugetlb.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err == nil {
|
||||
t.Fatal("Expected failure")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHugetlbStatsNoMaxUsageFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("hugetlb", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
usage: hugetlbUsageContents,
|
||||
})
|
||||
|
||||
hugetlb := &HugetlbGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := hugetlb.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err == nil {
|
||||
t.Fatal("Expected failure")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHugetlbStatsBadUsageFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("hugetlb", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
usage: "bad",
|
||||
maxUsage: hugetlbMaxUsageContents,
|
||||
})
|
||||
|
||||
hugetlb := &HugetlbGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := hugetlb.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err == nil {
|
||||
t.Fatal("Expected failure")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHugetlbStatsBadMaxUsageFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("hugetlb", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
usage: hugetlbUsageContents,
|
||||
maxUsage: "bad",
|
||||
})
|
||||
|
||||
hugetlb := &HugetlbGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := hugetlb.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err == nil {
|
||||
t.Fatal("Expected failure")
|
||||
}
|
||||
}
|
171  libnetwork/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go  generated vendored Normal file
@@ -0,0 +1,171 @@
|
|||
// +build linux
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups"
|
||||
"github.com/opencontainers/runc/libcontainer/configs"
|
||||
)
|
||||
|
||||
type MemoryGroup struct {
|
||||
}
|
||||
|
||||
func (s *MemoryGroup) Apply(d *data) error {
|
||||
path, err := d.path("memory")
|
||||
if err != nil {
|
||||
if cgroups.IsNotFound(err) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
if err := os.MkdirAll(path, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.Set(path, d.c); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// We need to join memory cgroup after set memory limits, because
|
||||
// kmem.limit_in_bytes can only be set when the cgroup is empty.
|
||||
_, err = d.join("memory")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
os.RemoveAll(path)
|
||||
}
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *MemoryGroup) Set(path string, cgroup *configs.Cgroup) error {
|
||||
if cgroup.Memory != 0 {
|
||||
if err := writeFile(path, "memory.limit_in_bytes", strconv.FormatInt(cgroup.Memory, 10)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if cgroup.MemoryReservation != 0 {
|
||||
if err := writeFile(path, "memory.soft_limit_in_bytes", strconv.FormatInt(cgroup.MemoryReservation, 10)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if cgroup.MemorySwap > 0 {
|
||||
if err := writeFile(path, "memory.memsw.limit_in_bytes", strconv.FormatInt(cgroup.MemorySwap, 10)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if cgroup.KernelMemory > 0 {
|
||||
if err := writeFile(path, "memory.kmem.limit_in_bytes", strconv.FormatInt(cgroup.KernelMemory, 10)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if cgroup.OomKillDisable {
|
||||
if err := writeFile(path, "memory.oom_control", "1"); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if cgroup.MemorySwappiness >= 0 && cgroup.MemorySwappiness <= 100 {
|
||||
if err := writeFile(path, "memory.swappiness", strconv.FormatInt(cgroup.MemorySwappiness, 10)); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if cgroup.MemorySwappiness == -1 {
|
||||
return nil
|
||||
} else {
|
||||
return fmt.Errorf("invalid value:%d. valid memory swappiness range is 0-100", cgroup.MemorySwappiness)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *MemoryGroup) Remove(d *data) error {
|
||||
return removePath(d.path("memory"))
|
||||
}
|
||||
|
||||
func (s *MemoryGroup) GetStats(path string, stats *cgroups.Stats) error {
|
||||
// Set stats from memory.stat.
|
||||
statsFile, err := os.Open(filepath.Join(path, "memory.stat"))
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
defer statsFile.Close()
|
||||
|
||||
sc := bufio.NewScanner(statsFile)
|
||||
for sc.Scan() {
|
||||
t, v, err := getCgroupParamKeyValue(sc.Text())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse memory.stat (%q) - %v", sc.Text(), err)
|
||||
}
|
||||
stats.MemoryStats.Stats[t] = v
|
||||
}
|
||||
stats.MemoryStats.Cache = stats.MemoryStats.Stats["cache"]
|
||||
|
||||
memoryUsage, err := getMemoryData(path, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
stats.MemoryStats.Usage = memoryUsage
|
||||
swapUsage, err := getMemoryData(path, "memsw")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
stats.MemoryStats.SwapUsage = swapUsage
|
||||
kernelUsage, err := getMemoryData(path, "kmem")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
stats.MemoryStats.KernelUsage = kernelUsage
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getMemoryData(path, name string) (cgroups.MemoryData, error) {
|
||||
memoryData := cgroups.MemoryData{}
|
||||
|
||||
moduleName := "memory"
|
||||
if name != "" {
|
||||
moduleName = strings.Join([]string{"memory", name}, ".")
|
||||
}
|
||||
usage := strings.Join([]string{moduleName, "usage_in_bytes"}, ".")
|
||||
maxUsage := strings.Join([]string{moduleName, "max_usage_in_bytes"}, ".")
|
||||
failcnt := strings.Join([]string{moduleName, "failcnt"}, ".")
|
||||
|
||||
value, err := getCgroupParamUint(path, usage)
|
||||
if err != nil {
|
||||
if moduleName != "memory" && os.IsNotExist(err) {
|
||||
return cgroups.MemoryData{}, nil
|
||||
}
|
||||
return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", usage, err)
|
||||
}
|
||||
memoryData.Usage = value
|
||||
value, err = getCgroupParamUint(path, maxUsage)
|
||||
if err != nil {
|
||||
if moduleName != "memory" && os.IsNotExist(err) {
|
||||
return cgroups.MemoryData{}, nil
|
||||
}
|
||||
return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", maxUsage, err)
|
||||
}
|
||||
memoryData.MaxUsage = value
|
||||
value, err = getCgroupParamUint(path, failcnt)
|
||||
if err != nil {
|
||||
if moduleName != "memory" && os.IsNotExist(err) {
|
||||
return cgroups.MemoryData{}, nil
|
||||
}
|
||||
return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", failcnt, err)
|
||||
}
|
||||
memoryData.Failcnt = value
|
||||
|
||||
return memoryData, nil
|
||||
}
|
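getMemoryData above builds the usage_in_bytes, max_usage_in_bytes and failcnt file names from a module prefix (plain memory, memory.memsw for swap, memory.kmem for kernel memory). The following is a small sketch of just that naming scheme, detached from the vendored cgroup helpers.

package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, name := range []string{"", "memsw", "kmem"} {
		moduleName := "memory"
		if name != "" {
			moduleName = strings.Join([]string{"memory", name}, ".")
		}
		// Prints: memory.usage_in_bytes, memory.memsw.usage_in_bytes, memory.kmem.usage_in_bytes
		fmt.Println(strings.Join([]string{moduleName, "usage_in_bytes"}, "."))
	}
}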
294  libnetwork/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory_test.go  generated vendored Normal file
@@ -0,0 +1,294 @@
|
|||
// +build linux
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups"
|
||||
)
|
||||
|
||||
const (
|
||||
memoryStatContents = `cache 512
|
||||
rss 1024`
|
||||
memoryUsageContents = "2048\n"
|
||||
memoryMaxUsageContents = "4096\n"
|
||||
memoryFailcnt = "100\n"
|
||||
)
|
||||
|
||||
func TestMemorySetMemory(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("memory", t)
|
||||
defer helper.cleanup()
|
||||
|
||||
const (
|
||||
memoryBefore = 314572800 // 300M
|
||||
memoryAfter = 524288000 // 500M
|
||||
reservationBefore = 209715200 // 200M
|
||||
reservationAfter = 314572800 // 300M
|
||||
)
|
||||
|
||||
helper.writeFileContents(map[string]string{
|
||||
"memory.limit_in_bytes": strconv.Itoa(memoryBefore),
|
||||
"memory.soft_limit_in_bytes": strconv.Itoa(reservationBefore),
|
||||
})
|
||||
|
||||
helper.CgroupData.c.Memory = memoryAfter
|
||||
helper.CgroupData.c.MemoryReservation = reservationAfter
|
||||
memory := &MemoryGroup{}
|
||||
if err := memory.Set(helper.CgroupPath, helper.CgroupData.c); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
value, err := getCgroupParamUint(helper.CgroupPath, "memory.limit_in_bytes")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to parse memory.limit_in_bytes - %s", err)
|
||||
}
|
||||
if value != memoryAfter {
|
||||
t.Fatal("Got the wrong value, set memory.limit_in_bytes failed.")
|
||||
}
|
||||
|
||||
value, err = getCgroupParamUint(helper.CgroupPath, "memory.soft_limit_in_bytes")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to parse memory.soft_limit_in_bytes - %s", err)
|
||||
}
|
||||
if value != reservationAfter {
|
||||
t.Fatal("Got the wrong value, set memory.soft_limit_in_bytes failed.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMemorySetMemoryswap(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("memory", t)
|
||||
defer helper.cleanup()
|
||||
|
||||
const (
|
||||
memoryswapBefore = 314572800 // 300M
|
||||
memoryswapAfter = 524288000 // 500M
|
||||
)
|
||||
|
||||
helper.writeFileContents(map[string]string{
|
||||
"memory.memsw.limit_in_bytes": strconv.Itoa(memoryswapBefore),
|
||||
})
|
||||
|
||||
helper.CgroupData.c.MemorySwap = memoryswapAfter
|
||||
memory := &MemoryGroup{}
|
||||
if err := memory.Set(helper.CgroupPath, helper.CgroupData.c); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
value, err := getCgroupParamUint(helper.CgroupPath, "memory.memsw.limit_in_bytes")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to parse memory.memsw.limit_in_bytes - %s", err)
|
||||
}
|
||||
if value != memoryswapAfter {
|
||||
t.Fatal("Got the wrong value, set memory.memsw.limit_in_bytes failed.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMemorySetKernelMemory(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("memory", t)
|
||||
defer helper.cleanup()
|
||||
|
||||
const (
|
||||
kernelMemoryBefore = 314572800 // 300M
|
||||
kernelMemoryAfter = 524288000 // 500M
|
||||
)
|
||||
|
||||
helper.writeFileContents(map[string]string{
|
||||
"memory.kmem.limit_in_bytes": strconv.Itoa(kernelMemoryBefore),
|
||||
})
|
||||
|
||||
helper.CgroupData.c.KernelMemory = kernelMemoryAfter
|
||||
memory := &MemoryGroup{}
|
||||
if err := memory.Set(helper.CgroupPath, helper.CgroupData.c); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
value, err := getCgroupParamUint(helper.CgroupPath, "memory.kmem.limit_in_bytes")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to parse memory.kmem.limit_in_bytes - %s", err)
|
||||
}
|
||||
if value != kernelMemoryAfter {
|
||||
t.Fatal("Got the wrong value, set memory.kmem.limit_in_bytes failed.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMemorySetMemorySwappinessDefault(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("memory", t)
|
||||
defer helper.cleanup()
|
||||
|
||||
const (
|
||||
swappinessBefore = 60 // default is 60
|
||||
swappinessAfter = 0
|
||||
)
|
||||
|
||||
helper.writeFileContents(map[string]string{
|
||||
"memory.swappiness": strconv.Itoa(swappinessBefore),
|
||||
})
|
||||
|
||||
helper.CgroupData.c.Memory = swappinessAfter
|
||||
memory := &MemoryGroup{}
|
||||
if err := memory.Set(helper.CgroupPath, helper.CgroupData.c); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
value, err := getCgroupParamUint(helper.CgroupPath, "memory.swappiness")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to parse memory.swappiness - %s", err)
|
||||
}
|
||||
if value != swappinessAfter {
|
||||
t.Fatal("Got the wrong value, set memory.swappiness failed.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMemoryStats(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("memory", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"memory.stat": memoryStatContents,
|
||||
"memory.usage_in_bytes": memoryUsageContents,
|
||||
"memory.max_usage_in_bytes": memoryMaxUsageContents,
|
||||
"memory.failcnt": memoryFailcnt,
|
||||
"memory.memsw.usage_in_bytes": memoryUsageContents,
|
||||
"memory.memsw.max_usage_in_bytes": memoryMaxUsageContents,
|
||||
"memory.memsw.failcnt": memoryFailcnt,
|
||||
"memory.kmem.usage_in_bytes": memoryUsageContents,
|
||||
"memory.kmem.max_usage_in_bytes": memoryMaxUsageContents,
|
||||
"memory.kmem.failcnt": memoryFailcnt,
|
||||
})
|
||||
|
||||
memory := &MemoryGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := memory.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expectedStats := cgroups.MemoryStats{Cache: 512, Usage: cgroups.MemoryData{Usage: 2048, MaxUsage: 4096, Failcnt: 100}, SwapUsage: cgroups.MemoryData{Usage: 2048, MaxUsage: 4096, Failcnt: 100}, KernelUsage: cgroups.MemoryData{Usage: 2048, MaxUsage: 4096, Failcnt: 100}, Stats: map[string]uint64{"cache": 512, "rss": 1024}}
|
||||
expectMemoryStatEquals(t, expectedStats, actualStats.MemoryStats)
|
||||
}
|
||||
|
||||
func TestMemoryStatsNoStatFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("memory", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"memory.usage_in_bytes": memoryUsageContents,
|
||||
"memory.max_usage_in_bytes": memoryMaxUsageContents,
|
||||
})
|
||||
|
||||
memory := &MemoryGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := memory.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMemoryStatsNoUsageFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("memory", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"memory.stat": memoryStatContents,
|
||||
"memory.max_usage_in_bytes": memoryMaxUsageContents,
|
||||
})
|
||||
|
||||
memory := &MemoryGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := memory.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err == nil {
|
||||
t.Fatal("Expected failure")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMemoryStatsNoMaxUsageFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("memory", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"memory.stat": memoryStatContents,
|
||||
"memory.usage_in_bytes": memoryUsageContents,
|
||||
})
|
||||
|
||||
memory := &MemoryGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := memory.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err == nil {
|
||||
t.Fatal("Expected failure")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMemoryStatsBadStatFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("memory", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"memory.stat": "rss rss",
|
||||
"memory.usage_in_bytes": memoryUsageContents,
|
||||
"memory.max_usage_in_bytes": memoryMaxUsageContents,
|
||||
})
|
||||
|
||||
memory := &MemoryGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := memory.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err == nil {
|
||||
t.Fatal("Expected failure")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMemoryStatsBadUsageFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("memory", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"memory.stat": memoryStatContents,
|
||||
"memory.usage_in_bytes": "bad",
|
||||
"memory.max_usage_in_bytes": memoryMaxUsageContents,
|
||||
})
|
||||
|
||||
memory := &MemoryGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := memory.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err == nil {
|
||||
t.Fatal("Expected failure")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMemoryStatsBadMaxUsageFile(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("memory", t)
|
||||
defer helper.cleanup()
|
||||
helper.writeFileContents(map[string]string{
|
||||
"memory.stat": memoryStatContents,
|
||||
"memory.usage_in_bytes": memoryUsageContents,
|
||||
"memory.max_usage_in_bytes": "bad",
|
||||
})
|
||||
|
||||
memory := &MemoryGroup{}
|
||||
actualStats := *cgroups.NewStats()
|
||||
err := memory.GetStats(helper.CgroupPath, &actualStats)
|
||||
if err == nil {
|
||||
t.Fatal("Expected failure")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMemorySetOomControl(t *testing.T) {
|
||||
helper := NewCgroupTestUtil("memory", t)
|
||||
defer helper.cleanup()
|
||||
|
||||
const (
|
||||
oom_kill_disable = 1 // disable oom killer, default is 0
|
||||
)
|
||||
|
||||
helper.writeFileContents(map[string]string{
|
||||
"memory.oom_control": strconv.Itoa(oom_kill_disable),
|
||||
})
|
||||
|
||||
memory := &MemoryGroup{}
|
||||
if err := memory.Set(helper.CgroupPath, helper.CgroupData.c); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
value, err := getCgroupParamUint(helper.CgroupPath, "memory.oom_control")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to parse memory.oom_control - %s", err)
|
||||
}
|
||||
|
||||
if value != oom_kill_disable {
|
||||
t.Fatalf("Got the wrong value, set memory.oom_control failed.")
|
||||
}
|
||||
}
|
40  libnetwork/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_cls.go  generated vendored Normal file
@@ -0,0 +1,40 @@
|
|||
package fs

import (
	"github.com/opencontainers/runc/libcontainer/cgroups"
	"github.com/opencontainers/runc/libcontainer/configs"
)

type NetClsGroup struct {
}

func (s *NetClsGroup) Apply(d *data) error {
	dir, err := d.join("net_cls")
	if err != nil && !cgroups.IsNotFound(err) {
		return err
	}

	if err := s.Set(dir, d.c); err != nil {
		return err
	}

	return nil
}

func (s *NetClsGroup) Set(path string, cgroup *configs.Cgroup) error {
	if cgroup.NetClsClassid != "" {
		if err := writeFile(path, "net_cls.classid", cgroup.NetClsClassid); err != nil {
			return err
		}
	}

	return nil
}

func (s *NetClsGroup) Remove(d *data) error {
	return removePath(d.path("net_cls"))
}

func (s *NetClsGroup) GetStats(path string, stats *cgroups.Stats) error {
	return nil
}
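A short usage sketch, assuming the caller already has the path of a mounted net_cls cgroup directory: setting NetClsClassid on a configs.Cgroup and calling Set writes the value into net_cls.classid. The helper name and classid literal below are illustrative only, not part of the vendored code.

package fs

import "github.com/opencontainers/runc/libcontainer/configs"

// exampleSetClassid is a hypothetical helper: it tags traffic from the
// cgroup at cgroupPath with the given classid by writing net_cls.classid.
func exampleSetClassid(cgroupPath, classid string) error {
	cg := &configs.Cgroup{NetClsClassid: classid} // e.g. "0x100001"
	netcls := &NetClsGroup{}
	return netcls.Set(cgroupPath, cg)
}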
libnetwork/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_cls_test.go (generated, vendored, new file, 36 lines)
@@ -0,0 +1,36 @@
package fs

import (
	"testing"
)

const (
	classidBefore = "0x100002"
	classidAfter  = "0x100001"
)

func TestNetClsSetClassid(t *testing.T) {
	helper := NewCgroupTestUtil("net_cls", t)
	defer helper.cleanup()

	helper.writeFileContents(map[string]string{
		"net_cls.classid": classidBefore,
	})

	helper.CgroupData.c.NetClsClassid = classidAfter
	netcls := &NetClsGroup{}
	if err := netcls.Set(helper.CgroupPath, helper.CgroupData.c); err != nil {
		t.Fatal(err)
	}

	// In this mock environment we cannot read the real classid back from
	// the kernel, so we only check that the value was written to the
	// net_cls.classid file.
	value, err := getCgroupParamString(helper.CgroupPath, "net_cls.classid")
	if err != nil {
		t.Fatalf("Failed to parse net_cls.classid - %s", err)
	}
	if value != classidAfter {
		t.Fatal("Got the wrong value, set net_cls.classid failed.")
	}
}
libnetwork/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_prio.go (generated, vendored, new file, 40 lines)
@@ -0,0 +1,40 @@
package fs

import (
	"github.com/opencontainers/runc/libcontainer/cgroups"
	"github.com/opencontainers/runc/libcontainer/configs"
)

type NetPrioGroup struct {
}

func (s *NetPrioGroup) Apply(d *data) error {
	dir, err := d.join("net_prio")
	if err != nil && !cgroups.IsNotFound(err) {
		return err
	}

	if err := s.Set(dir, d.c); err != nil {
		return err
	}

	return nil
}

func (s *NetPrioGroup) Set(path string, cgroup *configs.Cgroup) error {
	for _, prioMap := range cgroup.NetPrioIfpriomap {
		if err := writeFile(path, "net_prio.ifpriomap", prioMap.CgroupString()); err != nil {
			return err
		}
	}

	return nil
}

func (s *NetPrioGroup) Remove(d *data) error {
	return removePath(d.path("net_prio"))
}

func (s *NetPrioGroup) GetStats(path string, stats *cgroups.Stats) error {
	return nil
}
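Similarly, a sketch of how a caller might populate NetPrioIfpriomap: each configs.IfPrioMap entry is serialized by CgroupString() into one "interface priority" line of net_prio.ifpriomap (the test below expects "test 5"). The helper name, interface name, and priority are made up for illustration.

package fs

import "github.com/opencontainers/runc/libcontainer/configs"

// exampleSetIfPrio is a hypothetical helper: it asks the net_prio controller
// to give egress traffic on eth0 priority 5 for tasks in this cgroup.
func exampleSetIfPrio(cgroupPath string) error {
	cg := &configs.Cgroup{
		NetPrioIfpriomap: []*configs.IfPrioMap{
			{Interface: "eth0", Priority: 5},
		},
	}
	netPrio := &NetPrioGroup{}
	return netPrio.Set(cgroupPath, cg)
}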
libnetwork/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_prio_test.go (generated, vendored, new file, 36 lines)
@@ -0,0 +1,36 @@
package fs

import (
	"strings"
	"testing"

	"github.com/opencontainers/runc/libcontainer/configs"
)

var (
	prioMap = []*configs.IfPrioMap{
		{
			Interface: "test",
			Priority:  5,
		},
	}
)

func TestNetPrioSetIfPrio(t *testing.T) {
	helper := NewCgroupTestUtil("net_prio", t)
	defer helper.cleanup()

	helper.CgroupData.c.NetPrioIfpriomap = prioMap
	netPrio := &NetPrioGroup{}
	if err := netPrio.Set(helper.CgroupPath, helper.CgroupData.c); err != nil {
		t.Fatal(err)
	}

	value, err := getCgroupParamString(helper.CgroupPath, "net_prio.ifpriomap")
	if err != nil {
		t.Fatalf("Failed to parse net_prio.ifpriomap - %s", err)
	}
	if !strings.Contains(value, "test 5") {
		t.Fatal("Got the wrong value, set net_prio.ifpriomap failed.")
	}
}
libnetwork/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/perf_event.go (generated, vendored, new file, 31 lines)
@@ -0,0 +1,31 @@
// +build linux

package fs

import (
	"github.com/opencontainers/runc/libcontainer/cgroups"
	"github.com/opencontainers/runc/libcontainer/configs"
)

type PerfEventGroup struct {
}

func (s *PerfEventGroup) Apply(d *data) error {
	// we just want to join this group even though we don't set anything
	if _, err := d.join("perf_event"); err != nil && !cgroups.IsNotFound(err) {
		return err
	}
	return nil
}

func (s *PerfEventGroup) Set(path string, cgroup *configs.Cgroup) error {
	return nil
}

func (s *PerfEventGroup) Remove(d *data) error {
	return removePath(d.path("perf_event"))
}

func (s *PerfEventGroup) GetStats(path string, stats *cgroups.Stats) error {
	return nil
}
libnetwork/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/stats_util_test.go (generated, vendored, new file, 113 lines)
@@ -0,0 +1,113 @@
// +build linux

package fs

import (
	"fmt"
	"testing"

	"github.com/Sirupsen/logrus"
	"github.com/opencontainers/runc/libcontainer/cgroups"
)

func blkioStatEntryEquals(expected, actual []cgroups.BlkioStatEntry) error {
	if len(expected) != len(actual) {
		return fmt.Errorf("blkioStatEntries length do not match")
	}
	for i, expValue := range expected {
		actValue := actual[i]
		if expValue != actValue {
			return fmt.Errorf("Expected blkio stat entry %v but found %v", expValue, actValue)
		}
	}
	return nil
}

func expectBlkioStatsEquals(t *testing.T, expected, actual cgroups.BlkioStats) {
	if err := blkioStatEntryEquals(expected.IoServiceBytesRecursive, actual.IoServiceBytesRecursive); err != nil {
		logrus.Printf("blkio IoServiceBytesRecursive do not match - %s\n", err)
		t.Fail()
	}

	if err := blkioStatEntryEquals(expected.IoServicedRecursive, actual.IoServicedRecursive); err != nil {
		logrus.Printf("blkio IoServicedRecursive do not match - %s\n", err)
		t.Fail()
	}

	if err := blkioStatEntryEquals(expected.IoQueuedRecursive, actual.IoQueuedRecursive); err != nil {
		logrus.Printf("blkio IoQueuedRecursive do not match - %s\n", err)
		t.Fail()
	}

	if err := blkioStatEntryEquals(expected.SectorsRecursive, actual.SectorsRecursive); err != nil {
		logrus.Printf("blkio SectorsRecursive do not match - %s\n", err)
		t.Fail()
	}

	if err := blkioStatEntryEquals(expected.IoServiceTimeRecursive, actual.IoServiceTimeRecursive); err != nil {
		logrus.Printf("blkio IoServiceTimeRecursive do not match - %s\n", err)
		t.Fail()
	}

	if err := blkioStatEntryEquals(expected.IoWaitTimeRecursive, actual.IoWaitTimeRecursive); err != nil {
		logrus.Printf("blkio IoWaitTimeRecursive do not match - %s\n", err)
		t.Fail()
	}

	if err := blkioStatEntryEquals(expected.IoMergedRecursive, actual.IoMergedRecursive); err != nil {
		logrus.Printf("blkio IoMergedRecursive do not match - %v vs %v\n", expected.IoMergedRecursive, actual.IoMergedRecursive)
		t.Fail()
	}

	if err := blkioStatEntryEquals(expected.IoTimeRecursive, actual.IoTimeRecursive); err != nil {
		logrus.Printf("blkio IoTimeRecursive do not match - %s\n", err)
		t.Fail()
	}
}

func expectThrottlingDataEquals(t *testing.T, expected, actual cgroups.ThrottlingData) {
	if expected != actual {
		logrus.Printf("Expected throttling data %v but found %v\n", expected, actual)
		t.Fail()
	}
}

func expectHugetlbStatEquals(t *testing.T, expected, actual cgroups.HugetlbStats) {
	if expected != actual {
		logrus.Printf("Expected hugetlb stats %v but found %v\n", expected, actual)
		t.Fail()
	}
}

func expectMemoryStatEquals(t *testing.T, expected, actual cgroups.MemoryStats) {
	expectMemoryDataEquals(t, expected.Usage, actual.Usage)
	expectMemoryDataEquals(t, expected.SwapUsage, actual.SwapUsage)
	expectMemoryDataEquals(t, expected.KernelUsage, actual.KernelUsage)

	for key, expValue := range expected.Stats {
		actValue, ok := actual.Stats[key]
		if !ok {
			logrus.Printf("Expected memory stat key %s not found\n", key)
			t.Fail()
		}
		if expValue != actValue {
			logrus.Printf("Expected memory stat value %d but found %d\n", expValue, actValue)
			t.Fail()
		}
	}
}

func expectMemoryDataEquals(t *testing.T, expected, actual cgroups.MemoryData) {
	if expected.Usage != actual.Usage {
		logrus.Printf("Expected memory usage %d but found %d\n", expected.Usage, actual.Usage)
		t.Fail()
	}
	if expected.MaxUsage != actual.MaxUsage {
		logrus.Printf("Expected memory max usage %d but found %d\n", expected.MaxUsage, actual.MaxUsage)
		t.Fail()
	}
	if expected.Failcnt != actual.Failcnt {
		logrus.Printf("Expected memory failcnt %d but found %d\n", expected.Failcnt, actual.Failcnt)
		t.Fail()
	}
}
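These helpers are meant to be called from the subsystem tests in this package after GetStats has filled a cgroups.Stats. A minimal sketch of the call pattern (hypothetical test; the ThrottlingData field names are assumed from the upstream runc cgroups stats structs):

package fs

import (
	"testing"

	"github.com/opencontainers/runc/libcontainer/cgroups"
)

// TestThrottlingComparisonSketch is hypothetical; it shows the intended
// usage: build the expected struct, then compare it against what a
// subsystem's GetStats produced (here the two are trivially identical).
func TestThrottlingComparisonSketch(t *testing.T) {
	expected := cgroups.ThrottlingData{Periods: 2, ThrottledPeriods: 1, ThrottledTime: 100}
	actual := expected // in a real test this would come from GetStats
	expectThrottlingDataEquals(t, expected, actual)
}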
libnetwork/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/util_test.go (generated, vendored, new file, 66 lines)
@@ -0,0 +1,66 @@
// +build linux

/*
Utility for testing cgroup operations.

Creates a mock of the cgroup filesystem for the duration of the test.
*/
package fs

import (
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"

	"github.com/opencontainers/runc/libcontainer/configs"
)

type cgroupTestUtil struct {
	// data to use in tests.
	CgroupData *data

	// Path to the mock cgroup directory.
	CgroupPath string

	// Temporary directory to store mock cgroup filesystem.
	tempDir string
	t       *testing.T
}

// Creates a new test util for the specified subsystem
func NewCgroupTestUtil(subsystem string, t *testing.T) *cgroupTestUtil {
	d := &data{
		c: &configs.Cgroup{},
	}
	tempDir, err := ioutil.TempDir("", "cgroup_test")
	if err != nil {
		t.Fatal(err)
	}
	d.root = tempDir
	testCgroupPath := filepath.Join(d.root, subsystem)
	if err != nil {
		t.Fatal(err)
	}

	// Ensure the full mock cgroup path exists.
	err = os.MkdirAll(testCgroupPath, 0755)
	if err != nil {
		t.Fatal(err)
	}
	return &cgroupTestUtil{CgroupData: d, CgroupPath: testCgroupPath, tempDir: tempDir, t: t}
}

func (c *cgroupTestUtil) cleanup() {
	os.RemoveAll(c.tempDir)
}

// Write the specified contents on the mock of the specified cgroup files.
func (c *cgroupTestUtil) writeFileContents(fileContents map[string]string) {
	for file, contents := range fileContents {
		err := writeFile(c.CgroupPath, file, contents)
		if err != nil {
			c.t.Fatal(err)
		}
	}
}
libnetwork/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/utils.go (generated, vendored, new file, 74 lines)
@@ -0,0 +1,74 @@
// +build linux

package fs

import (
	"errors"
	"fmt"
	"io/ioutil"
	"path/filepath"
	"strconv"
	"strings"
)

var (
	ErrNotSupportStat = errors.New("stats are not supported for subsystem")
	ErrNotValidFormat = errors.New("line is not a valid key value format")
)

// Saturates negative values at zero and returns a uint64.
// Due to kernel bugs, some of the memory cgroup stats can be negative.
func parseUint(s string, base, bitSize int) (uint64, error) {
	value, err := strconv.ParseUint(s, base, bitSize)
	if err != nil {
		intValue, intErr := strconv.ParseInt(s, base, bitSize)
		// 1. Handle negative values greater than MinInt64 (and)
		// 2. Handle negative values lesser than MinInt64
		if intErr == nil && intValue < 0 {
			return 0, nil
		} else if intErr != nil && intErr.(*strconv.NumError).Err == strconv.ErrRange && intValue < 0 {
			return 0, nil
		}

		return value, err
	}

	return value, nil
}

// Parses a cgroup param and returns as name, value
// i.e. "io_service_bytes 1234" will return as io_service_bytes, 1234
func getCgroupParamKeyValue(t string) (string, uint64, error) {
	parts := strings.Fields(t)
	switch len(parts) {
	case 2:
		value, err := parseUint(parts[1], 10, 64)
		if err != nil {
			return "", 0, fmt.Errorf("Unable to convert param value (%q) to uint64: %v", parts[1], err)
		}

		return parts[0], value, nil
	default:
		return "", 0, ErrNotValidFormat
	}
}

// Gets a single uint64 value from the specified cgroup file.
func getCgroupParamUint(cgroupPath, cgroupFile string) (uint64, error) {
	contents, err := ioutil.ReadFile(filepath.Join(cgroupPath, cgroupFile))
	if err != nil {
		return 0, err
	}

	return parseUint(strings.TrimSpace(string(contents)), 10, 64)
}

// Gets a string value from the specified cgroup file
func getCgroupParamString(cgroupPath, cgroupFile string) (string, error) {
	contents, err := ioutil.ReadFile(filepath.Join(cgroupPath, cgroupFile))
	if err != nil {
		return "", err
	}

	return strings.TrimSpace(string(contents)), nil
}
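To make the saturation behaviour of parseUint concrete: negative inputs, whether or not they fit in an int64, come back as 0 with no error, while non-numeric input still fails. A minimal sketch (hypothetical test in the same package, not part of the vendored diff):

package fs

import "testing"

// TestParseUintSketch is hypothetical; it exercises the documented
// clamping of negative cgroup values to zero.
func TestParseUintSketch(t *testing.T) {
	// A small negative value parses as an int64 and is clamped to 0.
	if v, err := parseUint("-1", 10, 64); err != nil || v != 0 {
		t.Fatalf("expected 0, <nil>; got %d, %v", v, err)
	}
	// A value below MinInt64 hits the ErrRange branch and is also clamped.
	if v, err := parseUint("-18446744073709551616", 10, 64); err != nil || v != 0 {
		t.Fatalf("expected 0, <nil>; got %d, %v", v, err)
	}
	// Non-numeric input is still an error.
	if _, err := parseUint("not-a-number", 10, 64); err == nil {
		t.Fatal("expected an error for non-numeric input")
	}
}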
Some files were not shown because too many files have changed in this diff.