Vendor dependency cycle-free swarmkit

Moby imports Swarmkit; Swarmkit no longer imports Moby. To accomplish this, Swarmkit introduced a new plugin.Getter interface so it could stop importing our pkg/plugingetter package. The new interface is not fully compatible with our plugingetter.PluginGetter interface, so a thin adapter is needed. Swarmkit also had to jettison the CNM network allocator to stop importing libnetwork, as the cnmallocator package is deeply tied to libnetwork. Move the CNM network allocator into libnetwork, where it belongs. The package had a short and uninteresting Git history in the Swarmkit repository, so no effort was made to retain that history.

Signed-off-by: Cory Snider <csnider@mirantis.com>
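Note: the wiring this introduces is easiest to see in the nodeRunner.start hunk further down. A condensed sketch only — the enclosing swarmnode.Config literal is assumed from the swarmnode import, `backend` stands in for n.cluster.config.Backend, and all other fields are omitted:

	// Swarmkit now receives an adapted plugin getter plus the CNM allocator
	// provider that lives in libnetwork; both calls appear verbatim in the
	// hunks below.
	cfg := swarmnode.Config{
		PluginGetter:    convert.SwarmPluginGetter(backend.PluginGetter()),
		NetworkProvider: cnmallocator.NewProvider(backend.PluginGetter()),
	}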
parent 3ca1d751e5
commit 7b0ab1011c
113 changed files with 29815 additions and 298 deletions

daemon/cluster/convert/pluginadapter.go (new file, 42 lines)
@ -0,0 +1,42 @@
package convert

import (
	"github.com/docker/docker/pkg/plugingetter"
	"github.com/moby/swarmkit/v2/node/plugin"
)

// SwarmPluginGetter adapts a plugingetter.PluginGetter to a Swarmkit plugin.Getter.
func SwarmPluginGetter(pg plugingetter.PluginGetter) plugin.Getter {
	return pluginGetter{pg}
}

type pluginGetter struct {
	pg plugingetter.PluginGetter
}

var _ plugin.Getter = (*pluginGetter)(nil)

type swarmPlugin struct {
	plugingetter.CompatPlugin
}

func (p swarmPlugin) Client() plugin.Client {
	return p.CompatPlugin.Client()
}

func (g pluginGetter) Get(name string, capability string) (plugin.Plugin, error) {
	p, err := g.pg.Get(name, capability, plugingetter.Lookup)
	if err != nil {
		return nil, err
	}
	return swarmPlugin{p}, nil
}

func (g pluginGetter) GetAllManagedPluginsByCap(capability string) []plugin.Plugin {
	pp := g.pg.GetAllManagedPluginsByCap(capability)
	ret := make([]plugin.Plugin, len(pp))
	for i, p := range pp {
		ret[i] = swarmPlugin{p}
	}
	return ret
}
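The adapter can stay this thin because Swarmkit's plugin.Getter asks for strictly less than Moby's plugingetter.PluginGetter: its Get takes no lookup-mode argument (the adapter always passes plugingetter.Lookup) and it deals in Swarmkit's own plugin.Plugin and plugin.Client types. The Swarmkit-side interface is roughly the following — inferred from the adapter above as a reading aid, not copied from moby/swarmkit/v2/node/plugin:

	type Getter interface {
		Get(name string, capability string) (Plugin, error)
		GetAllManagedPluginsByCap(capability string) []Plugin
	}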
@ -52,7 +52,7 @@ func NewExecutor(b executorpkg.Backend, p plugin.Backend, i executorpkg.ImageBac
 		pluginBackend: p,
 		imageBackend:  i,
 		volumeBackend: v,
-		dependencies:  agent.NewDependencyManager(b.PluginGetter()),
+		dependencies:  agent.NewDependencyManager(convert.SwarmPluginGetter(b.PluginGetter())),
 	}
 }
 
@ -10,10 +10,12 @@ import (
 
 	"github.com/containerd/log"
 	types "github.com/docker/docker/api/types/swarm"
+	"github.com/docker/docker/daemon/cluster/convert"
 	"github.com/docker/docker/daemon/cluster/executor/container"
 	lncluster "github.com/docker/docker/libnetwork/cluster"
+	"github.com/docker/docker/libnetwork/cnmallocator"
 	swarmapi "github.com/moby/swarmkit/v2/api"
-	swarmallocator "github.com/moby/swarmkit/v2/manager/allocator/cnmallocator"
+	"github.com/moby/swarmkit/v2/manager/allocator/networkallocator"
 	swarmnode "github.com/moby/swarmkit/v2/node"
 	"github.com/pkg/errors"
 	"google.golang.org/grpc"
@ -123,7 +125,7 @@ func (n *nodeRunner) start(conf nodeStartConfig) error {
 		ListenControlAPI:   control,
 		ListenRemoteAPI:    conf.ListenAddr,
 		AdvertiseRemoteAPI: conf.AdvertiseAddr,
-		NetworkConfig: &swarmallocator.NetworkConfig{
+		NetworkConfig: &networkallocator.Config{
 			DefaultAddrPool: conf.DefaultAddressPool,
 			SubnetSize:      conf.SubnetSize,
 			VXLANUDPPort:    conf.DataPathPort,
@ -144,7 +146,8 @@ func (n *nodeRunner) start(conf nodeStartConfig) error {
 		ElectionTick:     n.cluster.config.RaftElectionTick,
 		UnlockKey:        conf.lockKey,
 		AutoLockManagers: conf.autolock,
-		PluginGetter:     n.cluster.config.Backend.PluginGetter(),
+		PluginGetter:     convert.SwarmPluginGetter(n.cluster.config.Backend.PluginGetter()),
+		NetworkProvider:  cnmallocator.NewProvider(n.cluster.config.Backend.PluginGetter()),
 	}
 	if conf.availability != "" {
 		avail, ok := swarmapi.NodeSpec_Availability_value[strings.ToUpper(string(conf.availability))]
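For reference, the networkallocator.Config literal built above presumably carries the same three fields as the NetworkConfig struct that the networkallocator.go hunks below remove. This sketch mirrors that removed struct and is not copied from Swarmkit:

	type Config struct {
		DefaultAddrPool []string // default subnet pool for global scope networks
		SubnetSize      uint32   // subnet size of networks carved from the default pool
		VXLANUDPPort    uint32   // UDP port number for VXLAN traffic
	}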
libnetwork/cnmallocator/allocator_test.go (new file, 14 lines)
@ -0,0 +1,14 @@
package cnmallocator

import (
	"runtime"
	"testing"

	"github.com/moby/swarmkit/v2/manager/allocator"
	"gotest.tools/v3/skip"
)

func TestAllocator(t *testing.T) {
	skip.If(t, runtime.GOOS == "windows", "Allocator tests are hardcoded to use Linux network driver names")
	allocator.RunAllocatorTests(t, NewProvider(nil))
}
@ -11,6 +11,6 @@ var initializers = map[string]func(driverapi.Registerer) error{
 }
 
 // PredefinedNetworks returns the list of predefined network structures
-func PredefinedNetworks() []networkallocator.PredefinedNetworkData {
+func (*Provider) PredefinedNetworks() []networkallocator.PredefinedNetworkData {
 	return nil
 }
@ -5,14 +5,15 @@ import (
 	"strconv"
 	"strings"
 
+	"github.com/containerd/log"
 	"github.com/docker/docker/libnetwork/ipamapi"
 	builtinIpam "github.com/docker/docker/libnetwork/ipams/builtin"
 	nullIpam "github.com/docker/docker/libnetwork/ipams/null"
 	"github.com/docker/docker/libnetwork/ipamutils"
-	"github.com/moby/swarmkit/v2/log"
+	"github.com/moby/swarmkit/v2/manager/allocator/networkallocator"
 )
 
-func initIPAMDrivers(r ipamapi.Registerer, netConfig *NetworkConfig) error {
+func initIPAMDrivers(r ipamapi.Registerer, netConfig *networkallocator.Config) error {
 	var addressPool []*ipamutils.NetworkToSplit
 	var str strings.Builder
 	str.WriteString("Subnetlist - ")
@ -19,7 +19,7 @@ var initializers = map[string]func(driverapi.Registerer) error{
 }
 
 // PredefinedNetworks returns the list of predefined network structures
-func PredefinedNetworks() []networkallocator.PredefinedNetworkData {
+func (*Provider) PredefinedNetworks() []networkallocator.PredefinedNetworkData {
 	return []networkallocator.PredefinedNetworkData{
 		{Name: "bridge", Driver: "bridge"},
 		{Name: "host", Driver: "host"},
@ -14,7 +14,7 @@ var initializers = map[string]func(driverapi.Registerer) error{
 }
 
 // PredefinedNetworks returns the list of predefined network structures
-func PredefinedNetworks() []networkallocator.PredefinedNetworkData {
+func (*Provider) PredefinedNetworks() []networkallocator.PredefinedNetworkData {
 	return []networkallocator.PredefinedNetworkData{
 		{Name: "nat", Driver: "nat"},
 	}
@ -10,6 +10,6 @@ import (
 const initializers = nil
 
 // PredefinedNetworks returns the list of predefined network structures
-func PredefinedNetworks() []networkallocator.PredefinedNetworkData {
+func (*Provider) PredefinedNetworks() []networkallocator.PredefinedNetworkData {
 	return nil
 }
@ -6,6 +6,7 @@ import (
 	"net"
 	"strings"
 
+	"github.com/containerd/log"
 	"github.com/docker/docker/libnetwork/driverapi"
 	"github.com/docker/docker/libnetwork/drivers/remote"
 	"github.com/docker/docker/libnetwork/drvregistry"
@ -15,7 +16,6 @@ import (
 	"github.com/docker/docker/libnetwork/scope"
 	"github.com/docker/docker/pkg/plugingetter"
 	"github.com/moby/swarmkit/v2/api"
-	"github.com/moby/swarmkit/v2/log"
 	"github.com/moby/swarmkit/v2/manager/allocator/networkallocator"
 	"github.com/pkg/errors"
 )
@ -40,9 +40,6 @@ type cnmNetworkAllocator struct {
 	// The driver registry for all internal and external network drivers.
 	networkRegistry drvregistry.Networks
 
-	// The port allocator instance for allocating node ports
-	portAllocator *portAllocator
-
 	// Local network state used by cnmNetworkAllocator to do network management.
 	networks map[string]*network
 
@ -87,27 +84,14 @@ type networkDriver struct {
 	capability *driverapi.Capability
 }
 
-// NetworkConfig is used to store network related cluster config in the Manager.
-type NetworkConfig struct {
-	// DefaultAddrPool specifies default subnet pool for global scope networks
-	DefaultAddrPool []string
-
-	// SubnetSize specifies the subnet size of the networks created from
-	// the default subnet pool
-	SubnetSize uint32
-
-	// VXLANUDPPort specifies the UDP port number for VXLAN traffic
-	VXLANUDPPort uint32
-}
-
-// New returns a new NetworkAllocator handle
-func New(pg plugingetter.PluginGetter, netConfig *NetworkConfig) (networkallocator.NetworkAllocator, error) {
+// NewAllocator returns a new NetworkAllocator handle
+func (p *Provider) NewAllocator(netConfig *networkallocator.Config) (networkallocator.NetworkAllocator, error) {
 	na := &cnmNetworkAllocator{
 		networks: make(map[string]*network),
 		services: make(map[string]struct{}),
 		tasks:    make(map[string]struct{}),
 		nodes:    make(map[string]map[string]struct{}),
-		pg:       pg,
+		pg:       p.pg,
 	}
 
 	for ntype, i := range initializers {
@ -115,23 +99,17 @@ func New(pg plugingetter.PluginGetter, netConfig *NetworkConfig) (networkallocat
 			return nil, fmt.Errorf("failed to register %q network driver: %w", ntype, err)
 		}
 	}
-	if err := remote.Register(&na.networkRegistry, pg); err != nil {
+	if err := remote.Register(&na.networkRegistry, p.pg); err != nil {
 		return nil, fmt.Errorf("failed to initialize network driver plugins: %w", err)
 	}
 
 	if err := initIPAMDrivers(&na.ipamRegistry, netConfig); err != nil {
 		return nil, err
 	}
-	if err := remoteipam.Register(&na.ipamRegistry, pg); err != nil {
+	if err := remoteipam.Register(&na.ipamRegistry, p.pg); err != nil {
 		return nil, fmt.Errorf("failed to initialize IPAM driver plugins: %w", err)
 	}
 
-	pa, err := newPortAllocator()
-	if err != nil {
-		return nil, err
-	}
-
-	na.portAllocator = pa
 	return na, nil
 }
 
@ -209,11 +187,8 @@ func (na *cnmNetworkAllocator) Deallocate(n *api.Network) error {
 }
 
 // AllocateService allocates all the network resources such as virtual
-// IP and ports needed by the service.
+// IP needed by the service.
 func (na *cnmNetworkAllocator) AllocateService(s *api.Service) (err error) {
-	if err = na.portAllocator.serviceAllocatePorts(s); err != nil {
-		return err
-	}
 	defer func() {
 		if err != nil {
 			na.DeallocateService(s)
@ -300,7 +275,7 @@ networkLoop:
 	}
 
 // DeallocateService de-allocates all the network resources such as
-// virtual IP and ports associated with the service.
+// virtual IP associated with the service.
 func (na *cnmNetworkAllocator) DeallocateService(s *api.Service) error {
 	if s.Endpoint == nil {
 		return nil
@ -316,7 +291,6 @@ func (na *cnmNetworkAllocator) DeallocateService(s *api.Service) error {
 	}
 	s.Endpoint.VirtualIPs = nil
 
-	na.portAllocator.serviceDeallocatePorts(s)
 	delete(na.services, s.ID)
 
 	return nil
@ -373,19 +347,8 @@ func (na *cnmNetworkAllocator) IsTaskAllocated(t *api.Task) bool {
 	return true
 }
 
-// HostPublishPortsNeedUpdate returns true if the passed service needs
-// allocations for its published ports in host (non ingress) mode
-func (na *cnmNetworkAllocator) HostPublishPortsNeedUpdate(s *api.Service) bool {
-	return na.portAllocator.hostPublishPortsNeedUpdate(s)
-}
-
 // IsServiceAllocated returns false if the passed service needs to have network resources allocated/updated.
 func (na *cnmNetworkAllocator) IsServiceAllocated(s *api.Service, flags ...func(*networkallocator.ServiceAllocationOpts)) bool {
-	var options networkallocator.ServiceAllocationOpts
-	for _, flag := range flags {
-		flag(&options)
-	}
-
 	specNetworks := serviceNetworks(s)
 
 	// If endpoint mode is VIP and allocator does not have the
@ -447,10 +410,6 @@ func (na *cnmNetworkAllocator) IsServiceAllocated(s *api.Service, flags ...func(
 		}
 	}
 
-	if (s.Spec.Endpoint != nil && len(s.Spec.Endpoint.Ports) != 0) ||
-		(s.Endpoint != nil && len(s.Endpoint.Ports) != 0) {
-		return na.portAllocator.isPortsAllocatedOnInit(s, options.OnInit)
-	}
 	return true
 }
 
libnetwork/cnmallocator/networkallocator_test.go (new file, 788 lines)
@ -0,0 +1,788 @@
|
|||
package cnmallocator
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/libnetwork/types"
|
||||
"github.com/moby/swarmkit/v2/api"
|
||||
"github.com/moby/swarmkit/v2/manager/allocator/networkallocator"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func newNetworkAllocator(t *testing.T) networkallocator.NetworkAllocator {
|
||||
na, err := (&Provider{}).NewAllocator(nil)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, na)
|
||||
return na
|
||||
}
|
||||
|
||||
func TestNew(t *testing.T) {
|
||||
newNetworkAllocator(t)
|
||||
}
|
||||
|
||||
func TestAllocateInvalidIPAM(t *testing.T) {
|
||||
na := newNetworkAllocator(t)
|
||||
n := &api.Network{
|
||||
ID: "testID",
|
||||
Spec: api.NetworkSpec{
|
||||
Annotations: api.Annotations{
|
||||
Name: "test",
|
||||
},
|
||||
DriverConfig: &api.Driver{},
|
||||
IPAM: &api.IPAMOptions{
|
||||
Driver: &api.Driver{
|
||||
Name: "invalidipam,",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
err := na.Allocate(n)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestAllocateInvalidDriver(t *testing.T) {
|
||||
na := newNetworkAllocator(t)
|
||||
n := &api.Network{
|
||||
ID: "testID",
|
||||
Spec: api.NetworkSpec{
|
||||
Annotations: api.Annotations{
|
||||
Name: "test",
|
||||
},
|
||||
DriverConfig: &api.Driver{
|
||||
Name: "invaliddriver",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := na.Allocate(n)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestNetworkDoubleAllocate(t *testing.T) {
|
||||
na := newNetworkAllocator(t)
|
||||
n := &api.Network{
|
||||
ID: "testID",
|
||||
Spec: api.NetworkSpec{
|
||||
Annotations: api.Annotations{
|
||||
Name: "test",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := na.Allocate(n)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = na.Allocate(n)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestAllocateEmptyConfig(t *testing.T) {
|
||||
na1 := newNetworkAllocator(t)
|
||||
na2 := newNetworkAllocator(t)
|
||||
n1 := &api.Network{
|
||||
ID: "testID1",
|
||||
Spec: api.NetworkSpec{
|
||||
Annotations: api.Annotations{
|
||||
Name: "test1",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
n2 := &api.Network{
|
||||
ID: "testID2",
|
||||
Spec: api.NetworkSpec{
|
||||
Annotations: api.Annotations{
|
||||
Name: "test2",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := na1.Allocate(n1)
|
||||
assert.NoError(t, err)
|
||||
assert.NotEqual(t, n1.IPAM.Configs, nil)
|
||||
assert.Equal(t, len(n1.IPAM.Configs), 1)
|
||||
assert.Equal(t, n1.IPAM.Configs[0].Range, "")
|
||||
assert.Equal(t, len(n1.IPAM.Configs[0].Reserved), 0)
|
||||
|
||||
_, subnet11, err := net.ParseCIDR(n1.IPAM.Configs[0].Subnet)
|
||||
assert.NoError(t, err)
|
||||
|
||||
gwip11 := net.ParseIP(n1.IPAM.Configs[0].Gateway)
|
||||
assert.NotEqual(t, gwip11, nil)
|
||||
|
||||
err = na1.Allocate(n2)
|
||||
assert.NoError(t, err)
|
||||
assert.NotEqual(t, n2.IPAM.Configs, nil)
|
||||
assert.Equal(t, len(n2.IPAM.Configs), 1)
|
||||
assert.Equal(t, n2.IPAM.Configs[0].Range, "")
|
||||
assert.Equal(t, len(n2.IPAM.Configs[0].Reserved), 0)
|
||||
|
||||
_, subnet21, err := net.ParseCIDR(n2.IPAM.Configs[0].Subnet)
|
||||
assert.NoError(t, err)
|
||||
|
||||
gwip21 := net.ParseIP(n2.IPAM.Configs[0].Gateway)
|
||||
assert.NotEqual(t, gwip21, nil)
|
||||
|
||||
// Allocate n1 and n2 with another allocator instance but in
|
||||
// intentionally reverse order.
|
||||
err = na2.Allocate(n2)
|
||||
assert.NoError(t, err)
|
||||
assert.NotEqual(t, n2.IPAM.Configs, nil)
|
||||
assert.Equal(t, len(n2.IPAM.Configs), 1)
|
||||
assert.Equal(t, n2.IPAM.Configs[0].Range, "")
|
||||
assert.Equal(t, len(n2.IPAM.Configs[0].Reserved), 0)
|
||||
|
||||
_, subnet22, err := net.ParseCIDR(n2.IPAM.Configs[0].Subnet)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, subnet21, subnet22)
|
||||
|
||||
gwip22 := net.ParseIP(n2.IPAM.Configs[0].Gateway)
|
||||
assert.Equal(t, gwip21, gwip22)
|
||||
|
||||
err = na2.Allocate(n1)
|
||||
assert.NoError(t, err)
|
||||
assert.NotEqual(t, n1.IPAM.Configs, nil)
|
||||
assert.Equal(t, len(n1.IPAM.Configs), 1)
|
||||
assert.Equal(t, n1.IPAM.Configs[0].Range, "")
|
||||
assert.Equal(t, len(n1.IPAM.Configs[0].Reserved), 0)
|
||||
|
||||
_, subnet12, err := net.ParseCIDR(n1.IPAM.Configs[0].Subnet)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, subnet11, subnet12)
|
||||
|
||||
gwip12 := net.ParseIP(n1.IPAM.Configs[0].Gateway)
|
||||
assert.Equal(t, gwip11, gwip12)
|
||||
}
|
||||
|
||||
func TestAllocateWithOneSubnet(t *testing.T) {
|
||||
na := newNetworkAllocator(t)
|
||||
n := &api.Network{
|
||||
ID: "testID",
|
||||
Spec: api.NetworkSpec{
|
||||
Annotations: api.Annotations{
|
||||
Name: "test",
|
||||
},
|
||||
DriverConfig: &api.Driver{},
|
||||
IPAM: &api.IPAMOptions{
|
||||
Driver: &api.Driver{},
|
||||
Configs: []*api.IPAMConfig{
|
||||
{
|
||||
Subnet: "192.168.1.0/24",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := na.Allocate(n)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, len(n.IPAM.Configs), 1)
|
||||
assert.Equal(t, n.IPAM.Configs[0].Range, "")
|
||||
assert.Equal(t, len(n.IPAM.Configs[0].Reserved), 0)
|
||||
assert.Equal(t, n.IPAM.Configs[0].Subnet, "192.168.1.0/24")
|
||||
|
||||
ip := net.ParseIP(n.IPAM.Configs[0].Gateway)
|
||||
assert.NotEqual(t, ip, nil)
|
||||
}
|
||||
|
||||
func TestAllocateWithOneSubnetGateway(t *testing.T) {
|
||||
na := newNetworkAllocator(t)
|
||||
n := &api.Network{
|
||||
ID: "testID",
|
||||
Spec: api.NetworkSpec{
|
||||
Annotations: api.Annotations{
|
||||
Name: "test",
|
||||
},
|
||||
DriverConfig: &api.Driver{},
|
||||
IPAM: &api.IPAMOptions{
|
||||
Driver: &api.Driver{},
|
||||
Configs: []*api.IPAMConfig{
|
||||
{
|
||||
Subnet: "192.168.1.0/24",
|
||||
Gateway: "192.168.1.1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := na.Allocate(n)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, len(n.IPAM.Configs), 1)
|
||||
assert.Equal(t, n.IPAM.Configs[0].Range, "")
|
||||
assert.Equal(t, len(n.IPAM.Configs[0].Reserved), 0)
|
||||
assert.Equal(t, n.IPAM.Configs[0].Subnet, "192.168.1.0/24")
|
||||
assert.Equal(t, n.IPAM.Configs[0].Gateway, "192.168.1.1")
|
||||
}
|
||||
|
||||
func TestAllocateWithOneSubnetInvalidGateway(t *testing.T) {
|
||||
na := newNetworkAllocator(t)
|
||||
n := &api.Network{
|
||||
ID: "testID",
|
||||
Spec: api.NetworkSpec{
|
||||
Annotations: api.Annotations{
|
||||
Name: "test",
|
||||
},
|
||||
DriverConfig: &api.Driver{},
|
||||
IPAM: &api.IPAMOptions{
|
||||
Driver: &api.Driver{},
|
||||
Configs: []*api.IPAMConfig{
|
||||
{
|
||||
Subnet: "192.168.1.0/24",
|
||||
Gateway: "192.168.2.1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := na.Allocate(n)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
// TestAllocateWithSmallSubnet validates that /32 subnets don't produce an error,
|
||||
// as /31 and /32 subnets are supported by docker daemon, starting with
|
||||
// https://github.com/moby/moby/commit/3a938df4b570aad3bfb4d5342379582e872fc1a3,
|
||||
func TestAllocateWithSmallSubnet(t *testing.T) {
|
||||
na := newNetworkAllocator(t)
|
||||
n := &api.Network{
|
||||
ID: "testID",
|
||||
Spec: api.NetworkSpec{
|
||||
Annotations: api.Annotations{
|
||||
Name: "test",
|
||||
},
|
||||
DriverConfig: &api.Driver{},
|
||||
IPAM: &api.IPAMOptions{
|
||||
Driver: &api.Driver{},
|
||||
Configs: []*api.IPAMConfig{
|
||||
{
|
||||
Subnet: "1.1.1.1/32",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := na.Allocate(n)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestAllocateWithTwoSubnetsNoGateway(t *testing.T) {
|
||||
na := newNetworkAllocator(t)
|
||||
n := &api.Network{
|
||||
ID: "testID",
|
||||
Spec: api.NetworkSpec{
|
||||
Annotations: api.Annotations{
|
||||
Name: "test",
|
||||
},
|
||||
DriverConfig: &api.Driver{},
|
||||
IPAM: &api.IPAMOptions{
|
||||
Driver: &api.Driver{},
|
||||
Configs: []*api.IPAMConfig{
|
||||
{
|
||||
Subnet: "192.168.1.0/24",
|
||||
},
|
||||
{
|
||||
Subnet: "192.168.2.0/24",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := na.Allocate(n)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, len(n.IPAM.Configs), 2)
|
||||
assert.Equal(t, n.IPAM.Configs[0].Range, "")
|
||||
assert.Equal(t, len(n.IPAM.Configs[0].Reserved), 0)
|
||||
assert.Equal(t, n.IPAM.Configs[0].Subnet, "192.168.1.0/24")
|
||||
assert.Equal(t, n.IPAM.Configs[1].Range, "")
|
||||
assert.Equal(t, len(n.IPAM.Configs[1].Reserved), 0)
|
||||
assert.Equal(t, n.IPAM.Configs[1].Subnet, "192.168.2.0/24")
|
||||
|
||||
ip := net.ParseIP(n.IPAM.Configs[0].Gateway)
|
||||
assert.NotEqual(t, ip, nil)
|
||||
ip = net.ParseIP(n.IPAM.Configs[1].Gateway)
|
||||
assert.NotEqual(t, ip, nil)
|
||||
}
|
||||
|
||||
func TestFree(t *testing.T) {
|
||||
na := newNetworkAllocator(t)
|
||||
n := &api.Network{
|
||||
ID: "testID",
|
||||
Spec: api.NetworkSpec{
|
||||
Annotations: api.Annotations{
|
||||
Name: "test",
|
||||
},
|
||||
DriverConfig: &api.Driver{},
|
||||
IPAM: &api.IPAMOptions{
|
||||
Driver: &api.Driver{},
|
||||
Configs: []*api.IPAMConfig{
|
||||
{
|
||||
Subnet: "192.168.1.0/24",
|
||||
Gateway: "192.168.1.1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := na.Allocate(n)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = na.Deallocate(n)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Reallocate again to make sure it succeeds.
|
||||
err = na.Allocate(n)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestAllocateTaskFree(t *testing.T) {
|
||||
na1 := newNetworkAllocator(t)
|
||||
na2 := newNetworkAllocator(t)
|
||||
n1 := &api.Network{
|
||||
ID: "testID1",
|
||||
Spec: api.NetworkSpec{
|
||||
Annotations: api.Annotations{
|
||||
Name: "test1",
|
||||
},
|
||||
DriverConfig: &api.Driver{},
|
||||
IPAM: &api.IPAMOptions{
|
||||
Driver: &api.Driver{},
|
||||
Configs: []*api.IPAMConfig{
|
||||
{
|
||||
Subnet: "192.168.1.0/24",
|
||||
Gateway: "192.168.1.1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
n2 := &api.Network{
|
||||
ID: "testID2",
|
||||
Spec: api.NetworkSpec{
|
||||
Annotations: api.Annotations{
|
||||
Name: "test2",
|
||||
},
|
||||
DriverConfig: &api.Driver{},
|
||||
IPAM: &api.IPAMOptions{
|
||||
Driver: &api.Driver{},
|
||||
Configs: []*api.IPAMConfig{
|
||||
{
|
||||
Subnet: "192.168.2.0/24",
|
||||
Gateway: "192.168.2.1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
task1 := &api.Task{
|
||||
Networks: []*api.NetworkAttachment{
|
||||
{
|
||||
Network: n1,
|
||||
},
|
||||
{
|
||||
Network: n2,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
task2 := &api.Task{
|
||||
Networks: []*api.NetworkAttachment{
|
||||
{
|
||||
Network: n1,
|
||||
},
|
||||
{
|
||||
Network: n2,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := na1.Allocate(n1)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = na1.Allocate(n2)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = na1.AllocateTask(task1)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, len(task1.Networks[0].Addresses), 1)
|
||||
assert.Equal(t, len(task1.Networks[1].Addresses), 1)
|
||||
|
||||
_, subnet1, _ := net.ParseCIDR("192.168.1.0/24")
|
||||
_, subnet2, _ := net.ParseCIDR("192.168.2.0/24")
|
||||
|
||||
// variable coding: network/task/allocator
|
||||
ip111, _, err := net.ParseCIDR(task1.Networks[0].Addresses[0])
|
||||
assert.NoError(t, err)
|
||||
|
||||
ip211, _, err := net.ParseCIDR(task1.Networks[1].Addresses[0])
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Equal(t, subnet1.Contains(ip111), true)
|
||||
assert.Equal(t, subnet2.Contains(ip211), true)
|
||||
|
||||
err = na1.AllocateTask(task2)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, len(task2.Networks[0].Addresses), 1)
|
||||
assert.Equal(t, len(task2.Networks[1].Addresses), 1)
|
||||
|
||||
ip121, _, err := net.ParseCIDR(task2.Networks[0].Addresses[0])
|
||||
assert.NoError(t, err)
|
||||
|
||||
ip221, _, err := net.ParseCIDR(task2.Networks[1].Addresses[0])
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Equal(t, subnet1.Contains(ip121), true)
|
||||
assert.Equal(t, subnet2.Contains(ip221), true)
|
||||
|
||||
// Now allocate the same tasks in a second allocator
|
||||
// but intentionally in reverse order.
|
||||
err = na2.Allocate(n1)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = na2.Allocate(n2)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = na2.AllocateTask(task2)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, len(task2.Networks[0].Addresses), 1)
|
||||
assert.Equal(t, len(task2.Networks[1].Addresses), 1)
|
||||
|
||||
ip122, _, err := net.ParseCIDR(task2.Networks[0].Addresses[0])
|
||||
assert.NoError(t, err)
|
||||
|
||||
ip222, _, err := net.ParseCIDR(task2.Networks[1].Addresses[0])
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Equal(t, subnet1.Contains(ip122), true)
|
||||
assert.Equal(t, subnet2.Contains(ip222), true)
|
||||
assert.Equal(t, ip121, ip122)
|
||||
assert.Equal(t, ip221, ip222)
|
||||
|
||||
err = na2.AllocateTask(task1)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, len(task1.Networks[0].Addresses), 1)
|
||||
assert.Equal(t, len(task1.Networks[1].Addresses), 1)
|
||||
|
||||
ip112, _, err := net.ParseCIDR(task1.Networks[0].Addresses[0])
|
||||
assert.NoError(t, err)
|
||||
|
||||
ip212, _, err := net.ParseCIDR(task1.Networks[1].Addresses[0])
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Equal(t, subnet1.Contains(ip112), true)
|
||||
assert.Equal(t, subnet2.Contains(ip212), true)
|
||||
assert.Equal(t, ip111, ip112)
|
||||
assert.Equal(t, ip211, ip212)
|
||||
|
||||
// Deallocate task
|
||||
err = na1.DeallocateTask(task1)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, len(task1.Networks[0].Addresses), 0)
|
||||
assert.Equal(t, len(task1.Networks[1].Addresses), 0)
|
||||
|
||||
// Try allocation after free
|
||||
err = na1.AllocateTask(task1)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, len(task1.Networks[0].Addresses), 1)
|
||||
assert.Equal(t, len(task1.Networks[1].Addresses), 1)
|
||||
|
||||
ip111, _, err = net.ParseCIDR(task1.Networks[0].Addresses[0])
|
||||
assert.NoError(t, err)
|
||||
|
||||
ip211, _, err = net.ParseCIDR(task1.Networks[1].Addresses[0])
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Equal(t, subnet1.Contains(ip111), true)
|
||||
assert.Equal(t, subnet2.Contains(ip211), true)
|
||||
|
||||
err = na1.DeallocateTask(task1)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, len(task1.Networks[0].Addresses), 0)
|
||||
assert.Equal(t, len(task1.Networks[1].Addresses), 0)
|
||||
|
||||
// Try to free endpoints on an already freed task
|
||||
err = na1.DeallocateTask(task1)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestAllocateService(t *testing.T) {
|
||||
na := newNetworkAllocator(t)
|
||||
n := &api.Network{
|
||||
ID: "testID",
|
||||
Spec: api.NetworkSpec{
|
||||
Annotations: api.Annotations{
|
||||
Name: "test",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
s := &api.Service{
|
||||
ID: "testID1",
|
||||
Spec: api.ServiceSpec{
|
||||
Task: api.TaskSpec{
|
||||
Networks: []*api.NetworkAttachmentConfig{
|
||||
{
|
||||
Target: "testID",
|
||||
},
|
||||
},
|
||||
},
|
||||
Endpoint: &api.EndpointSpec{
|
||||
Ports: []*api.PortConfig{
|
||||
{
|
||||
Name: "http",
|
||||
TargetPort: 80,
|
||||
},
|
||||
{
|
||||
Name: "https",
|
||||
TargetPort: 443,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := na.Allocate(n)
|
||||
assert.NoError(t, err)
|
||||
assert.NotEqual(t, n.IPAM.Configs, nil)
|
||||
assert.Equal(t, len(n.IPAM.Configs), 1)
|
||||
assert.Equal(t, n.IPAM.Configs[0].Range, "")
|
||||
assert.Equal(t, len(n.IPAM.Configs[0].Reserved), 0)
|
||||
|
||||
_, subnet, err := net.ParseCIDR(n.IPAM.Configs[0].Subnet)
|
||||
assert.NoError(t, err)
|
||||
|
||||
gwip := net.ParseIP(n.IPAM.Configs[0].Gateway)
|
||||
assert.NotEqual(t, gwip, nil)
|
||||
|
||||
err = na.AllocateService(s)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, s.Endpoint.Ports, 0) // Network allocator is not responsible for allocating ports.
|
||||
|
||||
assert.Equal(t, 1, len(s.Endpoint.VirtualIPs))
|
||||
|
||||
assert.Equal(t, s.Endpoint.Spec, s.Spec.Endpoint)
|
||||
|
||||
ip, _, err := net.ParseCIDR(s.Endpoint.VirtualIPs[0].Addr)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Equal(t, true, subnet.Contains(ip))
|
||||
}
|
||||
|
||||
func TestDeallocateServiceAllocateIngressMode(t *testing.T) {
|
||||
na := newNetworkAllocator(t)
|
||||
|
||||
n := &api.Network{
|
||||
ID: "testNetID1",
|
||||
Spec: api.NetworkSpec{
|
||||
Annotations: api.Annotations{
|
||||
Name: "test",
|
||||
},
|
||||
Ingress: true,
|
||||
},
|
||||
}
|
||||
|
||||
err := na.Allocate(n)
|
||||
assert.NoError(t, err)
|
||||
|
||||
s := &api.Service{
|
||||
ID: "testID1",
|
||||
Spec: api.ServiceSpec{
|
||||
Endpoint: &api.EndpointSpec{
|
||||
Ports: []*api.PortConfig{
|
||||
{
|
||||
Name: "some_tcp",
|
||||
TargetPort: 1234,
|
||||
PublishedPort: 1234,
|
||||
PublishMode: api.PublishModeIngress,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Endpoint: &api.Endpoint{},
|
||||
}
|
||||
|
||||
s.Endpoint.VirtualIPs = append(s.Endpoint.VirtualIPs,
|
||||
&api.Endpoint_VirtualIP{NetworkID: n.ID})
|
||||
|
||||
err = na.AllocateService(s)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, s.Endpoint.VirtualIPs, 1)
|
||||
|
||||
err = na.DeallocateService(s)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, s.Endpoint.Ports, 0)
|
||||
assert.Len(t, s.Endpoint.VirtualIPs, 0)
|
||||
// Allocate again.
|
||||
s.Endpoint.VirtualIPs = append(s.Endpoint.VirtualIPs,
|
||||
&api.Endpoint_VirtualIP{NetworkID: n.ID})
|
||||
|
||||
err = na.AllocateService(s)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, s.Endpoint.VirtualIPs, 1)
|
||||
}
|
||||
|
||||
func TestServiceNetworkUpdate(t *testing.T) {
|
||||
na := newNetworkAllocator(t)
|
||||
|
||||
n1 := &api.Network{
|
||||
ID: "testID1",
|
||||
Spec: api.NetworkSpec{
|
||||
Annotations: api.Annotations{
|
||||
Name: "test",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
n2 := &api.Network{
|
||||
ID: "testID2",
|
||||
Spec: api.NetworkSpec{
|
||||
Annotations: api.Annotations{
|
||||
Name: "test2",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Allocate both networks
|
||||
err := na.Allocate(n1)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = na.Allocate(n2)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Attach a network to a service spec and allocate a service
|
||||
s := &api.Service{
|
||||
ID: "testID1",
|
||||
Spec: api.ServiceSpec{
|
||||
Task: api.TaskSpec{
|
||||
Networks: []*api.NetworkAttachmentConfig{
|
||||
{
|
||||
Target: "testID1",
|
||||
},
|
||||
},
|
||||
},
|
||||
Endpoint: &api.EndpointSpec{
|
||||
Mode: api.ResolutionModeVirtualIP,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err = na.AllocateService(s)
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, na.IsServiceAllocated(s))
|
||||
assert.Len(t, s.Endpoint.VirtualIPs, 1)
|
||||
|
||||
// Now update the same service with another network
|
||||
s.Spec.Task.Networks = append(s.Spec.Task.Networks, &api.NetworkAttachmentConfig{Target: "testID2"})
|
||||
|
||||
assert.False(t, na.IsServiceAllocated(s))
|
||||
err = na.AllocateService(s)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.True(t, na.IsServiceAllocated(s))
|
||||
assert.Len(t, s.Endpoint.VirtualIPs, 2)
|
||||
|
||||
s.Spec.Task.Networks = s.Spec.Task.Networks[:1]
|
||||
|
||||
// Check if service needs update and allocate with updated service spec
|
||||
assert.False(t, na.IsServiceAllocated(s))
|
||||
|
||||
err = na.AllocateService(s)
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, na.IsServiceAllocated(s))
|
||||
assert.Len(t, s.Endpoint.VirtualIPs, 1)
|
||||
|
||||
s.Spec.Task.Networks = s.Spec.Task.Networks[:0]
|
||||
// Check if service needs update with all the networks removed and allocate with updated service spec
|
||||
assert.False(t, na.IsServiceAllocated(s))
|
||||
|
||||
err = na.AllocateService(s)
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, na.IsServiceAllocated(s))
|
||||
assert.Len(t, s.Endpoint.VirtualIPs, 0)
|
||||
|
||||
// Attach a network and allocate service
|
||||
s.Spec.Task.Networks = append(s.Spec.Task.Networks, &api.NetworkAttachmentConfig{Target: "testID2"})
|
||||
assert.False(t, na.IsServiceAllocated(s))
|
||||
|
||||
err = na.AllocateService(s)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.True(t, na.IsServiceAllocated(s))
|
||||
assert.Len(t, s.Endpoint.VirtualIPs, 1)
|
||||
|
||||
}
|
||||
|
||||
type mockIpam struct {
|
||||
actualIpamOptions map[string]string
|
||||
}
|
||||
|
||||
func (a *mockIpam) GetDefaultAddressSpaces() (string, string, error) {
|
||||
return "defaultAS", "defaultAS", nil
|
||||
}
|
||||
|
||||
func (a *mockIpam) RequestPool(addressSpace, pool, subPool string, options map[string]string, v6 bool) (string, *net.IPNet, map[string]string, error) {
|
||||
a.actualIpamOptions = options
|
||||
|
||||
poolCidr, _ := types.ParseCIDR(pool)
|
||||
return fmt.Sprintf("%s/%s", "defaultAS", pool), poolCidr, nil, nil
|
||||
}
|
||||
|
||||
func (a *mockIpam) ReleasePool(poolID string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *mockIpam) RequestAddress(poolID string, ip net.IP, opts map[string]string) (*net.IPNet, map[string]string, error) {
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
func (a *mockIpam) ReleaseAddress(poolID string, ip net.IP) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *mockIpam) IsBuiltIn() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func TestCorrectlyPassIPAMOptions(t *testing.T) {
|
||||
var err error
|
||||
expectedIpamOptions := map[string]string{"network-name": "freddie"}
|
||||
|
||||
na := newNetworkAllocator(t)
|
||||
ipamDriver := &mockIpam{}
|
||||
|
||||
err = na.(*cnmNetworkAllocator).ipamRegistry.RegisterIpamDriver("mockipam", ipamDriver)
|
||||
assert.NoError(t, err)
|
||||
|
||||
n := &api.Network{
|
||||
ID: "testID",
|
||||
Spec: api.NetworkSpec{
|
||||
Annotations: api.Annotations{
|
||||
Name: "test",
|
||||
},
|
||||
DriverConfig: &api.Driver{},
|
||||
IPAM: &api.IPAMOptions{
|
||||
Driver: &api.Driver{
|
||||
Name: "mockipam",
|
||||
Options: expectedIpamOptions,
|
||||
},
|
||||
Configs: []*api.IPAMConfig{
|
||||
{
|
||||
Subnet: "192.168.1.0/24",
|
||||
Gateway: "192.168.1.1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
err = na.Allocate(n)
|
||||
|
||||
assert.Equal(t, expectedIpamOptions, ipamDriver.actualIpamOptions)
|
||||
assert.NoError(t, err)
|
||||
}
|
libnetwork/cnmallocator/provider.go (new file, 91 lines)
@ -0,0 +1,91 @@
package cnmallocator

import (
	"strings"

	"github.com/docker/docker/libnetwork/driverapi"
	"github.com/docker/docker/libnetwork/drivers/overlay/overlayutils"
	"github.com/docker/docker/libnetwork/ipamapi"
	"github.com/docker/docker/pkg/plugingetter"
	"github.com/moby/swarmkit/v2/api"
	"github.com/moby/swarmkit/v2/manager/allocator/networkallocator"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

type Provider struct {
	pg plugingetter.PluginGetter
}

var _ networkallocator.Provider = &Provider{}

// NewProvider returns a new cnmallocator provider.
func NewProvider(pg plugingetter.PluginGetter) *Provider {
	return &Provider{pg: pg}
}

// ValidateIPAMDriver implements networkallocator.NetworkProvider.
func (p *Provider) ValidateIPAMDriver(driver *api.Driver) error {
	if driver == nil {
		// It is ok to not specify the driver. We will choose
		// a default driver.
		return nil
	}

	if driver.Name == "" {
		return status.Errorf(codes.InvalidArgument, "driver name: if driver is specified name is required")
	}
	if strings.ToLower(driver.Name) == ipamapi.DefaultIPAM {
		return nil
	}
	return p.validatePluginDriver(driver, ipamapi.PluginEndpointType)
}

// ValidateIngressNetworkDriver implements networkallocator.NetworkProvider.
func (p *Provider) ValidateIngressNetworkDriver(driver *api.Driver) error {
	if driver != nil && driver.Name != "overlay" {
		return status.Errorf(codes.Unimplemented, "only overlay driver is currently supported for ingress network")
	}
	return p.ValidateNetworkDriver(driver)
}

// ValidateNetworkDriver implements networkallocator.NetworkProvider.
func (p *Provider) ValidateNetworkDriver(driver *api.Driver) error {
	if driver == nil {
		// It is ok to not specify the driver. We will choose
		// a default driver.
		return nil
	}

	if driver.Name == "" {
		return status.Errorf(codes.InvalidArgument, "driver name: if driver is specified name is required")
	}

	// First check against the known drivers
	if IsBuiltInDriver(driver.Name) {
		return nil
	}

	return p.validatePluginDriver(driver, driverapi.NetworkPluginEndpointType)
}

func (p *Provider) validatePluginDriver(driver *api.Driver, pluginType string) error {
	if p.pg == nil {
		return status.Errorf(codes.InvalidArgument, "plugin %s not supported", driver.Name)
	}

	plug, err := p.pg.Get(driver.Name, pluginType, plugingetter.Lookup)
	if err != nil {
		return status.Errorf(codes.InvalidArgument, "error during lookup of plugin %s", driver.Name)
	}

	if plug.IsV1() {
		return status.Errorf(codes.InvalidArgument, "legacy plugin %s of type %s is not supported in swarm mode", driver.Name, pluginType)
	}

	return nil
}

func (p *Provider) SetDefaultVXLANUDPPort(port uint32) error {
	return overlayutils.ConfigVXLANUDPPort(port)
}
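A hedged sketch of how this provider is meant to be used: the daemon constructs it (see the nodeRunner.start hunk above) and Swarmkit's manager then drives validation and allocator construction through the networkallocator.Provider interface. The calls below are inferred from the methods in this file and the tests, not taken from Swarmkit:

	p := cnmallocator.NewProvider(pluginGetter) // pluginGetter may be nil to disable plugin lookups
	if err := p.ValidateNetworkDriver(&api.Driver{Name: "overlay"}); err != nil {
		// reject the network spec
	}
	na, err := p.NewAllocator(nil) // nil config: default address pools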
libnetwork/cnmallocator/provider_test.go (new file, 30 lines)
@ -0,0 +1,30 @@
package cnmallocator

import (
	"testing"

	"github.com/moby/swarmkit/v2/api"
	"github.com/moby/swarmkit/v2/testutils"
	"github.com/stretchr/testify/assert"
	"google.golang.org/grpc/codes"
)

func TestValidateDriver(t *testing.T) {
	p := NewProvider(nil)

	for _, tt := range []struct {
		name      string
		validator func(*api.Driver) error
	}{
		{"IPAM", p.ValidateIPAMDriver},
		{"Network", p.ValidateNetworkDriver},
	} {
		t.Run(tt.name, func(t *testing.T) {
			assert.NoError(t, tt.validator(nil))

			err := tt.validator(&api.Driver{Name: ""})
			assert.Error(t, err)
			assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err))
		})
	}
}
@ -66,7 +66,7 @@ require (
 	github.com/moby/locker v1.0.1
 	github.com/moby/patternmatcher v0.6.0
 	github.com/moby/pubsub v1.0.0
-	github.com/moby/swarmkit/v2 v2.0.0-20240125134710-dcda100a8261
+	github.com/moby/swarmkit/v2 v2.0.0-20240227173239-911c97650f2e
 	github.com/moby/sys/mount v0.3.3
 	github.com/moby/sys/mountinfo v0.7.1
 	github.com/moby/sys/sequential v0.5.0
@ -87,6 +87,7 @@ require (
 	github.com/sirupsen/logrus v1.9.3
 	github.com/spf13/cobra v1.8.0
 	github.com/spf13/pflag v1.0.5
+	github.com/stretchr/testify v1.8.4
 	github.com/tonistiigi/fsutil v0.0.0-20240223190444-7a889f53dbf6
 	github.com/tonistiigi/go-archvariant v1.0.0
 	github.com/vbatts/tar-split v0.11.5
@ -132,6 +133,7 @@ require (
 	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 // indirect
 	github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
+	github.com/bits-and-blooms/bitset v1.13.0 // indirect
 	github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect
 	github.com/cenkalti/backoff/v4 v4.2.1 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
@ -147,6 +149,7 @@ require (
 	github.com/containernetworking/cni v1.1.2 // indirect
 	github.com/containernetworking/plugins v1.4.0 // indirect
 	github.com/cyphar/filepath-securejoin v0.2.4 // indirect
+	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/dimchansky/utfbom v1.1.1 // indirect
 	github.com/dustin/go-humanize v1.0.0 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
@ -179,6 +182,7 @@ require (
 	github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626 // indirect
 	github.com/package-url/packageurl-go v0.1.1-0.20220428063043-89078438f170 // indirect
 	github.com/philhofer/fwd v1.1.2 // indirect
+	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/prometheus/client_model v0.5.0 // indirect
 	github.com/prometheus/common v0.44.0 // indirect
 	github.com/prometheus/procfs v0.12.0 // indirect
@ -221,6 +225,7 @@ require (
 	google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
 	k8s.io/klog/v2 v2.90.1 // indirect
 	sigs.k8s.io/yaml v1.3.0 // indirect
 	tags.cncf.io/container-device-interface/specs-go v0.6.0 // indirect
@ -105,6 +105,8 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE=
+github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
 github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
 github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
 github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
@ -490,8 +492,8 @@ github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkV
 github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
 github.com/moby/pubsub v1.0.0 h1:jkp/imWsmJz2f6LyFsk7EkVeN2HxR/HTTOY8kHrsxfA=
 github.com/moby/pubsub v1.0.0/go.mod h1:bXSO+3h5MNXXCaEG+6/NlAIk7MMZbySZlnB+cUQhKKc=
-github.com/moby/swarmkit/v2 v2.0.0-20240125134710-dcda100a8261 h1:mjLf2jYrqtIS4LvLzg0gNyJR4rMXS4X5Bg1A4hOhVMs=
-github.com/moby/swarmkit/v2 v2.0.0-20240125134710-dcda100a8261/go.mod h1:oRJU1d0hrkkwCtouwfQGcIAKcVEkclMYoLWocqrg6gI=
+github.com/moby/swarmkit/v2 v2.0.0-20240227173239-911c97650f2e h1:4FRRm/5kOaCc+ssRBPmmcQM7b0KHdOgqKob93VnvHPs=
+github.com/moby/swarmkit/v2 v2.0.0-20240227173239-911c97650f2e/go.mod h1:kNy225f/gWAnF8wPftteMc5nbAHhrH+HUfvyjmhFjeQ=
 github.com/moby/sys/mount v0.3.3 h1:fX1SVkXFJ47XWDoeFW4Sq7PdQJnV2QIDZAqjNqgEjUs=
 github.com/moby/sys/mount v0.3.3/go.mod h1:PBaEorSNTLG5t/+4EgukEQVlAvVEc6ZjTySwKdqp5K0=
 github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
@ -667,6 +669,8 @@ github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXl
 github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
 github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
 github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/tedsuo/ifrit v0.0.0-20230330192023-5cba443a66c4 h1:MGZzzxBuPuK4J0XQo+0uy0NnXQGKzHXhYp5oG1Wy860=
+github.com/tedsuo/ifrit v0.0.0-20230330192023-5cba443a66c4/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0=
 github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0=
 github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
vendor/code.cloudfoundry.org/clock/fakeclock/fake_clock.go (generated, vendored, new file, 139 lines)
@ -0,0 +1,139 @@
|
|||
package fakeclock
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"code.cloudfoundry.org/clock"
|
||||
)
|
||||
|
||||
type timeWatcher interface {
|
||||
timeUpdated(time.Time)
|
||||
shouldFire(time.Time) bool
|
||||
repeatable() bool
|
||||
}
|
||||
|
||||
type FakeClock struct {
|
||||
now time.Time
|
||||
|
||||
watchers map[timeWatcher]struct{}
|
||||
cond *sync.Cond
|
||||
}
|
||||
|
||||
func NewFakeClock(now time.Time) *FakeClock {
|
||||
return &FakeClock{
|
||||
now: now,
|
||||
watchers: make(map[timeWatcher]struct{}),
|
||||
cond: &sync.Cond{L: &sync.Mutex{}},
|
||||
}
|
||||
}
|
||||
|
||||
func (clock *FakeClock) Since(t time.Time) time.Duration {
|
||||
return clock.Now().Sub(t)
|
||||
}
|
||||
|
||||
func (clock *FakeClock) Now() time.Time {
|
||||
clock.cond.L.Lock()
|
||||
defer clock.cond.L.Unlock()
|
||||
|
||||
return clock.now
|
||||
}
|
||||
|
||||
func (clock *FakeClock) Increment(duration time.Duration) {
|
||||
clock.increment(duration, false, 0)
|
||||
}
|
||||
|
||||
func (clock *FakeClock) IncrementBySeconds(seconds uint64) {
|
||||
clock.Increment(time.Duration(seconds) * time.Second)
|
||||
}
|
||||
|
||||
func (clock *FakeClock) WaitForWatcherAndIncrement(duration time.Duration) {
|
||||
clock.WaitForNWatchersAndIncrement(duration, 1)
|
||||
}
|
||||
|
||||
func (clock *FakeClock) WaitForNWatchersAndIncrement(duration time.Duration, numWatchers int) {
|
||||
clock.increment(duration, true, numWatchers)
|
||||
}
|
||||
|
||||
func (clock *FakeClock) NewTimer(d time.Duration) clock.Timer {
|
||||
timer := newFakeTimer(clock, d, false)
|
||||
clock.addTimeWatcher(timer)
|
||||
|
||||
return timer
|
||||
}
|
||||
|
||||
func (clock *FakeClock) Sleep(d time.Duration) {
|
||||
<-clock.NewTimer(d).C()
|
||||
}
|
||||
|
||||
func (clock *FakeClock) After(d time.Duration) <-chan time.Time {
|
||||
return clock.NewTimer(d).C()
|
||||
}
|
||||
|
||||
func (clock *FakeClock) NewTicker(d time.Duration) clock.Ticker {
|
||||
if d <= 0 {
|
||||
panic(errors.New("duration must be greater than zero"))
|
||||
}
|
||||
|
||||
timer := newFakeTimer(clock, d, true)
|
||||
clock.addTimeWatcher(timer)
|
||||
|
||||
return newFakeTicker(timer)
|
||||
}
|
||||
|
||||
func (clock *FakeClock) WatcherCount() int {
|
||||
clock.cond.L.Lock()
|
||||
defer clock.cond.L.Unlock()
|
||||
|
||||
return len(clock.watchers)
|
||||
}
|
||||
|
||||
func (clock *FakeClock) increment(duration time.Duration, waitForWatchers bool, numWatchers int) {
|
||||
clock.cond.L.Lock()
|
||||
|
||||
for waitForWatchers && len(clock.watchers) < numWatchers {
|
||||
clock.cond.Wait()
|
||||
}
|
||||
|
||||
now := clock.now.Add(duration)
|
||||
clock.now = now
|
||||
|
||||
watchers := make([]timeWatcher, 0)
|
||||
newWatchers := map[timeWatcher]struct{}{}
|
||||
for w, _ := range clock.watchers {
|
||||
fire := w.shouldFire(now)
|
||||
if fire {
|
||||
watchers = append(watchers, w)
|
||||
}
|
||||
|
||||
if !fire || w.repeatable() {
|
||||
newWatchers[w] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
clock.watchers = newWatchers
|
||||
|
||||
clock.cond.L.Unlock()
|
||||
|
||||
for _, w := range watchers {
|
||||
w.timeUpdated(now)
|
||||
}
|
||||
}
|
||||
|
||||
func (clock *FakeClock) addTimeWatcher(tw timeWatcher) {
|
||||
clock.cond.L.Lock()
|
||||
clock.watchers[tw] = struct{}{}
|
||||
clock.cond.L.Unlock()
|
||||
|
||||
// force the timer to fire
|
||||
clock.Increment(0)
|
||||
|
||||
clock.cond.Broadcast()
|
||||
}
|
||||
|
||||
func (clock *FakeClock) removeTimeWatcher(tw timeWatcher) {
|
||||
clock.cond.L.Lock()
|
||||
delete(clock.watchers, tw)
|
||||
clock.cond.L.Unlock()
|
||||
}
|
vendor/code.cloudfoundry.org/clock/fakeclock/fake_ticker.go (generated, vendored, new file, 25 lines)
@ -0,0 +1,25 @@
|
|||
package fakeclock
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"code.cloudfoundry.org/clock"
|
||||
)
|
||||
|
||||
type fakeTicker struct {
|
||||
timer clock.Timer
|
||||
}
|
||||
|
||||
func newFakeTicker(timer *fakeTimer) *fakeTicker {
|
||||
return &fakeTicker{
|
||||
timer: timer,
|
||||
}
|
||||
}
|
||||
|
||||
func (ft *fakeTicker) C() <-chan time.Time {
|
||||
return ft.timer.C()
|
||||
}
|
||||
|
||||
func (ft *fakeTicker) Stop() {
|
||||
ft.timer.Stop()
|
||||
}
|
vendor/code.cloudfoundry.org/clock/fakeclock/fake_timer.go (generated, vendored, new file, 87 lines)
@ -0,0 +1,87 @@
|
|||
package fakeclock
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
type fakeTimer struct {
|
||||
clock *FakeClock
|
||||
|
||||
mutex sync.Mutex
|
||||
completionTime time.Time
|
||||
channel chan time.Time
|
||||
duration time.Duration
|
||||
repeat bool
|
||||
}
|
||||
|
||||
func newFakeTimer(clock *FakeClock, d time.Duration, repeat bool) *fakeTimer {
|
||||
return &fakeTimer{
|
||||
clock: clock,
|
||||
completionTime: clock.Now().Add(d),
|
||||
channel: make(chan time.Time, 1),
|
||||
duration: d,
|
||||
repeat: repeat,
|
||||
}
|
||||
}
|
||||
|
||||
func (ft *fakeTimer) C() <-chan time.Time {
|
||||
ft.mutex.Lock()
|
||||
defer ft.mutex.Unlock()
|
||||
return ft.channel
|
||||
}
|
||||
|
||||
func (ft *fakeTimer) reset(d time.Duration) bool {
|
||||
currentTime := ft.clock.Now()
|
||||
|
||||
ft.mutex.Lock()
|
||||
active := !ft.completionTime.IsZero()
|
||||
ft.completionTime = currentTime.Add(d)
|
||||
ft.mutex.Unlock()
|
||||
return active
|
||||
}
|
||||
|
||||
func (ft *fakeTimer) Reset(d time.Duration) bool {
|
||||
active := ft.reset(d)
|
||||
ft.clock.addTimeWatcher(ft)
|
||||
return active
|
||||
}
|
||||
|
||||
func (ft *fakeTimer) Stop() bool {
|
||||
ft.mutex.Lock()
|
||||
active := !ft.completionTime.IsZero()
|
||||
ft.mutex.Unlock()
|
||||
|
||||
ft.clock.removeTimeWatcher(ft)
|
||||
|
||||
return active
|
||||
}
|
||||
|
||||
func (ft *fakeTimer) shouldFire(now time.Time) bool {
|
||||
ft.mutex.Lock()
|
||||
defer ft.mutex.Unlock()
|
||||
|
||||
if ft.completionTime.IsZero() {
|
||||
return false
|
||||
}
|
||||
|
||||
return now.After(ft.completionTime) || now.Equal(ft.completionTime)
|
||||
}
|
||||
|
||||
func (ft *fakeTimer) repeatable() bool {
|
||||
return ft.repeat
|
||||
}
|
||||
|
||||
func (ft *fakeTimer) timeUpdated(now time.Time) {
|
||||
select {
|
||||
case ft.channel <- now:
|
||||
default:
|
||||
// drop on the floor. timers have a buffered channel anyway. according to
|
||||
// godoc of the `time' package a ticker can loose ticks in case of a slow
|
||||
// receiver
|
||||
}
|
||||
|
||||
if ft.repeatable() {
|
||||
ft.reset(ft.duration)
|
||||
}
|
||||
}
|
vendor/code.cloudfoundry.org/clock/fakeclock/package.go (generated, vendored, new file, 1 line)
@ -0,0 +1 @@
package fakeclock // import "code.cloudfoundry.org/clock/fakeclock"
vendor/github.com/bits-and-blooms/bitset/.gitignore (generated, vendored, new file, 26 lines)
@ -0,0 +1,26 @@
|
|||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
*.test
|
||||
*.prof
|
||||
|
||||
target
|
vendor/github.com/bits-and-blooms/bitset/.travis.yml (generated, vendored, new file, 37 lines)
@ -0,0 +1,37 @@
|
|||
language: go
|
||||
|
||||
sudo: false
|
||||
|
||||
branches:
|
||||
except:
|
||||
- release
|
||||
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
- travis
|
||||
|
||||
go:
|
||||
- "1.11.x"
|
||||
- tip
|
||||
|
||||
matrix:
|
||||
allow_failures:
|
||||
- go: tip
|
||||
|
||||
before_install:
|
||||
- if [ -n "$GH_USER" ]; then git config --global github.user ${GH_USER}; fi;
|
||||
- if [ -n "$GH_TOKEN" ]; then git config --global github.token ${GH_TOKEN}; fi;
|
||||
- go get github.com/mattn/goveralls
|
||||
|
||||
before_script:
|
||||
- make deps
|
||||
|
||||
script:
|
||||
- make qa
|
||||
|
||||
after_failure:
|
||||
- cat ./target/test/report.xml
|
||||
|
||||
after_success:
|
||||
- if [ "$TRAVIS_GO_VERSION" = "1.11.1" ]; then $HOME/gopath/bin/goveralls -covermode=count -coverprofile=target/report/coverage.out -service=travis-ci; fi;
|
vendor/github.com/bits-and-blooms/bitset/LICENSE (generated, vendored, new file, 27 lines)
@ -0,0 +1,27 @@
|
|||
Copyright (c) 2014 Will Fitzgerald. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
vendor/github.com/bits-and-blooms/bitset/README.md (generated, vendored, new file, 159 lines)
@ -0,0 +1,159 @@
|
|||
# bitset
|
||||
|
||||
*Go language library to map between non-negative integers and boolean values*
|
||||
|
||||
[![Test](https://github.com/bits-and-blooms/bitset/workflows/Test/badge.svg)](https://github.com/willf/bitset/actions?query=workflow%3ATest)
|
||||
[![Go Report Card](https://goreportcard.com/badge/github.com/willf/bitset)](https://goreportcard.com/report/github.com/willf/bitset)
|
||||
[![PkgGoDev](https://pkg.go.dev/badge/github.com/bits-and-blooms/bitset?tab=doc)](https://pkg.go.dev/github.com/bits-and-blooms/bitset?tab=doc)
|
||||
|
||||
|
||||
This library is part of the [awesome go collection](https://github.com/avelino/awesome-go). It is used in production by several important systems:
|
||||
|
||||
* [beego](https://github.com/beego/beego)
|
||||
* [CubeFS](https://github.com/cubefs/cubefs)
|
||||
* [Amazon EKS Distro](https://github.com/aws/eks-distro)
|
||||
* [sourcegraph](https://github.com/sourcegraph/sourcegraph)
|
||||
* [torrent](https://github.com/anacrolix/torrent)
|
||||
|
||||
|
||||
## Description
|
||||
|
||||
Package bitset implements bitsets, a mapping between non-negative integers and boolean values.
|
||||
It should be more efficient than map[uint] bool.
|
||||
|
||||
It provides methods for setting, clearing, flipping, and testing individual integers.
|
||||
|
||||
But it also provides set intersection, union, difference, complement, and symmetric operations, as well as tests to check whether any, all, or no bits are set, and querying a bitset's current length and number of positive bits.
|
||||
|
||||
BitSets are expanded to the size of the largest set bit; the memory allocation is approximately Max bits, where Max is the largest set bit. BitSets are never shrunk. On creation, a hint can be given for the number of bits that will be used.
|
||||
|
||||
Many of the methods, including Set, Clear, and Flip, return a BitSet pointer, which allows for chaining.
|
||||
|
||||
### Example use:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
|
||||
"github.com/bits-and-blooms/bitset"
|
||||
)
|
||||
|
||||
func main() {
|
||||
fmt.Printf("Hello from BitSet!\n")
|
||||
var b bitset.BitSet
|
||||
// play some Go Fish
|
||||
for i := 0; i < 100; i++ {
|
||||
card1 := uint(rand.Intn(52))
|
||||
card2 := uint(rand.Intn(52))
|
||||
b.Set(card1)
|
||||
if b.Test(card2) {
|
||||
fmt.Println("Go Fish!")
|
||||
}
|
||||
b.Clear(card1)
|
||||
}
|
||||
|
||||
// Chaining
|
||||
b.Set(10).Set(11)
|
||||
|
||||
for i, e := b.NextSet(0); e; i, e = b.NextSet(i + 1) {
|
||||
fmt.Println("The following bit is set:", i)
|
||||
}
|
||||
if b.Intersection(bitset.New(100).Set(10)).Count() == 1 {
|
||||
fmt.Println("Intersection works.")
|
||||
} else {
|
||||
fmt.Println("Intersection doesn't work???")
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
Package documentation is at: https://pkg.go.dev/github.com/bits-and-blooms/bitset?tab=doc
|
||||
|
||||
## Serialization
|
||||
|
||||
|
||||
You may serialize a bitset safely and portably to a stream
|
||||
of bytes as follows:
|
||||
```Go
|
||||
const length = 9585
|
||||
const oneEvery = 97
|
||||
bs := bitset.New(length)
|
||||
// Add some bits
|
||||
for i := uint(0); i < length; i += oneEvery {
|
||||
bs = bs.Set(i)
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
n, err := bs.WriteTo(&buf)
|
||||
if err != nil {
|
||||
// failure
|
||||
}
|
||||
// Here n == buf.Len()
|
||||
```
|
||||
You can later deserialize the result as follows:
|
||||
|
||||
```Go
|
||||
// Read back from buf
|
||||
bs = bitset.New()
|
||||
n, err = bs.ReadFrom(&buf)
|
||||
if err != nil {
|
||||
// error
|
||||
}
|
||||
// n is the number of bytes read
|
||||
```
|
||||
|
||||
The `ReadFrom` function attempts to read the data into the existing
|
||||
BitSet instance, to minimize memory allocations.
|
||||
|
||||
|
||||
*Performance tip*:
|
||||
When reading and writing to a file or a network connection, you may get better performance by
|
||||
wrapping your streams with `bufio` instances.
|
||||
|
||||
E.g.,
|
||||
```Go
|
||||
f, err := os.Create("myfile")
|
||||
w := bufio.NewWriter(f)
|
||||
```
|
||||
```Go
|
||||
f, err := os.Open("myfile")
|
||||
r := bufio.NewReader(f)
|
||||
```
|
||||
|
||||
## Memory Usage
|
||||
|
||||
The memory usage of a bitset using `N` bits is at least `N/8` bytes. The number of bits in a bitset is at least as large as one plus the greatest bit index you have accessed. Thus it is possible to run out of memory while using a bitset. If you have lots of bits, you might prefer compressed bitsets, like the [Roaring bitmaps](http://roaringbitmap.org) and its [Go implementation](https://github.com/RoaringBitmap/roaring).
|
||||
|
||||
The `roaring` library allows you to go back and forth between compressed Roaring bitmaps and the conventional bitset instances:
|
||||
```Go
|
||||
mybitset := roaringbitmap.ToBitSet()
|
||||
newroaringbitmap := roaring.FromBitSet(mybitset)
|
||||
```
|
||||
|
||||
|
||||
## Implementation Note
|
||||
|
||||
Go 1.9 introduced a native `math/bits` library. We provide backward compatibility to Go 1.7, which might be removed.
|
||||
|
||||
It is possible that a later version will match the `math/bits` return signature for counts (which is `int`, rather than our library's `uint64`). If so, the version will be bumped.
|
||||
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
go get github.com/bits-and-blooms/bitset
|
||||
```
|
||||
|
||||
## Contributing
|
||||
|
||||
If you wish to contribute to this project, please branch and issue a pull request against master ("[GitHub Flow](https://guides.github.com/introduction/flow/)")
|
||||
|
||||
## Running all tests
|
||||
|
||||
Before committing the code, please check if it passes tests, has adequate coverage, etc.
|
||||
```bash
|
||||
go test
|
||||
go test -cover
|
||||
```
|
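The vendored README above describes WriteTo/ReadFrom serialization only in fragments. As a hedged illustration (not part of the vendored file, and assuming the library's Equal helper for the final comparison), a complete, self-contained round trip looks roughly like this:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/bits-and-blooms/bitset"
)

func main() {
	// Build a sparse bitset, mirroring the README's serialization example.
	const length = 9585
	const oneEvery = 97
	bs := bitset.New(length)
	for i := uint(0); i < length; i += oneEvery {
		bs.Set(i)
	}

	// Serialize into an in-memory buffer; WriteTo reports bytes written.
	var buf bytes.Buffer
	if _, err := bs.WriteTo(&buf); err != nil {
		log.Fatal(err)
	}

	// Deserialize into a fresh BitSet and confirm the round trip.
	restored := bitset.New(0)
	if _, err := restored.ReadFrom(&buf); err != nil {
		log.Fatal(err)
	}
	fmt.Println("round trip ok:", bs.Equal(restored))
}
```

Wrapping the stream with bufio, as the README suggests, only matters when the destination is a file or socket rather than an in-memory buffer.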
5 vendor/github.com/bits-and-blooms/bitset/SECURITY.md generated vendored Normal file
@@ -0,0 +1,5 @@
# Security Policy

## Reporting a Vulnerability

You can report privately a vulnerability by email at daniel@lemire.me (current maintainer).
39 vendor/github.com/bits-and-blooms/bitset/azure-pipelines.yml generated vendored Normal file
@@ -0,0 +1,39 @@
# Go
# Build your Go project.
# Add steps that test, save build artifacts, deploy, and more:
# https://docs.microsoft.com/azure/devops/pipelines/languages/go

trigger:
- master

pool:
  vmImage: 'Ubuntu-16.04'

variables:
  GOBIN: '$(GOPATH)/bin' # Go binaries path
  GOROOT: '/usr/local/go1.11' # Go installation path
  GOPATH: '$(system.defaultWorkingDirectory)/gopath' # Go workspace path
  modulePath: '$(GOPATH)/src/github.com/$(build.repository.name)' # Path to the module's code

steps:
- script: |
    mkdir -p '$(GOBIN)'
    mkdir -p '$(GOPATH)/pkg'
    mkdir -p '$(modulePath)'
    shopt -s extglob
    shopt -s dotglob
    mv !(gopath) '$(modulePath)'
    echo '##vso[task.prependpath]$(GOBIN)'
    echo '##vso[task.prependpath]$(GOROOT)/bin'
  displayName: 'Set up the Go workspace'

- script: |
    go version
    go get -v -t -d ./...
    if [ -f Gopkg.toml ]; then
        curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
        dep ensure
    fi
    go build -v .
  workingDirectory: '$(modulePath)'
  displayName: 'Get dependencies, then build'
1184 vendor/github.com/bits-and-blooms/bitset/bitset.go generated vendored Normal file
File diff suppressed because it is too large
53 vendor/github.com/bits-and-blooms/bitset/popcnt.go generated vendored Normal file
@@ -0,0 +1,53 @@
package bitset

// bit population count, take from
// https://code.google.com/p/go/issues/detail?id=4988#c11
// credit: https://code.google.com/u/arnehormann/
func popcount(x uint64) (n uint64) {
	x -= (x >> 1) & 0x5555555555555555
	x = (x>>2)&0x3333333333333333 + x&0x3333333333333333
	x += x >> 4
	x &= 0x0f0f0f0f0f0f0f0f
	x *= 0x0101010101010101
	return x >> 56
}

func popcntSliceGo(s []uint64) uint64 {
	cnt := uint64(0)
	for _, x := range s {
		cnt += popcount(x)
	}
	return cnt
}

func popcntMaskSliceGo(s, m []uint64) uint64 {
	cnt := uint64(0)
	for i := range s {
		cnt += popcount(s[i] &^ m[i])
	}
	return cnt
}

func popcntAndSliceGo(s, m []uint64) uint64 {
	cnt := uint64(0)
	for i := range s {
		cnt += popcount(s[i] & m[i])
	}
	return cnt
}

func popcntOrSliceGo(s, m []uint64) uint64 {
	cnt := uint64(0)
	for i := range s {
		cnt += popcount(s[i] | m[i])
	}
	return cnt
}

func popcntXorSliceGo(s, m []uint64) uint64 {
	cnt := uint64(0)
	for i := range s {
		cnt += popcount(s[i] ^ m[i])
	}
	return cnt
}
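popcnt.go above is the library's pure-Go fallback: a SWAR (SIMD within a register) population count. The sketch below is illustrative only, not part of the vendored package; it restates the same bit-twiddling steps and cross-checks them against the standard library's math/bits:

```go
package main

import (
	"fmt"
	"math/bits"
	"math/rand"
)

// swarPopcount mirrors the SWAR steps used in the vendored popcount:
// pairwise and nibble-wise accumulation, then a byte-sum multiply whose
// top byte holds the total.
func swarPopcount(x uint64) uint64 {
	x -= (x >> 1) & 0x5555555555555555
	x = (x>>2)&0x3333333333333333 + x&0x3333333333333333
	x += x >> 4
	x &= 0x0f0f0f0f0f0f0f0f
	x *= 0x0101010101010101
	return x >> 56
}

func main() {
	for i := 0; i < 5; i++ {
		x := rand.Uint64()
		fmt.Printf("%016x: swar=%d bits.OnesCount64=%d\n",
			x, swarPopcount(x), bits.OnesCount64(x))
	}
}
```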
62 vendor/github.com/bits-and-blooms/bitset/popcnt_19.go generated vendored Normal file
@@ -0,0 +1,62 @@
//go:build go1.9
// +build go1.9

package bitset

import "math/bits"

func popcntSlice(s []uint64) uint64 {
	var cnt int
	for _, x := range s {
		cnt += bits.OnesCount64(x)
	}
	return uint64(cnt)
}

func popcntMaskSlice(s, m []uint64) uint64 {
	var cnt int
	// this explicit check eliminates a bounds check in the loop
	if len(m) < len(s) {
		panic("mask slice is too short")
	}
	for i := range s {
		cnt += bits.OnesCount64(s[i] &^ m[i])
	}
	return uint64(cnt)
}

func popcntAndSlice(s, m []uint64) uint64 {
	var cnt int
	// this explicit check eliminates a bounds check in the loop
	if len(m) < len(s) {
		panic("mask slice is too short")
	}
	for i := range s {
		cnt += bits.OnesCount64(s[i] & m[i])
	}
	return uint64(cnt)
}

func popcntOrSlice(s, m []uint64) uint64 {
	var cnt int
	// this explicit check eliminates a bounds check in the loop
	if len(m) < len(s) {
		panic("mask slice is too short")
	}
	for i := range s {
		cnt += bits.OnesCount64(s[i] | m[i])
	}
	return uint64(cnt)
}

func popcntXorSlice(s, m []uint64) uint64 {
	var cnt int
	// this explicit check eliminates a bounds check in the loop
	if len(m) < len(s) {
		panic("mask slice is too short")
	}
	for i := range s {
		cnt += bits.OnesCount64(s[i] ^ m[i])
	}
	return uint64(cnt)
}
68 vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.go generated vendored Normal file
@@ -0,0 +1,68 @@
//go:build !go1.9 && amd64 && !appengine
// +build !go1.9,amd64,!appengine

package bitset

// *** the following functions are defined in popcnt_amd64.s

//go:noescape

func hasAsm() bool

// useAsm is a flag used to select the GO or ASM implementation of the popcnt function
var useAsm = hasAsm()

//go:noescape

func popcntSliceAsm(s []uint64) uint64

//go:noescape

func popcntMaskSliceAsm(s, m []uint64) uint64

//go:noescape

func popcntAndSliceAsm(s, m []uint64) uint64

//go:noescape

func popcntOrSliceAsm(s, m []uint64) uint64

//go:noescape

func popcntXorSliceAsm(s, m []uint64) uint64

func popcntSlice(s []uint64) uint64 {
	if useAsm {
		return popcntSliceAsm(s)
	}
	return popcntSliceGo(s)
}

func popcntMaskSlice(s, m []uint64) uint64 {
	if useAsm {
		return popcntMaskSliceAsm(s, m)
	}
	return popcntMaskSliceGo(s, m)
}

func popcntAndSlice(s, m []uint64) uint64 {
	if useAsm {
		return popcntAndSliceAsm(s, m)
	}
	return popcntAndSliceGo(s, m)
}

func popcntOrSlice(s, m []uint64) uint64 {
	if useAsm {
		return popcntOrSliceAsm(s, m)
	}
	return popcntOrSliceGo(s, m)
}

func popcntXorSlice(s, m []uint64) uint64 {
	if useAsm {
		return popcntXorSliceAsm(s, m)
	}
	return popcntXorSliceGo(s, m)
}
104 vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.s generated vendored Normal file
@@ -0,0 +1,104 @@
// +build !go1.9
// +build amd64,!appengine

TEXT ·hasAsm(SB),4,$0-1
	MOVQ $1, AX
	CPUID
	SHRQ $23, CX
	ANDQ $1, CX
	MOVB CX, ret+0(FP)
	RET

#define POPCNTQ_DX_DX BYTE $0xf3; BYTE $0x48; BYTE $0x0f; BYTE $0xb8; BYTE $0xd2

TEXT ·popcntSliceAsm(SB),4,$0-32
	XORQ AX, AX
	MOVQ s+0(FP), SI
	MOVQ s_len+8(FP), CX
	TESTQ CX, CX
	JZ popcntSliceEnd
popcntSliceLoop:
	BYTE $0xf3; BYTE $0x48; BYTE $0x0f; BYTE $0xb8; BYTE $0x16 // POPCNTQ (SI), DX
	ADDQ DX, AX
	ADDQ $8, SI
	LOOP popcntSliceLoop
popcntSliceEnd:
	MOVQ AX, ret+24(FP)
	RET

TEXT ·popcntMaskSliceAsm(SB),4,$0-56
	XORQ AX, AX
	MOVQ s+0(FP), SI
	MOVQ s_len+8(FP), CX
	TESTQ CX, CX
	JZ popcntMaskSliceEnd
	MOVQ m+24(FP), DI
popcntMaskSliceLoop:
	MOVQ (DI), DX
	NOTQ DX
	ANDQ (SI), DX
	POPCNTQ_DX_DX
	ADDQ DX, AX
	ADDQ $8, SI
	ADDQ $8, DI
	LOOP popcntMaskSliceLoop
popcntMaskSliceEnd:
	MOVQ AX, ret+48(FP)
	RET

TEXT ·popcntAndSliceAsm(SB),4,$0-56
	XORQ AX, AX
	MOVQ s+0(FP), SI
	MOVQ s_len+8(FP), CX
	TESTQ CX, CX
	JZ popcntAndSliceEnd
	MOVQ m+24(FP), DI
popcntAndSliceLoop:
	MOVQ (DI), DX
	ANDQ (SI), DX
	POPCNTQ_DX_DX
	ADDQ DX, AX
	ADDQ $8, SI
	ADDQ $8, DI
	LOOP popcntAndSliceLoop
popcntAndSliceEnd:
	MOVQ AX, ret+48(FP)
	RET

TEXT ·popcntOrSliceAsm(SB),4,$0-56
	XORQ AX, AX
	MOVQ s+0(FP), SI
	MOVQ s_len+8(FP), CX
	TESTQ CX, CX
	JZ popcntOrSliceEnd
	MOVQ m+24(FP), DI
popcntOrSliceLoop:
	MOVQ (DI), DX
	ORQ (SI), DX
	POPCNTQ_DX_DX
	ADDQ DX, AX
	ADDQ $8, SI
	ADDQ $8, DI
	LOOP popcntOrSliceLoop
popcntOrSliceEnd:
	MOVQ AX, ret+48(FP)
	RET

TEXT ·popcntXorSliceAsm(SB),4,$0-56
	XORQ AX, AX
	MOVQ s+0(FP), SI
	MOVQ s_len+8(FP), CX
	TESTQ CX, CX
	JZ popcntXorSliceEnd
	MOVQ m+24(FP), DI
popcntXorSliceLoop:
	MOVQ (DI), DX
	XORQ (SI), DX
	POPCNTQ_DX_DX
	ADDQ DX, AX
	ADDQ $8, SI
	ADDQ $8, DI
	LOOP popcntXorSliceLoop
popcntXorSliceEnd:
	MOVQ AX, ret+48(FP)
	RET
25 vendor/github.com/bits-and-blooms/bitset/popcnt_generic.go generated vendored Normal file
@@ -0,0 +1,25 @@
//go:build !go1.9 && (!amd64 || appengine)
// +build !go1.9
// +build !amd64 appengine

package bitset

func popcntSlice(s []uint64) uint64 {
	return popcntSliceGo(s)
}

func popcntMaskSlice(s, m []uint64) uint64 {
	return popcntMaskSliceGo(s, m)
}

func popcntAndSlice(s, m []uint64) uint64 {
	return popcntAndSliceGo(s, m)
}

func popcntOrSlice(s, m []uint64) uint64 {
	return popcntOrSliceGo(s, m)
}

func popcntXorSlice(s, m []uint64) uint64 {
	return popcntXorSliceGo(s, m)
}
45 vendor/github.com/bits-and-blooms/bitset/select.go generated vendored Normal file
@@ -0,0 +1,45 @@
package bitset

func select64(w uint64, j uint) uint {
	seen := 0
	// Divide 64bit
	part := w & 0xFFFFFFFF
	n := uint(popcount(part))
	if n <= j {
		part = w >> 32
		seen += 32
		j -= n
	}
	ww := part

	// Divide 32bit
	part = ww & 0xFFFF

	n = uint(popcount(part))
	if n <= j {
		part = ww >> 16
		seen += 16
		j -= n
	}
	ww = part

	// Divide 16bit
	part = ww & 0xFF
	n = uint(popcount(part))
	if n <= j {
		part = ww >> 8
		seen += 8
		j -= n
	}
	ww = part

	// Lookup in final byte
	counter := 0
	for ; counter < 8; counter++ {
		j -= uint((ww >> counter) & 1)
		if j+1 == 0 {
			break
		}
	}
	return uint(seen + counter)
}
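select64 above answers "where is the j-th set bit of a 64-bit word?" by halving the search window (32, 16, then 8 bits) using popcount. The following standalone sketch is a hedged cross-check only; naiveSelect64 is an illustrative helper and not part of the vendored package:

```go
package main

import (
	"fmt"
	"math/bits"
)

// naiveSelect64 returns the bit index of the j-th (0-based) set bit of w,
// or 64 if w has fewer than j+1 set bits, by scanning one bit at a time.
func naiveSelect64(w uint64, j uint) uint {
	for i := uint(0); i < 64; i++ {
		if w&(1<<i) != 0 {
			if j == 0 {
				return i
			}
			j--
		}
	}
	return 64
}

func main() {
	w := uint64(0b10110010) // set bits at indices 1, 4, 5, 7
	for j := uint(0); j < uint(bits.OnesCount64(w)); j++ {
		fmt.Printf("select(%d) = %d\n", j, naiveSelect64(w, j))
	}
}
```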
15 vendor/github.com/bits-and-blooms/bitset/trailing_zeros_18.go generated vendored Normal file
@@ -0,0 +1,15 @@
//go:build !go1.9
// +build !go1.9

package bitset

var deBruijn = [...]byte{
	0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4,
	62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5,
	63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11,
	54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6,
}

func trailingZeroes64(v uint64) uint {
	return uint(deBruijn[((v&-v)*0x03f79d71b4ca8b09)>>58])
}
10 vendor/github.com/bits-and-blooms/bitset/trailing_zeros_19.go generated vendored Normal file
@@ -0,0 +1,10 @@
//go:build go1.9
// +build go1.9

package bitset

import "math/bits"

func trailingZeroes64(v uint64) uint {
	return uint(bits.TrailingZeros64(v))
}
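The two trailing_zeros files above select between a de Bruijn lookup (pre-Go 1.9) and math/bits.TrailingZeros64 (Go 1.9 and later). A small standalone sketch, assuming nothing beyond the table copied from the vendored file, shows the two approaches agree for non-zero inputs:

```go
package main

import (
	"fmt"
	"math/bits"
)

// deBruijn table copied verbatim from the vendored trailing_zeros_18.go.
var deBruijn = [...]byte{
	0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4,
	62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5,
	63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11,
	54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6,
}

// trailingZeroes64 isolates the lowest set bit (v & -v) and maps it to its
// bit index via the de Bruijn constant. Note: for v == 0 it returns 0,
// while math/bits returns 64, so only non-zero inputs are compared below.
func trailingZeroes64(v uint64) uint {
	return uint(deBruijn[((v&-v)*0x03f79d71b4ca8b09)>>58])
}

func main() {
	for _, v := range []uint64{1, 8, 96, 1 << 40} {
		fmt.Printf("v=%d: deBruijn=%d math/bits=%d\n",
			v, trailingZeroes64(v), bits.TrailingZeros64(v))
	}
}
```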
15 vendor/github.com/davecgh/go-spew/LICENSE generated vendored Normal file
@@ -0,0 +1,15 @@
ISC License

Copyright (c) 2012-2016 Dave Collins <dave@davec.name>

Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
145 vendor/github.com/davecgh/go-spew/spew/bypass.go generated vendored Normal file
@@ -0,0 +1,145 @@
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
|
||||
//
|
||||
// Permission to use, copy, modify, and distribute this software for any
|
||||
// purpose with or without fee is hereby granted, provided that the above
|
||||
// copyright notice and this permission notice appear in all copies.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
// NOTE: Due to the following build constraints, this file will only be compiled
|
||||
// when the code is not running on Google App Engine, compiled by GopherJS, and
|
||||
// "-tags safe" is not added to the go build command line. The "disableunsafe"
|
||||
// tag is deprecated and thus should not be used.
|
||||
// Go versions prior to 1.4 are disabled because they use a different layout
|
||||
// for interfaces which make the implementation of unsafeReflectValue more complex.
|
||||
// +build !js,!appengine,!safe,!disableunsafe,go1.4
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
// UnsafeDisabled is a build-time constant which specifies whether or
|
||||
// not access to the unsafe package is available.
|
||||
UnsafeDisabled = false
|
||||
|
||||
// ptrSize is the size of a pointer on the current arch.
|
||||
ptrSize = unsafe.Sizeof((*byte)(nil))
|
||||
)
|
||||
|
||||
type flag uintptr
|
||||
|
||||
var (
|
||||
// flagRO indicates whether the value field of a reflect.Value
|
||||
// is read-only.
|
||||
flagRO flag
|
||||
|
||||
// flagAddr indicates whether the address of the reflect.Value's
|
||||
// value may be taken.
|
||||
flagAddr flag
|
||||
)
|
||||
|
||||
// flagKindMask holds the bits that make up the kind
|
||||
// part of the flags field. In all the supported versions,
|
||||
// it is in the lower 5 bits.
|
||||
const flagKindMask = flag(0x1f)
|
||||
|
||||
// Different versions of Go have used different
|
||||
// bit layouts for the flags type. This table
|
||||
// records the known combinations.
|
||||
var okFlags = []struct {
|
||||
ro, addr flag
|
||||
}{{
|
||||
// From Go 1.4 to 1.5
|
||||
ro: 1 << 5,
|
||||
addr: 1 << 7,
|
||||
}, {
|
||||
// Up to Go tip.
|
||||
ro: 1<<5 | 1<<6,
|
||||
addr: 1 << 8,
|
||||
}}
|
||||
|
||||
var flagValOffset = func() uintptr {
|
||||
field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
|
||||
if !ok {
|
||||
panic("reflect.Value has no flag field")
|
||||
}
|
||||
return field.Offset
|
||||
}()
|
||||
|
||||
// flagField returns a pointer to the flag field of a reflect.Value.
|
||||
func flagField(v *reflect.Value) *flag {
|
||||
return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
|
||||
}
|
||||
|
||||
// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
|
||||
// the typical safety restrictions preventing access to unaddressable and
|
||||
// unexported data. It works by digging the raw pointer to the underlying
|
||||
// value out of the protected value and generating a new unprotected (unsafe)
|
||||
// reflect.Value to it.
|
||||
//
|
||||
// This allows us to check for implementations of the Stringer and error
|
||||
// interfaces to be used for pretty printing ordinarily unaddressable and
|
||||
// inaccessible values such as unexported struct fields.
|
||||
func unsafeReflectValue(v reflect.Value) reflect.Value {
|
||||
if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
|
||||
return v
|
||||
}
|
||||
flagFieldPtr := flagField(&v)
|
||||
*flagFieldPtr &^= flagRO
|
||||
*flagFieldPtr |= flagAddr
|
||||
return v
|
||||
}
|
||||
|
||||
// Sanity checks against future reflect package changes
|
||||
// to the type or semantics of the Value.flag field.
|
||||
func init() {
|
||||
field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
|
||||
if !ok {
|
||||
panic("reflect.Value has no flag field")
|
||||
}
|
||||
if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
|
||||
panic("reflect.Value flag field has changed kind")
|
||||
}
|
||||
type t0 int
|
||||
var t struct {
|
||||
A t0
|
||||
// t0 will have flagEmbedRO set.
|
||||
t0
|
||||
// a will have flagStickyRO set
|
||||
a t0
|
||||
}
|
||||
vA := reflect.ValueOf(t).FieldByName("A")
|
||||
va := reflect.ValueOf(t).FieldByName("a")
|
||||
vt0 := reflect.ValueOf(t).FieldByName("t0")
|
||||
|
||||
// Infer flagRO from the difference between the flags
|
||||
// for the (otherwise identical) fields in t.
|
||||
flagPublic := *flagField(&vA)
|
||||
flagWithRO := *flagField(&va) | *flagField(&vt0)
|
||||
flagRO = flagPublic ^ flagWithRO
|
||||
|
||||
// Infer flagAddr from the difference between a value
|
||||
// taken from a pointer and not.
|
||||
vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
|
||||
flagNoPtr := *flagField(&vA)
|
||||
flagPtr := *flagField(&vPtrA)
|
||||
flagAddr = flagNoPtr ^ flagPtr
|
||||
|
||||
// Check that the inferred flags tally with one of the known versions.
|
||||
for _, f := range okFlags {
|
||||
if flagRO == f.ro && flagAddr == f.addr {
|
||||
return
|
||||
}
|
||||
}
|
||||
panic("reflect.Value read-only flag has changed semantics")
|
||||
}
|
38 vendor/github.com/davecgh/go-spew/spew/bypasssafe.go generated vendored Normal file
@@ -0,0 +1,38 @@
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
|
||||
//
|
||||
// Permission to use, copy, modify, and distribute this software for any
|
||||
// purpose with or without fee is hereby granted, provided that the above
|
||||
// copyright notice and this permission notice appear in all copies.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
// NOTE: Due to the following build constraints, this file will only be compiled
|
||||
// when the code is running on Google App Engine, compiled by GopherJS, or
|
||||
// "-tags safe" is added to the go build command line. The "disableunsafe"
|
||||
// tag is deprecated and thus should not be used.
|
||||
// +build js appengine safe disableunsafe !go1.4
|
||||
|
||||
package spew
|
||||
|
||||
import "reflect"
|
||||
|
||||
const (
|
||||
// UnsafeDisabled is a build-time constant which specifies whether or
|
||||
// not access to the unsafe package is available.
|
||||
UnsafeDisabled = true
|
||||
)
|
||||
|
||||
// unsafeReflectValue typically converts the passed reflect.Value into a one
|
||||
// that bypasses the typical safety restrictions preventing access to
|
||||
// unaddressable and unexported data. However, doing this relies on access to
|
||||
// the unsafe package. This is a stub version which simply returns the passed
|
||||
// reflect.Value when the unsafe package is not available.
|
||||
func unsafeReflectValue(v reflect.Value) reflect.Value {
|
||||
return v
|
||||
}
|
341 vendor/github.com/davecgh/go-spew/spew/common.go generated vendored Normal file
@@ -0,0 +1,341 @@
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Some constants in the form of bytes to avoid string overhead. This mirrors
|
||||
// the technique used in the fmt package.
|
||||
var (
|
||||
panicBytes = []byte("(PANIC=")
|
||||
plusBytes = []byte("+")
|
||||
iBytes = []byte("i")
|
||||
trueBytes = []byte("true")
|
||||
falseBytes = []byte("false")
|
||||
interfaceBytes = []byte("(interface {})")
|
||||
commaNewlineBytes = []byte(",\n")
|
||||
newlineBytes = []byte("\n")
|
||||
openBraceBytes = []byte("{")
|
||||
openBraceNewlineBytes = []byte("{\n")
|
||||
closeBraceBytes = []byte("}")
|
||||
asteriskBytes = []byte("*")
|
||||
colonBytes = []byte(":")
|
||||
colonSpaceBytes = []byte(": ")
|
||||
openParenBytes = []byte("(")
|
||||
closeParenBytes = []byte(")")
|
||||
spaceBytes = []byte(" ")
|
||||
pointerChainBytes = []byte("->")
|
||||
nilAngleBytes = []byte("<nil>")
|
||||
maxNewlineBytes = []byte("<max depth reached>\n")
|
||||
maxShortBytes = []byte("<max>")
|
||||
circularBytes = []byte("<already shown>")
|
||||
circularShortBytes = []byte("<shown>")
|
||||
invalidAngleBytes = []byte("<invalid>")
|
||||
openBracketBytes = []byte("[")
|
||||
closeBracketBytes = []byte("]")
|
||||
percentBytes = []byte("%")
|
||||
precisionBytes = []byte(".")
|
||||
openAngleBytes = []byte("<")
|
||||
closeAngleBytes = []byte(">")
|
||||
openMapBytes = []byte("map[")
|
||||
closeMapBytes = []byte("]")
|
||||
lenEqualsBytes = []byte("len=")
|
||||
capEqualsBytes = []byte("cap=")
|
||||
)
|
||||
|
||||
// hexDigits is used to map a decimal value to a hex digit.
|
||||
var hexDigits = "0123456789abcdef"
|
||||
|
||||
// catchPanic handles any panics that might occur during the handleMethods
|
||||
// calls.
|
||||
func catchPanic(w io.Writer, v reflect.Value) {
|
||||
if err := recover(); err != nil {
|
||||
w.Write(panicBytes)
|
||||
fmt.Fprintf(w, "%v", err)
|
||||
w.Write(closeParenBytes)
|
||||
}
|
||||
}
|
||||
|
||||
// handleMethods attempts to call the Error and String methods on the underlying
|
||||
// type the passed reflect.Value represents and outputes the result to Writer w.
|
||||
//
|
||||
// It handles panics in any called methods by catching and displaying the error
|
||||
// as the formatted value.
|
||||
func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
|
||||
// We need an interface to check if the type implements the error or
|
||||
// Stringer interface. However, the reflect package won't give us an
|
||||
// interface on certain things like unexported struct fields in order
|
||||
// to enforce visibility rules. We use unsafe, when it's available,
|
||||
// to bypass these restrictions since this package does not mutate the
|
||||
// values.
|
||||
if !v.CanInterface() {
|
||||
if UnsafeDisabled {
|
||||
return false
|
||||
}
|
||||
|
||||
v = unsafeReflectValue(v)
|
||||
}
|
||||
|
||||
// Choose whether or not to do error and Stringer interface lookups against
|
||||
// the base type or a pointer to the base type depending on settings.
|
||||
// Technically calling one of these methods with a pointer receiver can
|
||||
// mutate the value, however, types which choose to satisify an error or
|
||||
// Stringer interface with a pointer receiver should not be mutating their
|
||||
// state inside these interface methods.
|
||||
if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
|
||||
v = unsafeReflectValue(v)
|
||||
}
|
||||
if v.CanAddr() {
|
||||
v = v.Addr()
|
||||
}
|
||||
|
||||
// Is it an error or Stringer?
|
||||
switch iface := v.Interface().(type) {
|
||||
case error:
|
||||
defer catchPanic(w, v)
|
||||
if cs.ContinueOnMethod {
|
||||
w.Write(openParenBytes)
|
||||
w.Write([]byte(iface.Error()))
|
||||
w.Write(closeParenBytes)
|
||||
w.Write(spaceBytes)
|
||||
return false
|
||||
}
|
||||
|
||||
w.Write([]byte(iface.Error()))
|
||||
return true
|
||||
|
||||
case fmt.Stringer:
|
||||
defer catchPanic(w, v)
|
||||
if cs.ContinueOnMethod {
|
||||
w.Write(openParenBytes)
|
||||
w.Write([]byte(iface.String()))
|
||||
w.Write(closeParenBytes)
|
||||
w.Write(spaceBytes)
|
||||
return false
|
||||
}
|
||||
w.Write([]byte(iface.String()))
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// printBool outputs a boolean value as true or false to Writer w.
|
||||
func printBool(w io.Writer, val bool) {
|
||||
if val {
|
||||
w.Write(trueBytes)
|
||||
} else {
|
||||
w.Write(falseBytes)
|
||||
}
|
||||
}
|
||||
|
||||
// printInt outputs a signed integer value to Writer w.
|
||||
func printInt(w io.Writer, val int64, base int) {
|
||||
w.Write([]byte(strconv.FormatInt(val, base)))
|
||||
}
|
||||
|
||||
// printUint outputs an unsigned integer value to Writer w.
|
||||
func printUint(w io.Writer, val uint64, base int) {
|
||||
w.Write([]byte(strconv.FormatUint(val, base)))
|
||||
}
|
||||
|
||||
// printFloat outputs a floating point value using the specified precision,
|
||||
// which is expected to be 32 or 64bit, to Writer w.
|
||||
func printFloat(w io.Writer, val float64, precision int) {
|
||||
w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
|
||||
}
|
||||
|
||||
// printComplex outputs a complex value using the specified float precision
|
||||
// for the real and imaginary parts to Writer w.
|
||||
func printComplex(w io.Writer, c complex128, floatPrecision int) {
|
||||
r := real(c)
|
||||
w.Write(openParenBytes)
|
||||
w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
|
||||
i := imag(c)
|
||||
if i >= 0 {
|
||||
w.Write(plusBytes)
|
||||
}
|
||||
w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
|
||||
w.Write(iBytes)
|
||||
w.Write(closeParenBytes)
|
||||
}
|
||||
|
||||
// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
|
||||
// prefix to Writer w.
|
||||
func printHexPtr(w io.Writer, p uintptr) {
|
||||
// Null pointer.
|
||||
num := uint64(p)
|
||||
if num == 0 {
|
||||
w.Write(nilAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
|
||||
buf := make([]byte, 18)
|
||||
|
||||
// It's simpler to construct the hex string right to left.
|
||||
base := uint64(16)
|
||||
i := len(buf) - 1
|
||||
for num >= base {
|
||||
buf[i] = hexDigits[num%base]
|
||||
num /= base
|
||||
i--
|
||||
}
|
||||
buf[i] = hexDigits[num]
|
||||
|
||||
// Add '0x' prefix.
|
||||
i--
|
||||
buf[i] = 'x'
|
||||
i--
|
||||
buf[i] = '0'
|
||||
|
||||
// Strip unused leading bytes.
|
||||
buf = buf[i:]
|
||||
w.Write(buf)
|
||||
}
|
||||
|
||||
// valuesSorter implements sort.Interface to allow a slice of reflect.Value
|
||||
// elements to be sorted.
|
||||
type valuesSorter struct {
|
||||
values []reflect.Value
|
||||
strings []string // either nil or same len and values
|
||||
cs *ConfigState
|
||||
}
|
||||
|
||||
// newValuesSorter initializes a valuesSorter instance, which holds a set of
|
||||
// surrogate keys on which the data should be sorted. It uses flags in
|
||||
// ConfigState to decide if and how to populate those surrogate keys.
|
||||
func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
|
||||
vs := &valuesSorter{values: values, cs: cs}
|
||||
if canSortSimply(vs.values[0].Kind()) {
|
||||
return vs
|
||||
}
|
||||
if !cs.DisableMethods {
|
||||
vs.strings = make([]string, len(values))
|
||||
for i := range vs.values {
|
||||
b := bytes.Buffer{}
|
||||
if !handleMethods(cs, &b, vs.values[i]) {
|
||||
vs.strings = nil
|
||||
break
|
||||
}
|
||||
vs.strings[i] = b.String()
|
||||
}
|
||||
}
|
||||
if vs.strings == nil && cs.SpewKeys {
|
||||
vs.strings = make([]string, len(values))
|
||||
for i := range vs.values {
|
||||
vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
|
||||
}
|
||||
}
|
||||
return vs
|
||||
}
|
||||
|
||||
// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
|
||||
// directly, or whether it should be considered for sorting by surrogate keys
|
||||
// (if the ConfigState allows it).
|
||||
func canSortSimply(kind reflect.Kind) bool {
|
||||
// This switch parallels valueSortLess, except for the default case.
|
||||
switch kind {
|
||||
case reflect.Bool:
|
||||
return true
|
||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||
return true
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||
return true
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return true
|
||||
case reflect.String:
|
||||
return true
|
||||
case reflect.Uintptr:
|
||||
return true
|
||||
case reflect.Array:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Len returns the number of values in the slice. It is part of the
|
||||
// sort.Interface implementation.
|
||||
func (s *valuesSorter) Len() int {
|
||||
return len(s.values)
|
||||
}
|
||||
|
||||
// Swap swaps the values at the passed indices. It is part of the
|
||||
// sort.Interface implementation.
|
||||
func (s *valuesSorter) Swap(i, j int) {
|
||||
s.values[i], s.values[j] = s.values[j], s.values[i]
|
||||
if s.strings != nil {
|
||||
s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
|
||||
}
|
||||
}
|
||||
|
||||
// valueSortLess returns whether the first value should sort before the second
|
||||
// value. It is used by valueSorter.Less as part of the sort.Interface
|
||||
// implementation.
|
||||
func valueSortLess(a, b reflect.Value) bool {
|
||||
switch a.Kind() {
|
||||
case reflect.Bool:
|
||||
return !a.Bool() && b.Bool()
|
||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||
return a.Int() < b.Int()
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||
return a.Uint() < b.Uint()
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return a.Float() < b.Float()
|
||||
case reflect.String:
|
||||
return a.String() < b.String()
|
||||
case reflect.Uintptr:
|
||||
return a.Uint() < b.Uint()
|
||||
case reflect.Array:
|
||||
// Compare the contents of both arrays.
|
||||
l := a.Len()
|
||||
for i := 0; i < l; i++ {
|
||||
av := a.Index(i)
|
||||
bv := b.Index(i)
|
||||
if av.Interface() == bv.Interface() {
|
||||
continue
|
||||
}
|
||||
return valueSortLess(av, bv)
|
||||
}
|
||||
}
|
||||
return a.String() < b.String()
|
||||
}
|
||||
|
||||
// Less returns whether the value at index i should sort before the
|
||||
// value at index j. It is part of the sort.Interface implementation.
|
||||
func (s *valuesSorter) Less(i, j int) bool {
|
||||
if s.strings == nil {
|
||||
return valueSortLess(s.values[i], s.values[j])
|
||||
}
|
||||
return s.strings[i] < s.strings[j]
|
||||
}
|
||||
|
||||
// sortValues is a sort function that handles both native types and any type that
|
||||
// can be converted to error or Stringer. Other inputs are sorted according to
|
||||
// their Value.String() value to ensure display stability.
|
||||
func sortValues(values []reflect.Value, cs *ConfigState) {
|
||||
if len(values) == 0 {
|
||||
return
|
||||
}
|
||||
sort.Sort(newValuesSorter(values, cs))
|
||||
}
|
306 vendor/github.com/davecgh/go-spew/spew/config.go generated vendored Normal file
@@ -0,0 +1,306 @@
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// ConfigState houses the configuration options used by spew to format and
|
||||
// display values. There is a global instance, Config, that is used to control
|
||||
// all top-level Formatter and Dump functionality. Each ConfigState instance
|
||||
// provides methods equivalent to the top-level functions.
|
||||
//
|
||||
// The zero value for ConfigState provides no indentation. You would typically
|
||||
// want to set it to a space or a tab.
|
||||
//
|
||||
// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
|
||||
// with default settings. See the documentation of NewDefaultConfig for default
|
||||
// values.
|
||||
type ConfigState struct {
|
||||
// Indent specifies the string to use for each indentation level. The
|
||||
// global config instance that all top-level functions use set this to a
|
||||
// single space by default. If you would like more indentation, you might
|
||||
// set this to a tab with "\t" or perhaps two spaces with " ".
|
||||
Indent string
|
||||
|
||||
// MaxDepth controls the maximum number of levels to descend into nested
|
||||
// data structures. The default, 0, means there is no limit.
|
||||
//
|
||||
// NOTE: Circular data structures are properly detected, so it is not
|
||||
// necessary to set this value unless you specifically want to limit deeply
|
||||
// nested data structures.
|
||||
MaxDepth int
|
||||
|
||||
// DisableMethods specifies whether or not error and Stringer interfaces are
|
||||
// invoked for types that implement them.
|
||||
DisableMethods bool
|
||||
|
||||
// DisablePointerMethods specifies whether or not to check for and invoke
|
||||
// error and Stringer interfaces on types which only accept a pointer
|
||||
// receiver when the current type is not a pointer.
|
||||
//
|
||||
// NOTE: This might be an unsafe action since calling one of these methods
|
||||
// with a pointer receiver could technically mutate the value, however,
|
||||
// in practice, types which choose to satisify an error or Stringer
|
||||
// interface with a pointer receiver should not be mutating their state
|
||||
// inside these interface methods. As a result, this option relies on
|
||||
// access to the unsafe package, so it will not have any effect when
|
||||
// running in environments without access to the unsafe package such as
|
||||
// Google App Engine or with the "safe" build tag specified.
|
||||
DisablePointerMethods bool
|
||||
|
||||
// DisablePointerAddresses specifies whether to disable the printing of
|
||||
// pointer addresses. This is useful when diffing data structures in tests.
|
||||
DisablePointerAddresses bool
|
||||
|
||||
// DisableCapacities specifies whether to disable the printing of capacities
|
||||
// for arrays, slices, maps and channels. This is useful when diffing
|
||||
// data structures in tests.
|
||||
DisableCapacities bool
|
||||
|
||||
// ContinueOnMethod specifies whether or not recursion should continue once
|
||||
// a custom error or Stringer interface is invoked. The default, false,
|
||||
// means it will print the results of invoking the custom error or Stringer
|
||||
// interface and return immediately instead of continuing to recurse into
|
||||
// the internals of the data type.
|
||||
//
|
||||
// NOTE: This flag does not have any effect if method invocation is disabled
|
||||
// via the DisableMethods or DisablePointerMethods options.
|
||||
ContinueOnMethod bool
|
||||
|
||||
// SortKeys specifies map keys should be sorted before being printed. Use
|
||||
// this to have a more deterministic, diffable output. Note that only
|
||||
// native types (bool, int, uint, floats, uintptr and string) and types
|
||||
// that support the error or Stringer interfaces (if methods are
|
||||
// enabled) are supported, with other types sorted according to the
|
||||
// reflect.Value.String() output which guarantees display stability.
|
||||
SortKeys bool
|
||||
|
||||
// SpewKeys specifies that, as a last resort attempt, map keys should
|
||||
// be spewed to strings and sorted by those strings. This is only
|
||||
// considered if SortKeys is true.
|
||||
SpewKeys bool
|
||||
}
|
||||
|
||||
// Config is the active configuration of the top-level functions.
|
||||
// The configuration can be changed by modifying the contents of spew.Config.
|
||||
var Config = ConfigState{Indent: " "}
|
||||
|
||||
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the formatted string as a value that satisfies error. See NewFormatter
|
||||
// for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
|
||||
return fmt.Errorf(format, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprint(w, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprintf(w, format, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
|
||||
// passed with a Formatter interface returned by c.NewFormatter. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprintln(w, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Print is a wrapper for fmt.Print that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
|
||||
return fmt.Print(c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
|
||||
return fmt.Printf(format, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Println is a wrapper for fmt.Println that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
|
||||
return fmt.Println(c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Sprint(a ...interface{}) string {
|
||||
return fmt.Sprint(c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
|
||||
return fmt.Sprintf(format, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
|
||||
// were passed with a Formatter interface returned by c.NewFormatter. It
|
||||
// returns the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Sprintln(a ...interface{}) string {
|
||||
return fmt.Sprintln(c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
/*
|
||||
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
|
||||
interface. As a result, it integrates cleanly with standard fmt package
|
||||
printing functions. The formatter is useful for inline printing of smaller data
|
||||
types similar to the standard %v format specifier.
|
||||
|
||||
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
||||
addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
|
||||
combinations. Any other verbs such as %x and %q will be sent to the the
|
||||
standard fmt package for formatting. In addition, the custom formatter ignores
|
||||
the width and precision arguments (however they will still work on the format
|
||||
specifiers not handled by the custom formatter).
|
||||
|
||||
Typically this function shouldn't be called directly. It is much easier to make
|
||||
use of the custom formatter by calling one of the convenience functions such as
|
||||
c.Printf, c.Println, or c.Printf.
|
||||
*/
|
||||
func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
|
||||
return newFormatter(c, v)
|
||||
}
|
||||
|
||||
// Fdump formats and displays the passed arguments to io.Writer w. It formats
|
||||
// exactly the same as Dump.
|
||||
func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
|
||||
fdump(c, w, a...)
|
||||
}
|
||||
|
||||
/*
|
||||
Dump displays the passed parameters to standard out with newlines, customizable
|
||||
indentation, and additional debug information such as complete types and all
|
||||
pointer addresses used to indirect to the final value. It provides the
|
||||
following features over the built-in printing facilities provided by the fmt
|
||||
package:
|
||||
|
||||
* Pointers are dereferenced and followed
|
||||
* Circular data structures are detected and handled properly
|
||||
* Custom Stringer/error interfaces are optionally invoked, including
|
||||
on unexported types
|
||||
* Custom types which only implement the Stringer/error interfaces via
|
||||
a pointer receiver are optionally invoked when passing non-pointer
|
||||
variables
|
||||
* Byte arrays and slices are dumped like the hexdump -C command which
|
||||
includes offsets, byte values in hex, and ASCII output
|
||||
|
||||
The configuration options are controlled by modifying the public members
|
||||
of c. See ConfigState for options documentation.
|
||||
|
||||
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
|
||||
get the formatted result as a string.
|
||||
*/
|
||||
func (c *ConfigState) Dump(a ...interface{}) {
|
||||
fdump(c, os.Stdout, a...)
|
||||
}
|
||||
|
||||
// Sdump returns a string with the passed arguments formatted exactly the same
|
||||
// as Dump.
|
||||
func (c *ConfigState) Sdump(a ...interface{}) string {
|
||||
var buf bytes.Buffer
|
||||
fdump(c, &buf, a...)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// convertArgs accepts a slice of arguments and returns a slice of the same
|
||||
// length with each argument converted to a spew Formatter interface using
|
||||
// the ConfigState associated with s.
|
||||
func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
|
||||
formatters = make([]interface{}, len(args))
|
||||
for index, arg := range args {
|
||||
formatters[index] = newFormatter(c, arg)
|
||||
}
|
||||
return formatters
|
||||
}
|
||||
|
||||
// NewDefaultConfig returns a ConfigState with the following default settings.
|
||||
//
|
||||
// Indent: " "
|
||||
// MaxDepth: 0
|
||||
// DisableMethods: false
|
||||
// DisablePointerMethods: false
|
||||
// ContinueOnMethod: false
|
||||
// SortKeys: false
|
||||
func NewDefaultConfig() *ConfigState {
|
||||
return &ConfigState{Indent: " "}
|
||||
}
|
211 vendor/github.com/davecgh/go-spew/spew/doc.go generated vendored Normal file
@@ -0,0 +1,211 @@
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
/*
|
||||
Package spew implements a deep pretty printer for Go data structures to aid in
|
||||
debugging.
|
||||
|
||||
A quick overview of the additional features spew provides over the built-in
|
||||
printing facilities for Go data types are as follows:
|
||||
|
||||
* Pointers are dereferenced and followed
|
||||
* Circular data structures are detected and handled properly
|
||||
* Custom Stringer/error interfaces are optionally invoked, including
|
||||
on unexported types
|
||||
* Custom types which only implement the Stringer/error interfaces via
|
||||
a pointer receiver are optionally invoked when passing non-pointer
|
||||
variables
|
||||
* Byte arrays and slices are dumped like the hexdump -C command which
|
||||
includes offsets, byte values in hex, and ASCII output (only when using
|
||||
Dump style)
|
||||
|
||||
There are two different approaches spew allows for dumping Go data structures:
|
||||
|
||||
* Dump style which prints with newlines, customizable indentation,
|
||||
and additional debug information such as types and all pointer addresses
|
||||
used to indirect to the final value
|
||||
* A custom Formatter interface that integrates cleanly with the standard fmt
|
||||
package and replaces %v, %+v, %#v, and %#+v to provide inline printing
|
||||
similar to the default %v while providing the additional functionality
|
||||
outlined above and passing unsupported format verbs such as %x and %q
|
||||
along to fmt
|
||||
|
||||
Quick Start
|
||||
|
||||
This section demonstrates how to quickly get started with spew. See the
|
||||
sections below for further details on formatting and configuration options.
|
||||
|
||||
To dump a variable with full newlines, indentation, type, and pointer
|
||||
information use Dump, Fdump, or Sdump:
|
||||
spew.Dump(myVar1, myVar2, ...)
|
||||
spew.Fdump(someWriter, myVar1, myVar2, ...)
|
||||
str := spew.Sdump(myVar1, myVar2, ...)
|
||||
|
||||
Alternatively, if you would prefer to use format strings with a compacted inline
|
||||
printing style, use the convenience wrappers Printf, Fprintf, etc with
|
||||
%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
|
||||
%#+v (adds types and pointer addresses):
|
||||
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
|
||||
Configuration Options
|
||||
|
||||
Configuration of spew is handled by fields in the ConfigState type. For
|
||||
convenience, all of the top-level functions use a global state available
|
||||
via the spew.Config global.
|
||||
|
||||
It is also possible to create a ConfigState instance that provides methods
|
||||
equivalent to the top-level functions. This allows concurrent configuration
|
||||
options. See the ConfigState documentation for more details.
|
||||
|
||||
The following configuration options are available:
|
||||
* Indent
|
||||
String to use for each indentation level for Dump functions.
|
||||
It is a single space by default. A popular alternative is "\t".
|
||||
|
||||
* MaxDepth
|
||||
Maximum number of levels to descend into nested data structures.
|
||||
There is no limit by default.
|
||||
|
||||
* DisableMethods
|
||||
Disables invocation of error and Stringer interface methods.
|
||||
Method invocation is enabled by default.
|
||||
|
||||
* DisablePointerMethods
|
||||
Disables invocation of error and Stringer interface methods on types
|
||||
which only accept pointer receivers from non-pointer variables.
|
||||
Pointer method invocation is enabled by default.
|
||||
|
||||
* DisablePointerAddresses
|
||||
DisablePointerAddresses specifies whether to disable the printing of
|
||||
pointer addresses. This is useful when diffing data structures in tests.
|
||||
|
||||
* DisableCapacities
|
||||
DisableCapacities specifies whether to disable the printing of
|
||||
capacities for arrays, slices, maps and channels. This is useful when
|
||||
diffing data structures in tests.
|
||||
|
||||
* ContinueOnMethod
|
||||
Enables recursion into types after invoking error and Stringer interface
|
||||
methods. Recursion after method invocation is disabled by default.
|
||||
|
||||
* SortKeys
|
||||
Specifies map keys should be sorted before being printed. Use
|
||||
this to have a more deterministic, diffable output. Note that
|
||||
only native types (bool, int, uint, floats, uintptr and string)
|
||||
and types which implement error or Stringer interfaces are
|
||||
supported with other types sorted according to the
|
||||
reflect.Value.String() output which guarantees display
|
||||
stability. Natural map order is used by default.
|
||||
|
||||
* SpewKeys
|
||||
Specifies that, as a last resort attempt, map keys should be
|
||||
spewed to strings and sorted by those strings. This is only
|
||||
considered if SortKeys is true.
|
||||
|
||||
Dump Usage
|
||||
|
||||
Simply call spew.Dump with a list of variables you want to dump:
|
||||
|
||||
spew.Dump(myVar1, myVar2, ...)
|
||||
|
||||
You may also call spew.Fdump if you would prefer to output to an arbitrary
|
||||
io.Writer. For example, to dump to standard error:
|
||||
|
||||
spew.Fdump(os.Stderr, myVar1, myVar2, ...)
|
||||
|
||||
A third option is to call spew.Sdump to get the formatted output as a string:
|
||||
|
||||
str := spew.Sdump(myVar1, myVar2, ...)
|
||||
|
||||
Sample Dump Output
|
||||
|
||||
See the Dump example for details on the setup of the types and variables being
|
||||
shown here.
|
||||
|
||||
(main.Foo) {
|
||||
unexportedField: (*main.Bar)(0xf84002e210)({
|
||||
flag: (main.Flag) flagTwo,
|
||||
data: (uintptr) <nil>
|
||||
}),
|
||||
ExportedField: (map[interface {}]interface {}) (len=1) {
|
||||
(string) (len=3) "one": (bool) true
|
||||
}
|
||||
}
|
||||
|
||||
Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
|
||||
command as shown.
|
||||
([]uint8) (len=32 cap=32) {
|
||||
00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
|
||||
00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
|
||||
00000020 31 32 |12|
|
||||
}
|
||||
|
||||
Custom Formatter
|
||||
|
||||
Spew provides a custom formatter that implements the fmt.Formatter interface
|
||||
so that it integrates cleanly with standard fmt package printing functions. The
|
||||
formatter is useful for inline printing of smaller data types similar to the
|
||||
standard %v format specifier.
|
||||
|
||||
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
||||
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
|
||||
combinations. Any other verbs such as %x and %q will be sent to the
|
||||
standard fmt package for formatting. In addition, the custom formatter ignores
|
||||
the width and precision arguments (however they will still work on the format
|
||||
specifiers not handled by the custom formatter).
|
||||
|
||||
Custom Formatter Usage
|
||||
|
||||
The simplest way to make use of the spew custom formatter is to call one of the
|
||||
convenience functions such as spew.Printf, spew.Println, or spew.Fprintf. The
|
||||
functions have syntax you are most likely already familiar with:
|
||||
|
||||
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
spew.Println(myVar, myVar2)
|
||||
spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
|
||||
See the Index for the full list of convenience functions.
|
||||
|
||||
Sample Formatter Output
|
||||
|
||||
Double pointer to a uint8:
|
||||
%v: <**>5
|
||||
%+v: <**>(0xf8400420d0->0xf8400420c8)5
|
||||
%#v: (**uint8)5
|
||||
%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
|
||||
|
||||
Pointer to circular struct with a uint8 field and a pointer to itself:
|
||||
%v: <*>{1 <*><shown>}
|
||||
%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
|
||||
%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
|
||||
%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
|
||||
|
||||
See the Printf example for details on the setup of variables being shown
|
||||
here.
|
||||
|
||||
Errors
|
||||
|
||||
Since it is possible for custom Stringer/error interfaces to panic, spew
|
||||
detects them and handles them internally by printing the panic information
|
||||
inline with the output. Since spew is intended to provide deep pretty printing
|
||||
capabilities on structures, it intentionally does not return any errors.
|
||||
*/
|
||||
package spew
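To make the Quick Start above concrete, here is a small sketch of the two styles this file describes, the dump functions and the fmt-integrated verbs; the point type and values are illustrative only:

package main

import (
	"os"

	"github.com/davecgh/go-spew/spew"
)

type point struct{ X, Y int }

func main() {
	p := &point{X: 1, Y: 2}

	// Dump style: newlines, indentation, types, and pointer addresses.
	spew.Dump(p)
	spew.Fdump(os.Stderr, p)

	// Formatter style: inline output through the standard fmt verbs.
	spew.Printf("compact: %v  with addrs: %+v\n", p, p)
	spew.Fprintf(os.Stderr, "typed: %#v  full: %#+v\n", p, p)
}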
509  vendor/github.com/davecgh/go-spew/spew/dump.go  generated  vendored  Normal file
@@ -0,0 +1,509 @@
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
// uint8Type is a reflect.Type representing a uint8. It is used to
|
||||
// convert cgo types to uint8 slices for hexdumping.
|
||||
uint8Type = reflect.TypeOf(uint8(0))
|
||||
|
||||
// cCharRE is a regular expression that matches a cgo char.
|
||||
// It is used to detect character arrays to hexdump them.
|
||||
cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
|
||||
|
||||
// cUnsignedCharRE is a regular expression that matches a cgo unsigned
|
||||
// char. It is used to detect unsigned character arrays to hexdump
|
||||
// them.
|
||||
cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
|
||||
|
||||
// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
|
||||
// It is used to detect uint8_t arrays to hexdump them.
|
||||
cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
|
||||
)
|
||||
|
||||
// dumpState contains information about the state of a dump operation.
|
||||
type dumpState struct {
|
||||
w io.Writer
|
||||
depth int
|
||||
pointers map[uintptr]int
|
||||
ignoreNextType bool
|
||||
ignoreNextIndent bool
|
||||
cs *ConfigState
|
||||
}
|
||||
|
||||
// indent performs indentation according to the depth level and cs.Indent
|
||||
// option.
|
||||
func (d *dumpState) indent() {
|
||||
if d.ignoreNextIndent {
|
||||
d.ignoreNextIndent = false
|
||||
return
|
||||
}
|
||||
d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
|
||||
}
|
||||
|
||||
// unpackValue returns values inside of non-nil interfaces when possible.
|
||||
// This is useful for data types like structs, arrays, slices, and maps which
|
||||
// can contain varying types packed inside an interface.
|
||||
func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
|
||||
if v.Kind() == reflect.Interface && !v.IsNil() {
|
||||
v = v.Elem()
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// dumpPtr handles formatting of pointers by indirecting them as necessary.
|
||||
func (d *dumpState) dumpPtr(v reflect.Value) {
|
||||
// Remove pointers at or below the current depth from map used to detect
|
||||
// circular refs.
|
||||
for k, depth := range d.pointers {
|
||||
if depth >= d.depth {
|
||||
delete(d.pointers, k)
|
||||
}
|
||||
}
|
||||
|
||||
// Keep list of all dereferenced pointers to show later.
|
||||
pointerChain := make([]uintptr, 0)
|
||||
|
||||
// Figure out how many levels of indirection there are by dereferencing
|
||||
// pointers and unpacking interfaces down the chain while detecting circular
|
||||
// references.
|
||||
nilFound := false
|
||||
cycleFound := false
|
||||
indirects := 0
|
||||
ve := v
|
||||
for ve.Kind() == reflect.Ptr {
|
||||
if ve.IsNil() {
|
||||
nilFound = true
|
||||
break
|
||||
}
|
||||
indirects++
|
||||
addr := ve.Pointer()
|
||||
pointerChain = append(pointerChain, addr)
|
||||
if pd, ok := d.pointers[addr]; ok && pd < d.depth {
|
||||
cycleFound = true
|
||||
indirects--
|
||||
break
|
||||
}
|
||||
d.pointers[addr] = d.depth
|
||||
|
||||
ve = ve.Elem()
|
||||
if ve.Kind() == reflect.Interface {
|
||||
if ve.IsNil() {
|
||||
nilFound = true
|
||||
break
|
||||
}
|
||||
ve = ve.Elem()
|
||||
}
|
||||
}
|
||||
|
||||
// Display type information.
|
||||
d.w.Write(openParenBytes)
|
||||
d.w.Write(bytes.Repeat(asteriskBytes, indirects))
|
||||
d.w.Write([]byte(ve.Type().String()))
|
||||
d.w.Write(closeParenBytes)
|
||||
|
||||
// Display pointer information.
|
||||
if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
|
||||
d.w.Write(openParenBytes)
|
||||
for i, addr := range pointerChain {
|
||||
if i > 0 {
|
||||
d.w.Write(pointerChainBytes)
|
||||
}
|
||||
printHexPtr(d.w, addr)
|
||||
}
|
||||
d.w.Write(closeParenBytes)
|
||||
}
|
||||
|
||||
// Display dereferenced value.
|
||||
d.w.Write(openParenBytes)
|
||||
switch {
|
||||
case nilFound:
|
||||
d.w.Write(nilAngleBytes)
|
||||
|
||||
case cycleFound:
|
||||
d.w.Write(circularBytes)
|
||||
|
||||
default:
|
||||
d.ignoreNextType = true
|
||||
d.dump(ve)
|
||||
}
|
||||
d.w.Write(closeParenBytes)
|
||||
}
|
||||
|
||||
// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
|
||||
// reflection) arrays and slices are dumped in hexdump -C fashion.
|
||||
func (d *dumpState) dumpSlice(v reflect.Value) {
|
||||
// Determine whether this type should be hex dumped or not. Also,
|
||||
// for types which should be hexdumped, try to use the underlying data
|
||||
// first, then fall back to trying to convert them to a uint8 slice.
|
||||
var buf []uint8
|
||||
doConvert := false
|
||||
doHexDump := false
|
||||
numEntries := v.Len()
|
||||
if numEntries > 0 {
|
||||
vt := v.Index(0).Type()
|
||||
vts := vt.String()
|
||||
switch {
|
||||
// C types that need to be converted.
|
||||
case cCharRE.MatchString(vts):
|
||||
fallthrough
|
||||
case cUnsignedCharRE.MatchString(vts):
|
||||
fallthrough
|
||||
case cUint8tCharRE.MatchString(vts):
|
||||
doConvert = true
|
||||
|
||||
// Try to use existing uint8 slices and fall back to converting
|
||||
// and copying if that fails.
|
||||
case vt.Kind() == reflect.Uint8:
|
||||
// We need an addressable interface to convert the type
|
||||
// to a byte slice. However, the reflect package won't
|
||||
// give us an interface on certain things like
|
||||
// unexported struct fields in order to enforce
|
||||
// visibility rules. We use unsafe, when available, to
|
||||
// bypass these restrictions since this package does not
|
||||
// mutate the values.
|
||||
vs := v
|
||||
if !vs.CanInterface() || !vs.CanAddr() {
|
||||
vs = unsafeReflectValue(vs)
|
||||
}
|
||||
if !UnsafeDisabled {
|
||||
vs = vs.Slice(0, numEntries)
|
||||
|
||||
// Use the existing uint8 slice if it can be
|
||||
// type asserted.
|
||||
iface := vs.Interface()
|
||||
if slice, ok := iface.([]uint8); ok {
|
||||
buf = slice
|
||||
doHexDump = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// The underlying data needs to be converted if it can't
|
||||
// be type asserted to a uint8 slice.
|
||||
doConvert = true
|
||||
}
|
||||
|
||||
// Copy and convert the underlying type if needed.
|
||||
if doConvert && vt.ConvertibleTo(uint8Type) {
|
||||
// Convert and copy each element into a uint8 byte
|
||||
// slice.
|
||||
buf = make([]uint8, numEntries)
|
||||
for i := 0; i < numEntries; i++ {
|
||||
vv := v.Index(i)
|
||||
buf[i] = uint8(vv.Convert(uint8Type).Uint())
|
||||
}
|
||||
doHexDump = true
|
||||
}
|
||||
}
|
||||
|
||||
// Hexdump the entire slice as needed.
|
||||
if doHexDump {
|
||||
indent := strings.Repeat(d.cs.Indent, d.depth)
|
||||
str := indent + hex.Dump(buf)
|
||||
str = strings.Replace(str, "\n", "\n"+indent, -1)
|
||||
str = strings.TrimRight(str, d.cs.Indent)
|
||||
d.w.Write([]byte(str))
|
||||
return
|
||||
}
|
||||
|
||||
// Recursively call dump for each item.
|
||||
for i := 0; i < numEntries; i++ {
|
||||
d.dump(d.unpackValue(v.Index(i)))
|
||||
if i < (numEntries - 1) {
|
||||
d.w.Write(commaNewlineBytes)
|
||||
} else {
|
||||
d.w.Write(newlineBytes)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// dump is the main workhorse for dumping a value. It uses the passed reflect
|
||||
// value to figure out what kind of object we are dealing with and formats it
|
||||
// appropriately. It is a recursive function, however circular data structures
|
||||
// are detected and handled properly.
|
||||
func (d *dumpState) dump(v reflect.Value) {
|
||||
// Handle invalid reflect values immediately.
|
||||
kind := v.Kind()
|
||||
if kind == reflect.Invalid {
|
||||
d.w.Write(invalidAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
// Handle pointers specially.
|
||||
if kind == reflect.Ptr {
|
||||
d.indent()
|
||||
d.dumpPtr(v)
|
||||
return
|
||||
}
|
||||
|
||||
// Print type information unless already handled elsewhere.
|
||||
if !d.ignoreNextType {
|
||||
d.indent()
|
||||
d.w.Write(openParenBytes)
|
||||
d.w.Write([]byte(v.Type().String()))
|
||||
d.w.Write(closeParenBytes)
|
||||
d.w.Write(spaceBytes)
|
||||
}
|
||||
d.ignoreNextType = false
|
||||
|
||||
// Display length and capacity if the built-in len and cap functions
|
||||
// work with the value's kind and the len/cap itself is non-zero.
|
||||
valueLen, valueCap := 0, 0
|
||||
switch v.Kind() {
|
||||
case reflect.Array, reflect.Slice, reflect.Chan:
|
||||
valueLen, valueCap = v.Len(), v.Cap()
|
||||
case reflect.Map, reflect.String:
|
||||
valueLen = v.Len()
|
||||
}
|
||||
if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
|
||||
d.w.Write(openParenBytes)
|
||||
if valueLen != 0 {
|
||||
d.w.Write(lenEqualsBytes)
|
||||
printInt(d.w, int64(valueLen), 10)
|
||||
}
|
||||
if !d.cs.DisableCapacities && valueCap != 0 {
|
||||
if valueLen != 0 {
|
||||
d.w.Write(spaceBytes)
|
||||
}
|
||||
d.w.Write(capEqualsBytes)
|
||||
printInt(d.w, int64(valueCap), 10)
|
||||
}
|
||||
d.w.Write(closeParenBytes)
|
||||
d.w.Write(spaceBytes)
|
||||
}
|
||||
|
||||
// Call Stringer/error interfaces if they exist and the handle methods flag
|
||||
// is enabled
|
||||
if !d.cs.DisableMethods {
|
||||
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
|
||||
if handled := handleMethods(d.cs, d.w, v); handled {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch kind {
|
||||
case reflect.Invalid:
|
||||
// Do nothing. We should never get here since invalid has already
|
||||
// been handled above.
|
||||
|
||||
case reflect.Bool:
|
||||
printBool(d.w, v.Bool())
|
||||
|
||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||
printInt(d.w, v.Int(), 10)
|
||||
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||
printUint(d.w, v.Uint(), 10)
|
||||
|
||||
case reflect.Float32:
|
||||
printFloat(d.w, v.Float(), 32)
|
||||
|
||||
case reflect.Float64:
|
||||
printFloat(d.w, v.Float(), 64)
|
||||
|
||||
case reflect.Complex64:
|
||||
printComplex(d.w, v.Complex(), 32)
|
||||
|
||||
case reflect.Complex128:
|
||||
printComplex(d.w, v.Complex(), 64)
|
||||
|
||||
case reflect.Slice:
|
||||
if v.IsNil() {
|
||||
d.w.Write(nilAngleBytes)
|
||||
break
|
||||
}
|
||||
fallthrough
|
||||
|
||||
case reflect.Array:
|
||||
d.w.Write(openBraceNewlineBytes)
|
||||
d.depth++
|
||||
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
||||
d.indent()
|
||||
d.w.Write(maxNewlineBytes)
|
||||
} else {
|
||||
d.dumpSlice(v)
|
||||
}
|
||||
d.depth--
|
||||
d.indent()
|
||||
d.w.Write(closeBraceBytes)
|
||||
|
||||
case reflect.String:
|
||||
d.w.Write([]byte(strconv.Quote(v.String())))
|
||||
|
||||
case reflect.Interface:
|
||||
// The only time we should get here is for nil interfaces due to
|
||||
// unpackValue calls.
|
||||
if v.IsNil() {
|
||||
d.w.Write(nilAngleBytes)
|
||||
}
|
||||
|
||||
case reflect.Ptr:
|
||||
// Do nothing. We should never get here since pointers have already
|
||||
// been handled above.
|
||||
|
||||
case reflect.Map:
|
||||
// nil maps should be indicated as different than empty maps
|
||||
if v.IsNil() {
|
||||
d.w.Write(nilAngleBytes)
|
||||
break
|
||||
}
|
||||
|
||||
d.w.Write(openBraceNewlineBytes)
|
||||
d.depth++
|
||||
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
||||
d.indent()
|
||||
d.w.Write(maxNewlineBytes)
|
||||
} else {
|
||||
numEntries := v.Len()
|
||||
keys := v.MapKeys()
|
||||
if d.cs.SortKeys {
|
||||
sortValues(keys, d.cs)
|
||||
}
|
||||
for i, key := range keys {
|
||||
d.dump(d.unpackValue(key))
|
||||
d.w.Write(colonSpaceBytes)
|
||||
d.ignoreNextIndent = true
|
||||
d.dump(d.unpackValue(v.MapIndex(key)))
|
||||
if i < (numEntries - 1) {
|
||||
d.w.Write(commaNewlineBytes)
|
||||
} else {
|
||||
d.w.Write(newlineBytes)
|
||||
}
|
||||
}
|
||||
}
|
||||
d.depth--
|
||||
d.indent()
|
||||
d.w.Write(closeBraceBytes)
|
||||
|
||||
case reflect.Struct:
|
||||
d.w.Write(openBraceNewlineBytes)
|
||||
d.depth++
|
||||
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
||||
d.indent()
|
||||
d.w.Write(maxNewlineBytes)
|
||||
} else {
|
||||
vt := v.Type()
|
||||
numFields := v.NumField()
|
||||
for i := 0; i < numFields; i++ {
|
||||
d.indent()
|
||||
vtf := vt.Field(i)
|
||||
d.w.Write([]byte(vtf.Name))
|
||||
d.w.Write(colonSpaceBytes)
|
||||
d.ignoreNextIndent = true
|
||||
d.dump(d.unpackValue(v.Field(i)))
|
||||
if i < (numFields - 1) {
|
||||
d.w.Write(commaNewlineBytes)
|
||||
} else {
|
||||
d.w.Write(newlineBytes)
|
||||
}
|
||||
}
|
||||
}
|
||||
d.depth--
|
||||
d.indent()
|
||||
d.w.Write(closeBraceBytes)
|
||||
|
||||
case reflect.Uintptr:
|
||||
printHexPtr(d.w, uintptr(v.Uint()))
|
||||
|
||||
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
|
||||
printHexPtr(d.w, v.Pointer())
|
||||
|
||||
// There were not any other types at the time this code was written, but
|
||||
// fall back to letting the default fmt package handle it in case any new
|
||||
// types are added.
|
||||
default:
|
||||
if v.CanInterface() {
|
||||
fmt.Fprintf(d.w, "%v", v.Interface())
|
||||
} else {
|
||||
fmt.Fprintf(d.w, "%v", v.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// fdump is a helper function to consolidate the logic from the various public
|
||||
// methods which take varying writers and config states.
|
||||
func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
|
||||
for _, arg := range a {
|
||||
if arg == nil {
|
||||
w.Write(interfaceBytes)
|
||||
w.Write(spaceBytes)
|
||||
w.Write(nilAngleBytes)
|
||||
w.Write(newlineBytes)
|
||||
continue
|
||||
}
|
||||
|
||||
d := dumpState{w: w, cs: cs}
|
||||
d.pointers = make(map[uintptr]int)
|
||||
d.dump(reflect.ValueOf(arg))
|
||||
d.w.Write(newlineBytes)
|
||||
}
|
||||
}
|
||||
|
||||
// Fdump formats and displays the passed arguments to io.Writer w. It formats
|
||||
// exactly the same as Dump.
|
||||
func Fdump(w io.Writer, a ...interface{}) {
|
||||
fdump(&Config, w, a...)
|
||||
}
|
||||
|
||||
// Sdump returns a string with the passed arguments formatted exactly the same
|
||||
// as Dump.
|
||||
func Sdump(a ...interface{}) string {
|
||||
var buf bytes.Buffer
|
||||
fdump(&Config, &buf, a...)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
/*
|
||||
Dump displays the passed parameters to standard out with newlines, customizable
|
||||
indentation, and additional debug information such as complete types and all
|
||||
pointer addresses used to indirect to the final value. It provides the
|
||||
following features over the built-in printing facilities provided by the fmt
|
||||
package:
|
||||
|
||||
* Pointers are dereferenced and followed
|
||||
* Circular data structures are detected and handled properly
|
||||
* Custom Stringer/error interfaces are optionally invoked, including
|
||||
on unexported types
|
||||
* Custom types which only implement the Stringer/error interfaces via
|
||||
a pointer receiver are optionally invoked when passing non-pointer
|
||||
variables
|
||||
* Byte arrays and slices are dumped like the hexdump -C command which
|
||||
includes offsets, byte values in hex, and ASCII output
|
||||
|
||||
The configuration options are controlled by an exported package global,
|
||||
spew.Config. See ConfigState for options documentation.
|
||||
|
||||
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
|
||||
get the formatted result as a string.
|
||||
*/
|
||||
func Dump(a ...interface{}) {
|
||||
fdump(&Config, os.Stdout, a...)
|
||||
}
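Because dumpSlice above routes byte arrays and slices through hex.Dump, a []byte handed to Dump is rendered hexdump-style (offsets, hex values, ASCII) rather than element by element. A short illustrative sketch:

package main

import "github.com/davecgh/go-spew/spew"

func main() {
	// Rendered like `hexdump -C`, not as a list of integers.
	b := []byte("spew hexdump example")
	spew.Dump(b)
}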
419  vendor/github.com/davecgh/go-spew/spew/format.go  generated  vendored  Normal file
@@ -0,0 +1,419 @@
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// supportedFlags is a list of all the character flags supported by fmt package.
|
||||
const supportedFlags = "0-+# "
|
||||
|
||||
// formatState implements the fmt.Formatter interface and contains information
|
||||
// about the state of a formatting operation. The NewFormatter function can
|
||||
// be used to get a new Formatter which can be used directly as arguments
|
||||
// in standard fmt package printing calls.
|
||||
type formatState struct {
|
||||
value interface{}
|
||||
fs fmt.State
|
||||
depth int
|
||||
pointers map[uintptr]int
|
||||
ignoreNextType bool
|
||||
cs *ConfigState
|
||||
}
|
||||
|
||||
// buildDefaultFormat recreates the original format string without precision
|
||||
// and width information to pass in to fmt.Sprintf in the case of an
|
||||
// unrecognized type. Unless new types are added to the language, this
|
||||
// function won't ever be called.
|
||||
func (f *formatState) buildDefaultFormat() (format string) {
|
||||
buf := bytes.NewBuffer(percentBytes)
|
||||
|
||||
for _, flag := range supportedFlags {
|
||||
if f.fs.Flag(int(flag)) {
|
||||
buf.WriteRune(flag)
|
||||
}
|
||||
}
|
||||
|
||||
buf.WriteRune('v')
|
||||
|
||||
format = buf.String()
|
||||
return format
|
||||
}
|
||||
|
||||
// constructOrigFormat recreates the original format string including precision
|
||||
// and width information to pass along to the standard fmt package. This allows
|
||||
// automatic deferral of all format strings this package doesn't support.
|
||||
func (f *formatState) constructOrigFormat(verb rune) (format string) {
|
||||
buf := bytes.NewBuffer(percentBytes)
|
||||
|
||||
for _, flag := range supportedFlags {
|
||||
if f.fs.Flag(int(flag)) {
|
||||
buf.WriteRune(flag)
|
||||
}
|
||||
}
|
||||
|
||||
if width, ok := f.fs.Width(); ok {
|
||||
buf.WriteString(strconv.Itoa(width))
|
||||
}
|
||||
|
||||
if precision, ok := f.fs.Precision(); ok {
|
||||
buf.Write(precisionBytes)
|
||||
buf.WriteString(strconv.Itoa(precision))
|
||||
}
|
||||
|
||||
buf.WriteRune(verb)
|
||||
|
||||
format = buf.String()
|
||||
return format
|
||||
}
|
||||
|
||||
// unpackValue returns values inside of non-nil interfaces when possible and
|
||||
// ensures that types for values which have been unpacked from an interface
|
||||
// are displayed when the show types flag is also set.
|
||||
// This is useful for data types like structs, arrays, slices, and maps which
|
||||
// can contain varying types packed inside an interface.
|
||||
func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
|
||||
if v.Kind() == reflect.Interface {
|
||||
f.ignoreNextType = false
|
||||
if !v.IsNil() {
|
||||
v = v.Elem()
|
||||
}
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// formatPtr handles formatting of pointers by indirecting them as necessary.
|
||||
func (f *formatState) formatPtr(v reflect.Value) {
|
||||
// Display nil if top level pointer is nil.
|
||||
showTypes := f.fs.Flag('#')
|
||||
if v.IsNil() && (!showTypes || f.ignoreNextType) {
|
||||
f.fs.Write(nilAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
// Remove pointers at or below the current depth from map used to detect
|
||||
// circular refs.
|
||||
for k, depth := range f.pointers {
|
||||
if depth >= f.depth {
|
||||
delete(f.pointers, k)
|
||||
}
|
||||
}
|
||||
|
||||
// Keep list of all dereferenced pointers to possibly show later.
|
||||
pointerChain := make([]uintptr, 0)
|
||||
|
||||
// Figure out how many levels of indirection there are by dereferencing
|
||||
// pointers and unpacking interfaces down the chain while detecting circular
|
||||
// references.
|
||||
nilFound := false
|
||||
cycleFound := false
|
||||
indirects := 0
|
||||
ve := v
|
||||
for ve.Kind() == reflect.Ptr {
|
||||
if ve.IsNil() {
|
||||
nilFound = true
|
||||
break
|
||||
}
|
||||
indirects++
|
||||
addr := ve.Pointer()
|
||||
pointerChain = append(pointerChain, addr)
|
||||
if pd, ok := f.pointers[addr]; ok && pd < f.depth {
|
||||
cycleFound = true
|
||||
indirects--
|
||||
break
|
||||
}
|
||||
f.pointers[addr] = f.depth
|
||||
|
||||
ve = ve.Elem()
|
||||
if ve.Kind() == reflect.Interface {
|
||||
if ve.IsNil() {
|
||||
nilFound = true
|
||||
break
|
||||
}
|
||||
ve = ve.Elem()
|
||||
}
|
||||
}
|
||||
|
||||
// Display type or indirection level depending on flags.
|
||||
if showTypes && !f.ignoreNextType {
|
||||
f.fs.Write(openParenBytes)
|
||||
f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
|
||||
f.fs.Write([]byte(ve.Type().String()))
|
||||
f.fs.Write(closeParenBytes)
|
||||
} else {
|
||||
if nilFound || cycleFound {
|
||||
indirects += strings.Count(ve.Type().String(), "*")
|
||||
}
|
||||
f.fs.Write(openAngleBytes)
|
||||
f.fs.Write([]byte(strings.Repeat("*", indirects)))
|
||||
f.fs.Write(closeAngleBytes)
|
||||
}
|
||||
|
||||
// Display pointer information depending on flags.
|
||||
if f.fs.Flag('+') && (len(pointerChain) > 0) {
|
||||
f.fs.Write(openParenBytes)
|
||||
for i, addr := range pointerChain {
|
||||
if i > 0 {
|
||||
f.fs.Write(pointerChainBytes)
|
||||
}
|
||||
printHexPtr(f.fs, addr)
|
||||
}
|
||||
f.fs.Write(closeParenBytes)
|
||||
}
|
||||
|
||||
// Display dereferenced value.
|
||||
switch {
|
||||
case nilFound:
|
||||
f.fs.Write(nilAngleBytes)
|
||||
|
||||
case cycleFound:
|
||||
f.fs.Write(circularShortBytes)
|
||||
|
||||
default:
|
||||
f.ignoreNextType = true
|
||||
f.format(ve)
|
||||
}
|
||||
}
|
||||
|
||||
// format is the main workhorse for providing the Formatter interface. It
|
||||
// uses the passed reflect value to figure out what kind of object we are
|
||||
// dealing with and formats it appropriately. It is a recursive function,
|
||||
// however circular data structures are detected and handled properly.
|
||||
func (f *formatState) format(v reflect.Value) {
|
||||
// Handle invalid reflect values immediately.
|
||||
kind := v.Kind()
|
||||
if kind == reflect.Invalid {
|
||||
f.fs.Write(invalidAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
// Handle pointers specially.
|
||||
if kind == reflect.Ptr {
|
||||
f.formatPtr(v)
|
||||
return
|
||||
}
|
||||
|
||||
// Print type information unless already handled elsewhere.
|
||||
if !f.ignoreNextType && f.fs.Flag('#') {
|
||||
f.fs.Write(openParenBytes)
|
||||
f.fs.Write([]byte(v.Type().String()))
|
||||
f.fs.Write(closeParenBytes)
|
||||
}
|
||||
f.ignoreNextType = false
|
||||
|
||||
// Call Stringer/error interfaces if they exist and the handle methods
|
||||
// flag is enabled.
|
||||
if !f.cs.DisableMethods {
|
||||
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
|
||||
if handled := handleMethods(f.cs, f.fs, v); handled {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch kind {
|
||||
case reflect.Invalid:
|
||||
// Do nothing. We should never get here since invalid has already
|
||||
// been handled above.
|
||||
|
||||
case reflect.Bool:
|
||||
printBool(f.fs, v.Bool())
|
||||
|
||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||
printInt(f.fs, v.Int(), 10)
|
||||
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||
printUint(f.fs, v.Uint(), 10)
|
||||
|
||||
case reflect.Float32:
|
||||
printFloat(f.fs, v.Float(), 32)
|
||||
|
||||
case reflect.Float64:
|
||||
printFloat(f.fs, v.Float(), 64)
|
||||
|
||||
case reflect.Complex64:
|
||||
printComplex(f.fs, v.Complex(), 32)
|
||||
|
||||
case reflect.Complex128:
|
||||
printComplex(f.fs, v.Complex(), 64)
|
||||
|
||||
case reflect.Slice:
|
||||
if v.IsNil() {
|
||||
f.fs.Write(nilAngleBytes)
|
||||
break
|
||||
}
|
||||
fallthrough
|
||||
|
||||
case reflect.Array:
|
||||
f.fs.Write(openBracketBytes)
|
||||
f.depth++
|
||||
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
|
||||
f.fs.Write(maxShortBytes)
|
||||
} else {
|
||||
numEntries := v.Len()
|
||||
for i := 0; i < numEntries; i++ {
|
||||
if i > 0 {
|
||||
f.fs.Write(spaceBytes)
|
||||
}
|
||||
f.ignoreNextType = true
|
||||
f.format(f.unpackValue(v.Index(i)))
|
||||
}
|
||||
}
|
||||
f.depth--
|
||||
f.fs.Write(closeBracketBytes)
|
||||
|
||||
case reflect.String:
|
||||
f.fs.Write([]byte(v.String()))
|
||||
|
||||
case reflect.Interface:
|
||||
// The only time we should get here is for nil interfaces due to
|
||||
// unpackValue calls.
|
||||
if v.IsNil() {
|
||||
f.fs.Write(nilAngleBytes)
|
||||
}
|
||||
|
||||
case reflect.Ptr:
|
||||
// Do nothing. We should never get here since pointers have already
|
||||
// been handled above.
|
||||
|
||||
case reflect.Map:
|
||||
// nil maps should be indicated as different than empty maps
|
||||
if v.IsNil() {
|
||||
f.fs.Write(nilAngleBytes)
|
||||
break
|
||||
}
|
||||
|
||||
f.fs.Write(openMapBytes)
|
||||
f.depth++
|
||||
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
|
||||
f.fs.Write(maxShortBytes)
|
||||
} else {
|
||||
keys := v.MapKeys()
|
||||
if f.cs.SortKeys {
|
||||
sortValues(keys, f.cs)
|
||||
}
|
||||
for i, key := range keys {
|
||||
if i > 0 {
|
||||
f.fs.Write(spaceBytes)
|
||||
}
|
||||
f.ignoreNextType = true
|
||||
f.format(f.unpackValue(key))
|
||||
f.fs.Write(colonBytes)
|
||||
f.ignoreNextType = true
|
||||
f.format(f.unpackValue(v.MapIndex(key)))
|
||||
}
|
||||
}
|
||||
f.depth--
|
||||
f.fs.Write(closeMapBytes)
|
||||
|
||||
case reflect.Struct:
|
||||
numFields := v.NumField()
|
||||
f.fs.Write(openBraceBytes)
|
||||
f.depth++
|
||||
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
|
||||
f.fs.Write(maxShortBytes)
|
||||
} else {
|
||||
vt := v.Type()
|
||||
for i := 0; i < numFields; i++ {
|
||||
if i > 0 {
|
||||
f.fs.Write(spaceBytes)
|
||||
}
|
||||
vtf := vt.Field(i)
|
||||
if f.fs.Flag('+') || f.fs.Flag('#') {
|
||||
f.fs.Write([]byte(vtf.Name))
|
||||
f.fs.Write(colonBytes)
|
||||
}
|
||||
f.format(f.unpackValue(v.Field(i)))
|
||||
}
|
||||
}
|
||||
f.depth--
|
||||
f.fs.Write(closeBraceBytes)
|
||||
|
||||
case reflect.Uintptr:
|
||||
printHexPtr(f.fs, uintptr(v.Uint()))
|
||||
|
||||
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
|
||||
printHexPtr(f.fs, v.Pointer())
|
||||
|
||||
// There were not any other types at the time this code was written, but
|
||||
// fall back to letting the default fmt package handle it if any get added.
|
||||
default:
|
||||
format := f.buildDefaultFormat()
|
||||
if v.CanInterface() {
|
||||
fmt.Fprintf(f.fs, format, v.Interface())
|
||||
} else {
|
||||
fmt.Fprintf(f.fs, format, v.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
|
||||
// details.
|
||||
func (f *formatState) Format(fs fmt.State, verb rune) {
|
||||
f.fs = fs
|
||||
|
||||
// Use standard formatting for verbs that are not v.
|
||||
if verb != 'v' {
|
||||
format := f.constructOrigFormat(verb)
|
||||
fmt.Fprintf(fs, format, f.value)
|
||||
return
|
||||
}
|
||||
|
||||
if f.value == nil {
|
||||
if fs.Flag('#') {
|
||||
fs.Write(interfaceBytes)
|
||||
}
|
||||
fs.Write(nilAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
f.format(reflect.ValueOf(f.value))
|
||||
}
|
||||
|
||||
// newFormatter is a helper function to consolidate the logic from the various
|
||||
// public methods which take varying config states.
|
||||
func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
|
||||
fs := &formatState{value: v, cs: cs}
|
||||
fs.pointers = make(map[uintptr]int)
|
||||
return fs
|
||||
}
|
||||
|
||||
/*
|
||||
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
|
||||
interface. As a result, it integrates cleanly with standard fmt package
|
||||
printing functions. The formatter is useful for inline printing of smaller data
|
||||
types similar to the standard %v format specifier.
|
||||
|
||||
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
||||
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
|
||||
combinations. Any other verbs such as %x and %q will be sent to the
|
||||
standard fmt package for formatting. In addition, the custom formatter ignores
|
||||
the width and precision arguments (however they will still work on the format
|
||||
specifiers not handled by the custom formatter).
|
||||
|
||||
Typically this function shouldn't be called directly. It is much easier to make
|
||||
use of the custom formatter by calling one of the convenience functions such as
|
||||
Printf, Println, or Fprintf.
|
||||
*/
|
||||
func NewFormatter(v interface{}) fmt.Formatter {
|
||||
return newFormatter(&Config, v)
|
||||
}
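NewFormatter can also be handed straight to the standard fmt functions when the convenience wrappers are not wanted; a minimal sketch:

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	v := map[string]int{"a": 1}
	// Wrap the value once and pass it to plain fmt; %v, %+v, %#v, and %#+v
	// then gain spew's handling for this argument only.
	fmt.Printf("%#v\n", spew.NewFormatter(v))
}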
148  vendor/github.com/davecgh/go-spew/spew/spew.go  generated  vendored  Normal file
@@ -0,0 +1,148 @@
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the formatted string as a value that satisfies error. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Errorf(format string, a ...interface{}) (err error) {
|
||||
return fmt.Errorf(format, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprint(w, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprintf(w, format, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
|
||||
// passed with a default Formatter interface returned by NewFormatter. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprintln(w, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Print is a wrapper for fmt.Print that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Print(a ...interface{}) (n int, err error) {
|
||||
return fmt.Print(convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Printf(format string, a ...interface{}) (n int, err error) {
|
||||
return fmt.Printf(format, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Println is a wrapper for fmt.Println that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Println(a ...interface{}) (n int, err error) {
|
||||
return fmt.Println(convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Sprint(a ...interface{}) string {
|
||||
return fmt.Sprint(convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Sprintf(format string, a ...interface{}) string {
|
||||
return fmt.Sprintf(format, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
|
||||
// were passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Sprintln(a ...interface{}) string {
|
||||
return fmt.Sprintln(convertArgs(a)...)
|
||||
}
|
||||
|
||||
// convertArgs accepts a slice of arguments and returns a slice of the same
|
||||
// length with each argument converted to a default spew Formatter interface.
|
||||
func convertArgs(args []interface{}) (formatters []interface{}) {
|
||||
formatters = make([]interface{}, len(args))
|
||||
for index, arg := range args {
|
||||
formatters[index] = NewFormatter(arg)
|
||||
}
|
||||
return formatters
|
||||
}
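The wrappers above are drop-in replacements for their fmt counterparts; for example, building an error and a log line with spew formatting (the request type is illustrative only):

package main

import (
	"log"

	"github.com/davecgh/go-spew/spew"
)

type request struct {
	ID   int
	Body []byte
}

func main() {
	req := request{ID: 7, Body: []byte("payload")}

	// Each argument is wrapped with the default Formatter before being
	// handed to fmt.Errorf / fmt.Sprintln.
	err := spew.Errorf("rejected request: %+v", req)
	log.Print(spew.Sprintln("current request:", req))
	_ = err
}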
15  vendor/github.com/moby/swarmkit/v2/agent/csi/plugin/manager.go  generated  vendored
@@ -5,9 +5,8 @@ import (
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/docker/docker/pkg/plugingetter"
|
||||
|
||||
"github.com/moby/swarmkit/v2/api"
|
||||
"github.com/moby/swarmkit/v2/node/plugin"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@@ -35,15 +34,15 @@ type pluginManager struct {
|
|||
// newNodePluginFunc usually points to NewNodePlugin. However, for testing,
|
||||
// NewNodePlugin can be swapped out with a function that creates fake node
|
||||
// plugins
|
||||
newNodePluginFunc func(string, plugingetter.CompatPlugin, plugingetter.PluginAddr, SecretGetter) NodePlugin
|
||||
newNodePluginFunc func(string, plugin.AddrPlugin, SecretGetter) NodePlugin
|
||||
|
||||
// secrets is a SecretGetter for use by node plugins.
|
||||
secrets SecretGetter
|
||||
|
||||
pg plugingetter.PluginGetter
|
||||
pg plugin.Getter
|
||||
}
|
||||
|
||||
func NewManager(pg plugingetter.PluginGetter, secrets SecretGetter) Manager {
|
||||
func NewManager(pg plugin.Getter, secrets SecretGetter) Manager {
|
||||
return &pluginManager{
|
||||
plugins: map[string]NodePlugin{},
|
||||
newNodePluginFunc: NewNodePlugin,
|
||||
|
@@ -104,17 +103,17 @@ func (pm *pluginManager) getPlugin(name string) (NodePlugin, error) {
|
|||
return p, nil
|
||||
}
|
||||
|
||||
pc, err := pm.pg.Get(name, DockerCSIPluginCap, plugingetter.Lookup)
|
||||
pc, err := pm.pg.Get(name, DockerCSIPluginCap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pa, ok := pc.(plugingetter.PluginAddr)
|
||||
pa, ok := pc.(plugin.AddrPlugin)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("plugin does not implement PluginAddr interface")
|
||||
}
|
||||
|
||||
p := pm.newNodePluginFunc(name, pc, pa, pm.secrets)
|
||||
p := pm.newNodePluginFunc(name, pa, pm.secrets)
|
||||
pm.plugins[name] = p
|
||||
return p, nil
|
||||
}
|
12  vendor/github.com/moby/swarmkit/v2/agent/csi/plugin/plugin.go  generated  vendored
@@ -11,10 +11,10 @@ import (
"google.golang.org/grpc/status"
|
||||
|
||||
"github.com/container-storage-interface/spec/lib/go/csi"
|
||||
"github.com/docker/docker/pkg/plugingetter"
|
||||
"github.com/moby/swarmkit/v2/api"
|
||||
"github.com/moby/swarmkit/v2/internal/csi/capability"
|
||||
"github.com/moby/swarmkit/v2/log"
|
||||
"github.com/moby/swarmkit/v2/node/plugin"
|
||||
)
|
||||
|
||||
// SecretGetter is a reimplementation of the exec.SecretGetter interface in the
|
||||
|
@@ -88,17 +88,17 @@ const (
|
|||
TargetPublishPath string = "/data/published"
|
||||
)
|
||||
|
||||
func NewNodePlugin(name string, pc plugingetter.CompatPlugin, pa plugingetter.PluginAddr, secrets SecretGetter) NodePlugin {
|
||||
return newNodePlugin(name, pc, pa, secrets)
|
||||
func NewNodePlugin(name string, p plugin.AddrPlugin, secrets SecretGetter) NodePlugin {
|
||||
return newNodePlugin(name, p, secrets)
|
||||
}
|
||||
|
||||
// newNodePlugin returns a raw nodePlugin object, not behind an interface. this
|
||||
// is useful for testing.
|
||||
func newNodePlugin(name string, pc plugingetter.CompatPlugin, pa plugingetter.PluginAddr, secrets SecretGetter) *nodePlugin {
|
||||
func newNodePlugin(name string, p plugin.AddrPlugin, secrets SecretGetter) *nodePlugin {
|
||||
return &nodePlugin{
|
||||
name: name,
|
||||
socket: fmt.Sprintf("%s://%s", pa.Addr().Network(), pa.Addr().String()),
|
||||
scopePath: pc.ScopedPath,
|
||||
socket: fmt.Sprintf("%s://%s", p.Addr().Network(), p.Addr().String()),
|
||||
scopePath: p.ScopedPath,
|
||||
secrets: secrets,
|
||||
volumeMap: map[string]*volumePublishStatus{},
|
||||
}
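newNodePlugin only touches Addr and ScopedPath on the plugin it receives, so a test fake can stay small. A hedged sketch; it assumes plugin.AddrPlugin exposes Addr() net.Addr plus a ScopedPath(string) string method and nothing else, which may not match the full interface.

package plugintest

import "net"

// fakeAddrPlugin covers the minimal surface newNodePlugin uses above.
// The real plugin.AddrPlugin interface may require additional methods.
type fakeAddrPlugin struct {
	addr net.Addr
}

func (p fakeAddrPlugin) Addr() net.Addr { return p.addr }

// ScopedPath mirrors the method value assigned to scopePath above; this
// fake simply returns the path unchanged.
func (p fakeAddrPlugin) ScopedPath(s string) string { return s }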
5  vendor/github.com/moby/swarmkit/v2/agent/csi/volumes.go  generated  vendored
@@ -6,12 +6,11 @@ import (
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/pkg/plugingetter"
|
||||
|
||||
"github.com/moby/swarmkit/v2/agent/csi/plugin"
|
||||
"github.com/moby/swarmkit/v2/agent/exec"
|
||||
"github.com/moby/swarmkit/v2/api"
|
||||
"github.com/moby/swarmkit/v2/log"
|
||||
mobyplugin "github.com/moby/swarmkit/v2/node/plugin"
|
||||
"github.com/moby/swarmkit/v2/volumequeue"
|
||||
)
|
||||
|
||||
|
@@ -46,7 +45,7 @@ type volumes struct {
|
|||
}
|
||||
|
||||
// NewManager returns a place to store volumes.
|
||||
func NewManager(pg plugingetter.PluginGetter, secrets exec.SecretGetter) exec.VolumesManager {
|
||||
func NewManager(pg mobyplugin.Getter, secrets exec.SecretGetter) exec.VolumesManager {
|
||||
r := &volumes{
|
||||
volumes: map[string]volumeState{},
|
||||
plugins: plugin.NewManager(pg, secrets),
|
5  vendor/github.com/moby/swarmkit/v2/agent/dependency.go  generated  vendored
@@ -1,13 +1,12 @@
|
||||
|
||||
import (
|
||||
"github.com/docker/docker/pkg/plugingetter"
|
||||
|
||||
"github.com/moby/swarmkit/v2/agent/configs"
|
||||
"github.com/moby/swarmkit/v2/agent/csi"
|
||||
"github.com/moby/swarmkit/v2/agent/exec"
|
||||
"github.com/moby/swarmkit/v2/agent/secrets"
|
||||
"github.com/moby/swarmkit/v2/api"
|
||||
"github.com/moby/swarmkit/v2/node/plugin"
|
||||
)
|
||||
|
||||
type dependencyManager struct {
|
||||
|
@@ -18,7 +17,7 @@ type dependencyManager struct {
|
|||
|
||||
// NewDependencyManager creates a dependency manager object that wraps
|
||||
// objects which provide access to various dependency types.
|
||||
func NewDependencyManager(pg plugingetter.PluginGetter) exec.DependencyManager {
|
||||
func NewDependencyManager(pg plugin.Getter) exec.DependencyManager {
|
||||
d := &dependencyManager{
|
||||
secrets: secrets.NewManager(),
|
||||
configs: configs.NewManager(),
|
75  vendor/github.com/moby/swarmkit/v2/internal/idm/idm.go  generated  vendored
@@ -5,65 +5,80 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/docker/docker/libnetwork/bitmap"
|
||||
"github.com/bits-and-blooms/bitset"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrNoBitAvailable is returned when no more bits are available to set
|
||||
ErrNoBitAvailable = errors.New("no bit available")
|
||||
// ErrBitAllocated is returned when the specific bit requested is already set
|
||||
ErrBitAllocated = errors.New("requested bit is already allocated")
|
||||
)
|
||||
|
||||
// IDM manages the reservation/release of numerical ids from a contiguous set.
|
||||
//
|
||||
// An IDM instance is not safe for concurrent use.
|
||||
type IDM struct {
|
||||
start uint64
|
||||
end uint64
|
||||
handle *bitmap.Bitmap
|
||||
start, end uint
|
||||
set *bitset.BitSet
|
||||
next uint // index of the bit to start searching for the next serial allocation from (not offset by start)
|
||||
}
|
||||
|
||||
// New returns an instance of id manager for a [start,end] set of numerical ids.
|
||||
func New(start, end uint64) (*IDM, error) {
|
||||
func New(start, end uint) (*IDM, error) {
|
||||
if end <= start {
|
||||
return nil, fmt.Errorf("invalid set range: [%d, %d]", start, end)
|
||||
}
|
||||
|
||||
return &IDM{start: start, end: end, handle: bitmap.New(1 + end - start)}, nil
|
||||
return &IDM{start: start, end: end, set: bitset.New(1 + end - start)}, nil
|
||||
}
|
||||
|
||||
// GetID returns the first available id in the set.
|
||||
func (i *IDM) GetID(serial bool) (uint64, error) {
|
||||
if i.handle == nil {
|
||||
func (i *IDM) GetID(serial bool) (uint, error) {
|
||||
if i.set == nil {
|
||||
return 0, errors.New("ID set is not initialized")
|
||||
}
|
||||
ordinal, err := i.handle.SetAny(serial)
|
||||
return i.start + ordinal, err
|
||||
var (
|
||||
ordinal uint
|
||||
ok bool
|
||||
)
|
||||
if serial && i.next != 0 {
|
||||
ordinal, ok = i.set.NextClear(i.next)
|
||||
if ok {
|
||||
goto found
|
||||
}
|
||||
}
|
||||
ordinal, ok = i.set.NextClear(0)
|
||||
if !ok {
|
||||
return 0, ErrNoBitAvailable
|
||||
}
|
||||
|
||||
found:
|
||||
i.set.Set(ordinal)
|
||||
i.next = ordinal + 1
|
||||
if i.next > i.end-i.start {
|
||||
i.next = 0
|
||||
}
|
||||
return i.start + ordinal, nil
|
||||
}
|
||||
|
||||
// GetSpecificID tries to reserve the specified id.
|
||||
func (i *IDM) GetSpecificID(id uint64) error {
|
||||
if i.handle == nil {
|
||||
func (i *IDM) GetSpecificID(id uint) error {
|
||||
if i.set == nil {
|
||||
return errors.New("ID set is not initialized")
|
||||
}
|
||||
|
||||
if id < i.start || id > i.end {
|
||||
return errors.New("requested id does not belong to the set")
|
||||
}
|
||||
|
||||
return i.handle.Set(id - i.start)
|
||||
if i.set.Test(id - i.start) {
|
||||
return ErrBitAllocated
|
||||
}
|
||||
|
||||
// GetIDInRange returns the first available id in the set within a [start,end] range.
|
||||
func (i *IDM) GetIDInRange(start, end uint64, serial bool) (uint64, error) {
|
||||
if i.handle == nil {
|
||||
return 0, errors.New("ID set is not initialized")
|
||||
}
|
||||
|
||||
if start < i.start || end > i.end {
|
||||
return 0, errors.New("requested range does not belong to the set")
|
||||
}
|
||||
|
||||
ordinal, err := i.handle.SetAnyInRange(start-i.start, end-i.start, serial)
|
||||
|
||||
return i.start + ordinal, err
|
||||
i.set.Set(id - i.start)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Release releases the specified id.
|
||||
func (i *IDM) Release(id uint64) {
|
||||
i.handle.Unset(id - i.start)
|
||||
func (i *IDM) Release(id uint) {
|
||||
i.set.Clear(id - i.start)
|
||||
}
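The rewritten GetID swaps the libnetwork bitmap for bits-and-blooms/bitset and keeps a next cursor so serial allocation resumes after the last bit handed out, wrapping back to the start of the range. A standalone sketch of that pattern (a simplified re-implementation for illustration, not the internal IDM type itself):

package main

import (
	"errors"
	"fmt"

	"github.com/bits-and-blooms/bitset"
)

// allocate returns the next clear bit, preferring the cursor position so
// repeated calls hand out ordinals serially, then wraps back to zero.
func allocate(set *bitset.BitSet, next *uint) (uint, error) {
	ordinal, ok := set.NextClear(*next)
	if !ok || ordinal >= set.Len() {
		ordinal, ok = set.NextClear(0)
		if !ok || ordinal >= set.Len() {
			return 0, errors.New("no bit available")
		}
	}
	set.Set(ordinal)
	*next = (ordinal + 1) % set.Len()
	return ordinal, nil
}

func main() {
	set := bitset.New(8)
	var next uint
	for i := 0; i < 3; i++ {
		id, err := allocate(set, &next)
		fmt.Println(id, err) // prints 0, 1, 2 on successive calls
	}
}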
21  vendor/github.com/moby/swarmkit/v2/manager/allocator/allocator.go  generated  vendored
@@ -4,10 +4,9 @@ import (
"context"
|
||||
"sync"
|
||||
|
||||
"github.com/docker/docker/pkg/plugingetter"
|
||||
"github.com/docker/go-events"
|
||||
"github.com/moby/swarmkit/v2/api"
|
||||
"github.com/moby/swarmkit/v2/manager/allocator/cnmallocator"
|
||||
"github.com/moby/swarmkit/v2/manager/allocator/networkallocator"
|
||||
"github.com/moby/swarmkit/v2/manager/state"
|
||||
"github.com/moby/swarmkit/v2/manager/state/store"
|
||||
)
|
||||
|
@@ -31,11 +30,7 @@ type Allocator struct {
|
|||
// doneChan is closed when the allocator is finished running.
|
||||
doneChan chan struct{}
|
||||
|
||||
// pluginGetter provides access to docker's plugin inventory.
|
||||
pluginGetter plugingetter.PluginGetter
|
||||
|
||||
// networkConfig stores network related config for the cluster
|
||||
networkConfig *cnmallocator.NetworkConfig
|
||||
nwkAllocator networkallocator.NetworkAllocator
|
||||
}
|
||||
|
||||
// taskBallot controls how the voting for task allocation is
|
||||
|
@@ -69,19 +64,19 @@ type allocActor struct {
|
|||
|
||||
// New returns a new instance of Allocator for use during allocation
|
||||
// stage of the manager.
|
||||
func New(store *store.MemoryStore, pg plugingetter.PluginGetter, netConfig *cnmallocator.NetworkConfig) (*Allocator, error) {
|
||||
a := &Allocator{
|
||||
func New(store *store.MemoryStore, na networkallocator.NetworkAllocator) *Allocator {
|
||||
if na == nil {
|
||||
na = networkallocator.Inert{}
|
||||
}
|
||||
return &Allocator{
|
||||
store: store,
|
||||
taskBallot: &taskBallot{
|
||||
votes: make(map[string][]string),
|
||||
},
|
||||
stopChan: make(chan struct{}),
|
||||
doneChan: make(chan struct{}),
|
||||
pluginGetter: pg,
|
||||
networkConfig: netConfig,
|
||||
nwkAllocator: na,
|
||||
}
|
||||
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// Run starts all allocator go-routines and waits for Stop to be called.
|
||||
2209 vendor/github.com/moby/swarmkit/v2/manager/allocator/allocator_test_suite.go generated vendored Normal file
File diff suppressed because it is too large
89 vendor/github.com/moby/swarmkit/v2/manager/allocator/allocator_test_suite_linux.go generated vendored Normal file
|
@ -0,0 +1,89 @@
|
|||
package allocator
|
||||
|
||||
import (
|
||||
"github.com/moby/swarmkit/v2/api"
|
||||
"github.com/moby/swarmkit/v2/manager/state"
|
||||
"github.com/moby/swarmkit/v2/manager/state/store"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func (suite *testSuite) TestIPAMNotNil() {
|
||||
s := store.NewMemoryStore(nil)
|
||||
suite.NotNil(s)
|
||||
defer s.Close()
|
||||
|
||||
a := suite.newAllocator(s)
|
||||
|
||||
// Predefined node-local network
|
||||
p := &api.Network{
|
||||
ID: "one_unIque_id",
|
||||
Spec: api.NetworkSpec{
|
||||
Annotations: api.Annotations{
|
||||
Name: "pred_bridge_network",
|
||||
Labels: map[string]string{
|
||||
"com.docker.swarm.predefined": "true",
|
||||
},
|
||||
},
|
||||
DriverConfig: &api.Driver{Name: "bridge"},
|
||||
},
|
||||
}
|
||||
|
||||
// Node-local swarm scope network
|
||||
nln := &api.Network{
|
||||
ID: "another_unIque_id",
|
||||
Spec: api.NetworkSpec{
|
||||
Annotations: api.Annotations{
|
||||
Name: "swarm-macvlan",
|
||||
},
|
||||
DriverConfig: &api.Driver{Name: "macvlan"},
|
||||
},
|
||||
}
|
||||
|
||||
// Try adding some objects to store before allocator is started
|
||||
suite.NoError(s.Update(func(tx store.Tx) error {
|
||||
// populate ingress network
|
||||
in := &api.Network{
|
||||
ID: "ingress-nw-id",
|
||||
Spec: api.NetworkSpec{
|
||||
Annotations: api.Annotations{
|
||||
Name: "default-ingress",
|
||||
},
|
||||
Ingress: true,
|
||||
},
|
||||
}
|
||||
suite.NoError(store.CreateNetwork(tx, in))
|
||||
|
||||
// Create the predefined node-local network with one service
|
||||
suite.NoError(store.CreateNetwork(tx, p))
|
||||
|
||||
// Create the the swarm level node-local network with one service
|
||||
suite.NoError(store.CreateNetwork(tx, nln))
|
||||
|
||||
return nil
|
||||
}))
|
||||
|
||||
netWatch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateNetwork{}, api.EventDeleteNetwork{})
|
||||
defer cancel()
|
||||
|
||||
defer suite.startAllocator(a)()
|
||||
|
||||
// Now verify if we get network and tasks updated properly
|
||||
watchNetwork(suite.T(), netWatch, false, func(t assert.TestingT, n *api.Network) bool { return true })
|
||||
watchNetwork(suite.T(), netWatch, false, func(t assert.TestingT, n *api.Network) bool { return true })
|
||||
watchNetwork(suite.T(), netWatch, false, func(t assert.TestingT, n *api.Network) bool { return true })
|
||||
|
||||
// Verify no allocation was done for the node-local networks
|
||||
var (
|
||||
ps *api.Network
|
||||
sn *api.Network
|
||||
)
|
||||
s.View(func(tx store.ReadTx) {
|
||||
ps = store.GetNetwork(tx, p.ID)
|
||||
sn = store.GetNetwork(tx, nln.ID)
|
||||
|
||||
})
|
||||
suite.NotNil(ps)
|
||||
suite.NotNil(sn)
|
||||
suite.NotNil(ps.IPAM)
|
||||
suite.NotNil(sn.IPAM)
|
||||
}
103 vendor/github.com/moby/swarmkit/v2/manager/allocator/network.go generated vendored
@ -8,7 +8,6 @@ import (
|
|||
"github.com/docker/go-events"
|
||||
"github.com/moby/swarmkit/v2/api"
|
||||
"github.com/moby/swarmkit/v2/log"
|
||||
"github.com/moby/swarmkit/v2/manager/allocator/cnmallocator"
|
||||
"github.com/moby/swarmkit/v2/manager/allocator/networkallocator"
|
||||
"github.com/moby/swarmkit/v2/manager/state"
|
||||
"github.com/moby/swarmkit/v2/manager/state/store"
|
||||
|
@ -37,6 +36,9 @@ type networkContext struct {
|
|||
// the actual network allocation.
|
||||
nwkAllocator networkallocator.NetworkAllocator
|
||||
|
||||
// The port allocator instance for allocating node ports
|
||||
portAllocator *portAllocator
|
||||
|
||||
// A set of tasks which are ready to be allocated as a batch. This is
|
||||
// distinct from "unallocatedTasks" which are tasks that failed to
|
||||
// allocate on the first try, being held for a future retry.
|
||||
|
@ -68,33 +70,9 @@ type networkContext struct {
|
|||
}
|
||||
|
||||
func (a *Allocator) doNetworkInit(ctx context.Context) (err error) {
|
||||
var netConfig *cnmallocator.NetworkConfig
|
||||
// There are two ways user can invoke swarm init
|
||||
// with default address pool & vxlan port or with only vxlan port
|
||||
// hence we need two different way to construct netconfig
|
||||
if a.networkConfig != nil {
|
||||
if a.networkConfig.DefaultAddrPool != nil {
|
||||
netConfig = &cnmallocator.NetworkConfig{
|
||||
DefaultAddrPool: a.networkConfig.DefaultAddrPool,
|
||||
SubnetSize: a.networkConfig.SubnetSize,
|
||||
VXLANUDPPort: a.networkConfig.VXLANUDPPort,
|
||||
}
|
||||
} else if a.networkConfig.VXLANUDPPort != 0 {
|
||||
netConfig = &cnmallocator.NetworkConfig{
|
||||
DefaultAddrPool: nil,
|
||||
SubnetSize: 0,
|
||||
VXLANUDPPort: a.networkConfig.VXLANUDPPort,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
na, err := cnmallocator.New(a.pluginGetter, netConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nc := &networkContext{
|
||||
nwkAllocator: na,
|
||||
nwkAllocator: a.nwkAllocator,
|
||||
portAllocator: newPortAllocator(),
|
||||
pendingTasks: make(map[string]*api.Task),
|
||||
unallocatedTasks: make(map[string]*api.Task),
|
||||
unallocatedServices: make(map[string]*api.Service),
|
||||
|
@ -119,7 +97,7 @@ func (a *Allocator) doNetworkInit(ctx context.Context) (err error) {
|
|||
// Try to complete ingress network allocation before anything else so
|
||||
// that the we can get the preferred subnet for ingress network.
|
||||
nc.ingressNetwork = ingressNetwork
|
||||
if !na.IsAllocated(nc.ingressNetwork) {
|
||||
if !nc.nwkAllocator.IsAllocated(nc.ingressNetwork) {
|
||||
if err := a.allocateNetwork(ctx, nc.ingressNetwork); err != nil {
|
||||
log.G(ctx).WithError(err).Error("failed allocating ingress network during init")
|
||||
} else if err := a.store.Batch(func(batch *store.Batch) error {
|
||||
|
@ -233,7 +211,7 @@ func (a *Allocator) doNetworkAlloc(ctx context.Context, ev events.Event) {
|
|||
break
|
||||
}
|
||||
|
||||
if nc.nwkAllocator.IsServiceAllocated(s) {
|
||||
if nc.isServiceAllocated(s) {
|
||||
break
|
||||
}
|
||||
|
||||
|
@ -261,8 +239,8 @@ func (a *Allocator) doNetworkAlloc(ctx context.Context, ev events.Event) {
|
|||
break
|
||||
}
|
||||
|
||||
if nc.nwkAllocator.IsServiceAllocated(s) {
|
||||
if !nc.nwkAllocator.HostPublishPortsNeedUpdate(s) {
|
||||
if nc.isServiceAllocated(s) {
|
||||
if !nc.portAllocator.hostPublishPortsNeedUpdate(s) {
|
||||
break
|
||||
}
|
||||
updatePortsInHostPublishMode(s)
|
||||
|
@ -284,7 +262,7 @@ func (a *Allocator) doNetworkAlloc(ctx context.Context, ev events.Event) {
|
|||
case api.EventDeleteService:
|
||||
s := v.Service.Copy()
|
||||
|
||||
if err := nc.nwkAllocator.DeallocateService(s); err != nil {
|
||||
if err := nc.deallocateService(s); err != nil {
|
||||
log.G(ctx).WithError(err).Errorf("Failed deallocation during delete of service %s", s.ID)
|
||||
} else {
|
||||
nc.somethingWasDeallocated = true
|
||||
|
@ -681,7 +659,7 @@ func (a *Allocator) allocateServices(ctx context.Context, existingAddressesOnly
|
|||
|
||||
var allocatedServices []*api.Service
|
||||
for _, s := range services {
|
||||
if nc.nwkAllocator.IsServiceAllocated(s, networkallocator.OnInit) {
|
||||
if nc.isServiceAllocated(s, networkallocator.OnInit) {
|
||||
continue
|
||||
}
|
||||
if existingAddressesOnly &&
|
||||
|
@ -713,6 +691,23 @@ func (a *Allocator) allocateServices(ctx context.Context, existingAddressesOnly
|
|||
return nil
|
||||
}
|
||||
|
||||
// isServiceAllocated returns false if the passed service needs to have network resources allocated/updated.
|
||||
func (nc *networkContext) isServiceAllocated(s *api.Service, flags ...func(*networkallocator.ServiceAllocationOpts)) bool {
|
||||
if !nc.nwkAllocator.IsServiceAllocated(s, flags...) {
|
||||
return false
|
||||
}
|
||||
|
||||
var options networkallocator.ServiceAllocationOpts
|
||||
for _, flag := range flags {
|
||||
flag(&options)
|
||||
}
|
||||
if (s.Spec.Endpoint != nil && len(s.Spec.Endpoint.Ports) != 0) ||
|
||||
(s.Endpoint != nil && len(s.Endpoint.Ports) != 0) {
|
||||
return nc.portAllocator.isPortsAllocatedOnInit(s, options.OnInit)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// allocateTasks allocates tasks in the store so far before we started watching.
|
||||
func (a *Allocator) allocateTasks(ctx context.Context, existingAddressesOnly bool) error {
|
||||
var (
|
||||
|
@ -815,7 +810,7 @@ func taskReadyForNetworkVote(t *api.Task, s *api.Service, nc *networkContext) bo
|
|||
// network configured or service endpoints have been
|
||||
// allocated.
|
||||
return (len(t.Networks) == 0 || nc.nwkAllocator.IsTaskAllocated(t)) &&
|
||||
(s == nil || nc.nwkAllocator.IsServiceAllocated(s))
|
||||
(s == nil || nc.isServiceAllocated(s))
|
||||
}
|
||||
|
||||
func taskUpdateNetworks(t *api.Task, networks []*api.NetworkAttachment) {
|
||||
|
@ -1200,13 +1195,13 @@ func (a *Allocator) allocateService(ctx context.Context, s *api.Service, existin
|
|||
// is not there
|
||||
// service has no user-defined endpoints while has already allocated network resources,
|
||||
// need deallocated.
|
||||
if err := nc.nwkAllocator.DeallocateService(s); err != nil {
|
||||
if err := nc.deallocateService(s); err != nil {
|
||||
return err
|
||||
}
|
||||
nc.somethingWasDeallocated = true
|
||||
}
|
||||
|
||||
if err := nc.nwkAllocator.AllocateService(s); err != nil {
|
||||
if err := nc.allocateService(s); err != nil {
|
||||
nc.unallocatedServices[s.ID] = s
|
||||
return err
|
||||
}
|
||||
|
@ -1229,6 +1224,26 @@ func (a *Allocator) allocateService(ctx context.Context, s *api.Service, existin
|
|||
return nil
|
||||
}
|
||||
|
||||
func (nc *networkContext) allocateService(s *api.Service) error {
|
||||
if err := nc.portAllocator.serviceAllocatePorts(s); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := nc.nwkAllocator.AllocateService(s); err != nil {
|
||||
nc.portAllocator.serviceDeallocatePorts(s)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (nc *networkContext) deallocateService(s *api.Service) error {
|
||||
if err := nc.nwkAllocator.DeallocateService(s); err != nil {
|
||||
return err
|
||||
}
|
||||
nc.portAllocator.serviceDeallocatePorts(s)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *Allocator) commitAllocatedService(ctx context.Context, batch *store.Batch, s *api.Service) error {
|
||||
if err := batch.Update(func(tx store.Tx) error {
|
||||
err := store.UpdateService(tx, s)
|
||||
|
@ -1241,7 +1256,7 @@ func (a *Allocator) commitAllocatedService(ctx context.Context, batch *store.Bat
|
|||
|
||||
return errors.Wrapf(err, "failed updating state in store transaction for service %s", s.ID)
|
||||
}); err != nil {
|
||||
if err := a.netCtx.nwkAllocator.DeallocateService(s); err != nil {
|
||||
if err := a.netCtx.deallocateService(s); err != nil {
|
||||
log.G(ctx).WithError(err).Errorf("failed rolling back allocation of service %s", s.ID)
|
||||
}
|
||||
|
||||
|
@ -1298,7 +1313,7 @@ func (a *Allocator) allocateTask(ctx context.Context, t *api.Task) (err error) {
|
|||
return
|
||||
}
|
||||
|
||||
if !nc.nwkAllocator.IsServiceAllocated(s) {
|
||||
if !nc.isServiceAllocated(s) {
|
||||
err = fmt.Errorf("service %s to which task %s belongs has pending allocations", s.ID, t.ID)
|
||||
return
|
||||
}
|
||||
|
@ -1423,7 +1438,7 @@ func (a *Allocator) procUnallocatedServices(ctx context.Context) {
|
|||
nc := a.netCtx
|
||||
var allocatedServices []*api.Service
|
||||
for _, s := range nc.unallocatedServices {
|
||||
if !nc.nwkAllocator.IsServiceAllocated(s) {
|
||||
if !nc.isServiceAllocated(s) {
|
||||
if err := a.allocateService(ctx, s, false); err != nil {
|
||||
log.G(ctx).WithError(err).Debugf("Failed allocation of unallocated service %s", s.ID)
|
||||
continue
|
||||
|
@ -1509,16 +1524,6 @@ func (a *Allocator) procTasksNetwork(ctx context.Context, onRetry bool) {
|
|||
}
|
||||
}
|
||||
|
||||
// IsBuiltInNetworkDriver returns whether the passed driver is an internal network driver
|
||||
func IsBuiltInNetworkDriver(name string) bool {
|
||||
return cnmallocator.IsBuiltInDriver(name)
|
||||
}
|
||||
|
||||
// PredefinedNetworks returns the list of predefined network structures for a given network model
|
||||
func PredefinedNetworks() []networkallocator.PredefinedNetworkData {
|
||||
return cnmallocator.PredefinedNetworks()
|
||||
}
|
||||
|
||||
// updateTaskStatus sets TaskStatus and updates timestamp.
|
||||
func updateTaskStatus(t *api.Task, newStatus api.TaskState, message string) {
|
||||
t.Status = api.TaskStatus{
|
129 vendor/github.com/moby/swarmkit/v2/manager/allocator/networkallocator/inert.go generated vendored Normal file
@@ -0,0 +1,129 @@
package networkallocator

import (
"errors"

"github.com/moby/swarmkit/v2/api"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)

// InertProvider is a network allocator [Provider] which does not allocate networks.
type InertProvider struct{}

var _ Provider = InertProvider{}

// NewAllocator returns an instance of [Inert].
func (InertProvider) NewAllocator(*Config) (NetworkAllocator, error) {
return Inert{}, nil
}

// PredefinedNetworks returns a nil slice.
func (InertProvider) PredefinedNetworks() []PredefinedNetworkData {
return nil
}

// SetDefaultVXLANUDPPort is a no-op.
func (InertProvider) SetDefaultVXLANUDPPort(uint32) error {
return nil
}

// ValidateIPAMDriver returns an InvalidArgument error unless d is nil.
func (InertProvider) ValidateIPAMDriver(d *api.Driver) error {
if d == nil {
return nil
}
return status.Errorf(codes.InvalidArgument, "IPAM drivers are unavailable")
}

// ValidateIngressNetworkDriver returns an InvalidArgument error unless d is nil.
func (InertProvider) ValidateIngressNetworkDriver(d *api.Driver) error {
if d == nil {
return nil
}
return status.Errorf(codes.InvalidArgument, "ingress network drivers are unavailable")
}

// ValidateNetworkDriver returns an InvalidArgument error unless d is nil.
func (InertProvider) ValidateNetworkDriver(d *api.Driver) error {
if d == nil {
return nil
}
return status.Errorf(codes.InvalidArgument, "ingress network drivers are unavailable")
}

// Inert is a [NetworkAllocator] which does not allocate networks.
type Inert struct{}

var _ NetworkAllocator = Inert{}

var errUnavailable = errors.New("network support is unavailable")

// Allocate returns an error unless n.Spec.Ingress is true.
func (Inert) Allocate(n *api.Network) error {
if n.Spec.Ingress {
return nil
}
return errUnavailable
}

// AllocateAttachment unconditionally returns an error.
func (Inert) AllocateAttachment(node *api.Node, networkAttachment *api.NetworkAttachment) error {
return errUnavailable
}

// AllocateService succeeds iff the service specifies no network attachments.
func (Inert) AllocateService(s *api.Service) error {
if len(s.Spec.Task.Networks) > 0 || len(s.Spec.Networks) > 0 {
return errUnavailable
}
return nil
}

// AllocateTask succeeds iff the task specifies no network attachments.
func (Inert) AllocateTask(t *api.Task) error {
if len(t.Spec.Networks) > 0 {
return errUnavailable
}
return nil
}

// Deallocate does nothing, successfully.
func (Inert) Deallocate(n *api.Network) error {
return nil
}

// DeallocateAttachment does nothing, successfully.
func (Inert) DeallocateAttachment(node *api.Node, networkAttachment *api.NetworkAttachment) error {
return nil
}

// DeallocateService does nothing, successfully.
func (Inert) DeallocateService(s *api.Service) error {
return nil
}

// DeallocateTask does nothing, successfully.
func (Inert) DeallocateTask(t *api.Task) error {
return nil
}

// IsAllocated returns true iff [Inert.Allocate] would return nil.
func (Inert) IsAllocated(n *api.Network) bool {
return (Inert{}).Allocate(n) == nil
}

// IsAttachmentAllocated returns false.
func (Inert) IsAttachmentAllocated(node *api.Node, networkAttachment *api.NetworkAttachment) bool {
return false
}

// IsServiceAllocated returns true iff [Inert.AllocateService] would return nil.
func (Inert) IsServiceAllocated(s *api.Service, flags ...func(*ServiceAllocationOpts)) bool {
return (Inert{}).AllocateService(s) == nil
}

// IsTaskAllocated returns true iff [Inert.AllocateTask] would return nil.
func (Inert) IsTaskAllocated(t *api.Task) bool {
return (Inert{}).AllocateTask(t) == nil
}
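For orientation, an illustrative sketch (not part of this diff) of how the Inert allocator defined above behaves; it uses only the types shown in this file and in the swarmkit api package:

package main

import (
	"fmt"

	"github.com/moby/swarmkit/v2/api"
	"github.com/moby/swarmkit/v2/manager/allocator/networkallocator"
)

func main() {
	na := networkallocator.Inert{}

	// Ingress networks pass Allocate, per the Inert.Allocate implementation above.
	ingress := &api.Network{Spec: api.NetworkSpec{Ingress: true}}
	fmt.Println(na.Allocate(ingress), na.IsAllocated(ingress)) // <nil> true

	// Any other network is refused by the inert allocator.
	overlay := &api.Network{Spec: api.NetworkSpec{DriverConfig: &api.Driver{Name: "overlay"}}}
	fmt.Println(na.Allocate(overlay)) // network support is unavailable
}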
@@ -61,10 +61,6 @@ type NetworkAllocator interface {
// virtual IP and ports associated with the service.
DeallocateService(s *api.Service) error

// HostPublishPortsNeedUpdate returns true if the passed service needs
// allocations for its published ports in host (non ingress) mode
HostPublishPortsNeedUpdate(s *api.Service) bool

//
// Task Allocation
//
@@ -91,6 +87,35 @@ type NetworkAllocator interface {
IsAttachmentAllocated(node *api.Node, networkAttachment *api.NetworkAttachment) bool
}

// Config is used to store network related cluster config in the Manager.
type Config struct {
// DefaultAddrPool specifies default subnet pool for global scope networks
DefaultAddrPool []string

// SubnetSize specifies the subnet size of the networks created from
// the default subnet pool
SubnetSize uint32

// VXLANUDPPort specifies the UDP port number for VXLAN traffic
VXLANUDPPort uint32
}

// DriverValidator validates whether a network driver spec is supported by the
// network provider.
type DriverValidator interface {
ValidateNetworkDriver(*api.Driver) error
ValidateIngressNetworkDriver(*api.Driver) error
ValidateIPAMDriver(*api.Driver) error
}

// Provider provides network allocation functionality.
type Provider interface {
DriverValidator
PredefinedNetworks() []PredefinedNetworkData
SetDefaultVXLANUDPPort(uint32) error
NewAllocator(*Config) (NetworkAllocator, error)
}

// IsIngressNetwork check if the network is an ingress network
func IsIngressNetwork(nw *api.Network) bool {
if nw.Spec.Ingress {
@@ -1,4 +1,4 @@
package cnmallocator
package allocator

import (
"github.com/moby/swarmkit/v2/api"
@@ -101,36 +101,31 @@ func (ps allocatedPorts) delState(p *api.PortConfig) *api.PortConfig {
return nil
}

func newPortAllocator() (*portAllocator, error) {
func newPortAllocator() *portAllocator {
portSpaces := make(map[api.PortConfig_Protocol]*portSpace)
for _, protocol := range []api.PortConfig_Protocol{api.ProtocolTCP, api.ProtocolUDP, api.ProtocolSCTP} {
ps, err := newPortSpace(protocol)
if err != nil {
return nil, err
portSpaces[protocol] = newPortSpace(protocol)
}

portSpaces[protocol] = ps
return &portAllocator{portSpaces: portSpaces}
}

return &portAllocator{portSpaces: portSpaces}, nil
}

func newPortSpace(protocol api.PortConfig_Protocol) (*portSpace, error) {
func newPortSpace(protocol api.PortConfig_Protocol) *portSpace {
master, err := idm.New(masterPortStart, masterPortEnd)
if err != nil {
return nil, err
panic(err)
}

dynamic, err := idm.New(dynamicPortStart, dynamicPortEnd)
if err != nil {
return nil, err
panic(err)
}

return &portSpace{
protocol: protocol,
masterPortSpace: master,
dynamicPortSpace: dynamic,
}, nil
}
}

// getPortConfigKey returns a map key for doing set operations with
@@ -377,18 +372,18 @@ func (ps *portSpace) allocate(p *api.PortConfig) (err error) {
// If it falls in the dynamic port range check out
// from dynamic port space first.
if p.PublishedPort >= dynamicPortStart && p.PublishedPort <= dynamicPortEnd {
if err = ps.dynamicPortSpace.GetSpecificID(uint64(p.PublishedPort)); err != nil {
if err = ps.dynamicPortSpace.GetSpecificID(uint(p.PublishedPort)); err != nil {
return err
}

defer func() {
if err != nil {
ps.dynamicPortSpace.Release(uint64(p.PublishedPort))
ps.dynamicPortSpace.Release(uint(p.PublishedPort))
}
}()
}

return ps.masterPortSpace.GetSpecificID(uint64(p.PublishedPort))
return ps.masterPortSpace.GetSpecificID(uint(p.PublishedPort))
}

// Check out an arbitrary port from dynamic port space.
@@ -413,8 +408,8 @@ func (ps *portSpace) allocate(p *api.PortConfig) (err error) {

func (ps *portSpace) free(p *api.PortConfig) {
if p.PublishedPort >= dynamicPortStart && p.PublishedPort <= dynamicPortEnd {
ps.dynamicPortSpace.Release(uint64(p.PublishedPort))
ps.dynamicPortSpace.Release(uint(p.PublishedPort))
}

ps.masterPortSpace.Release(uint64(p.PublishedPort))
ps.masterPortSpace.Release(uint(p.PublishedPort))
}
44 vendor/github.com/moby/swarmkit/v2/manager/controlapi/common.go generated vendored
@@ -4,11 +4,7 @@ import (
"regexp"
"strings"

"github.com/docker/docker/libnetwork/driverapi"
"github.com/docker/docker/libnetwork/ipamapi"
"github.com/docker/docker/pkg/plugingetter"
"github.com/moby/swarmkit/v2/api"
"github.com/moby/swarmkit/v2/manager/allocator"
"github.com/moby/swarmkit/v2/manager/state/store"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@@ -93,43 +89,3 @@ func validateConfigOrSecretAnnotations(m api.Annotations) error {
}
return nil
}

func validateDriver(driver *api.Driver, pg plugingetter.PluginGetter, pluginType string) error {
if driver == nil {
// It is ok to not specify the driver. We will choose
// a default driver.
return nil
}

if driver.Name == "" {
return status.Errorf(codes.InvalidArgument, "driver name: if driver is specified name is required")
}

// First check against the known drivers
switch pluginType {
case ipamapi.PluginEndpointType:
if strings.ToLower(driver.Name) == ipamapi.DefaultIPAM {
return nil
}
case driverapi.NetworkPluginEndpointType:
if allocator.IsBuiltInNetworkDriver(driver.Name) {
return nil
}
default:
}

if pg == nil {
return status.Errorf(codes.InvalidArgument, "plugin %s not supported", driver.Name)
}

p, err := pg.Get(driver.Name, pluginType, plugingetter.Lookup)
if err != nil {
return status.Errorf(codes.InvalidArgument, "error during lookup of plugin %s", driver.Name)
}

if p.IsV1() {
return status.Errorf(codes.InvalidArgument, "legacy plugin %s of type %s is not supported in swarm mode", driver.Name, pluginType)
}

return nil
}
26 vendor/github.com/moby/swarmkit/v2/manager/controlapi/network.go generated vendored
@@ -4,9 +4,6 @@ import (
"context"
"net"

"github.com/docker/docker/libnetwork/driverapi"
"github.com/docker/docker/libnetwork/ipamapi"
"github.com/docker/docker/pkg/plugingetter"
"github.com/moby/swarmkit/v2/api"
"github.com/moby/swarmkit/v2/identity"
"github.com/moby/swarmkit/v2/manager/allocator"
@@ -51,14 +48,14 @@ func validateIPAMConfiguration(ipamConf *api.IPAMConfig) error {
return nil
}

func validateIPAM(ipam *api.IPAMOptions, pg plugingetter.PluginGetter) error {
func (s *Server) validateIPAM(ipam *api.IPAMOptions) error {
if ipam == nil {
// It is ok to not specify any IPAM configurations. We
// will choose good defaults.
return nil
}

if err := validateDriver(ipam.Driver, pg, ipamapi.PluginEndpointType); err != nil {
if err := s.netvalidator.ValidateIPAMDriver(ipam.Driver); err != nil {
return err
}

@@ -71,15 +68,11 @@ func validateIPAM(ipam *api.IPAMOptions, pg plugingetter.PluginGetter) error {
return nil
}

func validateNetworkSpec(spec *api.NetworkSpec, pg plugingetter.PluginGetter) error {
func (s *Server) validateNetworkSpec(spec *api.NetworkSpec) error {
if spec == nil {
return status.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
}

if spec.Ingress && spec.DriverConfig != nil && spec.DriverConfig.Name != "overlay" {
return status.Errorf(codes.Unimplemented, "only overlay driver is currently supported for ingress network")
}

if spec.Attachable && spec.Ingress {
return status.Errorf(codes.InvalidArgument, "ingress network cannot be attachable")
}

@@ -92,18 +85,25 @@ func validateNetworkSpec(spec *api.NetworkSpec, pg plugingetter.PluginGetter) er
return status.Errorf(codes.PermissionDenied, "label %s is for internally created predefined networks and cannot be applied by users",
networkallocator.PredefinedLabel)
}
if err := validateDriver(spec.DriverConfig, pg, driverapi.NetworkPluginEndpointType); err != nil {

var err error
if spec.Ingress {
err = s.netvalidator.ValidateIngressNetworkDriver(spec.DriverConfig)
} else {
err = s.netvalidator.ValidateNetworkDriver(spec.DriverConfig)
}
if err != nil {
return err
}

return validateIPAM(spec.IPAM, pg)
return s.validateIPAM(spec.IPAM)
}

// CreateNetwork creates and returns a Network based on the provided NetworkSpec.
// - Returns `InvalidArgument` if the NetworkSpec is malformed.
// - Returns an error if the creation fails.
func (s *Server) CreateNetwork(ctx context.Context, request *api.CreateNetworkRequest) (*api.CreateNetworkResponse, error) {
if err := validateNetworkSpec(request.Spec, s.pg); err != nil {
if err := s.validateNetworkSpec(request.Spec); err != nil {
return nil, err
}
11 vendor/github.com/moby/swarmkit/v2/manager/controlapi/server.go generated vendored
@@ -3,8 +3,8 @@ package controlapi
import (
"errors"

"github.com/docker/docker/pkg/plugingetter"
"github.com/moby/swarmkit/v2/ca"
"github.com/moby/swarmkit/v2/manager/allocator/networkallocator"
"github.com/moby/swarmkit/v2/manager/drivers"
"github.com/moby/swarmkit/v2/manager/state/raft"
"github.com/moby/swarmkit/v2/manager/state/store"
@@ -19,17 +19,20 @@ type Server struct {
store *store.MemoryStore
raft *raft.Node
securityConfig *ca.SecurityConfig
pg plugingetter.PluginGetter
netvalidator networkallocator.DriverValidator
dr *drivers.DriverProvider
}

// NewServer creates a Cluster API server.
func NewServer(store *store.MemoryStore, raft *raft.Node, securityConfig *ca.SecurityConfig, pg plugingetter.PluginGetter, dr *drivers.DriverProvider) *Server {
func NewServer(store *store.MemoryStore, raft *raft.Node, securityConfig *ca.SecurityConfig, nv networkallocator.DriverValidator, dr *drivers.DriverProvider) *Server {
if nv == nil {
nv = networkallocator.InertProvider{}
}
return &Server{
store: store,
dr: dr,
raft: raft,
securityConfig: securityConfig,
pg: pg,
netvalidator: nv,
}
}
14 vendor/github.com/moby/swarmkit/v2/manager/csi/manager.go generated vendored
@@ -7,12 +7,12 @@ import (
"sync"
"time"

"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/go-events"

"github.com/moby/swarmkit/v2/api"
"github.com/moby/swarmkit/v2/log"
"github.com/moby/swarmkit/v2/manager/state/store"
mobyplugin "github.com/moby/swarmkit/v2/node/plugin"
"github.com/moby/swarmkit/v2/volumequeue"
)

@@ -36,12 +36,12 @@ type Manager struct {

// pg is the plugingetter, which allows us to access the Docker Engine's
// plugin store.
pg plugingetter.PluginGetter
pg mobyplugin.Getter

// newPlugin is a function which returns an object implementing the Plugin
// interface. It allows us to swap out the implementation of plugins while
// unit-testing the Manager
newPlugin func(pc plugingetter.CompatPlugin, pa plugingetter.PluginAddr, provider SecretProvider) Plugin
newPlugin func(p mobyplugin.AddrPlugin, provider SecretProvider) Plugin

// synchronization for starting and stopping the Manager
startOnce sync.Once
@@ -55,7 +55,7 @@ type Manager struct {
pendingVolumes *volumequeue.VolumeQueue
}

func NewManager(s *store.MemoryStore, pg plugingetter.PluginGetter) *Manager {
func NewManager(s *store.MemoryStore, pg mobyplugin.Getter) *Manager {
return &Manager{
store: s,
stopChan: make(chan struct{}),
@@ -469,7 +469,7 @@ func (vm *Manager) getPlugin(name string) (Plugin, error) {
}

// otherwise, we need to load the plugin.
pc, err := vm.pg.Get(name, DockerCSIPluginCap, plugingetter.Lookup)
pc, err := vm.pg.Get(name, DockerCSIPluginCap)
if err != nil {
return nil, err
}

@@ -478,12 +478,12 @@ func (vm *Manager) getPlugin(name string) (Plugin, error) {
return nil, errors.New("driver \"" + name + "\" not found")
}

pa, ok := pc.(plugingetter.PluginAddr)
pa, ok := pc.(mobyplugin.AddrPlugin)
if !ok {
return nil, errors.New("plugin for driver \"" + name + "\" does not implement PluginAddr")
}

p := vm.newPlugin(pc, pa, vm.provider)
p := vm.newPlugin(pa, vm.provider)
vm.plugins[name] = p

return p, nil
8 vendor/github.com/moby/swarmkit/v2/manager/csi/plugin.go generated vendored
@@ -10,10 +10,10 @@ import (
"google.golang.org/grpc/status"

"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/docker/docker/pkg/plugingetter"
"github.com/moby/swarmkit/v2/api"
"github.com/moby/swarmkit/v2/internal/csi/capability"
"github.com/moby/swarmkit/v2/log"
mobyplugin "github.com/moby/swarmkit/v2/node/plugin"
)

// Plugin is the interface for a CSI controller plugin.
@@ -74,12 +74,12 @@ type plugin struct {
// the same object. By taking both parts here, we can push off the work of
// assuring that the given plugin implements the PluginAddr interface without
// having to typecast in this constructor.
func NewPlugin(pc plugingetter.CompatPlugin, pa plugingetter.PluginAddr, provider SecretProvider) Plugin {
func NewPlugin(p mobyplugin.AddrPlugin, provider SecretProvider) Plugin {
return &plugin{
name: pc.Name(),
name: p.Name(),
// TODO(dperny): verify that we do not need to include the Network()
// portion of the Addr.
socket: fmt.Sprintf("%s://%s", pa.Addr().Network(), pa.Addr().String()),
socket: fmt.Sprintf("%s://%s", p.Addr().Network(), p.Addr().String()),
provider: provider,
swarmToCSI: map[string]string{},
csiToSwarm: map[string]string{},
8 vendor/github.com/moby/swarmkit/v2/manager/drivers/provider.go generated vendored
@@ -3,17 +3,17 @@ package drivers
import (
"fmt"

"github.com/docker/docker/pkg/plugingetter"
"github.com/moby/swarmkit/v2/api"
"github.com/moby/swarmkit/v2/node/plugin"
)

// DriverProvider provides external drivers
type DriverProvider struct {
pluginGetter plugingetter.PluginGetter
pluginGetter plugin.Getter
}

// New returns a new driver provider
func New(pluginGetter plugingetter.PluginGetter) *DriverProvider {
func New(pluginGetter plugin.Getter) *DriverProvider {
return &DriverProvider{pluginGetter: pluginGetter}
}

@@ -26,7 +26,7 @@ func (m *DriverProvider) NewSecretDriver(driver *api.Driver) (*SecretDriver, err
return nil, fmt.Errorf("driver specification is nil")
}
// Search for the specified plugin
plugin, err := m.pluginGetter.Get(driver.Name, SecretsProviderCapability, plugingetter.Lookup)
plugin, err := m.pluginGetter.Get(driver.Name, SecretsProviderCapability)
if err != nil {
return nil, err
}
6 vendor/github.com/moby/swarmkit/v2/manager/drivers/secrets.go generated vendored
@@ -3,9 +3,9 @@ package drivers
import (
"fmt"

"github.com/docker/docker/pkg/plugingetter"
"github.com/moby/swarmkit/v2/api"
"github.com/moby/swarmkit/v2/api/naming"
"github.com/moby/swarmkit/v2/node/plugin"
)

const (
@@ -18,11 +18,11 @@ const (

// SecretDriver provides secrets from different stores
type SecretDriver struct {
plugin plugingetter.CompatPlugin
plugin plugin.Plugin
}

// NewSecretDriver creates a new driver that provides third party secrets
func NewSecretDriver(plugin plugingetter.CompatPlugin) *SecretDriver {
func NewSecretDriver(plugin plugin.Plugin) *SecretDriver {
return &SecretDriver{plugin: plugin}
}
27 vendor/github.com/moby/swarmkit/v2/manager/manager.go generated vendored
@@ -13,7 +13,6 @@ import (
"syscall"
"time"

"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/go-events"
gmetrics "github.com/docker/go-metrics"
gogotypes "github.com/gogo/protobuf/types"
@@ -24,7 +23,6 @@ import (
"github.com/moby/swarmkit/v2/identity"
"github.com/moby/swarmkit/v2/log"
"github.com/moby/swarmkit/v2/manager/allocator"
"github.com/moby/swarmkit/v2/manager/allocator/cnmallocator"
"github.com/moby/swarmkit/v2/manager/allocator/networkallocator"
"github.com/moby/swarmkit/v2/manager/controlapi"
"github.com/moby/swarmkit/v2/manager/csi"
@@ -46,6 +44,7 @@ import (
"github.com/moby/swarmkit/v2/manager/state/raft/transport"
"github.com/moby/swarmkit/v2/manager/state/store"
"github.com/moby/swarmkit/v2/manager/watchapi"
"github.com/moby/swarmkit/v2/node/plugin"
"github.com/moby/swarmkit/v2/remotes"
"github.com/moby/swarmkit/v2/xnet"
"github.com/pkg/errors"
@@ -124,7 +123,7 @@ type Config struct {
Availability api.NodeSpec_Availability

// PluginGetter provides access to docker's plugin inventory.
PluginGetter plugingetter.PluginGetter
PluginGetter plugin.Getter

// FIPS is a boolean stating whether the node is FIPS enabled - if this is the
// first node in the cluster, this setting is used to set the cluster-wide mandatory
@@ -132,7 +131,16 @@ type Config struct {
FIPS bool

// NetworkConfig stores network related config for the cluster
NetworkConfig *cnmallocator.NetworkConfig
NetworkConfig *networkallocator.Config

NetworkProvider networkallocator.Provider
}

func (c *Config) networkProvider() networkallocator.Provider {
if c.NetworkProvider == nil {
return networkallocator.InertProvider{}
}
return c.NetworkProvider
}

// Manager is the cluster manager for Swarm.
@@ -464,7 +472,7 @@ func (m *Manager) Run(parent context.Context) error {
return err
}

baseControlAPI := controlapi.NewServer(m.raftNode.MemoryStore(), m.raftNode, m.config.SecurityConfig, m.config.PluginGetter, drivers.New(m.config.PluginGetter))
baseControlAPI := controlapi.NewServer(m.raftNode.MemoryStore(), m.raftNode, m.config.SecurityConfig, m.config.networkProvider(), drivers.New(m.config.PluginGetter))
baseResourceAPI := resourceapi.New(m.raftNode.MemoryStore())
healthServer := health.NewHealthServer()
localHealthServer := health.NewHealthServer()
@@ -993,7 +1001,7 @@ func (m *Manager) becomeLeader(ctx context.Context) {
// are known to be present in each cluster node. This is needed
// in order to allow running services on the predefined docker
// networks like `bridge` and `host`.
for _, p := range allocator.PredefinedNetworks() {
for _, p := range m.config.networkProvider().PredefinedNetworks() {
if err := store.CreateNetwork(tx, newPredefinedNetwork(p.Name, p.Driver)); err != nil && err != store.ErrNameConflict {
log.G(ctx).WithError(err).Error("failed to create predefined network " + p.Name)
}
@@ -1026,25 +1034,26 @@ func (m *Manager) becomeLeader(ctx context.Context) {
})
if cluster.DefaultAddressPool != nil {
if m.config.NetworkConfig == nil {
m.config.NetworkConfig = &cnmallocator.NetworkConfig{}
m.config.NetworkConfig = &networkallocator.Config{}
}
m.config.NetworkConfig.DefaultAddrPool = append(m.config.NetworkConfig.DefaultAddrPool, cluster.DefaultAddressPool...)
m.config.NetworkConfig.SubnetSize = cluster.SubnetSize
}
if cluster.VXLANUDPPort != 0 {
if m.config.NetworkConfig == nil {
m.config.NetworkConfig = &cnmallocator.NetworkConfig{}
m.config.NetworkConfig = &networkallocator.Config{}
}
m.config.NetworkConfig.VXLANUDPPort = cluster.VXLANUDPPort
}
}

m.allocator, err = allocator.New(s, m.config.PluginGetter, m.config.NetworkConfig)
na, err := m.config.networkProvider().NewAllocator(m.config.NetworkConfig)
if err != nil {
log.G(ctx).WithError(err).Error("failed to create allocator")
// TODO(stevvooe): It doesn't seem correct here to fail
// creating the allocator but then use it anyway.
}
m.allocator = allocator.New(s, na)

if m.keyManager != nil {
go func(keyManager *keymanager.KeyManager) {
24 vendor/github.com/moby/swarmkit/v2/node/node.go generated vendored
@@ -15,8 +15,6 @@ import (
"sync"
"time"

"github.com/docker/docker/libnetwork/drivers/overlay/overlayutils"
"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/go-metrics"
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"github.com/moby/swarmkit/v2/agent"
@@ -29,8 +27,9 @@ import (
"github.com/moby/swarmkit/v2/ioutils"
"github.com/moby/swarmkit/v2/log"
"github.com/moby/swarmkit/v2/manager"
"github.com/moby/swarmkit/v2/manager/allocator/cnmallocator"
"github.com/moby/swarmkit/v2/manager/allocator/networkallocator"
"github.com/moby/swarmkit/v2/manager/encryption"
"github.com/moby/swarmkit/v2/node/plugin"
"github.com/moby/swarmkit/v2/remotes"
"github.com/moby/swarmkit/v2/xnet"
"github.com/pkg/errors"
@@ -106,8 +105,11 @@ type Config struct {
// for connections to the remote API (including the raft service).
AdvertiseRemoteAPI string

// NetworkProvider provides network allocation for the cluster
NetworkProvider networkallocator.Provider

// NetworkConfig stores network related config for the cluster
NetworkConfig *cnmallocator.NetworkConfig
NetworkConfig *networkallocator.Config

// Executor specifies the executor to use for the agent.
Executor exec.Executor
@@ -132,7 +134,7 @@ type Config struct {
Availability api.NodeSpec_Availability

// PluginGetter provides access to docker's plugin inventory.
PluginGetter plugingetter.PluginGetter
PluginGetter plugin.Getter

// FIPS is a boolean stating whether the node is FIPS enabled
FIPS bool
@@ -161,7 +163,6 @@ type Node struct {
manager *manager.Manager
notifyNodeChange chan *agent.NodeChanges // used by the agent to relay node updates from the dispatcher Session stream to (*Node).run
unlockKey []byte
vxlanUDPPort uint32
}

type lastSeenRole struct {
@@ -271,8 +272,11 @@ func (n *Node) currentRole() api.NodeRole {
}

// configVXLANUDPPort sets vxlan port in libnetwork
func configVXLANUDPPort(ctx context.Context, vxlanUDPPort uint32) {
if err := overlayutils.ConfigVXLANUDPPort(vxlanUDPPort); err != nil {
func (n *Node) configVXLANUDPPort(ctx context.Context, vxlanUDPPort uint32) {
if n.config.NetworkProvider == nil {
return
}
if err := n.config.NetworkProvider.SetDefaultVXLANUDPPort(vxlanUDPPort); err != nil {
log.G(ctx).WithError(err).Error("failed to configure VXLAN UDP port")
return
}
@@ -369,8 +373,7 @@ func (n *Node) run(ctx context.Context) (err error) {
case nodeChanges := <-n.notifyNodeChange:
if nodeChanges.Node != nil {
if nodeChanges.Node.VXLANUDPPort != 0 {
n.vxlanUDPPort = nodeChanges.Node.VXLANUDPPort
configVXLANUDPPort(ctx, n.vxlanUDPPort)
n.configVXLANUDPPort(ctx, nodeChanges.Node.VXLANUDPPort)
}
// This is a bit complex to be backward compatible with older CAs that
// don't support the Node.Role field. They only use what's presently
@@ -1015,6 +1018,7 @@ func (n *Node) runManager(ctx context.Context, securityConfig *ca.SecurityConfig
RootCAPaths: rootPaths,
FIPS: n.config.FIPS,
NetworkConfig: n.config.NetworkConfig,
NetworkProvider: n.config.NetworkProvider,
})
if err != nil {
return false, err
23 vendor/github.com/moby/swarmkit/v2/node/plugin/pluginapi.go generated vendored Normal file
@@ -0,0 +1,23 @@
package plugin

import "net"

type Plugin interface {
Name() string
ScopedPath(string) string
Client() Client
}

type AddrPlugin interface {
Plugin
Addr() net.Addr
}

type Client interface {
Call(method string, args, ret interface{}) error
}

type Getter interface {
Get(name, capability string) (Plugin, error)
GetAllManagedPluginsByCap(capability string) []Plugin
}
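As an illustrative sketch (not part of this diff), one way a consumer might use the Getter interface above to resolve a managed plugin and recover its endpoint; the capability string and error text here are placeholders, and the socket formatting mirrors the csi package earlier in the diff:

package example

import (
	"fmt"

	"github.com/moby/swarmkit/v2/node/plugin"
)

// lookupSocket resolves a plugin by name and capability and, when the plugin
// also exposes an address, returns a "<network>://<address>" style endpoint.
func lookupSocket(pg plugin.Getter, name, capability string) (string, error) {
	p, err := pg.Get(name, capability)
	if err != nil {
		return "", err
	}
	// Only plugins that also expose an address satisfy AddrPlugin.
	ap, ok := p.(plugin.AddrPlugin)
	if !ok {
		return "", fmt.Errorf("plugin %s does not expose an address", name)
	}
	return fmt.Sprintf("%s://%s", ap.Addr().Network(), ap.Addr().String()), nil
}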
72 vendor/github.com/moby/swarmkit/v2/testutils/fake_plugingetter.go generated vendored Normal file
@@ -0,0 +1,72 @@
package testutils

import (
"fmt"
"net"

"github.com/moby/swarmkit/v2/node/plugin"
)

const DockerCSIPluginNodeCap = "csinode"
const DockerCSIPluginControllerCap = "csicontroller"

type FakePluginGetter struct {
Plugins map[string]*FakePlugin
}

var _ plugin.Getter = &FakePluginGetter{}

func (f *FakePluginGetter) Get(name, capability string) (plugin.Plugin, error) {
if capability != DockerCSIPluginNodeCap && capability != DockerCSIPluginControllerCap {
return nil, fmt.Errorf(
"requested plugin with %s cap, but should only ever request %s or %s",
capability, DockerCSIPluginNodeCap, DockerCSIPluginControllerCap,
)
}

if plug, ok := f.Plugins[name]; ok {
return plug, nil
}
return nil, fmt.Errorf("plugin %s not found", name)
}

// GetAllManagedPluginsByCap returns all of the fake's plugins. If capability
// is anything other than DockerCSIPluginCap, it returns nothing.
func (f *FakePluginGetter) GetAllManagedPluginsByCap(capability string) []plugin.Plugin {
if capability != DockerCSIPluginNodeCap && capability != DockerCSIPluginControllerCap {
return nil
}

allPlugins := make([]plugin.Plugin, 0, len(f.Plugins))
for _, plug := range f.Plugins {
allPlugins = append(allPlugins, plug)
}
return allPlugins
}

type FakePlugin struct {
PluginName string
PluginAddr net.Addr
Scope string
}

var _ plugin.AddrPlugin = &FakePlugin{}

func (f *FakePlugin) Name() string {
return f.PluginName
}

func (f *FakePlugin) ScopedPath(path string) string {
if f.Scope != "" {
return fmt.Sprintf("%s/%s", f.Scope, path)
}
return path
}

func (f *FakePlugin) Client() plugin.Client {
return nil
}

func (f *FakePlugin) Addr() net.Addr {
return f.PluginAddr
}
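A hypothetical test snippet (not part of this diff) showing how the fake getter above can stand in for plugin.Getter; the plugin name and socket path are made up for the example:

package main

import (
	"fmt"
	"net"

	"github.com/moby/swarmkit/v2/testutils"
)

func main() {
	// Wire up the fake getter as a test double for plugin.Getter.
	pg := &testutils.FakePluginGetter{
		Plugins: map[string]*testutils.FakePlugin{
			"example/csi-driver": {
				PluginName: "example/csi-driver",
				PluginAddr: &net.UnixAddr{Name: "/run/example-csi.sock", Net: "unix"},
			},
		},
	}

	p, err := pg.Get("example/csi-driver", testutils.DockerCSIPluginControllerCap)
	fmt.Println(p.Name(), err) // example/csi-driver <nil>
}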
24 vendor/github.com/moby/swarmkit/v2/testutils/grpc.go generated vendored Normal file
@@ -0,0 +1,24 @@
package testutils

import (
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)

// ErrorDesc returns the error description of err if it was produced by the rpc system.
// Otherwise, it returns err.Error() or empty string when err is nil.
func ErrorDesc(err error) string {
if s, ok := status.FromError(err); ok {
return s.Message()
}
return err.Error()
}

// ErrorCode returns the error code for err if it was produced by the rpc system.
// Otherwise, it returns codes.Unknown.
func ErrorCode(err error) codes.Code {
if s, ok := status.FromError(err); ok {
return s.Code()
}
return codes.Unknown
}
37 vendor/github.com/moby/swarmkit/v2/testutils/poll.go generated vendored Normal file
@@ -0,0 +1,37 @@
package testutils

import (
"time"

"code.cloudfoundry.org/clock/fakeclock"
"github.com/pkg/errors"
)

// PollFuncWithTimeout is used to periodically execute a check function, it
// returns error after timeout.
func PollFuncWithTimeout(clockSource *fakeclock.FakeClock, f func() error, timeout time.Duration) error {
if f() == nil {
return nil
}
timer := time.NewTimer(timeout)
defer timer.Stop()
for i := 0; ; i++ {
if i%5 == 0 && clockSource != nil {
clockSource.Increment(time.Second)
}
err := f()
if err == nil {
return nil
}
select {
case <-timer.C:
return errors.Wrap(err, "polling failed")
case <-time.After(50 * time.Millisecond):
}
}
}

// PollFunc is like PollFuncWithTimeout with timeout=10s.
func PollFunc(clockSource *fakeclock.FakeClock, f func() error) error {
return PollFuncWithTimeout(clockSource, f, 10*time.Second)
}
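Illustrative usage (not part of this diff) of the polling helper above; with a nil fake clock it simply retries every 50ms until the function succeeds or roughly 10 seconds elapse:

package main

import (
	"errors"
	"fmt"

	"github.com/moby/swarmkit/v2/testutils"
)

func main() {
	attempts := 0
	err := testutils.PollFunc(nil, func() error {
		attempts++
		if attempts < 3 {
			return errors.New("not ready yet")
		}
		return nil
	})
	fmt.Println(attempts, err) // 3 <nil>
}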
27 vendor/github.com/pmezard/go-difflib/LICENSE generated vendored Normal file
@@ -0,0 +1,27 @@
Copyright (c) 2013, Patrick Mezard
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
The names of its contributors may not be used to endorse or promote
products derived from this software without specific prior written
permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
772 vendor/github.com/pmezard/go-difflib/difflib/difflib.go generated vendored Normal file
@ -0,0 +1,772 @@
|
|||
// Package difflib is a partial port of Python difflib module.
|
||||
//
|
||||
// It provides tools to compare sequences of strings and generate textual diffs.
|
||||
//
|
||||
// The following class and functions have been ported:
|
||||
//
|
||||
// - SequenceMatcher
|
||||
//
|
||||
// - unified_diff
|
||||
//
|
||||
// - context_diff
|
||||
//
|
||||
// Getting unified diffs was the main goal of the port. Keep in mind this code
|
||||
// is mostly suitable to output text differences in a human friendly way, there
|
||||
// are no guarantees generated diffs are consumable by patch(1).
|
||||
package difflib
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func min(a, b int) int {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func max(a, b int) int {
|
||||
if a > b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func calculateRatio(matches, length int) float64 {
|
||||
if length > 0 {
|
||||
return 2.0 * float64(matches) / float64(length)
|
||||
}
|
||||
return 1.0
|
||||
}
|
||||
|
||||
type Match struct {
|
||||
A int
|
||||
B int
|
||||
Size int
|
||||
}
|
||||
|
||||
type OpCode struct {
|
||||
Tag byte
|
||||
I1 int
|
||||
I2 int
|
||||
J1 int
|
||||
J2 int
|
||||
}
|
||||
|
||||
// SequenceMatcher compares sequence of strings. The basic
|
||||
// algorithm predates, and is a little fancier than, an algorithm
|
||||
// published in the late 1980's by Ratcliff and Obershelp under the
|
||||
// hyperbolic name "gestalt pattern matching". The basic idea is to find
|
||||
// the longest contiguous matching subsequence that contains no "junk"
|
||||
// elements (R-O doesn't address junk). The same idea is then applied
|
||||
// recursively to the pieces of the sequences to the left and to the right
|
||||
// of the matching subsequence. This does not yield minimal edit
|
||||
// sequences, but does tend to yield matches that "look right" to people.
|
||||
//
|
||||
// SequenceMatcher tries to compute a "human-friendly diff" between two
|
||||
// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
|
||||
// longest *contiguous* & junk-free matching subsequence. That's what
|
||||
// catches peoples' eyes. The Windows(tm) windiff has another interesting
|
||||
// notion, pairing up elements that appear uniquely in each sequence.
|
||||
// That, and the method here, appear to yield more intuitive difference
|
||||
// reports than does diff. This method appears to be the least vulnerable
|
||||
// to synching up on blocks of "junk lines", though (like blank lines in
|
||||
// ordinary text files, or maybe "<P>" lines in HTML files). That may be
|
||||
// because this is the only method of the 3 that has a *concept* of
|
||||
// "junk" <wink>.
|
||||
//
|
||||
// Timing: Basic R-O is cubic time worst case and quadratic time expected
|
||||
// case. SequenceMatcher is quadratic time for the worst case and has
|
||||
// expected-case behavior dependent in a complicated way on how many
|
||||
// elements the sequences have in common; best case time is linear.
|
||||
type SequenceMatcher struct {
|
||||
a []string
|
||||
b []string
|
||||
b2j map[string][]int
|
||||
IsJunk func(string) bool
|
||||
autoJunk bool
|
||||
bJunk map[string]struct{}
|
||||
matchingBlocks []Match
|
||||
fullBCount map[string]int
|
||||
bPopular map[string]struct{}
|
||||
opCodes []OpCode
|
||||
}
|
||||
|
||||
func NewMatcher(a, b []string) *SequenceMatcher {
|
||||
m := SequenceMatcher{autoJunk: true}
|
||||
m.SetSeqs(a, b)
|
||||
return &m
|
||||
}
|
||||
|
||||
func NewMatcherWithJunk(a, b []string, autoJunk bool,
|
||||
isJunk func(string) bool) *SequenceMatcher {
|
||||
|
||||
m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}
|
||||
m.SetSeqs(a, b)
|
||||
return &m
|
||||
}
|
||||
|
||||
// Set two sequences to be compared.
|
||||
func (m *SequenceMatcher) SetSeqs(a, b []string) {
|
||||
m.SetSeq1(a)
|
||||
m.SetSeq2(b)
|
||||
}
|
||||
|
||||
// Set the first sequence to be compared. The second sequence to be compared is
|
||||
// not changed.
|
||||
//
|
||||
// SequenceMatcher computes and caches detailed information about the second
|
||||
// sequence, so if you want to compare one sequence S against many sequences,
|
||||
// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other
|
||||
// sequences.
|
||||
//
|
||||
// See also SetSeqs() and SetSeq2().
|
||||
func (m *SequenceMatcher) SetSeq1(a []string) {
|
||||
if &a == &m.a {
|
||||
return
|
||||
}
|
||||
m.a = a
|
||||
m.matchingBlocks = nil
|
||||
m.opCodes = nil
|
||||
}
|
||||
|
||||
// Set the second sequence to be compared. The first sequence to be compared is
|
||||
// not changed.
|
||||
func (m *SequenceMatcher) SetSeq2(b []string) {
|
||||
if &b == &m.b {
|
||||
return
|
||||
}
|
||||
m.b = b
|
||||
m.matchingBlocks = nil
|
||||
m.opCodes = nil
|
||||
m.fullBCount = nil
|
||||
m.chainB()
|
||||
}
|
||||
|
||||
func (m *SequenceMatcher) chainB() {
|
||||
// Populate line -> index mapping
|
||||
b2j := map[string][]int{}
|
||||
for i, s := range m.b {
|
||||
indices := b2j[s]
|
||||
indices = append(indices, i)
|
||||
b2j[s] = indices
|
||||
}
|
||||
|
||||
// Purge junk elements
|
||||
m.bJunk = map[string]struct{}{}
|
||||
if m.IsJunk != nil {
|
||||
junk := m.bJunk
|
||||
for s, _ := range b2j {
|
||||
if m.IsJunk(s) {
|
||||
junk[s] = struct{}{}
|
||||
}
|
||||
}
|
||||
for s, _ := range junk {
|
||||
delete(b2j, s)
|
||||
}
|
||||
}
|
||||
|
||||
// Purge remaining popular elements
|
||||
popular := map[string]struct{}{}
|
||||
n := len(m.b)
|
||||
if m.autoJunk && n >= 200 {
|
||||
ntest := n/100 + 1
|
||||
for s, indices := range b2j {
|
||||
if len(indices) > ntest {
|
||||
popular[s] = struct{}{}
|
||||
}
|
||||
}
|
||||
for s, _ := range popular {
|
||||
delete(b2j, s)
|
||||
}
|
||||
}
|
||||
m.bPopular = popular
|
||||
m.b2j = b2j
|
||||
}
|
||||
|
||||
func (m *SequenceMatcher) isBJunk(s string) bool {
|
||||
_, ok := m.bJunk[s]
|
||||
return ok
|
||||
}
|
||||
|
||||
// Find longest matching block in a[alo:ahi] and b[blo:bhi].
|
||||
//
|
||||
// If IsJunk is not defined:
|
||||
//
|
||||
// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
|
||||
// alo <= i <= i+k <= ahi
|
||||
// blo <= j <= j+k <= bhi
|
||||
// and for all (i',j',k') meeting those conditions,
|
||||
// k >= k'
|
||||
// i <= i'
|
||||
// and if i == i', j <= j'
|
||||
//
|
||||
// In other words, of all maximal matching blocks, return one that
|
||||
// starts earliest in a, and of all those maximal matching blocks that
|
||||
// start earliest in a, return the one that starts earliest in b.
|
||||
//
|
||||
// If IsJunk is defined, first the longest matching block is
|
||||
// determined as above, but with the additional restriction that no
|
||||
// junk element appears in the block. Then that block is extended as
|
||||
// far as possible by matching (only) junk elements on both sides. So
|
||||
// the resulting block never matches on junk except as identical junk
|
||||
// happens to be adjacent to an "interesting" match.
|
||||
//
|
||||
// If no blocks match, return (alo, blo, 0).
|
||||
func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
|
||||
// CAUTION: stripping common prefix or suffix would be incorrect.
|
||||
// E.g.,
|
||||
// ab
|
||||
// acab
|
||||
// Longest matching block is "ab", but if common prefix is
|
||||
// stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
|
||||
// strip, so ends up claiming that ab is changed to acab by
|
||||
// inserting "ca" in the middle. That's minimal but unintuitive:
|
||||
// "it's obvious" that someone inserted "ac" at the front.
|
||||
// Windiff ends up at the same place as diff, but by pairing up
|
||||
// the unique 'b's and then matching the first two 'a's.
|
||||
besti, bestj, bestsize := alo, blo, 0
|
||||
|
||||
// find longest junk-free match
|
||||
// during an iteration of the loop, j2len[j] = length of longest
|
||||
// junk-free match ending with a[i-1] and b[j]
|
||||
j2len := map[int]int{}
|
||||
for i := alo; i != ahi; i++ {
|
||||
// look at all instances of a[i] in b; note that because
|
||||
// b2j has no junk keys, the loop is skipped if a[i] is junk
|
||||
newj2len := map[int]int{}
|
||||
for _, j := range m.b2j[m.a[i]] {
|
||||
// a[i] matches b[j]
|
||||
if j < blo {
|
||||
continue
|
||||
}
|
||||
if j >= bhi {
|
||||
break
|
||||
}
|
||||
k := j2len[j-1] + 1
|
||||
newj2len[j] = k
|
||||
if k > bestsize {
|
||||
besti, bestj, bestsize = i-k+1, j-k+1, k
|
||||
}
|
||||
}
|
||||
j2len = newj2len
|
||||
}
|
||||
|
||||
// Extend the best by non-junk elements on each end. In particular,
|
||||
// "popular" non-junk elements aren't in b2j, which greatly speeds
|
||||
// the inner loop above, but also means "the best" match so far
|
||||
// doesn't contain any junk *or* popular non-junk elements.
|
||||
for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) &&
|
||||
m.a[besti-1] == m.b[bestj-1] {
|
||||
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
|
||||
}
|
||||
for besti+bestsize < ahi && bestj+bestsize < bhi &&
|
||||
!m.isBJunk(m.b[bestj+bestsize]) &&
|
||||
m.a[besti+bestsize] == m.b[bestj+bestsize] {
|
||||
bestsize += 1
|
||||
}
|
||||
|
||||
// Now that we have a wholly interesting match (albeit possibly
|
||||
// empty!), we may as well suck up the matching junk on each
|
||||
// side of it too. Can't think of a good reason not to, and it
|
||||
// saves post-processing the (possibly considerable) expense of
|
||||
// figuring out what to do with it. In the case of an empty
|
||||
// interesting match, this is clearly the right thing to do,
|
||||
// because no other kind of match is possible in the regions.
|
||||
for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) &&
|
||||
m.a[besti-1] == m.b[bestj-1] {
|
||||
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
|
||||
}
|
||||
for besti+bestsize < ahi && bestj+bestsize < bhi &&
|
||||
m.isBJunk(m.b[bestj+bestsize]) &&
|
||||
m.a[besti+bestsize] == m.b[bestj+bestsize] {
|
||||
bestsize += 1
|
||||
}
|
||||
|
||||
return Match{A: besti, B: bestj, Size: bestsize}
|
||||
}
|
||||
|
||||
// Return list of triples describing matching subsequences.
|
||||
//
|
||||
// Each triple is of the form (i, j, n), and means that
|
||||
// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
|
||||
// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are
|
||||
// adjacent triples in the list, and the second is not the last triple in the
|
||||
// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe
|
||||
// adjacent equal blocks.
|
||||
//
|
||||
// The last triple is a dummy, (len(a), len(b), 0), and is the only
|
||||
// triple with n==0.
|
||||
func (m *SequenceMatcher) GetMatchingBlocks() []Match {
|
||||
if m.matchingBlocks != nil {
|
||||
return m.matchingBlocks
|
||||
}
|
||||
|
||||
var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match
|
||||
matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match {
|
||||
match := m.findLongestMatch(alo, ahi, blo, bhi)
|
||||
i, j, k := match.A, match.B, match.Size
|
||||
if match.Size > 0 {
|
||||
if alo < i && blo < j {
|
||||
matched = matchBlocks(alo, i, blo, j, matched)
|
||||
}
|
||||
matched = append(matched, match)
|
||||
if i+k < ahi && j+k < bhi {
|
||||
matched = matchBlocks(i+k, ahi, j+k, bhi, matched)
|
||||
}
|
||||
}
|
||||
return matched
|
||||
}
|
||||
matched := matchBlocks(0, len(m.a), 0, len(m.b), nil)
|
||||
|
||||
// It's possible that we have adjacent equal blocks in the
|
||||
// matching_blocks list now.
|
||||
nonAdjacent := []Match{}
|
||||
i1, j1, k1 := 0, 0, 0
|
||||
for _, b := range matched {
|
||||
// Is this block adjacent to i1, j1, k1?
|
||||
i2, j2, k2 := b.A, b.B, b.Size
|
||||
if i1+k1 == i2 && j1+k1 == j2 {
|
||||
// Yes, so collapse them -- this just increases the length of
|
||||
// the first block by the length of the second, and the first
|
||||
// block so lengthened remains the block to compare against.
|
||||
k1 += k2
|
||||
} else {
|
||||
// Not adjacent. Remember the first block (k1==0 means it's
|
||||
// the dummy we started with), and make the second block the
|
||||
// new block to compare against.
|
||||
if k1 > 0 {
|
||||
nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
|
||||
}
|
||||
i1, j1, k1 = i2, j2, k2
|
||||
}
|
||||
}
|
||||
if k1 > 0 {
|
||||
nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
|
||||
}
|
||||
|
||||
nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0})
|
||||
m.matchingBlocks = nonAdjacent
|
||||
return m.matchingBlocks
|
||||
}
|
||||
|
||||
// Return list of 5-tuples describing how to turn a into b.
|
||||
//
|
||||
// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
|
||||
// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
|
||||
// tuple preceding it, and likewise for j1 == the previous j2.
|
||||
//
|
||||
// The tags are characters, with these meanings:
|
||||
//
|
||||
// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2]
|
||||
//
|
||||
// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case.
|
||||
//
|
||||
// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case.
|
||||
//
|
||||
// 'e' (equal): a[i1:i2] == b[j1:j2]
|
||||
func (m *SequenceMatcher) GetOpCodes() []OpCode {
|
||||
if m.opCodes != nil {
|
||||
return m.opCodes
|
||||
}
|
||||
i, j := 0, 0
|
||||
matching := m.GetMatchingBlocks()
|
||||
opCodes := make([]OpCode, 0, len(matching))
|
||||
for _, m := range matching {
|
||||
// invariant: we've pumped out correct diffs to change
|
||||
// a[:i] into b[:j], and the next matching block is
|
||||
// a[ai:ai+size] == b[bj:bj+size]. So we need to pump
|
||||
// out a diff to change a[i:ai] into b[j:bj], pump out
|
||||
// the matching block, and move (i,j) beyond the match
|
||||
ai, bj, size := m.A, m.B, m.Size
|
||||
tag := byte(0)
|
||||
if i < ai && j < bj {
|
||||
tag = 'r'
|
||||
} else if i < ai {
|
||||
tag = 'd'
|
||||
} else if j < bj {
|
||||
tag = 'i'
|
||||
}
|
||||
if tag > 0 {
|
||||
opCodes = append(opCodes, OpCode{tag, i, ai, j, bj})
|
||||
}
|
||||
i, j = ai+size, bj+size
|
||||
// the list of matching blocks is terminated by a
|
||||
// sentinel with size 0
|
||||
if size > 0 {
|
||||
opCodes = append(opCodes, OpCode{'e', ai, i, bj, j})
|
||||
}
|
||||
}
|
||||
m.opCodes = opCodes
|
||||
return m.opCodes
|
||||
}
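The tag semantics are easiest to see by printing the opcodes for a small pair of inputs; a hedged sketch using the same assumed import path as the earlier example:

package main

import (
	"fmt"

	"github.com/pmezard/go-difflib/difflib" // assumed import path
)

func main() {
	a := []string{"a\n", "b\n", "c\n"}
	b := []string{"a\n", "x\n", "c\n"}
	m := difflib.NewMatcher(a, b)
	for _, op := range m.GetOpCodes() {
		// Tag is 'r', 'd', 'i' or 'e'; the ranges refer to a[I1:I2] and b[J1:J2].
		fmt.Printf("%c a[%d:%d] b[%d:%d]\n", op.Tag, op.I1, op.I2, op.J1, op.J2)
	}
	// Expected output: an 'e' block for "a", an 'r' block replacing "b" with "x",
	// and an 'e' block for "c".
}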
|
||||
|
||||
// Isolate change clusters by eliminating ranges with no changes.
|
||||
//
|
||||
// Return a list of groups with up to n lines of context.
|
||||
// Each group is in the same format as returned by GetOpCodes().
|
||||
func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
|
||||
if n < 0 {
|
||||
n = 3
|
||||
}
|
||||
codes := m.GetOpCodes()
|
||||
if len(codes) == 0 {
|
||||
codes = []OpCode{OpCode{'e', 0, 1, 0, 1}}
|
||||
}
|
||||
// Fixup leading and trailing groups if they show no changes.
|
||||
if codes[0].Tag == 'e' {
|
||||
c := codes[0]
|
||||
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
|
||||
codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}
|
||||
}
|
||||
if codes[len(codes)-1].Tag == 'e' {
|
||||
c := codes[len(codes)-1]
|
||||
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
|
||||
codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}
|
||||
}
|
||||
nn := n + n
|
||||
groups := [][]OpCode{}
|
||||
group := []OpCode{}
|
||||
for _, c := range codes {
|
||||
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
|
||||
// End the current group and start a new one whenever
|
||||
// there is a large range with no changes.
|
||||
if c.Tag == 'e' && i2-i1 > nn {
|
||||
group = append(group, OpCode{c.Tag, i1, min(i2, i1+n),
|
||||
j1, min(j2, j1+n)})
|
||||
groups = append(groups, group)
|
||||
group = []OpCode{}
|
||||
i1, j1 = max(i1, i2-n), max(j1, j2-n)
|
||||
}
|
||||
group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
|
||||
}
|
||||
if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
|
||||
groups = append(groups, group)
|
||||
}
|
||||
return groups
|
||||
}
|
||||
|
||||
// Return a measure of the sequences' similarity (float in [0,1]).
|
||||
//
|
||||
// Where T is the total number of elements in both sequences, and
|
||||
// M is the number of matches, this is 2.0*M / T.
|
||||
// Note that this is 1 if the sequences are identical, and 0 if
|
||||
// they have nothing in common.
|
||||
//
|
||||
// .Ratio() is expensive to compute if you haven't already computed
|
||||
// .GetMatchingBlocks() or .GetOpCodes(), in which case you may
|
||||
// want to try .QuickRatio() or .RealQuickRatio() first to get an
|
||||
// upper bound.
|
||||
func (m *SequenceMatcher) Ratio() float64 {
|
||||
matches := 0
|
||||
for _, m := range m.GetMatchingBlocks() {
|
||||
matches += m.Size
|
||||
}
|
||||
return calculateRatio(matches, len(m.a)+len(m.b))
|
||||
}
|
||||
|
||||
// Return an upper bound on ratio() relatively quickly.
|
||||
//
|
||||
// This isn't defined beyond that it is an upper bound on .Ratio(), and
|
||||
// is faster to compute.
|
||||
func (m *SequenceMatcher) QuickRatio() float64 {
|
||||
// viewing a and b as multisets, set matches to the cardinality
|
||||
// of their intersection; this counts the number of matches
|
||||
// without regard to order, so is clearly an upper bound
|
||||
if m.fullBCount == nil {
|
||||
m.fullBCount = map[string]int{}
|
||||
for _, s := range m.b {
|
||||
m.fullBCount[s] = m.fullBCount[s] + 1
|
||||
}
|
||||
}
|
||||
|
||||
// avail[x] is the number of times x appears in 'b' less the
|
||||
// number of times we've seen it in 'a' so far ... kinda
|
||||
avail := map[string]int{}
|
||||
matches := 0
|
||||
for _, s := range m.a {
|
||||
n, ok := avail[s]
|
||||
if !ok {
|
||||
n = m.fullBCount[s]
|
||||
}
|
||||
avail[s] = n - 1
|
||||
if n > 0 {
|
||||
matches += 1
|
||||
}
|
||||
}
|
||||
return calculateRatio(matches, len(m.a)+len(m.b))
|
||||
}
|
||||
|
||||
// Return an upper bound on ratio() very quickly.
|
||||
//
|
||||
// This isn't defined beyond that it is an upper bound on .Ratio(), and
|
||||
// is faster to compute than either .Ratio() or .QuickRatio().
|
||||
func (m *SequenceMatcher) RealQuickRatio() float64 {
|
||||
la, lb := len(m.a), len(m.b)
|
||||
return calculateRatio(min(la, lb), la+lb)
|
||||
}
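Because RealQuickRatio() and QuickRatio() are upper bounds on Ratio(), a caller can use them as a cheap-to-expensive cascade and only pay for Ratio() when the bounds pass. A hedged sketch (similarEnough is an illustrative helper, not part of this package):

package main

import (
	"fmt"

	"github.com/pmezard/go-difflib/difflib" // assumed import path
)

// similarEnough reports whether two line slices are at least cutoff similar,
// short-circuiting on the cheap upper bounds before computing the full Ratio.
func similarEnough(a, b []string, cutoff float64) bool {
	m := difflib.NewMatcher(a, b)
	if m.RealQuickRatio() < cutoff || m.QuickRatio() < cutoff {
		return false
	}
	return m.Ratio() >= cutoff
}

func main() {
	fmt.Println(similarEnough([]string{"a\n", "b\n"}, []string{"a\n", "c\n"}, 0.6)) // false
}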
|
||||
|
||||
// Convert range to the "ed" format
|
||||
func formatRangeUnified(start, stop int) string {
|
||||
// Per the diff spec at http://www.unix.org/single_unix_specification/
|
||||
beginning := start + 1 // lines start numbering with one
|
||||
length := stop - start
|
||||
if length == 1 {
|
||||
return fmt.Sprintf("%d", beginning)
|
||||
}
|
||||
if length == 0 {
|
||||
beginning -= 1 // empty ranges begin at line just before the range
|
||||
}
|
||||
return fmt.Sprintf("%d,%d", beginning, length)
|
||||
}
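Worked examples of the range format produced above: formatRangeUnified(0, 3) yields "1,3" (three lines starting at line 1), formatRangeUnified(4, 5) yields just "5" (the single-line case omits the length), and formatRangeUnified(5, 5) yields "5,0" (an empty range anchored at the line just before it).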
|
||||
|
||||
// Unified diff parameters
|
||||
type UnifiedDiff struct {
|
||||
A []string // First sequence lines
|
||||
FromFile string // First file name
|
||||
FromDate string // First file time
|
||||
B []string // Second sequence lines
|
||||
ToFile string // Second file name
|
||||
ToDate string // Second file time
|
||||
Eol string // Headers end of line, defaults to LF
|
||||
Context int // Number of context lines
|
||||
}
|
||||
|
||||
// Compare two sequences of lines; generate the delta as a unified diff.
|
||||
//
|
||||
// Unified diffs are a compact way of showing line changes and a few
|
||||
// lines of context. The number of context lines is set by 'n' which
|
||||
// defaults to three.
|
||||
//
|
||||
// By default, the diff control lines (those with ---, +++, or @@) are
|
||||
// created with a trailing newline. This is helpful so that inputs
|
||||
// created from file.readlines() result in diffs that are suitable for
|
||||
// file.writelines() since both the inputs and outputs have trailing
|
||||
// newlines.
|
||||
//
|
||||
// For inputs that do not have trailing newlines, set the lineterm
|
||||
// argument to "" so that the output will be uniformly newline free.
|
||||
//
|
||||
// The unidiff format normally has a header for filenames and modification
|
||||
// times. Any or all of these may be specified using strings for
|
||||
// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
|
||||
// The modification times are normally expressed in the ISO 8601 format.
|
||||
func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
|
||||
buf := bufio.NewWriter(writer)
|
||||
defer buf.Flush()
|
||||
wf := func(format string, args ...interface{}) error {
|
||||
_, err := buf.WriteString(fmt.Sprintf(format, args...))
|
||||
return err
|
||||
}
|
||||
ws := func(s string) error {
|
||||
_, err := buf.WriteString(s)
|
||||
return err
|
||||
}
|
||||
|
||||
if len(diff.Eol) == 0 {
|
||||
diff.Eol = "\n"
|
||||
}
|
||||
|
||||
started := false
|
||||
m := NewMatcher(diff.A, diff.B)
|
||||
for _, g := range m.GetGroupedOpCodes(diff.Context) {
|
||||
if !started {
|
||||
started = true
|
||||
fromDate := ""
|
||||
if len(diff.FromDate) > 0 {
|
||||
fromDate = "\t" + diff.FromDate
|
||||
}
|
||||
toDate := ""
|
||||
if len(diff.ToDate) > 0 {
|
||||
toDate = "\t" + diff.ToDate
|
||||
}
|
||||
if diff.FromFile != "" || diff.ToFile != "" {
|
||||
err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
first, last := g[0], g[len(g)-1]
|
||||
range1 := formatRangeUnified(first.I1, last.I2)
|
||||
range2 := formatRangeUnified(first.J1, last.J2)
|
||||
if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, c := range g {
|
||||
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
|
||||
if c.Tag == 'e' {
|
||||
for _, line := range diff.A[i1:i2] {
|
||||
if err := ws(" " + line); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
if c.Tag == 'r' || c.Tag == 'd' {
|
||||
for _, line := range diff.A[i1:i2] {
|
||||
if err := ws("-" + line); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if c.Tag == 'r' || c.Tag == 'i' {
|
||||
for _, line := range diff.B[j1:j2] {
|
||||
if err := ws("+" + line); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Like WriteUnifiedDiff but returns the diff as a string.
|
||||
func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
|
||||
w := &bytes.Buffer{}
|
||||
err := WriteUnifiedDiff(w, diff)
|
||||
return string(w.Bytes()), err
|
||||
}
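Putting the pieces together, a hedged end-to-end sketch that renders a unified diff between two strings (import path assumed as before; SplitLines is defined later in this file):

package main

import (
	"fmt"

	"github.com/pmezard/go-difflib/difflib" // assumed import path
)

func main() {
	diff := difflib.UnifiedDiff{
		A:        difflib.SplitLines("one\ntwo\nthree\n"),
		B:        difflib.SplitLines("one\n2\nthree\n"),
		FromFile: "a.txt",
		ToFile:   "b.txt",
		Context:  3,
	}
	out, err := difflib.GetUnifiedDiffString(diff)
	if err != nil {
		panic(err)
	}
	fmt.Print(out) // "--- a.txt", "+++ b.txt", an @@ hunk header, then -/+ lines
}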
|
||||
|
||||
// Convert range to the "ed" format.
|
||||
func formatRangeContext(start, stop int) string {
|
||||
// Per the diff spec at http://www.unix.org/single_unix_specification/
|
||||
beginning := start + 1 // lines start numbering with one
|
||||
length := stop - start
|
||||
if length == 0 {
|
||||
beginning -= 1 // empty ranges begin at line just before the range
|
||||
}
|
||||
if length <= 1 {
|
||||
return fmt.Sprintf("%d", beginning)
|
||||
}
|
||||
return fmt.Sprintf("%d,%d", beginning, beginning+length-1)
|
||||
}
|
||||
|
||||
type ContextDiff UnifiedDiff
|
||||
|
||||
// Compare two sequences of lines; generate the delta as a context diff.
|
||||
//
|
||||
// Context diffs are a compact way of showing line changes and a few
|
||||
// lines of context. The number of context lines is set by diff.Context
|
||||
// which defaults to three.
|
||||
//
|
||||
// By default, the diff control lines (those with *** or ---) are
|
||||
// created with a trailing newline.
|
||||
//
|
||||
// For inputs that do not have trailing newlines, set the diff.Eol
|
||||
// argument to "" so that the output will be uniformly newline free.
|
||||
//
|
||||
// The context diff format normally has a header for filenames and
|
||||
// modification times. Any or all of these may be specified using
|
||||
// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate.
|
||||
// The modification times are normally expressed in the ISO 8601 format.
|
||||
// If not specified, the strings default to blanks.
|
||||
func WriteContextDiff(writer io.Writer, diff ContextDiff) error {
|
||||
buf := bufio.NewWriter(writer)
|
||||
defer buf.Flush()
|
||||
var diffErr error
|
||||
wf := func(format string, args ...interface{}) {
|
||||
_, err := buf.WriteString(fmt.Sprintf(format, args...))
|
||||
if diffErr == nil && err != nil {
|
||||
diffErr = err
|
||||
}
|
||||
}
|
||||
ws := func(s string) {
|
||||
_, err := buf.WriteString(s)
|
||||
if diffErr == nil && err != nil {
|
||||
diffErr = err
|
||||
}
|
||||
}
|
||||
|
||||
if len(diff.Eol) == 0 {
|
||||
diff.Eol = "\n"
|
||||
}
|
||||
|
||||
prefix := map[byte]string{
|
||||
'i': "+ ",
|
||||
'd': "- ",
|
||||
'r': "! ",
|
||||
'e': " ",
|
||||
}
|
||||
|
||||
started := false
|
||||
m := NewMatcher(diff.A, diff.B)
|
||||
for _, g := range m.GetGroupedOpCodes(diff.Context) {
|
||||
if !started {
|
||||
started = true
|
||||
fromDate := ""
|
||||
if len(diff.FromDate) > 0 {
|
||||
fromDate = "\t" + diff.FromDate
|
||||
}
|
||||
toDate := ""
|
||||
if len(diff.ToDate) > 0 {
|
||||
toDate = "\t" + diff.ToDate
|
||||
}
|
||||
if diff.FromFile != "" || diff.ToFile != "" {
|
||||
wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol)
|
||||
wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol)
|
||||
}
|
||||
}
|
||||
|
||||
first, last := g[0], g[len(g)-1]
|
||||
ws("***************" + diff.Eol)
|
||||
|
||||
range1 := formatRangeContext(first.I1, last.I2)
|
||||
wf("*** %s ****%s", range1, diff.Eol)
|
||||
for _, c := range g {
|
||||
if c.Tag == 'r' || c.Tag == 'd' {
|
||||
for _, cc := range g {
|
||||
if cc.Tag == 'i' {
|
||||
continue
|
||||
}
|
||||
for _, line := range diff.A[cc.I1:cc.I2] {
|
||||
ws(prefix[cc.Tag] + line)
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
range2 := formatRangeContext(first.J1, last.J2)
|
||||
wf("--- %s ----%s", range2, diff.Eol)
|
||||
for _, c := range g {
|
||||
if c.Tag == 'r' || c.Tag == 'i' {
|
||||
for _, cc := range g {
|
||||
if cc.Tag == 'd' {
|
||||
continue
|
||||
}
|
||||
for _, line := range diff.B[cc.J1:cc.J2] {
|
||||
ws(prefix[cc.Tag] + line)
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return diffErr
|
||||
}
|
||||
|
||||
// Like WriteContextDiff but returns the diff as a string.
|
||||
func GetContextDiffString(diff ContextDiff) (string, error) {
|
||||
w := &bytes.Buffer{}
|
||||
err := WriteContextDiff(w, diff)
|
||||
return string(w.Bytes()), err
|
||||
}
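Since ContextDiff is simply a renamed UnifiedDiff, the previous unified-diff sketch can be reused for the context format with a single type conversion (continuing that example, with the same assumed import path):

	out, err := difflib.GetContextDiffString(difflib.ContextDiff(diff))
	if err != nil {
		panic(err)
	}
	fmt.Print(out) // "*** a.txt", "--- b.txt", then "***"/"---" hunks with !/+/- prefixes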
|
||||
|
||||
// Split a string on "\n" while preserving the newlines. The output can be used
|
||||
// as input for UnifiedDiff and ContextDiff structures.
|
||||
func SplitLines(s string) []string {
|
||||
lines := strings.SplitAfter(s, "\n")
|
||||
lines[len(lines)-1] += "\n"
|
||||
return lines
|
||||
}
|
21
vendor/github.com/stretchr/testify/LICENSE
generated
vendored
Normal file
21
vendor/github.com/stretchr/testify/LICENSE
generated
vendored
Normal file
|
@@ -0,0 +1,21 @@
|
|||
MIT License
|
||||
|
||||
Copyright (c) 2012-2020 Mat Ryer, Tyler Bunnell and contributors.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
458
vendor/github.com/stretchr/testify/assert/assertion_compare.go
generated
vendored
Normal file
458
vendor/github.com/stretchr/testify/assert/assertion_compare.go
generated
vendored
Normal file
|
@@ -0,0 +1,458 @@
|
|||
package assert
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
|
||||
type CompareType int
|
||||
|
||||
const (
|
||||
compareLess CompareType = iota - 1
|
||||
compareEqual
|
||||
compareGreater
|
||||
)
|
||||
|
||||
var (
|
||||
intType = reflect.TypeOf(int(1))
|
||||
int8Type = reflect.TypeOf(int8(1))
|
||||
int16Type = reflect.TypeOf(int16(1))
|
||||
int32Type = reflect.TypeOf(int32(1))
|
||||
int64Type = reflect.TypeOf(int64(1))
|
||||
|
||||
uintType = reflect.TypeOf(uint(1))
|
||||
uint8Type = reflect.TypeOf(uint8(1))
|
||||
uint16Type = reflect.TypeOf(uint16(1))
|
||||
uint32Type = reflect.TypeOf(uint32(1))
|
||||
uint64Type = reflect.TypeOf(uint64(1))
|
||||
|
||||
float32Type = reflect.TypeOf(float32(1))
|
||||
float64Type = reflect.TypeOf(float64(1))
|
||||
|
||||
stringType = reflect.TypeOf("")
|
||||
|
||||
timeType = reflect.TypeOf(time.Time{})
|
||||
bytesType = reflect.TypeOf([]byte{})
|
||||
)
|
||||
|
||||
func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
|
||||
obj1Value := reflect.ValueOf(obj1)
|
||||
obj2Value := reflect.ValueOf(obj2)
|
||||
|
||||
// throughout this switch we try and avoid calling .Convert() if possible,
|
||||
// as this has a pretty big performance impact
|
||||
switch kind {
|
||||
case reflect.Int:
|
||||
{
|
||||
intobj1, ok := obj1.(int)
|
||||
if !ok {
|
||||
intobj1 = obj1Value.Convert(intType).Interface().(int)
|
||||
}
|
||||
intobj2, ok := obj2.(int)
|
||||
if !ok {
|
||||
intobj2 = obj2Value.Convert(intType).Interface().(int)
|
||||
}
|
||||
if intobj1 > intobj2 {
|
||||
return compareGreater, true
|
||||
}
|
||||
if intobj1 == intobj2 {
|
||||
return compareEqual, true
|
||||
}
|
||||
if intobj1 < intobj2 {
|
||||
return compareLess, true
|
||||
}
|
||||
}
|
||||
case reflect.Int8:
|
||||
{
|
||||
int8obj1, ok := obj1.(int8)
|
||||
if !ok {
|
||||
int8obj1 = obj1Value.Convert(int8Type).Interface().(int8)
|
||||
}
|
||||
int8obj2, ok := obj2.(int8)
|
||||
if !ok {
|
||||
int8obj2 = obj2Value.Convert(int8Type).Interface().(int8)
|
||||
}
|
||||
if int8obj1 > int8obj2 {
|
||||
return compareGreater, true
|
||||
}
|
||||
if int8obj1 == int8obj2 {
|
||||
return compareEqual, true
|
||||
}
|
||||
if int8obj1 < int8obj2 {
|
||||
return compareLess, true
|
||||
}
|
||||
}
|
||||
case reflect.Int16:
|
||||
{
|
||||
int16obj1, ok := obj1.(int16)
|
||||
if !ok {
|
||||
int16obj1 = obj1Value.Convert(int16Type).Interface().(int16)
|
||||
}
|
||||
int16obj2, ok := obj2.(int16)
|
||||
if !ok {
|
||||
int16obj2 = obj2Value.Convert(int16Type).Interface().(int16)
|
||||
}
|
||||
if int16obj1 > int16obj2 {
|
||||
return compareGreater, true
|
||||
}
|
||||
if int16obj1 == int16obj2 {
|
||||
return compareEqual, true
|
||||
}
|
||||
if int16obj1 < int16obj2 {
|
||||
return compareLess, true
|
||||
}
|
||||
}
|
||||
case reflect.Int32:
|
||||
{
|
||||
int32obj1, ok := obj1.(int32)
|
||||
if !ok {
|
||||
int32obj1 = obj1Value.Convert(int32Type).Interface().(int32)
|
||||
}
|
||||
int32obj2, ok := obj2.(int32)
|
||||
if !ok {
|
||||
int32obj2 = obj2Value.Convert(int32Type).Interface().(int32)
|
||||
}
|
||||
if int32obj1 > int32obj2 {
|
||||
return compareGreater, true
|
||||
}
|
||||
if int32obj1 == int32obj2 {
|
||||
return compareEqual, true
|
||||
}
|
||||
if int32obj1 < int32obj2 {
|
||||
return compareLess, true
|
||||
}
|
||||
}
|
||||
case reflect.Int64:
|
||||
{
|
||||
int64obj1, ok := obj1.(int64)
|
||||
if !ok {
|
||||
int64obj1 = obj1Value.Convert(int64Type).Interface().(int64)
|
||||
}
|
||||
int64obj2, ok := obj2.(int64)
|
||||
if !ok {
|
||||
int64obj2 = obj2Value.Convert(int64Type).Interface().(int64)
|
||||
}
|
||||
if int64obj1 > int64obj2 {
|
||||
return compareGreater, true
|
||||
}
|
||||
if int64obj1 == int64obj2 {
|
||||
return compareEqual, true
|
||||
}
|
||||
if int64obj1 < int64obj2 {
|
||||
return compareLess, true
|
||||
}
|
||||
}
|
||||
case reflect.Uint:
|
||||
{
|
||||
uintobj1, ok := obj1.(uint)
|
||||
if !ok {
|
||||
uintobj1 = obj1Value.Convert(uintType).Interface().(uint)
|
||||
}
|
||||
uintobj2, ok := obj2.(uint)
|
||||
if !ok {
|
||||
uintobj2 = obj2Value.Convert(uintType).Interface().(uint)
|
||||
}
|
||||
if uintobj1 > uintobj2 {
|
||||
return compareGreater, true
|
||||
}
|
||||
if uintobj1 == uintobj2 {
|
||||
return compareEqual, true
|
||||
}
|
||||
if uintobj1 < uintobj2 {
|
||||
return compareLess, true
|
||||
}
|
||||
}
|
||||
case reflect.Uint8:
|
||||
{
|
||||
uint8obj1, ok := obj1.(uint8)
|
||||
if !ok {
|
||||
uint8obj1 = obj1Value.Convert(uint8Type).Interface().(uint8)
|
||||
}
|
||||
uint8obj2, ok := obj2.(uint8)
|
||||
if !ok {
|
||||
uint8obj2 = obj2Value.Convert(uint8Type).Interface().(uint8)
|
||||
}
|
||||
if uint8obj1 > uint8obj2 {
|
||||
return compareGreater, true
|
||||
}
|
||||
if uint8obj1 == uint8obj2 {
|
||||
return compareEqual, true
|
||||
}
|
||||
if uint8obj1 < uint8obj2 {
|
||||
return compareLess, true
|
||||
}
|
||||
}
|
||||
case reflect.Uint16:
|
||||
{
|
||||
uint16obj1, ok := obj1.(uint16)
|
||||
if !ok {
|
||||
uint16obj1 = obj1Value.Convert(uint16Type).Interface().(uint16)
|
||||
}
|
||||
uint16obj2, ok := obj2.(uint16)
|
||||
if !ok {
|
||||
uint16obj2 = obj2Value.Convert(uint16Type).Interface().(uint16)
|
||||
}
|
||||
if uint16obj1 > uint16obj2 {
|
||||
return compareGreater, true
|
||||
}
|
||||
if uint16obj1 == uint16obj2 {
|
||||
return compareEqual, true
|
||||
}
|
||||
if uint16obj1 < uint16obj2 {
|
||||
return compareLess, true
|
||||
}
|
||||
}
|
||||
case reflect.Uint32:
|
||||
{
|
||||
uint32obj1, ok := obj1.(uint32)
|
||||
if !ok {
|
||||
uint32obj1 = obj1Value.Convert(uint32Type).Interface().(uint32)
|
||||
}
|
||||
uint32obj2, ok := obj2.(uint32)
|
||||
if !ok {
|
||||
uint32obj2 = obj2Value.Convert(uint32Type).Interface().(uint32)
|
||||
}
|
||||
if uint32obj1 > uint32obj2 {
|
||||
return compareGreater, true
|
||||
}
|
||||
if uint32obj1 == uint32obj2 {
|
||||
return compareEqual, true
|
||||
}
|
||||
if uint32obj1 < uint32obj2 {
|
||||
return compareLess, true
|
||||
}
|
||||
}
|
||||
case reflect.Uint64:
|
||||
{
|
||||
uint64obj1, ok := obj1.(uint64)
|
||||
if !ok {
|
||||
uint64obj1 = obj1Value.Convert(uint64Type).Interface().(uint64)
|
||||
}
|
||||
uint64obj2, ok := obj2.(uint64)
|
||||
if !ok {
|
||||
uint64obj2 = obj2Value.Convert(uint64Type).Interface().(uint64)
|
||||
}
|
||||
if uint64obj1 > uint64obj2 {
|
||||
return compareGreater, true
|
||||
}
|
||||
if uint64obj1 == uint64obj2 {
|
||||
return compareEqual, true
|
||||
}
|
||||
if uint64obj1 < uint64obj2 {
|
||||
return compareLess, true
|
||||
}
|
||||
}
|
||||
case reflect.Float32:
|
||||
{
|
||||
float32obj1, ok := obj1.(float32)
|
||||
if !ok {
|
||||
float32obj1 = obj1Value.Convert(float32Type).Interface().(float32)
|
||||
}
|
||||
float32obj2, ok := obj2.(float32)
|
||||
if !ok {
|
||||
float32obj2 = obj2Value.Convert(float32Type).Interface().(float32)
|
||||
}
|
||||
if float32obj1 > float32obj2 {
|
||||
return compareGreater, true
|
||||
}
|
||||
if float32obj1 == float32obj2 {
|
||||
return compareEqual, true
|
||||
}
|
||||
if float32obj1 < float32obj2 {
|
||||
return compareLess, true
|
||||
}
|
||||
}
|
||||
case reflect.Float64:
|
||||
{
|
||||
float64obj1, ok := obj1.(float64)
|
||||
if !ok {
|
||||
float64obj1 = obj1Value.Convert(float64Type).Interface().(float64)
|
||||
}
|
||||
float64obj2, ok := obj2.(float64)
|
||||
if !ok {
|
||||
float64obj2 = obj2Value.Convert(float64Type).Interface().(float64)
|
||||
}
|
||||
if float64obj1 > float64obj2 {
|
||||
return compareGreater, true
|
||||
}
|
||||
if float64obj1 == float64obj2 {
|
||||
return compareEqual, true
|
||||
}
|
||||
if float64obj1 < float64obj2 {
|
||||
return compareLess, true
|
||||
}
|
||||
}
|
||||
case reflect.String:
|
||||
{
|
||||
stringobj1, ok := obj1.(string)
|
||||
if !ok {
|
||||
stringobj1 = obj1Value.Convert(stringType).Interface().(string)
|
||||
}
|
||||
stringobj2, ok := obj2.(string)
|
||||
if !ok {
|
||||
stringobj2 = obj2Value.Convert(stringType).Interface().(string)
|
||||
}
|
||||
if stringobj1 > stringobj2 {
|
||||
return compareGreater, true
|
||||
}
|
||||
if stringobj1 == stringobj2 {
|
||||
return compareEqual, true
|
||||
}
|
||||
if stringobj1 < stringobj2 {
|
||||
return compareLess, true
|
||||
}
|
||||
}
|
||||
// Check for known struct types that we know how to compare.
|
||||
case reflect.Struct:
|
||||
{
|
||||
// All structs enter here. We're not interested in most types.
|
||||
if !canConvert(obj1Value, timeType) {
|
||||
break
|
||||
}
|
||||
|
||||
// time.Time can be compared!
|
||||
timeObj1, ok := obj1.(time.Time)
|
||||
if !ok {
|
||||
timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time)
|
||||
}
|
||||
|
||||
timeObj2, ok := obj2.(time.Time)
|
||||
if !ok {
|
||||
timeObj2 = obj2Value.Convert(timeType).Interface().(time.Time)
|
||||
}
|
||||
|
||||
return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64)
|
||||
}
|
||||
case reflect.Slice:
|
||||
{
|
||||
// We only care about the []byte type.
|
||||
if !canConvert(obj1Value, bytesType) {
|
||||
break
|
||||
}
|
||||
|
||||
// []byte can be compared!
|
||||
bytesObj1, ok := obj1.([]byte)
|
||||
if !ok {
|
||||
bytesObj1 = obj1Value.Convert(bytesType).Interface().([]byte)
|
||||
|
||||
}
|
||||
bytesObj2, ok := obj2.([]byte)
|
||||
if !ok {
|
||||
bytesObj2 = obj2Value.Convert(bytesType).Interface().([]byte)
|
||||
}
|
||||
|
||||
return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true
|
||||
}
|
||||
}
|
||||
|
||||
return compareEqual, false
|
||||
}
|
||||
|
||||
// Greater asserts that the first element is greater than the second
|
||||
//
|
||||
// assert.Greater(t, 2, 1)
|
||||
// assert.Greater(t, float64(2), float64(1))
|
||||
// assert.Greater(t, "b", "a")
|
||||
func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
|
||||
}
|
||||
|
||||
// GreaterOrEqual asserts that the first element is greater than or equal to the second
|
||||
//
|
||||
// assert.GreaterOrEqual(t, 2, 1)
|
||||
// assert.GreaterOrEqual(t, 2, 2)
|
||||
// assert.GreaterOrEqual(t, "b", "a")
|
||||
// assert.GreaterOrEqual(t, "b", "b")
|
||||
func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
|
||||
}
|
||||
|
||||
// Less asserts that the first element is less than the second
|
||||
//
|
||||
// assert.Less(t, 1, 2)
|
||||
// assert.Less(t, float64(1), float64(2))
|
||||
// assert.Less(t, "a", "b")
|
||||
func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
|
||||
}
|
||||
|
||||
// LessOrEqual asserts that the first element is less than or equal to the second
|
||||
//
|
||||
// assert.LessOrEqual(t, 1, 2)
|
||||
// assert.LessOrEqual(t, 2, 2)
|
||||
// assert.LessOrEqual(t, "a", "b")
|
||||
// assert.LessOrEqual(t, "b", "b")
|
||||
func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
|
||||
}
|
||||
|
||||
// Positive asserts that the specified element is positive
|
||||
//
|
||||
// assert.Positive(t, 1)
|
||||
// assert.Positive(t, 1.23)
|
||||
func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
zero := reflect.Zero(reflect.TypeOf(e))
|
||||
return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs...)
|
||||
}
|
||||
|
||||
// Negative asserts that the specified element is negative
|
||||
//
|
||||
// assert.Negative(t, -1)
|
||||
// assert.Negative(t, -1.23)
|
||||
func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
zero := reflect.Zero(reflect.TypeOf(e))
|
||||
return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs...)
|
||||
}
|
||||
|
||||
func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
|
||||
e1Kind := reflect.ValueOf(e1).Kind()
|
||||
e2Kind := reflect.ValueOf(e2).Kind()
|
||||
if e1Kind != e2Kind {
|
||||
return Fail(t, "Elements should be the same type", msgAndArgs...)
|
||||
}
|
||||
|
||||
compareResult, isComparable := compare(e1, e2, e1Kind)
|
||||
if !isComparable {
|
||||
return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
|
||||
}
|
||||
|
||||
if !containsValue(allowedComparesResults, compareResult) {
|
||||
return Fail(t, fmt.Sprintf(failMessage, e1, e2), msgAndArgs...)
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func containsValue(values []CompareType, value CompareType) bool {
|
||||
for _, v := range values {
|
||||
if v == value {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
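For context, these comparison assertions are normally called from an ordinary Go test; *testing.T satisfies the TestingT interface used throughout this file. A hedged sketch (the package name is hypothetical):

package yourpkg_test // hypothetical test package

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestOrdering(t *testing.T) {
	assert.Greater(t, 2, 1)
	assert.GreaterOrEqual(t, "b", "b")
	assert.Less(t, 1.0, 2.0)
	assert.Negative(t, -3)
	// Note: comparing values of different kinds (e.g. int64 against float64)
	// fails with "Elements should be the same type", per compareTwoValues above.
}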
|
16
vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go
generated
vendored
Normal file
16
vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go
generated
vendored
Normal file
|
@@ -0,0 +1,16 @@
|
|||
//go:build go1.17
|
||||
// +build go1.17
|
||||
|
||||
// TODO: once support for Go 1.16 is dropped, this file can be
|
||||
// merged/removed with assertion_compare_go1.17_test.go and
|
||||
// assertion_compare_legacy.go
|
||||
|
||||
package assert
|
||||
|
||||
import "reflect"
|
||||
|
||||
// Wrapper around reflect.Value.CanConvert, for compatibility
|
||||
// reasons.
|
||||
func canConvert(value reflect.Value, to reflect.Type) bool {
|
||||
return value.CanConvert(to)
|
||||
}
|
16
vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go
generated
vendored
Normal file
16
vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go
generated
vendored
Normal file
|
@@ -0,0 +1,16 @@
|
|||
//go:build !go1.17
|
||||
// +build !go1.17
|
||||
|
||||
// TODO: once support for Go 1.16 is dropped, this file can be
|
||||
// merged/removed with assertion_compare_go1.17_test.go and
|
||||
// assertion_compare_can_convert.go
|
||||
|
||||
package assert
|
||||
|
||||
import "reflect"
|
||||
|
||||
// Older versions of Go do not have the reflect.Value.CanConvert
|
||||
// method.
|
||||
func canConvert(value reflect.Value, to reflect.Type) bool {
|
||||
return false
|
||||
}
|
805
vendor/github.com/stretchr/testify/assert/assertion_format.go
generated
vendored
Normal file
805
vendor/github.com/stretchr/testify/assert/assertion_format.go
generated
vendored
Normal file
|
@@ -0,0 +1,805 @@
|
|||
/*
|
||||
* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
|
||||
* THIS FILE MUST NOT BE EDITED BY HAND
|
||||
*/
|
||||
|
||||
package assert
|
||||
|
||||
import (
|
||||
http "net/http"
|
||||
url "net/url"
|
||||
time "time"
|
||||
)
|
||||
|
||||
// Conditionf uses a Comparison to assert a complex condition.
|
||||
func Conditionf(t TestingT, comp Comparison, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Condition(t, comp, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Containsf asserts that the specified string, list(array, slice...) or map contains the
|
||||
// specified substring or element.
|
||||
//
|
||||
// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted")
|
||||
// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted")
|
||||
// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted")
|
||||
func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Contains(t, s, contains, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// DirExistsf checks whether a directory exists in the given path. It also fails
|
||||
// if the path is a file rather than a directory or there is an error checking whether it exists.
|
||||
func DirExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return DirExists(t, path, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified
|
||||
// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
|
||||
// the number of appearances of each of them in both lists should match.
|
||||
//
|
||||
// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted")
|
||||
func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either
|
||||
// a slice or a channel with len == 0.
|
||||
//
|
||||
// assert.Emptyf(t, obj, "error message %s", "formatted")
|
||||
func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Empty(t, object, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Equalf asserts that two objects are equal.
|
||||
//
|
||||
// assert.Equalf(t, 123, 123, "error message %s", "formatted")
|
||||
//
|
||||
// Pointer variable equality is determined based on the equality of the
|
||||
// referenced values (as opposed to the memory addresses). Function equality
|
||||
// cannot be determined and will always fail.
|
||||
func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Equal(t, expected, actual, append([]interface{}{msg}, args...)...)
|
||||
}
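As the bodies above show, each ...f variant simply forwards msg and args to its base assertion, which formats them into the failure message. A hedged usage sketch (package name hypothetical):

package yourpkg_test // hypothetical test package

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestFormattedMessages(t *testing.T) {
	got, want, key := "b", "b", "color"
	// On failure these would report, e.g., `unexpected value for "color"`.
	assert.Equalf(t, want, got, "unexpected value for %q", key)
	assert.Containsf(t, []string{"red", "blue"}, "red", "missing entry for %q", key)
}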
|
||||
|
||||
// EqualErrorf asserts that a function returned an error (i.e. not `nil`)
|
||||
// and that it is equal to the provided error.
|
||||
//
|
||||
// actualObj, err := SomeFunction()
|
||||
// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted")
|
||||
func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return EqualError(t, theError, errString, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// EqualExportedValuesf asserts that the types of two objects are equal and their public
|
||||
// fields are also equal. This is useful for comparing structs that have private fields
|
||||
// that could potentially differ.
|
||||
//
|
||||
// type S struct {
|
||||
// Exported int
|
||||
// notExported int
|
||||
// }
|
||||
// assert.EqualExportedValuesf(t, S{1, 2}, S{1, 3}, "error message %s", "formatted") => true
|
||||
// assert.EqualExportedValuesf(t, S{1, 2}, S{2, 3}, "error message %s", "formatted") => false
|
||||
func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// EqualValuesf asserts that two objects are equal or convertible to the same types
|
||||
// and equal.
|
||||
//
|
||||
// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
|
||||
func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return EqualValues(t, expected, actual, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Errorf asserts that a function returned an error (i.e. not `nil`).
|
||||
//
|
||||
// actualObj, err := SomeFunction()
|
||||
// if assert.Errorf(t, err, "error message %s", "formatted") {
|
||||
// assert.Equal(t, expectedErrorf, err)
|
||||
// }
|
||||
func Errorf(t TestingT, err error, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Error(t, err, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
|
||||
// This is a wrapper for errors.As.
|
||||
func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return ErrorAs(t, err, target, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// ErrorContainsf asserts that a function returned an error (i.e. not `nil`)
|
||||
// and that the error contains the specified substring.
|
||||
//
|
||||
// actualObj, err := SomeFunction()
|
||||
// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted")
|
||||
func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return ErrorContains(t, theError, contains, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// ErrorIsf asserts that at least one of the errors in err's chain matches target.
|
||||
// This is a wrapper for errors.Is.
|
||||
func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return ErrorIs(t, err, target, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Eventuallyf asserts that given condition will be met in waitFor time,
|
||||
// periodically checking target function each tick.
|
||||
//
|
||||
// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
|
||||
func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Eventually(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// EventuallyWithTf asserts that given condition will be met in waitFor time,
|
||||
// periodically checking target function each tick. In contrast to Eventually,
|
||||
// it supplies a CollectT to the condition function, so that the condition
|
||||
// function can use the CollectT to call other assertions.
|
||||
// The condition is considered "met" if no errors are raised in a tick.
|
||||
// The supplied CollectT collects all errors from one tick (if there are any).
|
||||
// If the condition is not met before waitFor, the collected errors of
|
||||
// the last tick are copied to t.
|
||||
//
|
||||
// externalValue := false
|
||||
// go func() {
|
||||
// time.Sleep(8*time.Second)
|
||||
// externalValue = true
|
||||
// }()
|
||||
// assert.EventuallyWithTf(t, func(c *assert.CollectT, "error message %s", "formatted") {
|
||||
// // add assertions as needed; any assertion failure will fail the current tick
|
||||
// assert.True(c, externalValue, "expected 'externalValue' to be true")
|
||||
// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false")
|
||||
func EventuallyWithTf(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return EventuallyWithT(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Exactlyf asserts that two objects are equal in value and type.
|
||||
//
|
||||
// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted")
|
||||
func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Exactly(t, expected, actual, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Failf reports a failure through Fail.
|
||||
func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Fail(t, failureMessage, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// FailNowf fails test
|
||||
func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return FailNow(t, failureMessage, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Falsef asserts that the specified value is false.
|
||||
//
|
||||
// assert.Falsef(t, myBool, "error message %s", "formatted")
|
||||
func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return False(t, value, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// FileExistsf checks whether a file exists in the given path. It also fails if
|
||||
// the path points to a directory or there is an error when trying to check the file.
|
||||
func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return FileExists(t, path, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Greaterf asserts that the first element is greater than the second
|
||||
//
|
||||
// assert.Greaterf(t, 2, 1, "error message %s", "formatted")
|
||||
// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted")
|
||||
// assert.Greaterf(t, "b", "a", "error message %s", "formatted")
|
||||
func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Greater(t, e1, e2, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// GreaterOrEqualf asserts that the first element is greater than or equal to the second
|
||||
//
|
||||
// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted")
|
||||
// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted")
|
||||
// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted")
|
||||
// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted")
|
||||
func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return GreaterOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// HTTPBodyContainsf asserts that a specified handler returns a
|
||||
// body that contains a string.
|
||||
//
|
||||
// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return HTTPBodyContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// HTTPBodyNotContainsf asserts that a specified handler returns a
|
||||
// body that does not contain a string.
|
||||
//
|
||||
// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return HTTPBodyNotContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// HTTPErrorf asserts that a specified handler returns an error status code.
|
||||
//
|
||||
// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return HTTPError(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// HTTPRedirectf asserts that a specified handler returns a redirect status code.
|
||||
//
|
||||
// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return HTTPRedirect(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// HTTPStatusCodef asserts that a specified handler returns a specified status code.
|
||||
//
|
||||
// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return HTTPStatusCode(t, handler, method, url, values, statuscode, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// HTTPSuccessf asserts that a specified handler returns a success status code.
|
||||
//
|
||||
// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return HTTPSuccess(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Implementsf asserts that an object implements the specified interface.
|
||||
//
|
||||
// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
|
||||
func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Implements(t, interfaceObject, object, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// InDeltaf asserts that the two numerals are within delta of each other.
|
||||
//
|
||||
// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
|
||||
func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return InDelta(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
|
||||
func InDeltaMapValuesf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return InDeltaMapValues(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// InDeltaSlicef is the same as InDelta, except it compares two slices.
|
||||
func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return InDeltaSlice(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// InEpsilonf asserts that expected and actual have a relative error less than epsilon
|
||||
func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return InEpsilon(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices.
|
||||
func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return InEpsilonSlice(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// IsDecreasingf asserts that the collection is decreasing
|
||||
//
|
||||
// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted")
|
||||
// assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted")
|
||||
// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
|
||||
func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return IsDecreasing(t, object, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// IsIncreasingf asserts that the collection is increasing
|
||||
//
|
||||
// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted")
|
||||
// assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted")
|
||||
// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
|
||||
func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return IsIncreasing(t, object, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// IsNonDecreasingf asserts that the collection is not decreasing
|
||||
//
|
||||
// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted")
|
||||
// assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted")
|
||||
// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
|
||||
func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return IsNonDecreasing(t, object, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// IsNonIncreasingf asserts that the collection is not increasing
|
||||
//
|
||||
// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted")
|
||||
// assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted")
|
||||
// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
|
||||
func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return IsNonIncreasing(t, object, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// IsTypef asserts that the specified objects are of the same type.
|
||||
func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return IsType(t, expectedType, object, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// JSONEqf asserts that two JSON strings are equivalent.
|
||||
//
|
||||
// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
|
||||
func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Lenf asserts that the specified object has specific length.
|
||||
// Lenf also fails if the object has a type that len() does not accept.
|
||||
//
|
||||
// assert.Lenf(t, mySlice, 3, "error message %s", "formatted")
|
||||
func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Len(t, object, length, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Lessf asserts that the first element is less than the second
|
||||
//
|
||||
// assert.Lessf(t, 1, 2, "error message %s", "formatted")
|
||||
// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted")
|
||||
// assert.Lessf(t, "a", "b", "error message %s", "formatted")
|
||||
func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Less(t, e1, e2, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// LessOrEqualf asserts that the first element is less than or equal to the second
|
||||
//
|
||||
// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted")
|
||||
// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted")
|
||||
// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted")
|
||||
// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted")
|
||||
func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return LessOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Negativef asserts that the specified element is negative
|
||||
//
|
||||
// assert.Negativef(t, -1, "error message %s", "formatted")
|
||||
// assert.Negativef(t, -1.23, "error message %s", "formatted")
|
||||
func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Negative(t, e, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Neverf asserts that the given condition is never satisfied within waitFor time,
|
||||
// periodically checking the target function each tick.
|
||||
//
|
||||
// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
|
||||
func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Never(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Nilf asserts that the specified object is nil.
|
||||
//
|
||||
// assert.Nilf(t, err, "error message %s", "formatted")
|
||||
func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Nil(t, object, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// NoDirExistsf checks whether a directory does not exist in the given path.
|
||||
// It fails if the path points to an existing _directory_ only.
|
||||
func NoDirExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return NoDirExists(t, path, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// NoErrorf asserts that a function returned no error (i.e. `nil`).
|
||||
//
|
||||
// actualObj, err := SomeFunction()
|
||||
// if assert.NoErrorf(t, err, "error message %s", "formatted") {
|
||||
// assert.Equal(t, expectedObj, actualObj)
|
||||
// }
|
||||
func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return NoError(t, err, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// NoFileExistsf checks whether a file does not exist in a given path. It fails
|
||||
// if the path points to an existing _file_ only.
|
||||
func NoFileExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return NoFileExists(t, path, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
|
||||
// specified substring or element.
|
||||
//
|
||||
// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted")
|
||||
// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted")
|
||||
// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted")
|
||||
func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return NotContains(t, s, contains, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
|
||||
// a slice or a channel with len == 0.
|
||||
//
|
||||
// if assert.NotEmptyf(t, obj, "error message %s", "formatted") {
|
||||
// assert.Equal(t, "two", obj[1])
|
||||
// }
|
||||
func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return NotEmpty(t, object, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// NotEqualf asserts that the specified values are NOT equal.
|
||||
//
|
||||
// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted")
|
||||
//
|
||||
// Pointer variable equality is determined based on the equality of the
|
||||
// referenced values (as opposed to the memory addresses).
|
||||
func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return NotEqual(t, expected, actual, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// NotEqualValuesf asserts that two objects are not equal even when converted to the same type
|
||||
//
|
||||
// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted")
|
||||
func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return NotEqualValues(t, expected, actual, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// NotErrorIsf asserts that none of the errors in err's chain matches target.
|
||||
// This is a wrapper for errors.Is.
|
||||
func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return NotErrorIs(t, err, target, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// NotNilf asserts that the specified object is not nil.
|
||||
//
|
||||
// assert.NotNilf(t, err, "error message %s", "formatted")
|
||||
func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return NotNil(t, object, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
|
||||
//
|
||||
// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted")
|
||||
func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return NotPanics(t, f, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// NotRegexpf asserts that a specified regexp does not match a string.
|
||||
//
|
||||
// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
|
||||
// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted")
|
||||
func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return NotRegexp(t, rx, str, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// NotSamef asserts that two pointers do not reference the same object.
|
||||
//
|
||||
// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted")
|
||||
//
|
||||
// Both arguments must be pointer variables. Pointer variable sameness is
|
||||
// determined based on the equality of both type and value.
|
||||
func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// NotSubsetf asserts that the specified list(array, slice...) contains not all
|
||||
// elements given in the specified subset(array, slice...).
|
||||
//
|
||||
// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
|
||||
func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return NotSubset(t, list, subset, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// NotZerof asserts that i is not the zero value for its type.
|
||||
func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return NotZero(t, i, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Panicsf asserts that the code inside the specified PanicTestFunc panics.
|
||||
//
|
||||
// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted")
|
||||
func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Panics(t, f, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc
|
||||
// panics, and that the recovered panic value is an error that satisfies the
|
||||
// EqualError comparison.
|
||||
//
|
||||
// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
|
||||
func PanicsWithErrorf(t TestingT, errString string, f PanicTestFunc, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return PanicsWithError(t, errString, f, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
|
||||
// the recovered panic value equals the expected panic value.
|
||||
//
|
||||
// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
|
||||
func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return PanicsWithValue(t, expected, f, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Positivef asserts that the specified element is positive
|
||||
//
|
||||
// assert.Positivef(t, 1, "error message %s", "formatted")
|
||||
// assert.Positivef(t, 1.23, "error message %s", "formatted")
|
||||
func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Positive(t, e, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Regexpf asserts that a specified regexp matches a string.
|
||||
//
|
||||
// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
|
||||
// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted")
|
||||
func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Regexp(t, rx, str, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Samef asserts that two pointers reference the same object.
|
||||
//
|
||||
// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted")
|
||||
//
|
||||
// Both arguments must be pointer variables. Pointer variable sameness is
|
||||
// determined based on the equality of both type and value.
|
||||
func Samef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Same(t, expected, actual, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Subsetf asserts that the specified list(array, slice...) contains all
|
||||
// elements given in the specified subset(array, slice...).
|
||||
//
|
||||
// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
|
||||
func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Subset(t, list, subset, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Truef asserts that the specified value is true.
|
||||
//
|
||||
// assert.Truef(t, myBool, "error message %s", "formatted")
|
||||
func Truef(t TestingT, value bool, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return True(t, value, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// WithinDurationf asserts that the two times are within duration delta of each other.
|
||||
//
|
||||
// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
|
||||
func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// WithinRangef asserts that a time is within a time range (inclusive).
|
||||
//
|
||||
// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted")
|
||||
func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return WithinRange(t, actual, start, end, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// YAMLEqf asserts that two YAML strings are equivalent.
|
||||
func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return YAMLEq(t, expected, actual, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Zerof asserts that i is the zero value for its type.
|
||||
func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return Zero(t, i, append([]interface{}{msg}, args...)...)
|
||||
}
|
5
vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl
generated
vendored
Normal file
@@ -0,0 +1,5 @@
{{.CommentFormat}}
func {{.DocInfo.Name}}f(t TestingT, {{.ParamsFormat}}) bool {
	if h, ok := t.(tHelper); ok { h.Helper() }
	return {{.DocInfo.Name}}(t, {{.ForwardedParamsFormat}})
}
1598
vendor/github.com/stretchr/testify/assert/assertion_forward.go
generated
vendored
Normal file
File diff suppressed because it is too large
5
vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl
generated
vendored
Normal file
@@ -0,0 +1,5 @@
{{.CommentWithoutT "a"}}
func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) bool {
	if h, ok := a.t.(tHelper); ok { h.Helper() }
	return {{.DocInfo.Name}}(a.t, {{.ForwardedParams}})
}
81
vendor/github.com/stretchr/testify/assert/assertion_order.go
generated
vendored
Normal file
|
@ -0,0 +1,81 @@
|
|||
package assert
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// isOrdered checks that collection contains orderable elements.
|
||||
func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
|
||||
objKind := reflect.TypeOf(object).Kind()
|
||||
if objKind != reflect.Slice && objKind != reflect.Array {
|
||||
return false
|
||||
}
|
||||
|
||||
objValue := reflect.ValueOf(object)
|
||||
objLen := objValue.Len()
|
||||
|
||||
if objLen <= 1 {
|
||||
return true
|
||||
}
|
||||
|
||||
value := objValue.Index(0)
|
||||
valueInterface := value.Interface()
|
||||
firstValueKind := value.Kind()
|
||||
|
||||
for i := 1; i < objLen; i++ {
|
||||
prevValue := value
|
||||
prevValueInterface := valueInterface
|
||||
|
||||
value = objValue.Index(i)
|
||||
valueInterface = value.Interface()
|
||||
|
||||
compareResult, isComparable := compare(prevValueInterface, valueInterface, firstValueKind)
|
||||
|
||||
if !isComparable {
|
||||
return Fail(t, fmt.Sprintf("Can not compare type \"%s\" and \"%s\"", reflect.TypeOf(value), reflect.TypeOf(prevValue)), msgAndArgs...)
|
||||
}
|
||||
|
||||
if !containsValue(allowedComparesResults, compareResult) {
|
||||
return Fail(t, fmt.Sprintf(failMessage, prevValue, value), msgAndArgs...)
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// IsIncreasing asserts that the collection is increasing
|
||||
//
|
||||
// assert.IsIncreasing(t, []int{1, 2, 3})
|
||||
// assert.IsIncreasing(t, []float{1, 2})
|
||||
// assert.IsIncreasing(t, []string{"a", "b"})
|
||||
func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
|
||||
return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
|
||||
}
|
||||
|
||||
// IsNonIncreasing asserts that the collection is not increasing
|
||||
//
|
||||
// assert.IsNonIncreasing(t, []int{2, 1, 1})
|
||||
// assert.IsNonIncreasing(t, []float{2, 1})
|
||||
// assert.IsNonIncreasing(t, []string{"b", "a"})
|
||||
func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
|
||||
return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
|
||||
}
|
||||
|
||||
// IsDecreasing asserts that the collection is decreasing
|
||||
//
|
||||
// assert.IsDecreasing(t, []int{2, 1, 0})
|
||||
// assert.IsDecreasing(t, []float{2, 1})
|
||||
// assert.IsDecreasing(t, []string{"b", "a"})
|
||||
func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
|
||||
return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
|
||||
}
|
||||
|
||||
// IsNonDecreasing asserts that the collection is not decreasing
|
||||
//
|
||||
// assert.IsNonDecreasing(t, []int{1, 1, 2})
|
||||
// assert.IsNonDecreasing(t, []float{1, 2})
|
||||
// assert.IsNonDecreasing(t, []string{"a", "b"})
|
||||
func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
|
||||
return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
|
||||
}
|
2054
vendor/github.com/stretchr/testify/assert/assertions.go
generated
vendored
Normal file
File diff suppressed because it is too large
46
vendor/github.com/stretchr/testify/assert/doc.go
generated
vendored
Normal file
@@ -0,0 +1,46 @@
// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system.
//
// # Example Usage
//
// The following is a complete example using assert in a standard test function:
//
//	import (
//	  "testing"
//	  "github.com/stretchr/testify/assert"
//	)
//
//	func TestSomething(t *testing.T) {
//
//	  var a string = "Hello"
//	  var b string = "Hello"
//
//	  assert.Equal(t, a, b, "The two words should be the same.")
//
//	}
//
// If you assert many times, use the format below:
//
//	import (
//	  "testing"
//	  "github.com/stretchr/testify/assert"
//	)
//
//	func TestSomething(t *testing.T) {
//	  assert := assert.New(t)
//
//	  var a string = "Hello"
//	  var b string = "Hello"
//
//	  assert.Equal(a, b, "The two words should be the same.")
//	}
//
// # Assertions
//
// Assertions allow you to easily write test code, and are global funcs in the `assert` package.
// All assertion functions take, as the first argument, the `*testing.T` object provided by the
// testing framework. This allows the assertion funcs to write the failings and other details to
// the correct place.
//
// Every assertion function also takes an optional string message as the final argument,
// allowing custom error messages to be appended to the message the assertion method outputs.
package assert
10
vendor/github.com/stretchr/testify/assert/errors.go
generated
vendored
Normal file
@@ -0,0 +1,10 @@
package assert

import (
	"errors"
)

// AnError is an error instance useful for testing. If the code does not care
// about error specifics, and only needs to return the error for example, this
// error should be used to make the test code more readable.
var AnError = errors.New("assert.AnError general error for testing")
16
vendor/github.com/stretchr/testify/assert/forward_assertions.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
package assert

// Assertions provides assertion methods around the
// TestingT interface.
type Assertions struct {
	t TestingT
}

// New makes a new Assertions object for the specified TestingT.
func New(t TestingT) *Assertions {
	return &Assertions{
		t: t,
	}
}

//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs"
162
vendor/github.com/stretchr/testify/assert/http_assertions.go
generated
vendored
Normal file
|
@ -0,0 +1,162 @@
|
|||
package assert
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// httpCode is a helper that returns HTTP code of the response. It returns -1 and
|
||||
// an error if building a new request fails.
|
||||
func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) {
|
||||
w := httptest.NewRecorder()
|
||||
req, err := http.NewRequest(method, url, nil)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
req.URL.RawQuery = values.Encode()
|
||||
handler(w, req)
|
||||
return w.Code, nil
|
||||
}
|
||||
|
||||
// HTTPSuccess asserts that a specified handler returns a success status code.
|
||||
//
|
||||
// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
code, err := httpCode(handler, method, url, values)
|
||||
if err != nil {
|
||||
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
|
||||
}
|
||||
|
||||
isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent
|
||||
if !isSuccessCode {
|
||||
Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code))
|
||||
}
|
||||
|
||||
return isSuccessCode
|
||||
}
|
||||
|
||||
// HTTPRedirect asserts that a specified handler returns a redirect status code.
|
||||
//
|
||||
// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
code, err := httpCode(handler, method, url, values)
|
||||
if err != nil {
|
||||
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
|
||||
}
|
||||
|
||||
isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect
|
||||
if !isRedirectCode {
|
||||
Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code))
|
||||
}
|
||||
|
||||
return isRedirectCode
|
||||
}
|
||||
|
||||
// HTTPError asserts that a specified handler returns an error status code.
|
||||
//
|
||||
// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
code, err := httpCode(handler, method, url, values)
|
||||
if err != nil {
|
||||
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
|
||||
}
|
||||
|
||||
isErrorCode := code >= http.StatusBadRequest
|
||||
if !isErrorCode {
|
||||
Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code))
|
||||
}
|
||||
|
||||
return isErrorCode
|
||||
}
|
||||
|
||||
// HTTPStatusCode asserts that a specified handler returns a specified status code.
|
||||
//
|
||||
// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501)
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
code, err := httpCode(handler, method, url, values)
|
||||
if err != nil {
|
||||
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
|
||||
}
|
||||
|
||||
successful := code == statuscode
|
||||
if !successful {
|
||||
Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code))
|
||||
}
|
||||
|
||||
return successful
|
||||
}
|
||||
|
||||
// HTTPBody is a helper that returns HTTP body of the response. It returns
|
||||
// empty string if building a new request fails.
|
||||
func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string {
|
||||
w := httptest.NewRecorder()
|
||||
req, err := http.NewRequest(method, url+"?"+values.Encode(), nil)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
handler(w, req)
|
||||
return w.Body.String()
|
||||
}
|
||||
|
||||
// HTTPBodyContains asserts that a specified handler returns a
|
||||
// body that contains a string.
|
||||
//
|
||||
// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
body := HTTPBody(handler, method, url, values)
|
||||
|
||||
contains := strings.Contains(body, fmt.Sprint(str))
|
||||
if !contains {
|
||||
Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
|
||||
}
|
||||
|
||||
return contains
|
||||
}
|
||||
|
||||
// HTTPBodyNotContains asserts that a specified handler returns a
|
||||
// body that does not contain a string.
|
||||
//
|
||||
// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
|
||||
//
|
||||
// Returns whether the assertion was successful (true) or not (false).
|
||||
func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
body := HTTPBody(handler, method, url, values)
|
||||
|
||||
contains := strings.Contains(body, fmt.Sprint(str))
|
||||
if contains {
|
||||
Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
|
||||
}
|
||||
|
||||
return !contains
|
||||
}
|
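A brief, hedged usage sketch of the helpers above (the handler and test are invented for illustration, not part of this diff): each assertion takes a bare http.HandlerFunc plus method, URL, and query values and invokes it in-process through an httptest recorder:

```go
package example_test

import (
	"fmt"
	"net/http"
	"net/url"
	"testing"

	"github.com/stretchr/testify/assert"
)

// healthHandler is a hypothetical handler used only to exercise the helpers.
func healthHandler(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusOK)
	fmt.Fprintf(w, "ok name=%s", r.URL.Query().Get("name"))
}

func TestHealthEndpoint(t *testing.T) {
	values := url.Values{"name": []string{"db"}}

	// No server is started; the handler runs against an in-memory recorder.
	assert.HTTPSuccess(t, healthHandler, "GET", "/health", values)
	assert.HTTPStatusCode(t, healthHandler, "GET", "/health", values, http.StatusOK)
	assert.HTTPBodyContains(t, healthHandler, "GET", "/health", values, "name=db")
}
```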
29
vendor/github.com/stretchr/testify/require/doc.go
generated
vendored
Normal file
@@ -0,0 +1,29 @@
// Package require implements the same assertions as the `assert` package but
// stops test execution when a test fails.
//
// # Example Usage
//
// The following is a complete example using require in a standard test function:
//
//	import (
//	  "testing"
//	  "github.com/stretchr/testify/require"
//	)
//
//	func TestSomething(t *testing.T) {
//
//	  var a string = "Hello"
//	  var b string = "Hello"
//
//	  require.Equal(t, a, b, "The two words should be the same.")
//
//	}
//
// # Assertions
//
// The `require` package has the same global functions as the `assert` package,
// but instead of returning a boolean result they call `t.FailNow()`.
//
// Every assertion function also takes an optional string message as the final argument,
// allowing custom error messages to be appended to the message the assertion method outputs.
package require
16
vendor/github.com/stretchr/testify/require/forward_requirements.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
package require

// Assertions provides assertion methods around the
// TestingT interface.
type Assertions struct {
	t TestingT
}

// New makes a new Assertions object for the specified TestingT.
func New(t TestingT) *Assertions {
	return &Assertions{
		t: t,
	}
}

//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=require -template=require_forward.go.tmpl -include-format-funcs"
2031
vendor/github.com/stretchr/testify/require/require.go
generated
vendored
Normal file
File diff suppressed because it is too large
6
vendor/github.com/stretchr/testify/require/require.go.tmpl
generated
vendored
Normal file
@@ -0,0 +1,6 @@
{{.Comment}}
func {{.DocInfo.Name}}(t TestingT, {{.Params}}) {
	if h, ok := t.(tHelper); ok { h.Helper() }
	if assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) { return }
	t.FailNow()
}
1599
vendor/github.com/stretchr/testify/require/require_forward.go
generated
vendored
Normal file
File diff suppressed because it is too large
5
vendor/github.com/stretchr/testify/require/require_forward.go.tmpl
generated
vendored
Normal file
@@ -0,0 +1,5 @@
{{.CommentWithoutT "a"}}
func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) {
	if h, ok := a.t.(tHelper); ok { h.Helper() }
	{{.DocInfo.Name}}(a.t, {{.ForwardedParams}})
}
29
vendor/github.com/stretchr/testify/require/requirements.go
generated
vendored
Normal file
@@ -0,0 +1,29 @@
package require

// TestingT is an interface wrapper around *testing.T
type TestingT interface {
	Errorf(format string, args ...interface{})
	FailNow()
}

type tHelper interface {
	Helper()
}

// ComparisonAssertionFunc is a common function prototype when comparing two values. Can be useful
// for table driven tests.
type ComparisonAssertionFunc func(TestingT, interface{}, interface{}, ...interface{})

// ValueAssertionFunc is a common function prototype when validating a single value. Can be useful
// for table driven tests.
type ValueAssertionFunc func(TestingT, interface{}, ...interface{})

// BoolAssertionFunc is a common function prototype when validating a bool value. Can be useful
// for table driven tests.
type BoolAssertionFunc func(TestingT, bool, ...interface{})

// ErrorAssertionFunc is a common function prototype when validating an error value. Can be useful
// for table driven tests.
type ErrorAssertionFunc func(TestingT, error, ...interface{})

//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=require -template=require.go.tmpl -include-format-funcs"
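The assertion-func types above are intended for table-driven tests. As a rough, hedged sketch (the package, test name, and inputs below are invented for illustration, not taken from this diff), a table row can carry a require.ErrorAssertionFunc so each case chooses require.NoError or require.Error:

```go
package example_test

import (
	"strconv"
	"testing"

	"github.com/stretchr/testify/require"
)

// TestAtoi is a hypothetical table-driven test: each row supplies its
// error expectation as a require.ErrorAssertionFunc value.
func TestAtoi(t *testing.T) {
	tests := []struct {
		name    string
		input   string
		wantErr require.ErrorAssertionFunc
	}{
		{name: "valid number", input: "42", wantErr: require.NoError},
		{name: "not a number", input: "abc", wantErr: require.Error},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			_, err := strconv.Atoi(tt.input)
			tt.wantErr(t, err) // require.NoError and require.Error both match the func type
		})
	}
}
```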
66
vendor/github.com/stretchr/testify/suite/doc.go
generated
vendored
Normal file
|
@ -0,0 +1,66 @@
|
|||
// Package suite contains logic for creating testing suite structs
|
||||
// and running the methods on those structs as tests. The most useful
|
||||
// piece of this package is that you can create setup/teardown methods
|
||||
// on your testing suites, which will run before/after the whole suite
|
||||
// or individual tests (depending on which interface(s) you
|
||||
// implement).
|
||||
//
|
||||
// A testing suite is usually built by first extending the built-in
|
||||
// suite functionality from suite.Suite in testify. Alternatively,
|
||||
// you could reproduce that logic on your own if you wanted (you
|
||||
// just need to implement the TestingSuite interface from
|
||||
// suite/interfaces.go).
|
||||
//
|
||||
// After that, you can implement any of the interfaces in
|
||||
// suite/interfaces.go to add setup/teardown functionality to your
|
||||
// suite, and add any methods that start with "Test" to add tests.
|
||||
// Methods that do not match any suite interfaces and do not begin
|
||||
// with "Test" will not be run by testify, and can safely be used as
|
||||
// helper methods.
|
||||
//
|
||||
// Once you've built your testing suite, you need to run the suite
|
||||
// (using suite.Run from testify) inside any function that matches the
|
||||
// identity that "go test" is already looking for (i.e.
|
||||
// func(*testing.T)).
|
||||
//
|
||||
// Test suites can be selected with the regular expression passed to the
// "-run" command-line argument, and individual suite methods can be
// selected with the regular expression passed to the "-m" argument.
// The Suite type also provides assertion methods.
|
||||
//
|
||||
// A crude example:
|
||||
//
|
||||
// // Basic imports
|
||||
// import (
|
||||
// "testing"
|
||||
// "github.com/stretchr/testify/assert"
|
||||
// "github.com/stretchr/testify/suite"
|
||||
// )
|
||||
//
|
||||
// // Define the suite, and absorb the built-in basic suite
|
||||
// // functionality from testify - including a T() method which
|
||||
// // returns the current testing context
|
||||
// type ExampleTestSuite struct {
|
||||
// suite.Suite
|
||||
// VariableThatShouldStartAtFive int
|
||||
// }
|
||||
//
|
||||
// // Make sure that VariableThatShouldStartAtFive is set to five
|
||||
// // before each test
|
||||
// func (suite *ExampleTestSuite) SetupTest() {
|
||||
// suite.VariableThatShouldStartAtFive = 5
|
||||
// }
|
||||
//
|
||||
// // All methods that begin with "Test" are run as tests within a
|
||||
// // suite.
|
||||
// func (suite *ExampleTestSuite) TestExample() {
|
||||
// assert.Equal(suite.T(), 5, suite.VariableThatShouldStartAtFive)
|
||||
// suite.Equal(5, suite.VariableThatShouldStartAtFive)
|
||||
// }
|
||||
//
|
||||
// // In order for 'go test' to run this suite, we need to create
|
||||
// // a normal test function and pass our suite to suite.Run
|
||||
// func TestExampleTestSuite(t *testing.T) {
|
||||
// suite.Run(t, new(ExampleTestSuite))
|
||||
// }
|
||||
package suite
|
66
vendor/github.com/stretchr/testify/suite/interfaces.go
generated
vendored
Normal file
|
@ -0,0 +1,66 @@
|
|||
package suite
|
||||
|
||||
import "testing"
|
||||
|
||||
// TestingSuite can store and return the current *testing.T context
|
||||
// generated by 'go test'.
|
||||
type TestingSuite interface {
|
||||
T() *testing.T
|
||||
SetT(*testing.T)
|
||||
SetS(suite TestingSuite)
|
||||
}
|
||||
|
||||
// SetupAllSuite has a SetupSuite method, which will run before the
|
||||
// tests in the suite are run.
|
||||
type SetupAllSuite interface {
|
||||
SetupSuite()
|
||||
}
|
||||
|
||||
// SetupTestSuite has a SetupTest method, which will run before each
|
||||
// test in the suite.
|
||||
type SetupTestSuite interface {
|
||||
SetupTest()
|
||||
}
|
||||
|
||||
// TearDownAllSuite has a TearDownSuite method, which will run after
|
||||
// all the tests in the suite have been run.
|
||||
type TearDownAllSuite interface {
|
||||
TearDownSuite()
|
||||
}
|
||||
|
||||
// TearDownTestSuite has a TearDownTest method, which will run after
|
||||
// each test in the suite.
|
||||
type TearDownTestSuite interface {
|
||||
TearDownTest()
|
||||
}
|
||||
|
||||
// BeforeTest has a function to be executed right before the test
|
||||
// starts and receives the suite and test names as input
|
||||
type BeforeTest interface {
|
||||
BeforeTest(suiteName, testName string)
|
||||
}
|
||||
|
||||
// AfterTest has a function to be executed right after the test
|
||||
// finishes and receives the suite and test names as input
|
||||
type AfterTest interface {
|
||||
AfterTest(suiteName, testName string)
|
||||
}
|
||||
|
||||
// WithStats implements HandleStats, a function that will be executed
|
||||
// when a test suite is finished. The stats contain information about
|
||||
// the execution of that suite and its tests.
|
||||
type WithStats interface {
|
||||
HandleStats(suiteName string, stats *SuiteInformation)
|
||||
}
|
||||
|
||||
// SetupSubTest has a SetupSubTest method, which will run before each
|
||||
// subtest in the suite.
|
||||
type SetupSubTest interface {
|
||||
SetupSubTest()
|
||||
}
|
||||
|
||||
// TearDownSubTest has a TearDownSubTest method, which will run after
|
||||
// each subtest in the suite have been run.
|
||||
type TearDownSubTest interface {
|
||||
TearDownSubTest()
|
||||
}
|
46
vendor/github.com/stretchr/testify/suite/stats.go
generated
vendored
Normal file
|
@ -0,0 +1,46 @@
|
|||
package suite
|
||||
|
||||
import "time"
|
||||
|
||||
// SuiteInformation stats stores stats for the whole suite execution.
|
||||
type SuiteInformation struct {
|
||||
Start, End time.Time
|
||||
TestStats map[string]*TestInformation
|
||||
}
|
||||
|
||||
// TestInformation stores information about the execution of each test.
|
||||
type TestInformation struct {
|
||||
TestName string
|
||||
Start, End time.Time
|
||||
Passed bool
|
||||
}
|
||||
|
||||
func newSuiteInformation() *SuiteInformation {
|
||||
testStats := make(map[string]*TestInformation)
|
||||
|
||||
return &SuiteInformation{
|
||||
TestStats: testStats,
|
||||
}
|
||||
}
|
||||
|
||||
func (s SuiteInformation) start(testName string) {
|
||||
s.TestStats[testName] = &TestInformation{
|
||||
TestName: testName,
|
||||
Start: time.Now(),
|
||||
}
|
||||
}
|
||||
|
||||
func (s SuiteInformation) end(testName string, passed bool) {
|
||||
s.TestStats[testName].End = time.Now()
|
||||
s.TestStats[testName].Passed = passed
|
||||
}
|
||||
|
||||
func (s SuiteInformation) Passed() bool {
|
||||
for _, stats := range s.TestStats {
|
||||
if !stats.Passed {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
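A minimal sketch of consuming these stats (the suite, test, and log format below are assumptions for illustration): a suite that also implements the WithStats interface from interfaces.go receives the populated SuiteInformation once the run finishes:

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/suite"
)

// TimedSuite is a hypothetical suite that reports how long the run took.
type TimedSuite struct {
	suite.Suite
}

func (s *TimedSuite) TestAddition() {
	s.Equal(2, 1+1)
}

// HandleStats satisfies suite.WithStats; it is called after the suite ends.
func (s *TimedSuite) HandleStats(suiteName string, stats *suite.SuiteInformation) {
	s.T().Logf("suite %s passed=%v duration=%s",
		suiteName, stats.Passed(), stats.End.Sub(stats.Start))
}

func TestTimedSuite(t *testing.T) {
	suite.Run(t, new(TimedSuite))
}
```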
248
vendor/github.com/stretchr/testify/suite/suite.go
generated
vendored
Normal file
|
@ -0,0 +1,248 @@
|
|||
package suite
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"runtime/debug"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var allTestsFilter = func(_, _ string) (bool, error) { return true, nil }
|
||||
var matchMethod = flag.String("testify.m", "", "regular expression to select tests of the testify suite to run")
|
||||
|
||||
// Suite is a basic testing suite with methods for storing and
|
||||
// retrieving the current *testing.T context.
|
||||
type Suite struct {
|
||||
*assert.Assertions
|
||||
|
||||
mu sync.RWMutex
|
||||
require *require.Assertions
|
||||
t *testing.T
|
||||
|
||||
// Parent suite to have access to the implemented methods of parent struct
|
||||
s TestingSuite
|
||||
}
|
||||
|
||||
// T retrieves the current *testing.T context.
|
||||
func (suite *Suite) T() *testing.T {
|
||||
suite.mu.RLock()
|
||||
defer suite.mu.RUnlock()
|
||||
return suite.t
|
||||
}
|
||||
|
||||
// SetT sets the current *testing.T context.
|
||||
func (suite *Suite) SetT(t *testing.T) {
|
||||
suite.mu.Lock()
|
||||
defer suite.mu.Unlock()
|
||||
suite.t = t
|
||||
suite.Assertions = assert.New(t)
|
||||
suite.require = require.New(t)
|
||||
}
|
||||
|
||||
// SetS needs to set the current test suite as parent
|
||||
// to get access to the parent methods
|
||||
func (suite *Suite) SetS(s TestingSuite) {
|
||||
suite.s = s
|
||||
}
|
||||
|
||||
// Require returns a require context for suite.
|
||||
func (suite *Suite) Require() *require.Assertions {
|
||||
suite.mu.Lock()
|
||||
defer suite.mu.Unlock()
|
||||
if suite.require == nil {
|
||||
suite.require = require.New(suite.T())
|
||||
}
|
||||
return suite.require
|
||||
}
|
||||
|
||||
// Assert returns an assert context for suite. Normally, you can call
|
||||
// `suite.NoError(expected, actual)`, but for situations where the embedded
|
||||
// methods are overridden (for example, you might want to override
|
||||
// assert.Assertions with require.Assertions), this method is provided so you
|
||||
// can call `suite.Assert().NoError()`.
|
||||
func (suite *Suite) Assert() *assert.Assertions {
|
||||
suite.mu.Lock()
|
||||
defer suite.mu.Unlock()
|
||||
if suite.Assertions == nil {
|
||||
suite.Assertions = assert.New(suite.T())
|
||||
}
|
||||
return suite.Assertions
|
||||
}
|
||||
|
||||
func recoverAndFailOnPanic(t *testing.T) {
|
||||
r := recover()
|
||||
failOnPanic(t, r)
|
||||
}
|
||||
|
||||
func failOnPanic(t *testing.T, r interface{}) {
|
||||
if r != nil {
|
||||
t.Errorf("test panicked: %v\n%s", r, debug.Stack())
|
||||
t.FailNow()
|
||||
}
|
||||
}
|
||||
|
||||
// Run provides suite functionality around golang subtests. It should be
|
||||
// called in place of t.Run(name, func(t *testing.T)) in test suite code.
|
||||
// The passed-in func will be executed as a subtest with a fresh instance of t.
|
||||
// Provides compatibility with go test pkg -run TestSuite/TestName/SubTestName.
|
||||
func (suite *Suite) Run(name string, subtest func()) bool {
|
||||
oldT := suite.T()
|
||||
|
||||
if setupSubTest, ok := suite.s.(SetupSubTest); ok {
|
||||
setupSubTest.SetupSubTest()
|
||||
}
|
||||
|
||||
defer func() {
|
||||
suite.SetT(oldT)
|
||||
if tearDownSubTest, ok := suite.s.(TearDownSubTest); ok {
|
||||
tearDownSubTest.TearDownSubTest()
|
||||
}
|
||||
}()
|
||||
|
||||
return oldT.Run(name, func(t *testing.T) {
|
||||
suite.SetT(t)
|
||||
subtest()
|
||||
})
|
||||
}
|
||||
|
||||
// Run takes a testing suite and runs all of the tests attached
|
||||
// to it.
|
||||
func Run(t *testing.T, suite TestingSuite) {
|
||||
defer recoverAndFailOnPanic(t)
|
||||
|
||||
suite.SetT(t)
|
||||
suite.SetS(suite)
|
||||
|
||||
var suiteSetupDone bool
|
||||
|
||||
var stats *SuiteInformation
|
||||
if _, ok := suite.(WithStats); ok {
|
||||
stats = newSuiteInformation()
|
||||
}
|
||||
|
||||
tests := []testing.InternalTest{}
|
||||
methodFinder := reflect.TypeOf(suite)
|
||||
suiteName := methodFinder.Elem().Name()
|
||||
|
||||
for i := 0; i < methodFinder.NumMethod(); i++ {
|
||||
method := methodFinder.Method(i)
|
||||
|
||||
ok, err := methodFilter(method.Name)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "testify: invalid regexp for -m: %s\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
if !suiteSetupDone {
|
||||
if stats != nil {
|
||||
stats.Start = time.Now()
|
||||
}
|
||||
|
||||
if setupAllSuite, ok := suite.(SetupAllSuite); ok {
|
||||
setupAllSuite.SetupSuite()
|
||||
}
|
||||
|
||||
suiteSetupDone = true
|
||||
}
|
||||
|
||||
test := testing.InternalTest{
|
||||
Name: method.Name,
|
||||
F: func(t *testing.T) {
|
||||
parentT := suite.T()
|
||||
suite.SetT(t)
|
||||
defer recoverAndFailOnPanic(t)
|
||||
defer func() {
|
||||
r := recover()
|
||||
|
||||
if stats != nil {
|
||||
passed := !t.Failed() && r == nil
|
||||
stats.end(method.Name, passed)
|
||||
}
|
||||
|
||||
if afterTestSuite, ok := suite.(AfterTest); ok {
|
||||
afterTestSuite.AfterTest(suiteName, method.Name)
|
||||
}
|
||||
|
||||
if tearDownTestSuite, ok := suite.(TearDownTestSuite); ok {
|
||||
tearDownTestSuite.TearDownTest()
|
||||
}
|
||||
|
||||
suite.SetT(parentT)
|
||||
failOnPanic(t, r)
|
||||
}()
|
||||
|
||||
if setupTestSuite, ok := suite.(SetupTestSuite); ok {
|
||||
setupTestSuite.SetupTest()
|
||||
}
|
||||
if beforeTestSuite, ok := suite.(BeforeTest); ok {
|
||||
beforeTestSuite.BeforeTest(methodFinder.Elem().Name(), method.Name)
|
||||
}
|
||||
|
||||
if stats != nil {
|
||||
stats.start(method.Name)
|
||||
}
|
||||
|
||||
method.Func.Call([]reflect.Value{reflect.ValueOf(suite)})
|
||||
},
|
||||
}
|
||||
tests = append(tests, test)
|
||||
}
|
||||
if suiteSetupDone {
|
||||
defer func() {
|
||||
if tearDownAllSuite, ok := suite.(TearDownAllSuite); ok {
|
||||
tearDownAllSuite.TearDownSuite()
|
||||
}
|
||||
|
||||
if suiteWithStats, measureStats := suite.(WithStats); measureStats {
|
||||
stats.End = time.Now()
|
||||
suiteWithStats.HandleStats(suiteName, stats)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
runTests(t, tests)
|
||||
}
|
||||
|
||||
// Filtering method according to set regular expression
|
||||
// specified command-line argument -m
|
||||
func methodFilter(name string) (bool, error) {
|
||||
if ok, _ := regexp.MatchString("^Test", name); !ok {
|
||||
return false, nil
|
||||
}
|
||||
return regexp.MatchString(*matchMethod, name)
|
||||
}
|
||||
|
||||
func runTests(t testing.TB, tests []testing.InternalTest) {
|
||||
if len(tests) == 0 {
|
||||
t.Log("warning: no tests to run")
|
||||
return
|
||||
}
|
||||
|
||||
r, ok := t.(runner)
|
||||
if !ok { // backwards compatibility with Go 1.6 and below
|
||||
if !testing.RunTests(allTestsFilter, tests) {
|
||||
t.Fail()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
r.Run(test.Name, test.F)
|
||||
}
|
||||
}
|
||||
|
||||
type runner interface {
|
||||
Run(name string, f func(t *testing.T)) bool
|
||||
}
|
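As an illustrative, hedged sketch of Suite.Run together with the SetupSubTest/TearDownSubTest hooks (the suite name and field below are assumptions, not from this diff), each subtest starts from fresh state:

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/suite"
)

// SubtestSuite is a hypothetical suite demonstrating suite.Run-based subtests.
type SubtestSuite struct {
	suite.Suite
	scratch []string
}

// SetupSubTest runs before every s.Run(...) subtest, giving it a clean slice.
func (s *SubtestSuite) SetupSubTest()    { s.scratch = nil }
func (s *SubtestSuite) TearDownSubTest() { s.scratch = nil }

func (s *SubtestSuite) TestAppend() {
	for _, name := range []string{"one", "two"} {
		s.Run(name, func() {
			s.scratch = append(s.scratch, name)
			s.Len(s.scratch, 1) // reset by SetupSubTest, so always exactly one element
		})
	}
}

func TestSubtestSuite(t *testing.T) {
	suite.Run(t, new(SubtestSuite))
}
```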
50
vendor/gopkg.in/yaml.v3/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,50 @@
|
|||
|
||||
This project is covered by two different licenses: MIT and Apache.
|
||||
|
||||
#### MIT License ####
|
||||
|
||||
The following files were ported to Go from C files of libyaml, and thus
|
||||
are still covered by their original MIT license, with the additional
|
||||
copyright starting in 2011 when the project was ported over:
|
||||
|
||||
apic.go emitterc.go parserc.go readerc.go scannerc.go
|
||||
writerc.go yamlh.go yamlprivateh.go
|
||||
|
||||
Copyright (c) 2006-2010 Kirill Simonov
|
||||
Copyright (c) 2006-2011 Kirill Simonov
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
of the Software, and to permit persons to whom the Software is furnished to do
|
||||
so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
||||
### Apache License ###
|
||||
|
||||
All the remaining project files are covered by the Apache license:
|
||||
|
||||
Copyright (c) 2011-2019 Canonical Ltd
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
13
vendor/gopkg.in/yaml.v3/NOTICE
generated
vendored
Normal file
@@ -0,0 +1,13 @@
Copyright 2011-2016 Canonical Ltd.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
150
vendor/gopkg.in/yaml.v3/README.md
generated
vendored
Normal file
@@ -0,0 +1,150 @@
# YAML support for the Go language

Introduction
------------

The yaml package enables Go programs to comfortably encode and decode YAML
values. It was developed within [Canonical](https://www.canonical.com) as
part of the [juju](https://juju.ubuntu.com) project, and is based on a
pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
C library to parse and generate YAML data quickly and reliably.

Compatibility
-------------

The yaml package supports most of YAML 1.2, but preserves some behavior
from 1.1 for backwards compatibility.

Specifically, as of v3 of the yaml package:

 - YAML 1.1 bools (_yes/no, on/off_) are supported as long as they are being
   decoded into a typed bool value. Otherwise they behave as a string. Booleans
   in YAML 1.2 are _true/false_ only.
 - Octals encode and decode as _0777_ per YAML 1.1, rather than _0o777_
   as specified in YAML 1.2, because most parsers still use the old format.
   Octals in the _0o777_ format are supported though, so new files work.
 - Does not support base-60 floats. These are gone from YAML 1.2, and were
   actually never supported by this package as it's clearly a poor choice.
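The first bullet above is easy to verify directly; here is a minimal sketch (not part of this diff) showing how the YAML 1.1 literal `yes` decodes differently depending on the target type:

```go
package main

import (
    "fmt"
    "log"

    "gopkg.in/yaml.v3"
)

func main() {
    src := []byte("v: yes")

    // Decoded into a typed bool field, the YAML 1.1 literal "yes" becomes true.
    var typed struct{ V bool }
    if err := yaml.Unmarshal(src, &typed); err != nil {
        log.Fatalf("error: %v", err)
    }
    fmt.Println(typed.V) // true

    // Decoded into an untyped value, it stays a plain string.
    var untyped map[string]interface{}
    if err := yaml.Unmarshal(src, &untyped); err != nil {
        log.Fatalf("error: %v", err)
    }
    fmt.Printf("%T %q\n", untyped["v"], untyped["v"]) // string "yes"
}
```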

Installation and usage
----------------------

The import path for the package is *gopkg.in/yaml.v3*.

To install it, run:

    go get gopkg.in/yaml.v3

API documentation
-----------------

If opened in a browser, the import path itself leads to the API documentation:

  - [https://gopkg.in/yaml.v3](https://gopkg.in/yaml.v3)

API stability
-------------

The package API for yaml v3 will remain stable as described in [gopkg.in](https://gopkg.in).


License
-------

The yaml package is licensed under the MIT and Apache License 2.0 licenses.
Please see the LICENSE file for details.


Example
-------

```Go
package main

import (
    "fmt"
    "log"

    "gopkg.in/yaml.v3"
)

var data = `
a: Easy!
b:
  c: 2
  d: [3, 4]
`

// Note: struct fields must be public in order for unmarshal to
// correctly populate the data.
type T struct {
    A string
    B struct {
        RenamedC int   `yaml:"c"`
        D        []int `yaml:",flow"`
    }
}

func main() {
    t := T{}

    err := yaml.Unmarshal([]byte(data), &t)
    if err != nil {
        log.Fatalf("error: %v", err)
    }
    fmt.Printf("--- t:\n%v\n\n", t)

    d, err := yaml.Marshal(&t)
    if err != nil {
        log.Fatalf("error: %v", err)
    }
    fmt.Printf("--- t dump:\n%s\n\n", string(d))

    m := make(map[interface{}]interface{})

    err = yaml.Unmarshal([]byte(data), &m)
    if err != nil {
        log.Fatalf("error: %v", err)
    }
    fmt.Printf("--- m:\n%v\n\n", m)

    d, err = yaml.Marshal(&m)
    if err != nil {
        log.Fatalf("error: %v", err)
    }
    fmt.Printf("--- m dump:\n%s\n\n", string(d))
}
```

This example will generate the following output:

```
--- t:
{Easy! {2 [3 4]}}

--- t dump:
a: Easy!
b:
  c: 2
  d: [3, 4]


--- m:
map[a:Easy! b:map[c:2 d:[3 4]]]

--- m dump:
a: Easy!
b:
  c: 2
  d:
  - 3
  - 4
```
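The Example above sticks to the one-shot `yaml.Marshal`/`yaml.Unmarshal` helpers. yaml.v3 also exposes streaming `Decoder`/`Encoder` types; the following is a minimal sketch (not part of the vendored files in this diff) of that API:

```go
package main

import (
    "log"
    "os"
    "strings"

    "gopkg.in/yaml.v3"
)

func main() {
    // Stream-decode a document from any io.Reader.
    var doc map[string]interface{}
    dec := yaml.NewDecoder(strings.NewReader("a: 1\nb: [2, 3]\n"))
    if err := dec.Decode(&doc); err != nil {
        log.Fatalf("error: %v", err)
    }

    // Re-encode it to stdout with two-space indentation.
    enc := yaml.NewEncoder(os.Stdout)
    enc.SetIndent(2)
    if err := enc.Encode(doc); err != nil {
        log.Fatalf("error: %v", err)
    }
    if err := enc.Close(); err != nil {
        log.Fatalf("error: %v", err)
    }
}
```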
747
vendor/gopkg.in/yaml.v3/apic.go
generated
vendored
Normal file
|
@ -0,0 +1,747 @@
|
|||
//
|
||||
// Copyright (c) 2011-2019 Canonical Ltd
|
||||
// Copyright (c) 2006-2010 Kirill Simonov
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
// this software and associated documentation files (the "Software"), to deal in
|
||||
// the Software without restriction, including without limitation the rights to
|
||||
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
// of the Software, and to permit persons to whom the Software is furnished to do
|
||||
// so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in all
|
||||
// copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
// SOFTWARE.
|
||||
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
|
||||
//fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
|
||||
|
||||
// Check if we can move the queue at the beginning of the buffer.
|
||||
if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
|
||||
if parser.tokens_head != len(parser.tokens) {
|
||||
copy(parser.tokens, parser.tokens[parser.tokens_head:])
|
||||
}
|
||||
parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
|
||||
parser.tokens_head = 0
|
||||
}
|
||||
parser.tokens = append(parser.tokens, *token)
|
||||
if pos < 0 {
|
||||
return
|
||||
}
|
||||
copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
|
||||
parser.tokens[parser.tokens_head+pos] = *token
|
||||
}
|
||||
|
||||
// Create a new parser object.
|
||||
func yaml_parser_initialize(parser *yaml_parser_t) bool {
|
||||
*parser = yaml_parser_t{
|
||||
raw_buffer: make([]byte, 0, input_raw_buffer_size),
|
||||
buffer: make([]byte, 0, input_buffer_size),
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Destroy a parser object.
|
||||
func yaml_parser_delete(parser *yaml_parser_t) {
|
||||
*parser = yaml_parser_t{}
|
||||
}
|
||||
|
||||
// String read handler.
|
||||
func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
|
||||
if parser.input_pos == len(parser.input) {
|
||||
return 0, io.EOF
|
||||
}
|
||||
n = copy(buffer, parser.input[parser.input_pos:])
|
||||
parser.input_pos += n
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// Reader read handler.
|
||||
func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
|
||||
return parser.input_reader.Read(buffer)
|
||||
}
|
||||
|
||||
// Set a string input.
|
||||
func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
|
||||
if parser.read_handler != nil {
|
||||
panic("must set the input source only once")
|
||||
}
|
||||
parser.read_handler = yaml_string_read_handler
|
||||
parser.input = input
|
||||
parser.input_pos = 0
|
||||
}
|
||||
|
||||
// Set a file input.
|
||||
func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
|
||||
if parser.read_handler != nil {
|
||||
panic("must set the input source only once")
|
||||
}
|
||||
parser.read_handler = yaml_reader_read_handler
|
||||
parser.input_reader = r
|
||||
}
|
||||
|
||||
// Set the source encoding.
|
||||
func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
|
||||
if parser.encoding != yaml_ANY_ENCODING {
|
||||
panic("must set the encoding only once")
|
||||
}
|
||||
parser.encoding = encoding
|
||||
}
|
||||
|
||||
// Create a new emitter object.
|
||||
func yaml_emitter_initialize(emitter *yaml_emitter_t) {
|
||||
*emitter = yaml_emitter_t{
|
||||
buffer: make([]byte, output_buffer_size),
|
||||
raw_buffer: make([]byte, 0, output_raw_buffer_size),
|
||||
states: make([]yaml_emitter_state_t, 0, initial_stack_size),
|
||||
events: make([]yaml_event_t, 0, initial_queue_size),
|
||||
best_width: -1,
|
||||
}
|
||||
}
|
||||
|
||||
// Destroy an emitter object.
|
||||
func yaml_emitter_delete(emitter *yaml_emitter_t) {
|
||||
*emitter = yaml_emitter_t{}
|
||||
}
|
||||
|
||||
// String write handler.
|
||||
func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
|
||||
*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
|
||||
return nil
|
||||
}
|
||||
|
||||
// yaml_writer_write_handler uses emitter.output_writer to write the
|
||||
// emitted text.
|
||||
func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
|
||||
_, err := emitter.output_writer.Write(buffer)
|
||||
return err
|
||||
}
|
||||
|
||||
// Set a string output.
|
||||
func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
|
||||
if emitter.write_handler != nil {
|
||||
panic("must set the output target only once")
|
||||
}
|
||||
emitter.write_handler = yaml_string_write_handler
|
||||
emitter.output_buffer = output_buffer
|
||||
}
|
||||
|
||||
// Set a file output.
|
||||
func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
|
||||
if emitter.write_handler != nil {
|
||||
panic("must set the output target only once")
|
||||
}
|
||||
emitter.write_handler = yaml_writer_write_handler
|
||||
emitter.output_writer = w
|
||||
}
|
||||
|
||||
// Set the output encoding.
|
||||
func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
|
||||
if emitter.encoding != yaml_ANY_ENCODING {
|
||||
panic("must set the output encoding only once")
|
||||
}
|
||||
emitter.encoding = encoding
|
||||
}
|
||||
|
||||
// Set the canonical output style.
|
||||
func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
|
||||
emitter.canonical = canonical
|
||||
}
|
||||
|
||||
// Set the indentation increment.
|
||||
func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
|
||||
if indent < 2 || indent > 9 {
|
||||
indent = 2
|
||||
}
|
||||
emitter.best_indent = indent
|
||||
}
|
||||
|
||||
// Set the preferred line width.
|
||||
func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
|
||||
if width < 0 {
|
||||
width = -1
|
||||
}
|
||||
emitter.best_width = width
|
||||
}
|
||||
|
||||
// Set if unescaped non-ASCII characters are allowed.
|
||||
func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
|
||||
emitter.unicode = unicode
|
||||
}
|
||||
|
||||
// Set the preferred line break character.
|
||||
func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
|
||||
emitter.line_break = line_break
|
||||
}
|
||||
|
||||
///*
|
||||
// * Destroy a token object.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(void)
|
||||
//yaml_token_delete(yaml_token_t *token)
|
||||
//{
|
||||
// assert(token); // Non-NULL token object expected.
|
||||
//
|
||||
// switch (token.type)
|
||||
// {
|
||||
// case YAML_TAG_DIRECTIVE_TOKEN:
|
||||
// yaml_free(token.data.tag_directive.handle);
|
||||
// yaml_free(token.data.tag_directive.prefix);
|
||||
// break;
|
||||
//
|
||||
// case YAML_ALIAS_TOKEN:
|
||||
// yaml_free(token.data.alias.value);
|
||||
// break;
|
||||
//
|
||||
// case YAML_ANCHOR_TOKEN:
|
||||
// yaml_free(token.data.anchor.value);
|
||||
// break;
|
||||
//
|
||||
// case YAML_TAG_TOKEN:
|
||||
// yaml_free(token.data.tag.handle);
|
||||
// yaml_free(token.data.tag.suffix);
|
||||
// break;
|
||||
//
|
||||
// case YAML_SCALAR_TOKEN:
|
||||
// yaml_free(token.data.scalar.value);
|
||||
// break;
|
||||
//
|
||||
// default:
|
||||
// break;
|
||||
// }
|
||||
//
|
||||
// memset(token, 0, sizeof(yaml_token_t));
|
||||
//}
|
||||
//
|
||||
///*
|
||||
// * Check if a string is a valid UTF-8 sequence.
|
||||
// *
|
||||
// * Check 'reader.c' for more details on UTF-8 encoding.
|
||||
// */
|
||||
//
|
||||
//static int
|
||||
//yaml_check_utf8(yaml_char_t *start, size_t length)
|
||||
//{
|
||||
// yaml_char_t *end = start+length;
|
||||
// yaml_char_t *pointer = start;
|
||||
//
|
||||
// while (pointer < end) {
|
||||
// unsigned char octet;
|
||||
// unsigned int width;
|
||||
// unsigned int value;
|
||||
// size_t k;
|
||||
//
|
||||
// octet = pointer[0];
|
||||
// width = (octet & 0x80) == 0x00 ? 1 :
|
||||
// (octet & 0xE0) == 0xC0 ? 2 :
|
||||
// (octet & 0xF0) == 0xE0 ? 3 :
|
||||
// (octet & 0xF8) == 0xF0 ? 4 : 0;
|
||||
// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
|
||||
// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
|
||||
// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
|
||||
// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
|
||||
// if (!width) return 0;
|
||||
// if (pointer+width > end) return 0;
|
||||
// for (k = 1; k < width; k ++) {
|
||||
// octet = pointer[k];
|
||||
// if ((octet & 0xC0) != 0x80) return 0;
|
||||
// value = (value << 6) + (octet & 0x3F);
|
||||
// }
|
||||
// if (!((width == 1) ||
|
||||
// (width == 2 && value >= 0x80) ||
|
||||
// (width == 3 && value >= 0x800) ||
|
||||
// (width == 4 && value >= 0x10000))) return 0;
|
||||
//
|
||||
// pointer += width;
|
||||
// }
|
||||
//
|
||||
// return 1;
|
||||
//}
|
||||
//
|
||||
|
||||
// Create STREAM-START.
|
||||
func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
|
||||
*event = yaml_event_t{
|
||||
typ: yaml_STREAM_START_EVENT,
|
||||
encoding: encoding,
|
||||
}
|
||||
}
|
||||
|
||||
// Create STREAM-END.
|
||||
func yaml_stream_end_event_initialize(event *yaml_event_t) {
|
||||
*event = yaml_event_t{
|
||||
typ: yaml_STREAM_END_EVENT,
|
||||
}
|
||||
}
|
||||
|
||||
// Create DOCUMENT-START.
|
||||
func yaml_document_start_event_initialize(
|
||||
event *yaml_event_t,
|
||||
version_directive *yaml_version_directive_t,
|
||||
tag_directives []yaml_tag_directive_t,
|
||||
implicit bool,
|
||||
) {
|
||||
*event = yaml_event_t{
|
||||
typ: yaml_DOCUMENT_START_EVENT,
|
||||
version_directive: version_directive,
|
||||
tag_directives: tag_directives,
|
||||
implicit: implicit,
|
||||
}
|
||||
}
|
||||
|
||||
// Create DOCUMENT-END.
|
||||
func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
|
||||
*event = yaml_event_t{
|
||||
typ: yaml_DOCUMENT_END_EVENT,
|
||||
implicit: implicit,
|
||||
}
|
||||
}
|
||||
|
||||
// Create ALIAS.
|
||||
func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool {
|
||||
*event = yaml_event_t{
|
||||
typ: yaml_ALIAS_EVENT,
|
||||
anchor: anchor,
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Create SCALAR.
|
||||
func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
|
||||
*event = yaml_event_t{
|
||||
typ: yaml_SCALAR_EVENT,
|
||||
anchor: anchor,
|
||||
tag: tag,
|
||||
value: value,
|
||||
implicit: plain_implicit,
|
||||
quoted_implicit: quoted_implicit,
|
||||
style: yaml_style_t(style),
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Create SEQUENCE-START.
|
||||
func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
|
||||
*event = yaml_event_t{
|
||||
typ: yaml_SEQUENCE_START_EVENT,
|
||||
anchor: anchor,
|
||||
tag: tag,
|
||||
implicit: implicit,
|
||||
style: yaml_style_t(style),
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Create SEQUENCE-END.
|
||||
func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
|
||||
*event = yaml_event_t{
|
||||
typ: yaml_SEQUENCE_END_EVENT,
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Create MAPPING-START.
|
||||
func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
|
||||
*event = yaml_event_t{
|
||||
typ: yaml_MAPPING_START_EVENT,
|
||||
anchor: anchor,
|
||||
tag: tag,
|
||||
implicit: implicit,
|
||||
style: yaml_style_t(style),
|
||||
}
|
||||
}
|
||||
|
||||
// Create MAPPING-END.
|
||||
func yaml_mapping_end_event_initialize(event *yaml_event_t) {
|
||||
*event = yaml_event_t{
|
||||
typ: yaml_MAPPING_END_EVENT,
|
||||
}
|
||||
}
|
||||
|
||||
// Destroy an event object.
|
||||
func yaml_event_delete(event *yaml_event_t) {
|
||||
*event = yaml_event_t{}
|
||||
}
|
||||
|
||||
///*
|
||||
// * Create a document object.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(int)
|
||||
//yaml_document_initialize(document *yaml_document_t,
|
||||
// version_directive *yaml_version_directive_t,
|
||||
// tag_directives_start *yaml_tag_directive_t,
|
||||
// tag_directives_end *yaml_tag_directive_t,
|
||||
// start_implicit int, end_implicit int)
|
||||
//{
|
||||
// struct {
|
||||
// error yaml_error_type_t
|
||||
// } context
|
||||
// struct {
|
||||
// start *yaml_node_t
|
||||
// end *yaml_node_t
|
||||
// top *yaml_node_t
|
||||
// } nodes = { NULL, NULL, NULL }
|
||||
// version_directive_copy *yaml_version_directive_t = NULL
|
||||
// struct {
|
||||
// start *yaml_tag_directive_t
|
||||
// end *yaml_tag_directive_t
|
||||
// top *yaml_tag_directive_t
|
||||
// } tag_directives_copy = { NULL, NULL, NULL }
|
||||
// value yaml_tag_directive_t = { NULL, NULL }
|
||||
// mark yaml_mark_t = { 0, 0, 0 }
|
||||
//
|
||||
// assert(document) // Non-NULL document object is expected.
|
||||
// assert((tag_directives_start && tag_directives_end) ||
|
||||
// (tag_directives_start == tag_directives_end))
|
||||
// // Valid tag directives are expected.
|
||||
//
|
||||
// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
|
||||
//
|
||||
// if (version_directive) {
|
||||
// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
|
||||
// if (!version_directive_copy) goto error
|
||||
// version_directive_copy.major = version_directive.major
|
||||
// version_directive_copy.minor = version_directive.minor
|
||||
// }
|
||||
//
|
||||
// if (tag_directives_start != tag_directives_end) {
|
||||
// tag_directive *yaml_tag_directive_t
|
||||
// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
|
||||
// goto error
|
||||
// for (tag_directive = tag_directives_start
|
||||
// tag_directive != tag_directives_end; tag_directive ++) {
|
||||
// assert(tag_directive.handle)
|
||||
// assert(tag_directive.prefix)
|
||||
// if (!yaml_check_utf8(tag_directive.handle,
|
||||
// strlen((char *)tag_directive.handle)))
|
||||
// goto error
|
||||
// if (!yaml_check_utf8(tag_directive.prefix,
|
||||
// strlen((char *)tag_directive.prefix)))
|
||||
// goto error
|
||||
// value.handle = yaml_strdup(tag_directive.handle)
|
||||
// value.prefix = yaml_strdup(tag_directive.prefix)
|
||||
// if (!value.handle || !value.prefix) goto error
|
||||
// if (!PUSH(&context, tag_directives_copy, value))
|
||||
// goto error
|
||||
// value.handle = NULL
|
||||
// value.prefix = NULL
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
|
||||
// tag_directives_copy.start, tag_directives_copy.top,
|
||||
// start_implicit, end_implicit, mark, mark)
|
||||
//
|
||||
// return 1
|
||||
//
|
||||
//error:
|
||||
// STACK_DEL(&context, nodes)
|
||||
// yaml_free(version_directive_copy)
|
||||
// while (!STACK_EMPTY(&context, tag_directives_copy)) {
|
||||
// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
|
||||
// yaml_free(value.handle)
|
||||
// yaml_free(value.prefix)
|
||||
// }
|
||||
// STACK_DEL(&context, tag_directives_copy)
|
||||
// yaml_free(value.handle)
|
||||
// yaml_free(value.prefix)
|
||||
//
|
||||
// return 0
|
||||
//}
|
||||
//
|
||||
///*
|
||||
// * Destroy a document object.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(void)
|
||||
//yaml_document_delete(document *yaml_document_t)
|
||||
//{
|
||||
// struct {
|
||||
// error yaml_error_type_t
|
||||
// } context
|
||||
// tag_directive *yaml_tag_directive_t
|
||||
//
|
||||
// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
|
||||
//
|
||||
// assert(document) // Non-NULL document object is expected.
|
||||
//
|
||||
// while (!STACK_EMPTY(&context, document.nodes)) {
|
||||
// node yaml_node_t = POP(&context, document.nodes)
|
||||
// yaml_free(node.tag)
|
||||
// switch (node.type) {
|
||||
// case YAML_SCALAR_NODE:
|
||||
// yaml_free(node.data.scalar.value)
|
||||
// break
|
||||
// case YAML_SEQUENCE_NODE:
|
||||
// STACK_DEL(&context, node.data.sequence.items)
|
||||
// break
|
||||
// case YAML_MAPPING_NODE:
|
||||
// STACK_DEL(&context, node.data.mapping.pairs)
|
||||
// break
|
||||
// default:
|
||||
// assert(0) // Should not happen.
|
||||
// }
|
||||
// }
|
||||
// STACK_DEL(&context, document.nodes)
|
||||
//
|
||||
// yaml_free(document.version_directive)
|
||||
// for (tag_directive = document.tag_directives.start
|
||||
// tag_directive != document.tag_directives.end
|
||||
// tag_directive++) {
|
||||
// yaml_free(tag_directive.handle)
|
||||
// yaml_free(tag_directive.prefix)
|
||||
// }
|
||||
// yaml_free(document.tag_directives.start)
|
||||
//
|
||||
// memset(document, 0, sizeof(yaml_document_t))
|
||||
//}
|
||||
//
|
||||
///**
|
||||
// * Get a document node.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(yaml_node_t *)
|
||||
//yaml_document_get_node(document *yaml_document_t, index int)
|
||||
//{
|
||||
// assert(document) // Non-NULL document object is expected.
|
||||
//
|
||||
// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
|
||||
// return document.nodes.start + index - 1
|
||||
// }
|
||||
// return NULL
|
||||
//}
|
||||
//
|
||||
///**
|
||||
// * Get the root object.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(yaml_node_t *)
|
||||
//yaml_document_get_root_node(document *yaml_document_t)
|
||||
//{
|
||||
// assert(document) // Non-NULL document object is expected.
|
||||
//
|
||||
// if (document.nodes.top != document.nodes.start) {
|
||||
// return document.nodes.start
|
||||
// }
|
||||
// return NULL
|
||||
//}
|
||||
//
|
||||
///*
|
||||
// * Add a scalar node to a document.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(int)
|
||||
//yaml_document_add_scalar(document *yaml_document_t,
|
||||
// tag *yaml_char_t, value *yaml_char_t, length int,
|
||||
// style yaml_scalar_style_t)
|
||||
//{
|
||||
// struct {
|
||||
// error yaml_error_type_t
|
||||
// } context
|
||||
// mark yaml_mark_t = { 0, 0, 0 }
|
||||
// tag_copy *yaml_char_t = NULL
|
||||
// value_copy *yaml_char_t = NULL
|
||||
// node yaml_node_t
|
||||
//
|
||||
// assert(document) // Non-NULL document object is expected.
|
||||
// assert(value) // Non-NULL value is expected.
|
||||
//
|
||||
// if (!tag) {
|
||||
// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
|
||||
// }
|
||||
//
|
||||
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
|
||||
// tag_copy = yaml_strdup(tag)
|
||||
// if (!tag_copy) goto error
|
||||
//
|
||||
// if (length < 0) {
|
||||
// length = strlen((char *)value)
|
||||
// }
|
||||
//
|
||||
// if (!yaml_check_utf8(value, length)) goto error
|
||||
// value_copy = yaml_malloc(length+1)
|
||||
// if (!value_copy) goto error
|
||||
// memcpy(value_copy, value, length)
|
||||
// value_copy[length] = '\0'
|
||||
//
|
||||
// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
|
||||
// if (!PUSH(&context, document.nodes, node)) goto error
|
||||
//
|
||||
// return document.nodes.top - document.nodes.start
|
||||
//
|
||||
//error:
|
||||
// yaml_free(tag_copy)
|
||||
// yaml_free(value_copy)
|
||||
//
|
||||
// return 0
|
||||
//}
|
||||
//
|
||||
///*
|
||||
// * Add a sequence node to a document.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(int)
|
||||
//yaml_document_add_sequence(document *yaml_document_t,
|
||||
// tag *yaml_char_t, style yaml_sequence_style_t)
|
||||
//{
|
||||
// struct {
|
||||
// error yaml_error_type_t
|
||||
// } context
|
||||
// mark yaml_mark_t = { 0, 0, 0 }
|
||||
// tag_copy *yaml_char_t = NULL
|
||||
// struct {
|
||||
// start *yaml_node_item_t
|
||||
// end *yaml_node_item_t
|
||||
// top *yaml_node_item_t
|
||||
// } items = { NULL, NULL, NULL }
|
||||
// node yaml_node_t
|
||||
//
|
||||
// assert(document) // Non-NULL document object is expected.
|
||||
//
|
||||
// if (!tag) {
|
||||
// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
|
||||
// }
|
||||
//
|
||||
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
|
||||
// tag_copy = yaml_strdup(tag)
|
||||
// if (!tag_copy) goto error
|
||||
//
|
||||
// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
|
||||
//
|
||||
// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
|
||||
// style, mark, mark)
|
||||
// if (!PUSH(&context, document.nodes, node)) goto error
|
||||
//
|
||||
// return document.nodes.top - document.nodes.start
|
||||
//
|
||||
//error:
|
||||
// STACK_DEL(&context, items)
|
||||
// yaml_free(tag_copy)
|
||||
//
|
||||
// return 0
|
||||
//}
|
||||
//
|
||||
///*
|
||||
// * Add a mapping node to a document.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(int)
|
||||
//yaml_document_add_mapping(document *yaml_document_t,
|
||||
// tag *yaml_char_t, style yaml_mapping_style_t)
|
||||
//{
|
||||
// struct {
|
||||
// error yaml_error_type_t
|
||||
// } context
|
||||
// mark yaml_mark_t = { 0, 0, 0 }
|
||||
// tag_copy *yaml_char_t = NULL
|
||||
// struct {
|
||||
// start *yaml_node_pair_t
|
||||
// end *yaml_node_pair_t
|
||||
// top *yaml_node_pair_t
|
||||
// } pairs = { NULL, NULL, NULL }
|
||||
// node yaml_node_t
|
||||
//
|
||||
// assert(document) // Non-NULL document object is expected.
|
||||
//
|
||||
// if (!tag) {
|
||||
// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
|
||||
// }
|
||||
//
|
||||
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
|
||||
// tag_copy = yaml_strdup(tag)
|
||||
// if (!tag_copy) goto error
|
||||
//
|
||||
// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
|
||||
//
|
||||
// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
|
||||
// style, mark, mark)
|
||||
// if (!PUSH(&context, document.nodes, node)) goto error
|
||||
//
|
||||
// return document.nodes.top - document.nodes.start
|
||||
//
|
||||
//error:
|
||||
// STACK_DEL(&context, pairs)
|
||||
// yaml_free(tag_copy)
|
||||
//
|
||||
// return 0
|
||||
//}
|
||||
//
|
||||
///*
|
||||
// * Append an item to a sequence node.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(int)
|
||||
//yaml_document_append_sequence_item(document *yaml_document_t,
|
||||
// sequence int, item int)
|
||||
//{
|
||||
// struct {
|
||||
// error yaml_error_type_t
|
||||
// } context
|
||||
//
|
||||
// assert(document) // Non-NULL document is required.
|
||||
// assert(sequence > 0
|
||||
// && document.nodes.start + sequence <= document.nodes.top)
|
||||
// // Valid sequence id is required.
|
||||
// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
|
||||
// // A sequence node is required.
|
||||
// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
|
||||
// // Valid item id is required.
|
||||
//
|
||||
// if (!PUSH(&context,
|
||||
// document.nodes.start[sequence-1].data.sequence.items, item))
|
||||
// return 0
|
||||
//
|
||||
// return 1
|
||||
//}
|
||||
//
|
||||
///*
|
||||
// * Append a pair of a key and a value to a mapping node.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(int)
|
||||
//yaml_document_append_mapping_pair(document *yaml_document_t,
|
||||
// mapping int, key int, value int)
|
||||
//{
|
||||
// struct {
|
||||
// error yaml_error_type_t
|
||||
// } context
|
||||
//
|
||||
// pair yaml_node_pair_t
|
||||
//
|
||||
// assert(document) // Non-NULL document is required.
|
||||
// assert(mapping > 0
|
||||
// && document.nodes.start + mapping <= document.nodes.top)
|
||||
// // Valid mapping id is required.
|
||||
// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
|
||||
// // A mapping node is required.
|
||||
// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
|
||||
// // Valid key id is required.
|
||||
// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
|
||||
// // Valid value id is required.
|
||||
//
|
||||
// pair.key = key
|
||||
// pair.value = value
|
||||
//
|
||||
// if (!PUSH(&context,
|
||||
// document.nodes.start[mapping-1].data.mapping.pairs, pair))
|
||||
// return 0
|
||||
//
|
||||
// return 1
|
||||
//}
|
||||
//
|
||||
//
|