moby/libnetwork/agent.pb.go


// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: agent.proto
package libnetwork
import (
fmt "fmt"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
io "io"
math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type PortConfig_Protocol int32
const (
ProtocolTCP PortConfig_Protocol = 0
ProtocolUDP PortConfig_Protocol = 1
ProtocolSCTP PortConfig_Protocol = 2
)
var PortConfig_Protocol_name = map[int32]string{
0: "TCP",
1: "UDP",
2: "SCTP",
}
var PortConfig_Protocol_value = map[string]int32{
"TCP": 0,
"UDP": 1,
"SCTP": 2,
}
func (x PortConfig_Protocol) String() string {
return proto.EnumName(PortConfig_Protocol_name, int32(x))
}
func (PortConfig_Protocol) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_56ede974c0020f77, []int{1, 0}
}
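// Illustrative sketch (not generated code): the generated name/value maps
// translate between the wire enum and its short upper-case form ("TCP",
// "UDP", "SCTP"), e.g. when parsing a user-supplied protocol name.
func parseProtocolExample(name string) (PortConfig_Protocol, error) {
	if v, ok := PortConfig_Protocol_value[name]; ok {
		return PortConfig_Protocol(v), nil
	}
	return ProtocolTCP, fmt.Errorf("unknown protocol %q", name)
}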
// EndpointRecord specifies all the endpoint specific information that
// needs to be gossiped to nodes participating in the network.
type EndpointRecord struct {
// Name of the container
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Service name of the service to which this endpoint belongs.
ServiceName string `protobuf:"bytes,2,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"`
// Service ID of the service to which this endpoint belongs.
ServiceID string `protobuf:"bytes,3,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
// Virtual IP of the service to which this endpoint belongs.
VirtualIP string `protobuf:"bytes,4,opt,name=virtual_ip,json=virtualIp,proto3" json:"virtual_ip,omitempty"`
// IP assigned to this endpoint.
EndpointIP string `protobuf:"bytes,5,opt,name=endpoint_ip,json=endpointIp,proto3" json:"endpoint_ip,omitempty"`
// IngressPorts exposed by the service to which this endpoint belongs.
IngressPorts []*PortConfig `protobuf:"bytes,6,rep,name=ingress_ports,json=ingressPorts,proto3" json:"ingress_ports,omitempty"`
// A list of aliases which are alternate names for the service
Aliases []string `protobuf:"bytes,7,rep,name=aliases,proto3" json:"aliases,omitempty"`
// List of task-specific aliases.
TaskAliases []string `protobuf:"bytes,8,rep,name=task_aliases,json=taskAliases,proto3" json:"task_aliases,omitempty"`
// Whether this endpoint's service has been disabled.
ServiceDisabled bool `protobuf:"varint,9,opt,name=service_disabled,json=serviceDisabled,proto3" json:"service_disabled,omitempty"`
}
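// Illustrative sketch (not generated code; all values are hypothetical): a
// minimal EndpointRecord as the agent might gossip it for one task of a
// service. In practice libnetwork's agent builds these, not callers.
var exampleEndpointRecord = &EndpointRecord{
	Name:        "web.1.f00ba4",
	ServiceName: "web",
	ServiceID:   "examplesvcid",
	VirtualIP:   "10.0.0.2/24",
	EndpointIP:  "10.0.0.3/24",
	Aliases:     []string{"web-alias"},
}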
func (m *EndpointRecord) Reset() { *m = EndpointRecord{} }
func (*EndpointRecord) ProtoMessage() {}
func (*EndpointRecord) Descriptor() ([]byte, []int) {
return fileDescriptor_56ede974c0020f77, []int{0}
}
func (m *EndpointRecord) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *EndpointRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_EndpointRecord.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *EndpointRecord) XXX_Merge(src proto.Message) {
xxx_messageInfo_EndpointRecord.Merge(m, src)
}
func (m *EndpointRecord) XXX_Size() int {
return m.Size()
}
func (m *EndpointRecord) XXX_DiscardUnknown() {
xxx_messageInfo_EndpointRecord.DiscardUnknown(m)
}
var xxx_messageInfo_EndpointRecord proto.InternalMessageInfo
func (m *EndpointRecord) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *EndpointRecord) GetServiceName() string {
if m != nil {
return m.ServiceName
}
return ""
}
func (m *EndpointRecord) GetServiceID() string {
if m != nil {
return m.ServiceID
}
return ""
}
func (m *EndpointRecord) GetVirtualIP() string {
if m != nil {
return m.VirtualIP
}
return ""
}
func (m *EndpointRecord) GetEndpointIP() string {
if m != nil {
return m.EndpointIP
}
return ""
}
func (m *EndpointRecord) GetIngressPorts() []*PortConfig {
if m != nil {
return m.IngressPorts
}
return nil
}
func (m *EndpointRecord) GetAliases() []string {
if m != nil {
return m.Aliases
}
return nil
}
func (m *EndpointRecord) GetTaskAliases() []string {
if m != nil {
return m.TaskAliases
}
return nil
}
func (m *EndpointRecord) GetServiceDisabled() bool {
if m != nil {
return m.ServiceDisabled
}
return false
}
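// The generated getters are nil-safe: called on a nil *EndpointRecord they
// return the field's zero value instead of panicking. Illustrative sketch:
func nilSafeGetterExample() {
	var r *EndpointRecord // nil receiver
	_ = r.GetName()            // "" rather than a nil-pointer panic
	_ = r.GetServiceDisabled() // false
}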
// PortConfig specifies an exposed port which can be
// addressed using the given name. This can be later queried
// using a service discovery API or a DNS SRV query. The node
// port specifies a port that can be used to address this
// service from outside the cluster by sending a connection
// request on this port to any node in the cluster.
type PortConfig struct {
// Name for the port. If provided, the port information can
// be queried using the name, as in a DNS SRV query.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Protocol for the port which is exposed.
Protocol PortConfig_Protocol `protobuf:"varint,2,opt,name=protocol,proto3,enum=libnetwork.PortConfig_Protocol" json:"protocol,omitempty"`
// The port which the application is exposing and is bound to.
TargetPort uint32 `protobuf:"varint,3,opt,name=target_port,json=targetPort,proto3" json:"target_port,omitempty"`
// PublishedPort specifies the port on which the service is
// exposed on all nodes on the cluster. If not specified, an
// arbitrary port in the node port range is allocated by the
// system. If specified, it should be within the node port
// range and it should be available.
PublishedPort uint32 `protobuf:"varint,4,opt,name=published_port,json=publishedPort,proto3" json:"published_port,omitempty"`
}
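// Illustrative sketch (hypothetical values): a PortConfig for an HTTP service
// published on a fixed node port across the cluster.
var examplePortConfig = &PortConfig{
	Name:          "http",
	Protocol:      ProtocolTCP,
	TargetPort:    80,    // port the application itself listens on
	PublishedPort: 30080, // node port; system-allocated when left 0
}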
func (m *PortConfig) Reset() { *m = PortConfig{} }
func (*PortConfig) ProtoMessage() {}
func (*PortConfig) Descriptor() ([]byte, []int) {
return fileDescriptor_56ede974c0020f77, []int{1}
}
func (m *PortConfig) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *PortConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_PortConfig.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *PortConfig) XXX_Merge(src proto.Message) {
xxx_messageInfo_PortConfig.Merge(m, src)
}
func (m *PortConfig) XXX_Size() int {
return m.Size()
}
func (m *PortConfig) XXX_DiscardUnknown() {
xxx_messageInfo_PortConfig.DiscardUnknown(m)
}
var xxx_messageInfo_PortConfig proto.InternalMessageInfo
func (m *PortConfig) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *PortConfig) GetProtocol() PortConfig_Protocol {
if m != nil {
return m.Protocol
}
return ProtocolTCP
}
func (m *PortConfig) GetTargetPort() uint32 {
if m != nil {
return m.TargetPort
}
return 0
}
func (m *PortConfig) GetPublishedPort() uint32 {
if m != nil {
return m.PublishedPort
}
return 0
}
func init() {
proto.RegisterEnum("libnetwork.PortConfig_Protocol", PortConfig_Protocol_name, PortConfig_Protocol_value)
proto.RegisterType((*EndpointRecord)(nil), "libnetwork.EndpointRecord")
proto.RegisterType((*PortConfig)(nil), "libnetwork.PortConfig")
}
func init() { proto.RegisterFile("agent.proto", fileDescriptor_56ede974c0020f77) }
var fileDescriptor_56ede974c0020f77 = []byte{
// 486 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0x31, 0x6f, 0xd3, 0x40,
0x18, 0x86, 0xed, 0x24, 0xb4, 0xf1, 0xe7, 0x24, 0x8d, 0x6e, 0x40, 0x56, 0x86, 0x8b, 0x89, 0x40,
0x0a, 0x12, 0x38, 0x52, 0x19, 0x3b, 0xd1, 0x84, 0xc1, 0x0b, 0xb2, 0xae, 0x29, 0x6b, 0xb0, 0xe3,
0xab, 0x7b, 0xaa, 0xeb, 0xb3, 0x7c, 0x97, 0xb2, 0xb2, 0x81, 0x3a, 0xf1, 0x07, 0x3a, 0xf1, 0x67,
0x18, 0x3b, 0x76, 0xaa, 0xa8, 0xf3, 0x07, 0x58, 0xd9, 0xd0, 0x9d, 0xed, 0x46, 0x48, 0xdd, 0x7c,
0xcf, 0xfb, 0x7c, 0xd6, 0x77, 0xef, 0x81, 0x1d, 0x26, 0x34, 0x93, 0x5e, 0x5e, 0x70, 0xc9, 0x11,
0xa4, 0x2c, 0xca, 0xa8, 0xfc, 0xc2, 0x8b, 0x8b, 0xd1, 0xdb, 0x84, 0xc9, 0xf3, 0x4d, 0xe4, 0xad,
0xf9, 0xe5, 0x2c, 0xe1, 0x09, 0x9f, 0x69, 0x25, 0xda, 0x9c, 0xe9, 0x93, 0x3e, 0xe8, 0xaf, 0x6a,
0x74, 0xf2, 0xb7, 0x05, 0x83, 0x0f, 0x59, 0x9c, 0x73, 0x96, 0x49, 0x42, 0xd7, 0xbc, 0x88, 0x11,
0x82, 0x4e, 0x16, 0x5e, 0x52, 0xc7, 0x74, 0xcd, 0xa9, 0x45, 0xf4, 0x37, 0x7a, 0x01, 0x3d, 0x41,
0x8b, 0x2b, 0xb6, 0xa6, 0x2b, 0x9d, 0xb5, 0x74, 0x66, 0xd7, 0xec, 0xa3, 0x52, 0xde, 0x00, 0x34,
0x0a, 0x8b, 0x9d, 0xb6, 0x12, 0x8e, 0xfb, 0xe5, 0xfd, 0xd8, 0x3a, 0xa9, 0xa8, 0xbf, 0x20, 0x56,
0x2d, 0xf8, 0xb1, 0xb2, 0xaf, 0x58, 0x21, 0x37, 0x61, 0xba, 0x62, 0xb9, 0xd3, 0xd9, 0xd9, 0x9f,
0x2a, 0xea, 0x07, 0xc4, 0xaa, 0x05, 0x3f, 0x47, 0x33, 0xb0, 0x69, 0xbd, 0xa4, 0xd2, 0x9f, 0x69,
0x7d, 0x50, 0xde, 0x8f, 0xa1, 0xd9, 0xdd, 0x0f, 0x08, 0x34, 0x8a, 0x9f, 0xa3, 0x23, 0xe8, 0xb3,
0x2c, 0x29, 0xa8, 0x10, 0xab, 0x9c, 0x17, 0x52, 0x38, 0x7b, 0x6e, 0x7b, 0x6a, 0x1f, 0x3e, 0xf7,
0x76, 0x4d, 0x79, 0x01, 0x2f, 0xe4, 0x9c, 0x67, 0x67, 0x2c, 0x21, 0xbd, 0x5a, 0x56, 0x48, 0x20,
0x07, 0xf6, 0xc3, 0x94, 0x85, 0x82, 0x0a, 0x67, 0xdf, 0x6d, 0x4f, 0x2d, 0xd2, 0x1c, 0x55, 0x0d,
0x32, 0x14, 0x17, 0xab, 0x26, 0xee, 0xea, 0xd8, 0x56, 0xec, 0x7d, 0xad, 0xbc, 0x86, 0x61, 0x53,
0x43, 0xcc, 0x44, 0x18, 0xa5, 0x34, 0x76, 0x2c, 0xd7, 0x9c, 0x76, 0xc9, 0x41, 0xcd, 0x17, 0x35,
0x9e, 0x7c, 0x6b, 0x01, 0xec, 0x96, 0x78, 0xb2, 0xf7, 0x23, 0xe8, 0xea, 0x77, 0x5a, 0xf3, 0x54,
0x77, 0x3e, 0x38, 0x1c, 0x3f, 0x7d, 0x05, 0x2f, 0xa8, 0x35, 0xf2, 0x38, 0x80, 0xc6, 0x60, 0xcb,
0xb0, 0x48, 0xa8, 0xd4, 0x1d, 0xe8, 0x27, 0xe9, 0x13, 0xa8, 0x90, 0x9a, 0x44, 0xaf, 0x60, 0x90,
0x6f, 0xa2, 0x94, 0x89, 0x73, 0x1a, 0x57, 0x4e, 0x47, 0x3b, 0xfd, 0x47, 0xaa, 0xb4, 0xc9, 0x67,
0xe8, 0x36, 0x7f, 0x47, 0x0e, 0xb4, 0x97, 0xf3, 0x60, 0x68, 0x8c, 0x0e, 0xae, 0x6f, 0x5c, 0xbb,
0xc1, 0xcb, 0x79, 0xa0, 0x92, 0xd3, 0x45, 0x30, 0x34, 0xff, 0x4f, 0x4e, 0x17, 0x01, 0x1a, 0x41,
0xe7, 0x64, 0xbe, 0x0c, 0x86, 0xad, 0xd1, 0xf0, 0xfa, 0xc6, 0xed, 0x35, 0x91, 0x62, 0xa3, 0xce,
0xf7, 0x9f, 0xd8, 0x38, 0x7e, 0x79, 0xf7, 0x80, 0x8d, 0x3f, 0x0f, 0xd8, 0xfc, 0x5a, 0x62, 0xf3,
0x57, 0x89, 0xcd, 0xdb, 0x12, 0x9b, 0xbf, 0x4b, 0x6c, 0xfe, 0xd8, 0x62, 0xe3, 0x76, 0x8b, 0x8d,
0xbb, 0x2d, 0x36, 0xa2, 0x3d, 0x7d, 0xb3, 0x77, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0xc0, 0xc6,
0x3a, 0x88, 0xfc, 0x02, 0x00, 0x00,
}
func (this *EndpointRecord) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 13)
s = append(s, "&libnetwork.EndpointRecord{")
s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n")
s = append(s, "ServiceName: "+fmt.Sprintf("%#v", this.ServiceName)+",\n")
s = append(s, "ServiceID: "+fmt.Sprintf("%#v", this.ServiceID)+",\n")
s = append(s, "VirtualIP: "+fmt.Sprintf("%#v", this.VirtualIP)+",\n")
s = append(s, "EndpointIP: "+fmt.Sprintf("%#v", this.EndpointIP)+",\n")
if this.IngressPorts != nil {
s = append(s, "IngressPorts: "+fmt.Sprintf("%#v", this.IngressPorts)+",\n")
}
s = append(s, "Aliases: "+fmt.Sprintf("%#v", this.Aliases)+",\n")
s = append(s, "TaskAliases: "+fmt.Sprintf("%#v", this.TaskAliases)+",\n")
s = append(s, "ServiceDisabled: "+fmt.Sprintf("%#v", this.ServiceDisabled)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func (this *PortConfig) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 8)
s = append(s, "&libnetwork.PortConfig{")
s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n")
s = append(s, "Protocol: "+fmt.Sprintf("%#v", this.Protocol)+",\n")
s = append(s, "TargetPort: "+fmt.Sprintf("%#v", this.TargetPort)+",\n")
s = append(s, "PublishedPort: "+fmt.Sprintf("%#v", this.PublishedPort)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func valueToGoStringAgent(v interface{}, typ string) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
}
func (m *EndpointRecord) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *EndpointRecord) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *EndpointRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.ServiceDisabled {
i--
if m.ServiceDisabled {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i--
dAtA[i] = 0x48
}
if len(m.TaskAliases) > 0 {
for iNdEx := len(m.TaskAliases) - 1; iNdEx >= 0; iNdEx-- {
i -= len(m.TaskAliases[iNdEx])
copy(dAtA[i:], m.TaskAliases[iNdEx])
i = encodeVarintAgent(dAtA, i, uint64(len(m.TaskAliases[iNdEx])))
i--
dAtA[i] = 0x42
}
}
if len(m.Aliases) > 0 {
for iNdEx := len(m.Aliases) - 1; iNdEx >= 0; iNdEx-- {
i -= len(m.Aliases[iNdEx])
copy(dAtA[i:], m.Aliases[iNdEx])
i = encodeVarintAgent(dAtA, i, uint64(len(m.Aliases[iNdEx])))
i--
dAtA[i] = 0x3a
}
}
if len(m.IngressPorts) > 0 {
for iNdEx := len(m.IngressPorts) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.IngressPorts[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintAgent(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x32
}
}
if len(m.EndpointIP) > 0 {
i -= len(m.EndpointIP)
copy(dAtA[i:], m.EndpointIP)
i = encodeVarintAgent(dAtA, i, uint64(len(m.EndpointIP)))
i--
dAtA[i] = 0x2a
}
if len(m.VirtualIP) > 0 {
i -= len(m.VirtualIP)
copy(dAtA[i:], m.VirtualIP)
i = encodeVarintAgent(dAtA, i, uint64(len(m.VirtualIP)))
i--
dAtA[i] = 0x22
}
if len(m.ServiceID) > 0 {
i -= len(m.ServiceID)
copy(dAtA[i:], m.ServiceID)
i = encodeVarintAgent(dAtA, i, uint64(len(m.ServiceID)))
i--
dAtA[i] = 0x1a
}
if len(m.ServiceName) > 0 {
i -= len(m.ServiceName)
copy(dAtA[i:], m.ServiceName)
i = encodeVarintAgent(dAtA, i, uint64(len(m.ServiceName)))
i--
dAtA[i] = 0x12
}
if len(m.Name) > 0 {
i -= len(m.Name)
copy(dAtA[i:], m.Name)
i = encodeVarintAgent(dAtA, i, uint64(len(m.Name)))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
func (m *PortConfig) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *PortConfig) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *PortConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.PublishedPort != 0 {
i = encodeVarintAgent(dAtA, i, uint64(m.PublishedPort))
i--
dAtA[i] = 0x20
}
if m.TargetPort != 0 {
i = encodeVarintAgent(dAtA, i, uint64(m.TargetPort))
i--
dAtA[i] = 0x18
}
if m.Protocol != 0 {
i = encodeVarintAgent(dAtA, i, uint64(m.Protocol))
i--
dAtA[i] = 0x10
}
if len(m.Name) > 0 {
i -= len(m.Name)
copy(dAtA[i:], m.Name)
i = encodeVarintAgent(dAtA, i, uint64(len(m.Name)))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
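// encodeVarintAgent below emits v as a protobuf base-128 varint ending just
// before offset, mirroring the back-to-front buffer filling used by the
// MarshalToSizedBuffer methods above. Worked example: v = 300 encodes as
// 0xac 0x02 — the low 7 bits come first, with the continuation (high) bit
// set on every byte except the last.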
func encodeVarintAgent(dAtA []byte, offset int, v uint64) int {
offset -= sovAgent(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
func (m *EndpointRecord) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Name)
if l > 0 {
n += 1 + l + sovAgent(uint64(l))
}
l = len(m.ServiceName)
if l > 0 {
n += 1 + l + sovAgent(uint64(l))
}
l = len(m.ServiceID)
if l > 0 {
n += 1 + l + sovAgent(uint64(l))
}
l = len(m.VirtualIP)
if l > 0 {
n += 1 + l + sovAgent(uint64(l))
}
l = len(m.EndpointIP)
if l > 0 {
n += 1 + l + sovAgent(uint64(l))
}
if len(m.IngressPorts) > 0 {
for _, e := range m.IngressPorts {
l = e.Size()
n += 1 + l + sovAgent(uint64(l))
}
}
if len(m.Aliases) > 0 {
for _, s := range m.Aliases {
l = len(s)
n += 1 + l + sovAgent(uint64(l))
}
}
if len(m.TaskAliases) > 0 {
for _, s := range m.TaskAliases {
l = len(s)
n += 1 + l + sovAgent(uint64(l))
}
}
if m.ServiceDisabled {
n += 2
}
return n
}
func (m *PortConfig) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Name)
if l > 0 {
n += 1 + l + sovAgent(uint64(l))
}
if m.Protocol != 0 {
n += 1 + sovAgent(uint64(m.Protocol))
}
if m.TargetPort != 0 {
n += 1 + sovAgent(uint64(m.TargetPort))
}
if m.PublishedPort != 0 {
n += 1 + sovAgent(uint64(m.PublishedPort))
}
return n
}
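// sovAgent computes the encoded size of a varint: one byte per started 7-bit
// group, hence (bits+6)/7; the x|1 makes sovAgent(0) return 1, since zero
// still occupies one byte on the wire. Worked example: sovAgent(300) =
// (9+6)/7 = 2 bytes. sozAgent is the size of the zigzag encoding used for
// sint64 fields (none appear in this file).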
func sovAgent(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozAgent(x uint64) (n int) {
return sovAgent(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *EndpointRecord) String() string {
if this == nil {
return "nil"
}
repeatedStringForIngressPorts := "[]*PortConfig{"
for _, f := range this.IngressPorts {
repeatedStringForIngressPorts += strings.Replace(f.String(), "PortConfig", "PortConfig", 1) + ","
}
repeatedStringForIngressPorts += "}"
s := strings.Join([]string{`&EndpointRecord{`,
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
`ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`,
`ServiceID:` + fmt.Sprintf("%v", this.ServiceID) + `,`,
`VirtualIP:` + fmt.Sprintf("%v", this.VirtualIP) + `,`,
`EndpointIP:` + fmt.Sprintf("%v", this.EndpointIP) + `,`,
`IngressPorts:` + repeatedStringForIngressPorts + `,`,
`Aliases:` + fmt.Sprintf("%v", this.Aliases) + `,`,
`TaskAliases:` + fmt.Sprintf("%v", this.TaskAliases) + `,`,
`ServiceDisabled:` + fmt.Sprintf("%v", this.ServiceDisabled) + `,`,
`}`,
}, "")
return s
}
func (this *PortConfig) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&PortConfig{`,
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
`Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`,
`TargetPort:` + fmt.Sprintf("%v", this.TargetPort) + `,`,
`PublishedPort:` + fmt.Sprintf("%v", this.PublishedPort) + `,`,
`}`,
}, "")
return s
}
func valueToStringAgent(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
func (m *EndpointRecord) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAgent
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: EndpointRecord: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: EndpointRecord: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAgent
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthAgent
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthAgent
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAgent
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthAgent
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthAgent
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ServiceName = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ServiceID", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAgent
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthAgent
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthAgent
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ServiceID = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field VirtualIP", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAgent
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthAgent
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthAgent
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.VirtualIP = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field EndpointIP", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAgent
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthAgent
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthAgent
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.EndpointIP = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 6:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field IngressPorts", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAgent
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthAgent
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthAgent
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.IngressPorts = append(m.IngressPorts, &PortConfig{})
if err := m.IngressPorts[len(m.IngressPorts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 7:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Aliases", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAgent
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthAgent
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthAgent
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Aliases = append(m.Aliases, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
case 8:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field TaskAliases", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAgent
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthAgent
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthAgent
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.TaskAliases = append(m.TaskAliases, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
case 9:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field ServiceDisabled", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAgent
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.ServiceDisabled = bool(v != 0)
default:
iNdEx = preIndex
skippy, err := skipAgent(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthAgent
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *PortConfig) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAgent
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: PortConfig: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: PortConfig: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAgent
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthAgent
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthAgent
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
}
m.Protocol = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAgent
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Protocol |= PortConfig_Protocol(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field TargetPort", wireType)
}
m.TargetPort = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAgent
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.TargetPort |= uint32(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 4:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field PublishedPort", wireType)
}
m.PublishedPort = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAgent
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.PublishedPort |= uint32(b&0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipAgent(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthAgent
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
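// skipAgent below advances past a single unknown field so that messages
// carrying fields this version does not know about can still be decoded
// (forward compatibility). The depth counter pairs legacy start-group and
// end-group markers (wire types 3 and 4) so nested groups skip as a unit.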
func skipAgent(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowAgent
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowAgent
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowAgent
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthAgent
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupAgent
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthAgent
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
}
var (
ErrInvalidLengthAgent = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowAgent = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupAgent = fmt.Errorf("proto: unexpected end of group")
)