vendor swarmkit 970b45afa1c9da9ed4b9c793669cedbb05ad3833

Signed-off-by: Dong Chen <dongluo.chen@docker.com>

parent c7c7f36da7
commit aac861f3d9

62 changed files with 5982 additions and 2579 deletions
@@ -105,7 +105,7 @@ github.com/docker/containerd 422e31ce907fd9c3833a38d7b8fdd023e5a76e73
github.com/tonistiigi/fifo 1405643975692217d6720f8b54aeee1bf2cd5cf4

# cluster
github.com/docker/swarmkit 9fdea50c14492b6e1f472813849794d36bfef217
github.com/docker/swarmkit 970b45afa1c9da9ed4b9c793669cedbb05ad3833
github.com/golang/mock bd3c8e81be01eef76d4b503f5e687d2d1354d2d9
github.com/gogo/protobuf 8d70fb3182befc465c4a1eac8ad4d38ff49778e2
github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a
352  vendor/github.com/docker/swarmkit/api/control.pb.go  generated  vendored
@@ -195,6 +195,7 @@ type ListTasksRequest_Filters struct {
	DesiredStates []TaskState `protobuf:"varint,6,rep,packed,name=desired_states,json=desiredStates,enum=docker.swarmkit.v1.TaskState" json:"desired_states,omitempty"`
	// NamePrefixes matches all objects with the given prefixes
	NamePrefixes []string `protobuf:"bytes,7,rep,name=name_prefixes,json=namePrefixes" json:"name_prefixes,omitempty"`
	Runtimes []string `protobuf:"bytes,9,rep,name=runtimes" json:"runtimes,omitempty"`
	// UpToDate matches tasks that are consistent with the current
	// service definition.
	// Note: this is intended for internal status reporting rather
@@ -299,6 +300,7 @@ type ListServicesRequest_Filters struct {
	Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
	// NamePrefixes matches all objects with the given prefixes
	NamePrefixes []string `protobuf:"bytes,4,rep,name=name_prefixes,json=namePrefixes" json:"name_prefixes,omitempty"`
	Runtimes []string `protobuf:"bytes,5,rep,name=runtimes" json:"runtimes,omitempty"`
}

func (m *ListServicesRequest_Filters) Reset() { *m = ListServicesRequest_Filters{} }
@@ -1175,6 +1177,11 @@ func (m *ListTasksRequest_Filters) CopyFrom(src interface{}) {
		copy(m.NamePrefixes, o.NamePrefixes)
	}

	if o.Runtimes != nil {
		m.Runtimes = make([]string, len(o.Runtimes))
		copy(m.Runtimes, o.Runtimes)
	}

}

func (m *ListTasksResponse) Copy() *ListTasksResponse {
@@ -1393,6 +1400,11 @@ func (m *ListServicesRequest_Filters) CopyFrom(src interface{}) {
		copy(m.NamePrefixes, o.NamePrefixes)
	}

	if o.Runtimes != nil {
		m.Runtimes = make([]string, len(o.Runtimes))
		copy(m.Runtimes, o.Runtimes)
	}

}

func (m *ListServicesResponse) Copy() *ListServicesResponse {
@@ -3467,6 +3479,21 @@ func (m *ListTasksRequest_Filters) MarshalTo(dAtA []byte) (int, error) {
		}
		i++
	}
	if len(m.Runtimes) > 0 {
		for _, s := range m.Runtimes {
			dAtA[i] = 0x4a
			i++
			l = len(s)
			for l >= 1<<7 {
				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
				l >>= 7
				i++
			}
			dAtA[i] = uint8(l)
			i++
			i += copy(dAtA[i:], s)
		}
	}
	return i, nil
}

@@ -3832,6 +3859,21 @@ func (m *ListServicesRequest_Filters) MarshalTo(dAtA []byte) (int, error) {
			i += copy(dAtA[i:], s)
		}
	}
	if len(m.Runtimes) > 0 {
		for _, s := range m.Runtimes {
			dAtA[i] = 0x2a
			i++
			l = len(s)
			for l >= 1<<7 {
				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
				l >>= 7
				i++
			}
			dAtA[i] = uint8(l)
			i++
			i += copy(dAtA[i:], s)
		}
	}
	return i, nil
}

@@ -6015,6 +6057,12 @@ func (m *ListTasksRequest_Filters) Size() (n int) {
	if m.UpToDate {
		n += 2
	}
	if len(m.Runtimes) > 0 {
		for _, s := range m.Runtimes {
			l = len(s)
			n += 1 + l + sovControl(uint64(l))
		}
	}
	return n
}

@@ -6156,6 +6204,12 @@ func (m *ListServicesRequest_Filters) Size() (n int) {
			n += 1 + l + sovControl(uint64(l))
		}
	}
	if len(m.Runtimes) > 0 {
		for _, s := range m.Runtimes {
			l = len(s)
			n += 1 + l + sovControl(uint64(l))
		}
	}
	return n
}

@@ -6738,6 +6792,7 @@ func (this *ListTasksRequest_Filters) String() string {
		`DesiredStates:` + fmt.Sprintf("%v", this.DesiredStates) + `,`,
		`NamePrefixes:` + fmt.Sprintf("%v", this.NamePrefixes) + `,`,
		`UpToDate:` + fmt.Sprintf("%v", this.UpToDate) + `,`,
		`Runtimes:` + fmt.Sprintf("%v", this.Runtimes) + `,`,
		`}`,
	}, "")
	return s
@@ -6863,6 +6918,7 @@ func (this *ListServicesRequest_Filters) String() string {
		`IDPrefixes:` + fmt.Sprintf("%v", this.IDPrefixes) + `,`,
		`Labels:` + mapStringForLabels + `,`,
		`NamePrefixes:` + fmt.Sprintf("%v", this.NamePrefixes) + `,`,
		`Runtimes:` + fmt.Sprintf("%v", this.Runtimes) + `,`,
		`}`,
	}, "")
	return s
@@ -9037,6 +9093,35 @@ func (m *ListTasksRequest_Filters) Unmarshal(dAtA []byte) error {
				}
			}
			m.UpToDate = bool(v != 0)
		case 9:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Runtimes", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowControl
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthControl
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Runtimes = append(m.Runtimes, string(dAtA[iNdEx:postIndex]))
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipControl(dAtA[iNdEx:])
@@ -10158,6 +10243,35 @@ func (m *ListServicesRequest_Filters) Unmarshal(dAtA []byte) error {
			}
			m.NamePrefixes = append(m.NamePrefixes, string(dAtA[iNdEx:postIndex]))
			iNdEx = postIndex
		case 5:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Runtimes", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowControl
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthControl
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Runtimes = append(m.Runtimes, string(dAtA[iNdEx:postIndex]))
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipControl(dAtA[iNdEx:])
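Aside on the hard-coded key bytes emitted above: each string in Runtimes is prefixed with a protobuf key computed as (field_number << 3) | wire_type, with wire type 2 (length-delimited) for strings. A tiny standalone Go check, not part of the vendored code, confirms the two constants used by the generated marshalers:

package main

import "fmt"

func main() {
	// ListTasksRequest_Filters.runtimes is field 9: (9<<3)|2 == 0x4a.
	fmt.Printf("0x%x\n", uint8(9<<3|2))
	// ListServicesRequest_Filters.runtimes is field 5: (5<<3)|2 == 0x2a.
	fmt.Printf("0x%x\n", uint8(5<<3|2))
}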
@@ -13349,122 +13463,124 @@ var (
func init() { proto.RegisterFile("control.proto", fileDescriptorControl) }

var fileDescriptorControl = []byte{
	// The gzipped FileDescriptorProto is regenerated: the previous 1865-byte blob
	// is replaced by an 1894-byte one. The raw byte values are omitted here.
}
3  vendor/github.com/docker/swarmkit/api/control.proto  generated  vendored
@@ -191,6 +191,8 @@ message ListTasksRequest {
		repeated docker.swarmkit.v1.TaskState desired_states = 6;
		// NamePrefixes matches all objects with the given prefixes
		repeated string name_prefixes = 7;
		repeated string runtimes = 9;

		// UpToDate matches tasks that are consistent with the current
		// service definition.
		// Note: this is intended for internal status reporting rather
@@ -260,6 +262,7 @@ message ListServicesRequest {
		map<string, string> labels = 3;
		// NamePrefixes matches all objects with the given prefixes
		repeated string name_prefixes = 4;
		repeated string runtimes = 5;
	}

	Filters filters = 1;
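For illustration only, a minimal Go sketch of how a control-API client might use the new runtimes filter. It assumes a reachable manager address and accepts plain (insecure) gRPC dialing for brevity; a real swarm manager requires the cluster's mutual-TLS credentials.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/swarmkit/api"
	"google.golang.org/grpc"
)

// managerAddr is a hypothetical manager endpoint used only for this sketch.
const managerAddr = "127.0.0.1:2377"

func main() {
	conn, err := grpc.Dial(managerAddr, grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := api.NewControlClient(conn)
	// List only tasks whose runtime matches, via the runtimes filter added here.
	resp, err := client.ListTasks(context.Background(), &api.ListTasksRequest{
		Filters: &api.ListTasksRequest_Filters{
			Runtimes: []string{"container"},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, t := range resp.Tasks {
		fmt.Println(t.ID)
	}
}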
9  vendor/github.com/docker/swarmkit/api/deepcopy/copy.go  generated  vendored
@@ -26,6 +26,15 @@ type CopierFrom interface {
// types that use this function.
func Copy(dst, src interface{}) {
	switch dst := dst.(type) {
	case *types.Any:
		src := src.(*types.Any)
		dst.TypeUrl = src.TypeUrl
		if src.Value != nil {
			dst.Value = make([]byte, len(src.Value))
			copy(dst.Value, src.Value)
		} else {
			dst.Value = nil
		}
	case *types.Duration:
		src := src.(*types.Duration)
		*dst = *src
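A small usage sketch of the new *types.Any branch in deepcopy.Copy; the type URL and payload bytes are illustrative values only.

package main

import (
	"fmt"

	"github.com/docker/swarmkit/api/deepcopy"
	"github.com/gogo/protobuf/types"
)

func main() {
	src := &types.Any{
		TypeUrl: "example.com/hypothetical.Payload", // placeholder type URL
		Value:   []byte{0x0a, 0x03, 'f', 'o', 'o'},
	}
	dst := &types.Any{}

	// Copy duplicates the Value slice, so mutating src afterwards must not affect dst.
	deepcopy.Copy(dst, src)
	src.Value[0] = 0xff

	fmt.Println(dst.TypeUrl, dst.Value)
}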
38  vendor/github.com/docker/swarmkit/api/equality/equality.go  generated  vendored
@@ -1,6 +1,7 @@
package equality

import (
	"crypto/subtle"
	"reflect"

	"github.com/docker/swarmkit/api"
@@ -27,3 +28,40 @@ func TaskStatusesEqualStable(a, b *api.TaskStatus) bool {
	copyA.Timestamp, copyB.Timestamp = nil, nil
	return reflect.DeepEqual(&copyA, &copyB)
}

// RootCAEqualStable compares RootCAs, excluding join tokens, which are randomly generated
func RootCAEqualStable(a, b *api.RootCA) bool {
	if a == nil && b == nil {
		return true
	}
	if a == nil || b == nil {
		return false
	}

	var aRotationKey, bRotationKey []byte
	if a.RootRotation != nil {
		aRotationKey = a.RootRotation.CAKey
	}
	if b.RootRotation != nil {
		bRotationKey = b.RootRotation.CAKey
	}
	if subtle.ConstantTimeCompare(a.CAKey, b.CAKey) != 1 || subtle.ConstantTimeCompare(aRotationKey, bRotationKey) != 1 {
		return false
	}

	copyA, copyB := *a, *b
	copyA.JoinTokens, copyB.JoinTokens = api.JoinTokens{}, api.JoinTokens{}
	return reflect.DeepEqual(copyA, copyB)
}

// ExternalCAsEqualStable compares lists of external CAs and determines whether they are equal.
func ExternalCAsEqualStable(a, b []*api.ExternalCA) bool {
	// because DeepEqual will treat an empty list and a nil list differently, we want to manually check this first
	if len(a) == 0 && len(b) == 0 {
		return true
	}
	// The assumption is that each individual api.ExternalCA within both lists are created from deserializing from a
	// protobuf, so no special affordances are made to treat a nil map and empty map in the Options field of an
	// api.ExternalCA as equivalent.
	return reflect.DeepEqual(a, b)
}
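An illustrative sketch of the new comparison helpers; the certificate, key, and token values are placeholders, not real material.

package main

import (
	"fmt"

	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/api/equality"
)

func main() {
	a := &api.RootCA{
		CACert:     []byte("-----BEGIN CERTIFICATE-----\n..."),
		CAKey:      []byte("-----BEGIN EC PRIVATE KEY-----\n..."),
		JoinTokens: api.JoinTokens{Worker: "SWMTKN-1-aaa", Manager: "SWMTKN-1-bbb"},
	}

	// Join tokens are regenerated randomly, so they are excluded from the comparison.
	b := *a
	b.JoinTokens = api.JoinTokens{Worker: "SWMTKN-1-ccc", Manager: "SWMTKN-1-ddd"}
	fmt.Println(equality.RootCAEqualStable(a, &b)) // true despite differing tokens

	// A nil list and an empty list of external CAs are treated as equal.
	fmt.Println(equality.ExternalCAsEqualStable(nil, []*api.ExternalCA{}))
}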
2  vendor/github.com/docker/swarmkit/api/gen.go  generated  vendored
@@ -1,3 +1,3 @@
package api

//go:generate protoc -I.:../protobuf:../vendor:../vendor/github.com/gogo/protobuf --gogoswarm_out=plugins=grpc+deepcopy+raftproxy+authenticatedwrapper,import_path=github.com/docker/swarmkit/api,Mgogoproto/gogo.proto=github.com/gogo/protobuf/gogoproto,Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor,Mplugin/plugin.proto=github.com/docker/swarmkit/protobuf/plugin,Mgoogle/protobuf/duration.proto=github.com/gogo/protobuf/types,Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types:. types.proto specs.proto objects.proto control.proto dispatcher.proto ca.proto snapshot.proto raft.proto health.proto resource.proto logbroker.proto
//go:generate protoc -I.:../protobuf:../vendor:../vendor/github.com/gogo/protobuf --gogoswarm_out=plugins=grpc+deepcopy+storeobject+raftproxy+authenticatedwrapper,import_path=github.com/docker/swarmkit/api,Mgogoproto/gogo.proto=github.com/gogo/protobuf/gogoproto,Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor,Mplugin/plugin.proto=github.com/docker/swarmkit/protobuf/plugin,Mgoogle/protobuf/duration.proto=github.com/gogo/protobuf/types,Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types,Mgoogle/protobuf/any.proto=github.com/gogo/protobuf/types:. types.proto specs.proto objects.proto control.proto dispatcher.proto ca.proto snapshot.proto raft.proto health.proto resource.proto logbroker.proto
20  vendor/github.com/docker/swarmkit/api/naming/naming.go  generated  vendored
@@ -2,11 +2,17 @@
package naming

import (
	"errors"
	"fmt"
	"strings"

	"github.com/docker/swarmkit/api"
)

var (
	errUnknownRuntime = errors.New("unrecognized runtime")
)

// Task returns the task name from Annotations.Name,
// and, in case Annotations.Name is missing, fallback
// to construct the name from other information.
@@ -27,3 +33,17 @@ func Task(t *api.Task) string {
}

// TODO(stevvooe): Consolidate "Hostname" style validation here.

// Runtime returns the runtime name from a given spec.
func Runtime(t api.TaskSpec) (string, error) {
	switch r := t.GetRuntime().(type) {
	case *api.TaskSpec_Attachment:
		return "attachment", nil
	case *api.TaskSpec_Container:
		return "container", nil
	case *api.TaskSpec_Generic:
		return strings.ToLower(r.Generic.Kind), nil
	default:
		return "", errUnknownRuntime
	}
}
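A short illustrative use of the new naming.Runtime helper; the image name is arbitrary.

package main

import (
	"fmt"

	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/api/naming"
)

func main() {
	spec := api.TaskSpec{
		Runtime: &api.TaskSpec_Container{
			Container: &api.ContainerSpec{Image: "nginx:alpine"},
		},
	}

	// Returns the runtime name for the spec's oneof, or errUnknownRuntime.
	name, err := naming.Runtime(spec)
	if err != nil {
		fmt.Println("unrecognized runtime:", err)
		return
	}
	fmt.Println(name) // "container"
}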
2128  vendor/github.com/docker/swarmkit/api/objects.pb.go  generated  vendored
File diff suppressed because it is too large
78  vendor/github.com/docker/swarmkit/api/objects.proto  generated  vendored
@@ -6,6 +6,8 @@ import "types.proto";
import "specs.proto";
import "google/protobuf/timestamp.proto";
import "gogoproto/gogo.proto";
import "google/protobuf/any.proto";
import "plugin/plugin.proto";

// This file contains definitions for all first-class objects in the cluster
// API. Such types typically have a corresponding specification, with the
@@ -24,6 +26,8 @@ message Meta {

// Node provides the internal node state as seen by the cluster.
message Node {
	option (docker.protobuf.plugin.store_object) = { };

	// ID specifies the identity of the node.
	string id = 1;

@@ -63,16 +67,26 @@ message Node {
}

message Service {
	option (docker.protobuf.plugin.store_object) = { };

	string id = 1;

	Meta meta = 2 [(gogoproto.nullable) = false];

	ServiceSpec spec = 3 [(gogoproto.nullable) = false];

	// SpecVersion versions Spec, to identify changes in the spec. Note that
	// this is not directly comparable to the service's Version.
	Version spec_version = 10;

	// PreviousSpec is the previous service spec that was in place before
	// "Spec".
	ServiceSpec previous_spec = 6;

	// PreviousSpecVersion versions PreviousSpec. Note that this is not
	// directly comparable to the service's Version.
	Version previous_spec_version = 11;

	// Runtime state of service endpoint. This may be different
	// from the spec version because the user may not have entered
	// the optional fields like node_port or virtual_ip and it
@@ -123,6 +137,8 @@ message Endpoint {
// immutable and idempotent. Once it is dispatched to a node, it will not be
// dispatched to another node.
message Task {
	option (docker.protobuf.plugin.store_object) = { };

	string id = 1;

	Meta meta = 2 [(gogoproto.nullable) = false];

@@ -131,6 +147,11 @@ message Task {
	// The system will honor this and will *never* modify it.
	TaskSpec spec = 3 [(gogoproto.nullable) = false];

	// SpecVersion is copied from Service, to identify which version of the
	// spec this task has. Note that this is not directly comparable to the
	// service's Version.
	Version spec_version = 14;

	// ServiceID indicates the service under which this task is orchestrated. This
	// should almost always be set.
	string service_id = 4;

@@ -204,6 +225,8 @@ message NetworkAttachment {
}

message Network {
	option (docker.protobuf.plugin.store_object) = { };

	string id = 1;

	Meta meta = 2 [(gogoproto.nullable) = false];

@@ -220,6 +243,8 @@ message Network {

// Cluster provides global cluster settings.
message Cluster {
	option (docker.protobuf.plugin.store_object) = { };

	string id = 1;

	Meta meta = 2 [(gogoproto.nullable) = false];

@@ -256,6 +281,8 @@ message Cluster {
// information that is generated from the secret data in the `spec`, such as
// the digest and size of the secret data.
message Secret {
	option (docker.protobuf.plugin.store_object) = { };

	string id = 1;

	Meta meta = 2 [(gogoproto.nullable) = false];

@@ -267,3 +294,54 @@ message Secret {
	// Whether the secret is an internal secret (not set by a user) or not.
	bool internal = 4;
}

// Resource is a top-level object with externally defined content and indexing.
// SwarmKit can serve as a store for these objects without understanding their
// meanings.
message Resource {
	option (docker.protobuf.plugin.store_object) = { };

	string id = 1 [(gogoproto.customname) = "ID"];

	Meta meta = 2 [(gogoproto.nullable) = false];

	Annotations annotations = 3 [(gogoproto.nullable) = false];

	// Kind identifies this class of object. It is essentially a namespace
	// to keep IDs or indices from colliding between unrelated Resource
	// objects. This must correspond to the name of an Extension.
	string kind = 4;

	// Payload bytes. This data is not interpreted in any way by SwarmKit.
	// By convention, it should be a marshalled protocol buffers message.
	google.protobuf.Any payload = 5;
}

// Extension declares a type of "resource" object. This message provides some
// metadata about the objects.
message Extension {
	option (docker.protobuf.plugin.store_object) = { };

	string id = 1 [(gogoproto.customname) = "ID"];

	Meta meta = 2 [(gogoproto.nullable) = false];

	Annotations annotations = 3 [(gogoproto.nullable) = false];

	string description = 4;

	// TODO(aaronl): Add optional indexing capabilities. It would be
	// extremely useful be able to automatically introspect protobuf, json,
	// etc. objects and automatically index them based on a schema and field
	// paths defined here.
	//
	//oneof Schema {
	//	google.protobuf.Descriptor protobuf = 1;
	//	bytes json = 2;
	//}
	//
	//Schema schema = 5;
	//
	// // Indices, with values expressed as Go templates.
	//repeated IndexEntry index_templates = 6;
}
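For illustration, a sketch of constructing one of the new Resource objects from Go, assuming the generated api types follow the usual gogo layout used elsewhere in this diff. The type URL, kind, and payload bytes are made-up placeholders.

package main

import (
	"fmt"

	"github.com/docker/swarmkit/api"
	"github.com/gogo/protobuf/types"
)

func main() {
	// The payload is opaque to SwarmKit; by convention it is a marshalled protobuf message.
	payload := &types.Any{
		TypeUrl: "example.com/hypothetical.GenericConfig", // placeholder type URL
		Value:   []byte{0x0a, 0x05, 'h', 'e', 'l', 'l', 'o'},
	}

	res := &api.Resource{
		ID:          "resource-id-1",
		Annotations: api.Annotations{Name: "my-resource"},
		Kind:        "my-extension", // must match the name of a registered Extension
		Payload:     payload,
	}
	fmt.Println(res.Kind, res.Annotations.Name)
}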
346  vendor/github.com/docker/swarmkit/api/raft.pb.go  generated  vendored
@@ -155,8 +155,8 @@ func (*ResolveAddressResponse) Descriptor() ([]byte, []int) { return fileDescrip
// over the raft backend with a request ID to track when the
// action is effectively applied
type InternalRaftRequest struct {
	ID uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
	Action []*StoreAction `protobuf:"bytes,2,rep,name=action" json:"action,omitempty"`
	ID uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
	Action []StoreAction `protobuf:"bytes,2,rep,name=action" json:"action"`
}

func (m *InternalRaftRequest) Reset() { *m = InternalRaftRequest{} }
@@ -173,6 +173,8 @@ type StoreAction struct {
	// *StoreAction_Network
	// *StoreAction_Cluster
	// *StoreAction_Secret
	// *StoreAction_Resource
	// *StoreAction_Extension
	Target isStoreAction_Target `protobuf_oneof:"target"`
}

@@ -204,13 +206,21 @@ type StoreAction_Cluster struct {
type StoreAction_Secret struct {
	Secret *Secret `protobuf:"bytes,7,opt,name=secret,oneof"`
}
type StoreAction_Resource struct {
	Resource *Resource `protobuf:"bytes,8,opt,name=resource,oneof"`
}
type StoreAction_Extension struct {
	Extension *Extension `protobuf:"bytes,9,opt,name=extension,oneof"`
}

func (*StoreAction_Node) isStoreAction_Target() {}
func (*StoreAction_Service) isStoreAction_Target() {}
func (*StoreAction_Task) isStoreAction_Target() {}
func (*StoreAction_Network) isStoreAction_Target() {}
func (*StoreAction_Cluster) isStoreAction_Target() {}
func (*StoreAction_Secret) isStoreAction_Target() {}
func (*StoreAction_Node) isStoreAction_Target() {}
func (*StoreAction_Service) isStoreAction_Target() {}
func (*StoreAction_Task) isStoreAction_Target() {}
func (*StoreAction_Network) isStoreAction_Target() {}
func (*StoreAction_Cluster) isStoreAction_Target() {}
func (*StoreAction_Secret) isStoreAction_Target() {}
func (*StoreAction_Resource) isStoreAction_Target() {}
func (*StoreAction_Extension) isStoreAction_Target() {}

func (m *StoreAction) GetTarget() isStoreAction_Target {
	if m != nil {
@@ -261,6 +271,20 @@ func (m *StoreAction) GetSecret() *Secret {
	return nil
}

func (m *StoreAction) GetResource() *Resource {
	if x, ok := m.GetTarget().(*StoreAction_Resource); ok {
		return x.Resource
	}
	return nil
}

func (m *StoreAction) GetExtension() *Extension {
	if x, ok := m.GetTarget().(*StoreAction_Extension); ok {
		return x.Extension
	}
	return nil
}

// XXX_OneofFuncs is for the internal use of the proto package.
func (*StoreAction) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
	return _StoreAction_OneofMarshaler, _StoreAction_OneofUnmarshaler, _StoreAction_OneofSizer, []interface{}{
@@ -270,6 +294,8 @@ func (*StoreAction) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) e
		(*StoreAction_Network)(nil),
		(*StoreAction_Cluster)(nil),
		(*StoreAction_Secret)(nil),
		(*StoreAction_Resource)(nil),
		(*StoreAction_Extension)(nil),
	}
}

@@ -307,6 +333,16 @@ func _StoreAction_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
		if err := b.EncodeMessage(x.Secret); err != nil {
			return err
		}
	case *StoreAction_Resource:
		_ = b.EncodeVarint(8<<3 | proto.WireBytes)
		if err := b.EncodeMessage(x.Resource); err != nil {
			return err
		}
	case *StoreAction_Extension:
		_ = b.EncodeVarint(9<<3 | proto.WireBytes)
		if err := b.EncodeMessage(x.Extension); err != nil {
			return err
		}
	case nil:
	default:
		return fmt.Errorf("StoreAction.Target has unexpected type %T", x)
@@ -365,6 +401,22 @@ func _StoreAction_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Bu
		err := b.DecodeMessage(msg)
		m.Target = &StoreAction_Secret{msg}
		return true, err
	case 8: // target.resource
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		msg := new(Resource)
		err := b.DecodeMessage(msg)
		m.Target = &StoreAction_Resource{msg}
		return true, err
	case 9: // target.extension
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		msg := new(Extension)
		err := b.DecodeMessage(msg)
		m.Target = &StoreAction_Extension{msg}
		return true, err
	default:
		return false, nil
	}
@@ -404,6 +456,16 @@ func _StoreAction_OneofSizer(msg proto.Message) (n int) {
		n += proto.SizeVarint(7<<3 | proto.WireBytes)
		n += proto.SizeVarint(uint64(s))
		n += s
	case *StoreAction_Resource:
		s := proto.Size(x.Resource)
		n += proto.SizeVarint(8<<3 | proto.WireBytes)
		n += proto.SizeVarint(uint64(s))
		n += s
	case *StoreAction_Extension:
		s := proto.Size(x.Extension)
		n += proto.SizeVarint(9<<3 | proto.WireBytes)
		n += proto.SizeVarint(uint64(s))
		n += s
	case nil:
	default:
		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
@@ -624,10 +686,9 @@ func (m *InternalRaftRequest) CopyFrom(src interface{}) {
	o := src.(*InternalRaftRequest)
	*m = *o
	if o.Action != nil {
		m.Action = make([]*StoreAction, len(o.Action))
		m.Action = make([]StoreAction, len(o.Action))
		for i := range m.Action {
			m.Action[i] = &StoreAction{}
			github_com_docker_swarmkit_api_deepcopy.Copy(m.Action[i], o.Action[i])
			github_com_docker_swarmkit_api_deepcopy.Copy(&m.Action[i], &o.Action[i])
		}
	}

@@ -684,6 +745,18 @@ func (m *StoreAction) CopyFrom(src interface{}) {
		}
		github_com_docker_swarmkit_api_deepcopy.Copy(v.Secret, o.GetSecret())
		m.Target = &v
	case *StoreAction_Resource:
		v := StoreAction_Resource{
			Resource: &Resource{},
		}
		github_com_docker_swarmkit_api_deepcopy.Copy(v.Resource, o.GetResource())
		m.Target = &v
	case *StoreAction_Extension:
		v := StoreAction_Extension{
			Extension: &Extension{},
		}
		github_com_docker_swarmkit_api_deepcopy.Copy(v.Extension, o.GetExtension())
		m.Target = &v
	}
}

@@ -1308,6 +1381,34 @@ func (m *StoreAction_Secret) MarshalTo(dAtA []byte) (int, error) {
	}
	return i, nil
}
func (m *StoreAction_Resource) MarshalTo(dAtA []byte) (int, error) {
	i := 0
	if m.Resource != nil {
		dAtA[i] = 0x42
		i++
		i = encodeVarintRaft(dAtA, i, uint64(m.Resource.Size()))
		n13, err := m.Resource.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n13
	}
	return i, nil
}
func (m *StoreAction_Extension) MarshalTo(dAtA []byte) (int, error) {
	i := 0
	if m.Extension != nil {
		dAtA[i] = 0x4a
		i++
		i = encodeVarintRaft(dAtA, i, uint64(m.Extension.Size()))
		n14, err := m.Extension.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n14
	}
	return i, nil
}
func encodeFixed64Raft(dAtA []byte, offset int, v uint64) int {
	dAtA[offset] = uint8(v)
	dAtA[offset+1] = uint8(v >> 8)
@@ -1803,6 +1904,24 @@ func (m *StoreAction_Secret) Size() (n int) {
	}
	return n
}
func (m *StoreAction_Resource) Size() (n int) {
	var l int
	_ = l
	if m.Resource != nil {
		l = m.Resource.Size()
		n += 1 + l + sovRaft(uint64(l))
	}
	return n
}
func (m *StoreAction_Extension) Size() (n int) {
	var l int
	_ = l
	if m.Extension != nil {
		l = m.Extension.Size()
		n += 1 + l + sovRaft(uint64(l))
	}
	return n
}

func sovRaft(x uint64) (n int) {
	for {
@@ -1916,7 +2035,7 @@ func (this *InternalRaftRequest) String() string {
	}
	s := strings.Join([]string{`&InternalRaftRequest{`,
		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
		`Action:` + strings.Replace(fmt.Sprintf("%v", this.Action), "StoreAction", "StoreAction", 1) + `,`,
		`Action:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Action), "StoreAction", "StoreAction", 1), `&`, ``, 1) + `,`,
		`}`,
	}, "")
	return s
@@ -1992,6 +2111,26 @@ func (this *StoreAction_Secret) String() string {
	}, "")
	return s
}
func (this *StoreAction_Resource) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&StoreAction_Resource{`,
		`Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "Resource", "Resource", 1) + `,`,
		`}`,
	}, "")
	return s
}
func (this *StoreAction_Extension) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&StoreAction_Extension{`,
		`Extension:` + strings.Replace(fmt.Sprintf("%v", this.Extension), "Extension", "Extension", 1) + `,`,
		`}`,
	}, "")
	return s
}
func valueToStringRaft(v interface{}) string {
	rv := reflect.ValueOf(v)
	if rv.IsNil() {
@@ -2886,7 +3025,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error {
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Action = append(m.Action, &StoreAction{})
			m.Action = append(m.Action, StoreAction{})
			if err := m.Action[len(m.Action)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
@@ -3152,6 +3291,70 @@ func (m *StoreAction) Unmarshal(dAtA []byte) error {
			}
			m.Target = &StoreAction_Secret{v}
			iNdEx = postIndex
		case 8:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowRaft
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthRaft
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			v := &Resource{}
			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			m.Target = &StoreAction_Resource{v}
			iNdEx = postIndex
		case 9:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Extension", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowRaft
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthRaft
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			v := &Extension{}
			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			m.Target = &StoreAction_Extension{v}
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipRaft(dAtA[iNdEx:])
@@ -3281,61 +3484,64 @@ var (
func init() { proto.RegisterFile("raft.proto", fileDescriptorRaft) }

var fileDescriptorRaft = []byte{
	// The gzipped FileDescriptorProto (885 bytes in the old version) is regenerated;
	// the raw byte values are omitted here, and the dump is truncated in this capture.
}
|
||||
0x63, 0x6a, 0xe8, 0x97, 0x0b, 0xd7, 0x53, 0x44, 0x08, 0x33, 0x1a, 0x7d, 0x04, 0x3a, 0xc3, 0x21,
|
||||
0xc5, 0xdc, 0xa8, 0x48, 0x9d, 0x59, 0x9c, 0x99, 0x20, 0x36, 0x45, 0x7b, 0xcb, 0x55, 0xf3, 0x0d,
|
||||
0xd0, 0x79, 0x40, 0x7b, 0x98, 0xdf, 0xff, 0x4f, 0x81, 0xda, 0x54, 0x99, 0xd1, 0x5d, 0xa8, 0xec,
|
||||
0x76, 0x1e, 0x76, 0xb6, 0xbe, 0xec, 0x2c, 0x94, 0x4c, 0xf3, 0xe8, 0x85, 0xbd, 0x3c, 0x45, 0xec,
|
||||
0x26, 0xfd, 0x84, 0x1c, 0x26, 0xc8, 0x83, 0xc5, 0xed, 0x9d, 0x2d, 0xbf, 0xb5, 0xb7, 0xb6, 0xbe,
|
||||
0xd3, 0xde, 0xea, 0xec, 0xad, 0xfb, 0xad, 0xb5, 0x9d, 0xd6, 0x82, 0x62, 0xde, 0x3a, 0x7a, 0x61,
|
||||
0x2f, 0x4d, 0x89, 0xd6, 0x29, 0x0e, 0x38, 0xbe, 0xa0, 0xd9, 0x7d, 0xb2, 0x21, 0x34, 0x6a, 0xa1,
|
||||
0x66, 0x77, 0x18, 0x15, 0x69, 0xfc, 0xd6, 0xe3, 0xad, 0x2f, 0x5a, 0x0b, 0xe5, 0x42, 0x8d, 0x2f,
|
||||
0xa7, 0x81, 0xf9, 0xf6, 0xf7, 0xbf, 0x5a, 0xa5, 0xdf, 0x7f, 0xb3, 0xa6, 0xb3, 0xf3, 0x7e, 0x50,
|
||||
0x41, 0x13, 0xaf, 0x3a, 0x3a, 0x52, 0x00, 0x5d, 0xec, 0x42, 0xb4, 0x5a, 0x54, 0xc1, 0x4b, 0x7b,
|
||||
0xdf, 0x74, 0xaf, 0x8b, 0x67, 0xcd, 0xbd, 0xf4, 0xc7, 0xcb, 0x7f, 0x7f, 0x51, 0x6b, 0x30, 0x27,
|
||||
0xf9, 0xd5, 0x41, 0x90, 0x04, 0x3d, 0x4c, 0xd1, 0x77, 0x30, 0xff, 0x7a, 0xd7, 0xa2, 0x7b, 0x85,
|
||||
0x83, 0xaa, 0x68, 0x2e, 0x98, 0xf7, 0xaf, 0x83, 0xce, 0xf4, 0xf7, 0xfe, 0x52, 0x60, 0xfe, 0x6c,
|
||||
0x0a, 0xb2, 0x67, 0xf1, 0x10, 0x7d, 0x05, 0x9a, 0x98, 0xef, 0xa8, 0xb0, 0xc7, 0xcf, 0x7d, 0x1d,
|
||||
0x4c, 0xfb, 0x72, 0x60, 0x76, 0xd2, 0x21, 0xdc, 0x90, 0x53, 0x16, 0x15, 0x46, 0x38, 0x3f, 0xc4,
|
||||
0xcd, 0xdb, 0x33, 0x88, 0x99, 0x26, 0x4d, 0xe3, 0xf8, 0x95, 0x55, 0xfa, 0xfb, 0x95, 0x55, 0x7a,
|
||||
0x3e, 0xb1, 0x94, 0xe3, 0x89, 0xa5, 0xfc, 0x39, 0xb1, 0x94, 0x7f, 0x26, 0x96, 0xf2, 0xb4, 0xfc,
|
||||
0x54, 0xeb, 0xea, 0xf2, 0x03, 0xfd, 0xe1, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x5a, 0xbd,
|
||||
0x08, 0x38, 0x08, 0x00, 0x00,
|
||||
// 933 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x96, 0x41, 0x73, 0xdb, 0x44,
|
||||
0x14, 0xc7, 0x25, 0x5b, 0xb5, 0x9b, 0xe7, 0x26, 0xce, 0x6c, 0x48, 0x50, 0x45, 0x51, 0x5c, 0x95,
|
||||
0x99, 0xba, 0x1d, 0x22, 0x0f, 0x86, 0x19, 0x98, 0x42, 0x0f, 0x71, 0xe2, 0x19, 0x9b, 0xb6, 0x4e,
|
||||
0x47, 0x49, 0xa0, 0xb7, 0x20, 0x4b, 0x5b, 0x57, 0xd8, 0xd6, 0x9a, 0xdd, 0xb5, 0x03, 0x17, 0xa6,
|
||||
0x47, 0x26, 0x27, 0x66, 0x98, 0x01, 0x2e, 0x3d, 0xc1, 0xb9, 0x1f, 0x80, 0x4f, 0x90, 0xe1, 0xc4,
|
||||
0x0d, 0x4e, 0x19, 0xea, 0x0f, 0x00, 0x5f, 0x81, 0xd9, 0x95, 0x64, 0x07, 0x47, 0x76, 0x73, 0xb1,
|
||||
0xd7, 0xbb, 0xbf, 0xff, 0xfb, 0xef, 0xee, 0xd3, 0x7b, 0x32, 0x00, 0x75, 0x9f, 0x72, 0x7b, 0x40,
|
||||
0x09, 0x27, 0x08, 0xf9, 0xc4, 0xeb, 0x62, 0x6a, 0xb3, 0x63, 0x97, 0xf6, 0xbb, 0x01, 0xb7, 0x47,
|
||||
0xef, 0x19, 0xcb, 0xa4, 0xfd, 0x25, 0xf6, 0x38, 0x8b, 0x10, 0xa3, 0xc0, 0xbf, 0x19, 0xe0, 0xe4,
|
||||
0xc7, 0x56, 0x27, 0xe0, 0xcf, 0x86, 0x6d, 0xdb, 0x23, 0xfd, 0x8a, 0x47, 0x28, 0x26, 0xac, 0x82,
|
||||
0xb9, 0xe7, 0x57, 0x44, 0x48, 0xf9, 0x31, 0x68, 0x57, 0xa6, 0xe1, 0x8d, 0x37, 0x3a, 0xa4, 0x43,
|
||||
0xe4, 0xb0, 0x22, 0x46, 0xf1, 0xec, 0xda, 0xa0, 0x37, 0xec, 0x04, 0x61, 0x25, 0xfa, 0x8a, 0x26,
|
||||
0xad, 0x97, 0x2a, 0x80, 0xe3, 0x3e, 0xe5, 0x8f, 0x70, 0xbf, 0x8d, 0x29, 0xba, 0x05, 0x79, 0x11,
|
||||
0xe7, 0x28, 0xf0, 0x75, 0xb5, 0xa4, 0x96, 0xb5, 0x1a, 0x8c, 0xcf, 0x36, 0x73, 0x02, 0x68, 0xee,
|
||||
0x3a, 0x39, 0xb1, 0xd4, 0xf4, 0x05, 0x14, 0x12, 0x1f, 0x0b, 0x28, 0x53, 0x52, 0xcb, 0x4b, 0x11,
|
||||
0xd4, 0x22, 0x3e, 0x16, 0x90, 0x58, 0x6a, 0xfa, 0x08, 0x81, 0xe6, 0xfa, 0x3e, 0xd5, 0xb3, 0x82,
|
||||
0x70, 0xe4, 0x18, 0xd5, 0x20, 0xc7, 0xb8, 0xcb, 0x87, 0x4c, 0xd7, 0x4a, 0x6a, 0xb9, 0x50, 0x7d,
|
||||
0xc7, 0xbe, 0x78, 0x0f, 0xf6, 0x74, 0x37, 0xfb, 0x92, 0xad, 0x69, 0xa7, 0x67, 0x9b, 0x8a, 0x13,
|
||||
0x2b, 0xad, 0x9b, 0x50, 0xf8, 0x94, 0x04, 0xa1, 0x83, 0xbf, 0x1a, 0x62, 0xc6, 0x27, 0x36, 0xea,
|
||||
0xd4, 0xc6, 0xfa, 0x51, 0x85, 0x6b, 0x11, 0xc3, 0x06, 0x24, 0x64, 0xf8, 0x72, 0xa7, 0xfa, 0x08,
|
||||
0xf2, 0x7d, 0x69, 0xcb, 0xf4, 0x4c, 0x29, 0x5b, 0x2e, 0x54, 0xcd, 0xc5, 0xbb, 0x73, 0x12, 0x1c,
|
||||
0xdd, 0x86, 0x22, 0xc5, 0x7d, 0x32, 0xc2, 0xfe, 0x51, 0x12, 0x21, 0x5b, 0xca, 0x96, 0x35, 0x67,
|
||||
0x25, 0x9e, 0x8e, 0x04, 0xcc, 0xaa, 0xc1, 0xb5, 0x87, 0xd8, 0x1d, 0xe1, 0x64, 0xf3, 0x55, 0xd0,
|
||||
0xc4, 0x6d, 0xc9, 0x4d, 0xbd, 0xde, 0x4f, 0xb2, 0x56, 0x11, 0x96, 0xe3, 0x18, 0xd1, 0xe1, 0xac,
|
||||
0x87, 0x70, 0xfd, 0x31, 0x25, 0x1e, 0x66, 0x2c, 0x62, 0x19, 0x73, 0x3b, 0x13, 0x87, 0x3b, 0xe2,
|
||||
0x50, 0x72, 0x26, 0x36, 0x29, 0xda, 0xd1, 0xe3, 0x62, 0x27, 0x60, 0xb2, 0x7e, 0x4f, 0x7b, 0xfe,
|
||||
0x93, 0xa5, 0x58, 0x37, 0xc0, 0x48, 0x8b, 0x16, 0x7b, 0x7d, 0x02, 0xeb, 0x0e, 0x66, 0xa4, 0x37,
|
||||
0xc2, 0xdb, 0xbe, 0x4f, 0x05, 0x14, 0xfb, 0x5c, 0xe6, 0x86, 0xad, 0x77, 0x61, 0x63, 0x56, 0x1d,
|
||||
0x27, 0x28, 0x2d, 0x8b, 0x3d, 0x58, 0x6b, 0x86, 0x1c, 0xd3, 0xd0, 0xed, 0x89, 0x38, 0x89, 0xd3,
|
||||
0x06, 0x64, 0x26, 0x26, 0xb9, 0xf1, 0xd9, 0x66, 0xa6, 0xb9, 0xeb, 0x64, 0x02, 0x1f, 0xdd, 0x87,
|
||||
0x9c, 0xeb, 0xf1, 0x80, 0x84, 0x71, 0xf6, 0x36, 0xd3, 0x6e, 0x73, 0x9f, 0x13, 0x8a, 0xb7, 0x25,
|
||||
0x96, 0x3c, 0x56, 0x91, 0xc8, 0xfa, 0x5e, 0x83, 0xc2, 0xb9, 0x55, 0xf4, 0xf1, 0x24, 0x9c, 0xb0,
|
||||
0x5a, 0xa9, 0xde, 0x7a, 0x4d, 0xb8, 0x07, 0x41, 0xe8, 0x27, 0xc1, 0x90, 0x1d, 0xe7, 0x35, 0x23,
|
||||
0xaf, 0x5c, 0x4f, 0x93, 0x8a, 0x6a, 0x69, 0x28, 0x51, 0x4e, 0xd1, 0x87, 0x90, 0x67, 0x98, 0x8e,
|
||||
0x02, 0x0f, 0xcb, 0x72, 0x29, 0x54, 0xdf, 0x4a, 0x75, 0x8b, 0x90, 0x86, 0xe2, 0x24, 0xb4, 0x30,
|
||||
0xe2, 0x2e, 0xeb, 0xc6, 0xe5, 0x94, 0x6a, 0x74, 0xe0, 0xb2, 0xae, 0x30, 0x12, 0x9c, 0x30, 0x0a,
|
||||
0x31, 0x3f, 0x26, 0xb4, 0xab, 0x5f, 0x99, 0x6f, 0xd4, 0x8a, 0x10, 0x61, 0x14, 0xd3, 0x42, 0xe8,
|
||||
0xf5, 0x86, 0x8c, 0x63, 0xaa, 0xe7, 0xe6, 0x0b, 0x77, 0x22, 0x44, 0x08, 0x63, 0x1a, 0x7d, 0x00,
|
||||
0x39, 0x86, 0x3d, 0x8a, 0xb9, 0x9e, 0x97, 0x3a, 0x23, 0xfd, 0x64, 0x82, 0x68, 0x88, 0x22, 0x97,
|
||||
0x23, 0x74, 0x0f, 0xae, 0x52, 0xcc, 0xc8, 0x90, 0x7a, 0x58, 0xbf, 0x2a, 0x75, 0x37, 0x52, 0x8b,
|
||||
0x23, 0x66, 0x1a, 0x8a, 0x33, 0xe1, 0xd1, 0x7d, 0x58, 0xc2, 0x5f, 0x73, 0x1c, 0x32, 0x91, 0xbc,
|
||||
0x25, 0x29, 0x7e, 0x3b, 0x4d, 0x5c, 0x4f, 0xa0, 0x86, 0xe2, 0x4c, 0x15, 0xb5, 0xab, 0x90, 0xe3,
|
||||
0x2e, 0xed, 0x60, 0x7e, 0xf7, 0x5f, 0x15, 0x8a, 0x33, 0x19, 0x46, 0xb7, 0x21, 0x7f, 0xd8, 0x7a,
|
||||
0xd0, 0xda, 0xfb, 0xbc, 0xb5, 0xaa, 0x18, 0xc6, 0xc9, 0x8b, 0xd2, 0xc6, 0x0c, 0x71, 0x18, 0x76,
|
||||
0x43, 0x72, 0x1c, 0xa2, 0x2a, 0xac, 0xed, 0x1f, 0xec, 0x39, 0xf5, 0xa3, 0xed, 0x9d, 0x83, 0xe6,
|
||||
0x5e, 0xeb, 0x68, 0xc7, 0xa9, 0x6f, 0x1f, 0xd4, 0x57, 0x55, 0xe3, 0xfa, 0xc9, 0x8b, 0xd2, 0xfa,
|
||||
0x8c, 0x68, 0x87, 0x62, 0x97, 0xe3, 0x0b, 0x9a, 0xc3, 0xc7, 0xbb, 0x42, 0x93, 0x49, 0xd5, 0x1c,
|
||||
0x0e, 0xfc, 0x34, 0x8d, 0x53, 0x7f, 0xb4, 0xf7, 0x59, 0x7d, 0x35, 0x9b, 0xaa, 0x71, 0x64, 0x3b,
|
||||
0x32, 0xde, 0xfc, 0xee, 0x17, 0x53, 0xf9, 0xed, 0x57, 0x73, 0xf6, 0x74, 0xd5, 0x1f, 0x32, 0xa0,
|
||||
0x89, 0x5a, 0x43, 0x27, 0x2a, 0xa0, 0x8b, 0x6d, 0x00, 0x6d, 0xa5, 0xdd, 0xe3, 0xdc, 0xe6, 0x63,
|
||||
0xd8, 0x97, 0xc5, 0xe3, 0xee, 0xb2, 0xfe, 0xfb, 0xcb, 0x7f, 0x7e, 0xce, 0x14, 0x61, 0x59, 0xf2,
|
||||
0x5b, 0x7d, 0x37, 0x74, 0x3b, 0x98, 0xa2, 0x6f, 0x61, 0xe5, 0xff, 0x6d, 0x03, 0xdd, 0x99, 0xf7,
|
||||
0x30, 0x5c, 0x68, 0x4c, 0xc6, 0xdd, 0xcb, 0xa0, 0x0b, 0xfd, 0xab, 0x7f, 0xaa, 0xb0, 0x32, 0x6d,
|
||||
0xc3, 0xec, 0x59, 0x30, 0x40, 0x5f, 0x80, 0x26, 0x5e, 0x30, 0x28, 0xb5, 0xc9, 0x9c, 0x7b, 0x3d,
|
||||
0x19, 0xa5, 0xf9, 0xc0, 0xe2, 0x43, 0x7b, 0x70, 0x45, 0xb6, 0x79, 0x94, 0x1a, 0xe1, 0xfc, 0x5b,
|
||||
0xc4, 0xb8, 0xb9, 0x80, 0x58, 0x68, 0x52, 0xd3, 0x4f, 0x5f, 0x99, 0xca, 0x5f, 0xaf, 0x4c, 0xe5,
|
||||
0xf9, 0xd8, 0x54, 0x4f, 0xc7, 0xa6, 0xfa, 0xc7, 0xd8, 0x54, 0xff, 0x1e, 0x9b, 0xea, 0x93, 0xec,
|
||||
0x13, 0xad, 0x9d, 0x93, 0xff, 0x10, 0xde, 0xff, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x98, 0x55, 0x8d,
|
||||
0x81, 0xb9, 0x08, 0x00, 0x00,
|
||||
}
|
||||
|
|
4 vendor/github.com/docker/swarmkit/api/raft.proto generated vendored
|
@ -98,7 +98,7 @@ message ResolveAddressResponse {

message InternalRaftRequest {
	uint64 id = 1;

	repeated StoreAction action = 2;
	repeated StoreAction action = 2 [(gogoproto.nullable) = false];
}

// TODO(stevvooe): Storage actions may belong in another protobuf file. They

@ -125,5 +125,7 @@ message StoreAction {
		Network network = 5;
		Cluster cluster = 6;
		Secret secret = 7;
		Resource resource = 8;
		Extension extension = 9;
	}
}
|
|
187 vendor/github.com/docker/swarmkit/api/snapshot.pb.go generated vendored
|
@ -43,12 +43,14 @@ func (Snapshot_Version) EnumDescriptor() ([]byte, []int) { return fileDescriptor

// StoreSnapshot is used to store snapshots of the store.
type StoreSnapshot struct {
	Nodes    []*Node    `protobuf:"bytes,1,rep,name=nodes" json:"nodes,omitempty"`
	Services []*Service `protobuf:"bytes,2,rep,name=services" json:"services,omitempty"`
	Networks []*Network `protobuf:"bytes,3,rep,name=networks" json:"networks,omitempty"`
	Tasks    []*Task    `protobuf:"bytes,4,rep,name=tasks" json:"tasks,omitempty"`
	Clusters []*Cluster `protobuf:"bytes,5,rep,name=clusters" json:"clusters,omitempty"`
	Secrets  []*Secret  `protobuf:"bytes,6,rep,name=secrets" json:"secrets,omitempty"`
	Nodes      []*Node      `protobuf:"bytes,1,rep,name=nodes" json:"nodes,omitempty"`
	Services   []*Service   `protobuf:"bytes,2,rep,name=services" json:"services,omitempty"`
	Networks   []*Network   `protobuf:"bytes,3,rep,name=networks" json:"networks,omitempty"`
	Tasks      []*Task      `protobuf:"bytes,4,rep,name=tasks" json:"tasks,omitempty"`
	Clusters   []*Cluster   `protobuf:"bytes,5,rep,name=clusters" json:"clusters,omitempty"`
	Secrets    []*Secret    `protobuf:"bytes,6,rep,name=secrets" json:"secrets,omitempty"`
	Resources  []*Resource  `protobuf:"bytes,7,rep,name=resources" json:"resources,omitempty"`
	Extensions []*Extension `protobuf:"bytes,8,rep,name=extensions" json:"extensions,omitempty"`
}
|
||||
|
||||
func (m *StoreSnapshot) Reset() { *m = StoreSnapshot{} }
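
The snapshot now carries the two new object collections alongside the pre-existing ones. A construction sketch, assuming the vendored api package; the empty literals are placeholders rather than real cluster objects:

// Illustrative sketch: a StoreSnapshot built with the new Resources and
// Extensions slices next to the existing collections.
package main

import (
	"fmt"

	api "github.com/docker/swarmkit/api"
)

func main() {
	snap := api.StoreSnapshot{
		Nodes:      []*api.Node{},
		Secrets:    []*api.Secret{},
		Resources:  []*api.Resource{{}},  // new field 7
		Extensions: []*api.Extension{{}}, // new field 8
	}
	fmt.Println("resources:", len(snap.Resources), "extensions:", len(snap.Extensions))
}
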
|
||||
|
@ -143,6 +145,22 @@ func (m *StoreSnapshot) CopyFrom(src interface{}) {
|
|||
}
|
||||
}
|
||||
|
||||
if o.Resources != nil {
|
||||
m.Resources = make([]*Resource, len(o.Resources))
|
||||
for i := range m.Resources {
|
||||
m.Resources[i] = &Resource{}
|
||||
github_com_docker_swarmkit_api_deepcopy.Copy(m.Resources[i], o.Resources[i])
|
||||
}
|
||||
}
|
||||
|
||||
if o.Extensions != nil {
|
||||
m.Extensions = make([]*Extension, len(o.Extensions))
|
||||
for i := range m.Extensions {
|
||||
m.Extensions[i] = &Extension{}
|
||||
github_com_docker_swarmkit_api_deepcopy.Copy(m.Extensions[i], o.Extensions[i])
|
||||
}
|
||||
}
|
||||
|
||||
}
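
CopyFrom allocates fresh Resource and Extension values and fills them through the deepcopy helper, so a copied snapshot shares no pointers with the original. A sketch of the observable effect, assuming StoreSnapshot.Copy follows the same generated pattern as the other messages in this package:

// Illustrative sketch: the copy points at distinct Resource values rather than
// aliasing the originals.
package main

import (
	"fmt"

	api "github.com/docker/swarmkit/api"
)

func main() {
	orig := &api.StoreSnapshot{Resources: []*api.Resource{{}}}
	dup := orig.Copy()
	fmt.Println("same backing object:", dup.Resources[0] == orig.Resources[0]) // false
}
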
|
||||
|
||||
func (m *ClusterSnapshot) Copy() *ClusterSnapshot {
|
||||
|
@ -277,6 +295,30 @@ func (m *StoreSnapshot) MarshalTo(dAtA []byte) (int, error) {
|
|||
i += n
|
||||
}
|
||||
}
|
||||
if len(m.Resources) > 0 {
|
||||
for _, msg := range m.Resources {
|
||||
dAtA[i] = 0x3a
|
||||
i++
|
||||
i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size()))
|
||||
n, err := msg.MarshalTo(dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n
|
||||
}
|
||||
}
|
||||
if len(m.Extensions) > 0 {
|
||||
for _, msg := range m.Extensions {
|
||||
dAtA[i] = 0x42
|
||||
i++
|
||||
i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size()))
|
||||
n, err := msg.MarshalTo(dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n
|
||||
}
|
||||
}
|
||||
return i, nil
|
||||
}
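
The literal key bytes written above come from the protobuf tag formula key = fieldNumber<<3 | wireType: resources is field 7 with wire type 2 (length-delimited), giving 0x3a, and extensions is field 8, giving 0x42. A standalone check of that arithmetic (no swarmkit imports needed):

// Standalone check of the tag bytes used in the generated MarshalTo above.
package main

import "fmt"

func key(field, wire uint64) uint64 { return field<<3 | wire }

func main() {
	fmt.Printf("resources  (field 7): 0x%x\n", key(7, 2)) // 0x3a
	fmt.Printf("extensions (field 8): 0x%x\n", key(8, 2)) // 0x42
}
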
|
||||
|
||||
|
@ -433,6 +475,18 @@ func (m *StoreSnapshot) Size() (n int) {
|
|||
n += 1 + l + sovSnapshot(uint64(l))
|
||||
}
|
||||
}
|
||||
if len(m.Resources) > 0 {
|
||||
for _, e := range m.Resources {
|
||||
l = e.Size()
|
||||
n += 1 + l + sovSnapshot(uint64(l))
|
||||
}
|
||||
}
|
||||
if len(m.Extensions) > 0 {
|
||||
for _, e := range m.Extensions {
|
||||
l = e.Size()
|
||||
n += 1 + l + sovSnapshot(uint64(l))
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
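
Each new repeated element contributes 1 + l + sovSnapshot(uint64(l)) bytes: one key byte (fields 7 and 8 both fit in a single byte), the payload of length l, and the varint that encodes l. A minimal equivalent of the sov* helper for reference; this is a sketch of the usual generated function, not the vendored code itself:

// Minimal equivalent of the generated sov* helper: the number of bytes needed
// to encode x as a protobuf varint (7 payload bits per byte).
package main

import "fmt"

func sov(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

func main() {
	fmt.Println(sov(0), sov(127), sov(128), sov(1<<21)) // 1 1 2 4
}
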
|
||||
|
||||
|
@ -492,6 +546,8 @@ func (this *StoreSnapshot) String() string {
|
|||
`Tasks:` + strings.Replace(fmt.Sprintf("%v", this.Tasks), "Task", "Task", 1) + `,`,
|
||||
`Clusters:` + strings.Replace(fmt.Sprintf("%v", this.Clusters), "Cluster", "Cluster", 1) + `,`,
|
||||
`Secrets:` + strings.Replace(fmt.Sprintf("%v", this.Secrets), "Secret", "Secret", 1) + `,`,
|
||||
`Resources:` + strings.Replace(fmt.Sprintf("%v", this.Resources), "Resource", "Resource", 1) + `,`,
|
||||
`Extensions:` + strings.Replace(fmt.Sprintf("%v", this.Extensions), "Extension", "Extension", 1) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
|
@ -742,6 +798,68 @@ func (m *StoreSnapshot) Unmarshal(dAtA []byte) error {
|
|||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 7:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowSnapshot
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthSnapshot
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Resources = append(m.Resources, &Resource{})
|
||||
if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 8:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Extensions", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowSnapshot
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthSnapshot
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Extensions = append(m.Extensions, &Extension{})
|
||||
if err := m.Extensions[len(m.Extensions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipSnapshot(dAtA[iNdEx:])
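
Cases 7 and 8 above are the decode side of the new fields; together with the MarshalTo changes earlier in this file, a snapshot containing resources and extensions survives a round trip. A sketch, assuming the vendored api package:

// Illustrative sketch: encode a snapshot with the new collections and decode it
// back, exercising the case 7/8 branches above.
package main

import (
	"fmt"

	api "github.com/docker/swarmkit/api"
)

func main() {
	in := api.StoreSnapshot{
		Resources:  []*api.Resource{{}},
		Extensions: []*api.Extension{{}},
	}
	data, err := in.Marshal()
	if err != nil {
		panic(err)
	}
	var out api.StoreSnapshot
	if err := out.Unmarshal(data); err != nil {
		panic(err)
	}
	fmt.Println(len(out.Resources), len(out.Extensions)) // 1 1
}
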
|
||||
|
@ -1143,31 +1261,34 @@ var (
|
|||
func init() { proto.RegisterFile("snapshot.proto", fileDescriptorSnapshot) }
|
||||
|
||||
var fileDescriptorSnapshot = []byte{
|
||||
// 404 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x7c, 0x92, 0xbd, 0x6e, 0xd4, 0x40,
|
||||
0x10, 0xc7, 0xbd, 0xbe, 0x0f, 0x47, 0x13, 0x25, 0xc0, 0x8a, 0x62, 0x65, 0x24, 0x73, 0x18, 0x8a,
|
||||
0xab, 0x0c, 0x1c, 0x48, 0xd0, 0x40, 0x11, 0x2a, 0x0a, 0x52, 0xec, 0xa1, 0x88, 0xd6, 0x67, 0x4f,
|
||||
0x12, 0x63, 0xec, 0x3d, 0xed, 0x2c, 0x4e, 0xcb, 0xe3, 0x5d, 0x99, 0x92, 0x0a, 0x11, 0x37, 0xbc,
|
||||
0x06, 0xb2, 0xd7, 0xb6, 0x4e, 0xc2, 0x47, 0x37, 0xb6, 0x7e, 0xff, 0x8f, 0x1d, 0x0d, 0x9c, 0x52,
|
||||
0x19, 0x6f, 0xe9, 0x5a, 0x99, 0x68, 0xab, 0x95, 0x51, 0x9c, 0xa7, 0x2a, 0xc9, 0x51, 0x47, 0x74,
|
||||
0x13, 0xeb, 0x22, 0xcf, 0x4c, 0x54, 0xbd, 0xf4, 0x4f, 0xd4, 0xe6, 0x2b, 0x26, 0x86, 0x2c, 0xe2,
|
||||
0x83, 0x8e, 0x2f, 0x3b, 0xdc, 0x7f, 0x78, 0xa5, 0xae, 0x54, 0x3b, 0x3e, 0x6f, 0x26, 0xfb, 0x37,
|
||||
0xbc, 0x75, 0xe1, 0x64, 0x6d, 0x94, 0xc6, 0x75, 0x67, 0xce, 0x23, 0x98, 0x95, 0x2a, 0x45, 0x12,
|
||||
0x6c, 0x31, 0x59, 0x1e, 0xaf, 0x44, 0xf4, 0x6f, 0x4c, 0x74, 0xae, 0x52, 0x94, 0x16, 0xe3, 0x6f,
|
||||
0xe0, 0x88, 0x50, 0x57, 0x59, 0x82, 0x24, 0xdc, 0x56, 0xf2, 0x68, 0x4c, 0xb2, 0xb6, 0x8c, 0x1c,
|
||||
0xe0, 0x46, 0x58, 0xa2, 0xb9, 0x51, 0x3a, 0x27, 0x31, 0x39, 0x2c, 0x3c, 0xb7, 0x8c, 0x1c, 0xe0,
|
||||
0xa6, 0xa1, 0x89, 0x29, 0x27, 0x31, 0x3d, 0xdc, 0xf0, 0x73, 0x4c, 0xb9, 0xb4, 0x58, 0x13, 0x94,
|
||||
0x7c, 0xfb, 0x4e, 0x06, 0x35, 0x89, 0xd9, 0xe1, 0xa0, 0x0f, 0x96, 0x91, 0x03, 0xcc, 0x5f, 0x83,
|
||||
0x47, 0x98, 0x68, 0x34, 0x24, 0xe6, 0xad, 0xce, 0x1f, 0x7f, 0x59, 0x83, 0xc8, 0x1e, 0x0d, 0x11,
|
||||
0xee, 0x75, 0x56, 0xc3, 0x4e, 0xdf, 0x82, 0x57, 0x60, 0xb1, 0x69, 0x0a, 0xd8, 0xad, 0x06, 0x63,
|
||||
0x46, 0x32, 0xbe, 0x34, 0x9f, 0x5a, 0x4c, 0xf6, 0x38, 0x17, 0xe0, 0x69, 0x2c, 0x54, 0x85, 0x69,
|
||||
0xbb, 0xdc, 0xa9, 0xec, 0x3f, 0xc3, 0x3f, 0x0c, 0x8e, 0x86, 0x80, 0xf7, 0xe0, 0x55, 0xa8, 0x29,
|
||||
0x53, 0xa5, 0x60, 0x0b, 0xb6, 0x3c, 0x5d, 0x3d, 0x1b, 0x6d, 0xda, 0x1f, 0xd0, 0x85, 0x65, 0x65,
|
||||
0x2f, 0xe2, 0x1f, 0x01, 0xba, 0xc4, 0xeb, 0x6c, 0x2b, 0xdc, 0x05, 0x5b, 0x1e, 0xaf, 0x9e, 0xfe,
|
||||
0x67, 0x49, 0xbd, 0xd3, 0xd9, 0x74, 0xf7, 0xeb, 0xb1, 0x23, 0xf7, 0xc4, 0xfc, 0x1d, 0xcc, 0xa8,
|
||||
0x39, 0x28, 0x31, 0x69, 0x5d, 0x9e, 0x8c, 0x16, 0xd9, 0xbf, 0xb8, 0xce, 0xc3, 0xaa, 0xc2, 0x07,
|
||||
0xe0, 0x75, 0xed, 0xf8, 0x1c, 0xdc, 0x8b, 0x17, 0xf7, 0x9d, 0x33, 0xb1, 0xbb, 0x0b, 0x9c, 0x9f,
|
||||
0x77, 0x81, 0xf3, 0xa3, 0x0e, 0xd8, 0xae, 0x0e, 0xd8, 0x6d, 0x1d, 0xb0, 0xdf, 0x75, 0xc0, 0xbe,
|
||||
0xb8, 0x9b, 0x79, 0x7b, 0xc6, 0xaf, 0xfe, 0x06, 0x00, 0x00, 0xff, 0xff, 0xa4, 0x5b, 0x6d, 0xab,
|
||||
0x1d, 0x03, 0x00, 0x00,
|
||||
// 449 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x7c, 0x93, 0xbf, 0x6f, 0xd3, 0x40,
|
||||
0x14, 0xc7, 0x73, 0xf9, 0xe5, 0xf0, 0xaa, 0x16, 0x38, 0x31, 0x9c, 0x02, 0x98, 0x10, 0x18, 0x32,
|
||||
0x19, 0x08, 0x48, 0x20, 0xa4, 0x32, 0x14, 0x31, 0x30, 0xd0, 0xe1, 0x82, 0x2a, 0x56, 0xc7, 0x79,
|
||||
0x6d, 0x83, 0x89, 0x2f, 0xba, 0x77, 0x75, 0x19, 0xf9, 0xf3, 0x22, 0xb1, 0x30, 0x32, 0x21, 0xea,
|
||||
0x85, 0x7f, 0x03, 0xdd, 0x9d, 0x6d, 0x22, 0xe1, 0x74, 0xbb, 0x44, 0x9f, 0xcf, 0xfb, 0xbe, 0xbb,
|
||||
0xf7, 0x0c, 0x07, 0x94, 0xc5, 0x6b, 0x3a, 0x57, 0x26, 0x5a, 0x6b, 0x65, 0x14, 0xe7, 0x0b, 0x95,
|
||||
0xa4, 0xa8, 0x23, 0xba, 0x8c, 0xf5, 0x2a, 0x5d, 0x9a, 0x28, 0x7f, 0x36, 0xdc, 0x57, 0xf3, 0xcf,
|
||||
0x98, 0x18, 0xf2, 0xc8, 0x10, 0x74, 0x7c, 0x5a, 0xe2, 0xc3, 0x3b, 0x67, 0xea, 0x4c, 0xb9, 0xe3,
|
||||
0x13, 0x7b, 0xf2, 0xff, 0x8e, 0xbf, 0x77, 0x60, 0x7f, 0x66, 0x94, 0xc6, 0x59, 0x59, 0x9c, 0x47,
|
||||
0xd0, 0xcb, 0xd4, 0x02, 0x49, 0xb0, 0x51, 0x67, 0xb2, 0x37, 0x15, 0xd1, 0xff, 0x31, 0xd1, 0xb1,
|
||||
0x5a, 0xa0, 0xf4, 0x18, 0x7f, 0x09, 0x03, 0x42, 0x9d, 0x2f, 0x13, 0x24, 0xd1, 0x76, 0xca, 0xdd,
|
||||
0x26, 0x65, 0xe6, 0x19, 0x59, 0xc3, 0x56, 0xcc, 0xd0, 0x5c, 0x2a, 0x9d, 0x92, 0xe8, 0xec, 0x16,
|
||||
0x8f, 0x3d, 0x23, 0x6b, 0xd8, 0x76, 0x68, 0x62, 0x4a, 0x49, 0x74, 0x77, 0x77, 0xf8, 0x31, 0xa6,
|
||||
0x54, 0x7a, 0xcc, 0x06, 0x25, 0x5f, 0x2e, 0xc8, 0xa0, 0x26, 0xd1, 0xdb, 0x1d, 0xf4, 0xd6, 0x33,
|
||||
0xb2, 0x86, 0xf9, 0x0b, 0x08, 0x08, 0x13, 0x8d, 0x86, 0x44, 0xdf, 0x79, 0xc3, 0xe6, 0x9b, 0x59,
|
||||
0x44, 0x56, 0x28, 0x7f, 0x0d, 0x37, 0x34, 0x92, 0xba, 0xd0, 0xf6, 0x45, 0x02, 0xe7, 0xdd, 0x6b,
|
||||
0xf2, 0x64, 0x09, 0xc9, 0x7f, 0x38, 0x3f, 0x04, 0xc0, 0xaf, 0x06, 0x33, 0x5a, 0xaa, 0x8c, 0xc4,
|
||||
0xc0, 0xc9, 0xf7, 0x9b, 0xe4, 0x77, 0x15, 0x25, 0xb7, 0x84, 0x31, 0xc2, 0xcd, 0xf2, 0x16, 0xf5,
|
||||
0x38, 0x5f, 0x41, 0xb0, 0xc2, 0xd5, 0xdc, 0xde, 0xdd, 0x0f, 0x34, 0x6c, 0xec, 0x25, 0x3e, 0x35,
|
||||
0x1f, 0x1c, 0x26, 0x2b, 0x9c, 0x0b, 0x08, 0x34, 0xae, 0x54, 0x8e, 0x0b, 0x37, 0xd7, 0xae, 0xac,
|
||||
0x7e, 0x8e, 0xff, 0x30, 0x18, 0xd4, 0x01, 0x6f, 0x20, 0xc8, 0x51, 0xdb, 0x7c, 0xc1, 0x46, 0x6c,
|
||||
0x72, 0x30, 0x7d, 0xdc, 0xf8, 0x48, 0xd5, 0xee, 0x9e, 0x78, 0x56, 0x56, 0x12, 0x7f, 0x0f, 0x50,
|
||||
0x26, 0x9e, 0x2f, 0xd7, 0xa2, 0x3d, 0x62, 0x93, 0xbd, 0xe9, 0xa3, 0x6b, 0xe6, 0x53, 0x55, 0x3a,
|
||||
0xea, 0x6e, 0x7e, 0x3d, 0x68, 0xc9, 0x2d, 0x99, 0x1f, 0x42, 0x8f, 0xec, 0x2e, 0x8b, 0x8e, 0xab,
|
||||
0xf2, 0xb0, 0xb1, 0x91, 0xed, 0x65, 0x2f, 0x6b, 0x78, 0x6b, 0x7c, 0x1b, 0x82, 0xb2, 0x3b, 0xde,
|
||||
0x87, 0xf6, 0xc9, 0xd3, 0x5b, 0xad, 0x23, 0xb1, 0xb9, 0x0a, 0x5b, 0x3f, 0xaf, 0xc2, 0xd6, 0xb7,
|
||||
0x22, 0x64, 0x9b, 0x22, 0x64, 0x3f, 0x8a, 0x90, 0xfd, 0x2e, 0x42, 0xf6, 0xa9, 0x3d, 0xef, 0xbb,
|
||||
0x2f, 0xe8, 0xf9, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x91, 0xfe, 0xed, 0x18, 0x98, 0x03, 0x00,
|
||||
0x00,
|
||||
}
|
||||
|
|
2 vendor/github.com/docker/swarmkit/api/snapshot.proto generated vendored
|
@ -20,6 +20,8 @@ message StoreSnapshot {
	repeated Task tasks = 4;
	repeated Cluster clusters = 5;
	repeated Secret secrets = 6;
	repeated Resource resources = 7;
	repeated Extension extensions = 8;
}

// ClusterSnapshot stores cluster membership information in snapshots.
|
||||
|
|
639 vendor/github.com/docker/swarmkit/api/specs.pb.go generated vendored
|
@ -9,6 +9,7 @@ import fmt "fmt"
|
|||
import math "math"
|
||||
import _ "github.com/gogo/protobuf/gogoproto"
|
||||
import google_protobuf1 "github.com/gogo/protobuf/types"
|
||||
import google_protobuf3 "github.com/gogo/protobuf/types"
|
||||
|
||||
import github_com_docker_swarmkit_api_deepcopy "github.com/docker/swarmkit/api/deepcopy"
|
||||
|
||||
|
@ -106,7 +107,7 @@ func (x EndpointSpec_ResolutionMode) String() string {
|
|||
return proto.EnumName(EndpointSpec_ResolutionMode_name, int32(x))
|
||||
}
|
||||
func (EndpointSpec_ResolutionMode) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptorSpecs, []int{7, 0}
|
||||
return fileDescriptorSpecs, []int{8, 0}
|
||||
}
|
||||
|
||||
type NodeSpec struct {
|
||||
|
@ -289,6 +290,7 @@ type TaskSpec struct {
|
|||
// Types that are valid to be assigned to Runtime:
|
||||
// *TaskSpec_Attachment
|
||||
// *TaskSpec_Container
|
||||
// *TaskSpec_Generic
|
||||
Runtime isTaskSpec_Runtime `protobuf_oneof:"runtime"`
|
||||
// Resource requirements for the container.
|
||||
Resources *ResourceRequirements `protobuf:"bytes,2,opt,name=resources" json:"resources,omitempty"`
|
||||
|
@ -326,9 +328,13 @@ type TaskSpec_Attachment struct {
|
|||
type TaskSpec_Container struct {
|
||||
Container *ContainerSpec `protobuf:"bytes,1,opt,name=container,oneof"`
|
||||
}
|
||||
type TaskSpec_Generic struct {
|
||||
Generic *GenericRuntimeSpec `protobuf:"bytes,10,opt,name=generic,oneof"`
|
||||
}
|
||||
|
||||
func (*TaskSpec_Attachment) isTaskSpec_Runtime() {}
|
||||
func (*TaskSpec_Container) isTaskSpec_Runtime() {}
|
||||
func (*TaskSpec_Generic) isTaskSpec_Runtime() {}
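
TaskSpec_Generic is the new member of the runtime oneof, wrapping a GenericRuntimeSpec (defined further down) whose payload is an Any. A construction sketch, assuming the vendored api package and the gogo types package; the kind and type URL strings are made-up placeholders, not real swarmkit runtimes:

// Illustrative sketch: selecting the new generic runtime for a task.
package main

import (
	"fmt"

	api "github.com/docker/swarmkit/api"
	gogotypes "github.com/gogo/protobuf/types"
)

func main() {
	spec := api.TaskSpec{
		Runtime: &api.TaskSpec_Generic{
			Generic: &api.GenericRuntimeSpec{
				Kind:    "example-runtime", // placeholder
				Payload: &gogotypes.Any{TypeUrl: "example.com/Config", Value: []byte{}},
			},
		},
	}
	if g := spec.GetGeneric(); g != nil {
		fmt.Println("runtime kind:", g.Kind)
	}
}
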
|
||||
|
||||
func (m *TaskSpec) GetRuntime() isTaskSpec_Runtime {
|
||||
if m != nil {
|
||||
|
@ -351,11 +357,19 @@ func (m *TaskSpec) GetContainer() *ContainerSpec {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (m *TaskSpec) GetGeneric() *GenericRuntimeSpec {
|
||||
if x, ok := m.GetRuntime().(*TaskSpec_Generic); ok {
|
||||
return x.Generic
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// XXX_OneofFuncs is for the internal use of the proto package.
|
||||
func (*TaskSpec) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
|
||||
return _TaskSpec_OneofMarshaler, _TaskSpec_OneofUnmarshaler, _TaskSpec_OneofSizer, []interface{}{
|
||||
(*TaskSpec_Attachment)(nil),
|
||||
(*TaskSpec_Container)(nil),
|
||||
(*TaskSpec_Generic)(nil),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -373,6 +387,11 @@ func _TaskSpec_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
|
|||
if err := b.EncodeMessage(x.Container); err != nil {
|
||||
return err
|
||||
}
|
||||
case *TaskSpec_Generic:
|
||||
_ = b.EncodeVarint(10<<3 | proto.WireBytes)
|
||||
if err := b.EncodeMessage(x.Generic); err != nil {
|
||||
return err
|
||||
}
|
||||
case nil:
|
||||
default:
|
||||
return fmt.Errorf("TaskSpec.Runtime has unexpected type %T", x)
|
||||
|
@ -399,6 +418,14 @@ func _TaskSpec_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffe
|
|||
err := b.DecodeMessage(msg)
|
||||
m.Runtime = &TaskSpec_Container{msg}
|
||||
return true, err
|
||||
case 10: // runtime.generic
|
||||
if wire != proto.WireBytes {
|
||||
return true, proto.ErrInternalBadWireType
|
||||
}
|
||||
msg := new(GenericRuntimeSpec)
|
||||
err := b.DecodeMessage(msg)
|
||||
m.Runtime = &TaskSpec_Generic{msg}
|
||||
return true, err
|
||||
default:
|
||||
return false, nil
|
||||
}
|
||||
|
@ -418,6 +445,11 @@ func _TaskSpec_OneofSizer(msg proto.Message) (n int) {
|
|||
n += proto.SizeVarint(1<<3 | proto.WireBytes)
|
||||
n += proto.SizeVarint(uint64(s))
|
||||
n += s
|
||||
case *TaskSpec_Generic:
|
||||
s := proto.Size(x.Generic)
|
||||
n += proto.SizeVarint(10<<3 | proto.WireBytes)
|
||||
n += proto.SizeVarint(uint64(s))
|
||||
n += s
|
||||
case nil:
|
||||
default:
|
||||
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
|
||||
|
@ -425,6 +457,15 @@ func _TaskSpec_OneofSizer(msg proto.Message) (n int) {
|
|||
return n
|
||||
}
|
||||
|
||||
type GenericRuntimeSpec struct {
|
||||
Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"`
|
||||
Payload *google_protobuf3.Any `protobuf:"bytes,2,opt,name=payload" json:"payload,omitempty"`
|
||||
}
|
||||
|
||||
func (m *GenericRuntimeSpec) Reset() { *m = GenericRuntimeSpec{} }
|
||||
func (*GenericRuntimeSpec) ProtoMessage() {}
|
||||
func (*GenericRuntimeSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{5} }
|
||||
|
||||
// NetworkAttachmentSpec specifies runtime parameters required to attach
|
||||
// a container to a network.
|
||||
type NetworkAttachmentSpec struct {
|
||||
|
@ -435,7 +476,7 @@ type NetworkAttachmentSpec struct {
|
|||
|
||||
func (m *NetworkAttachmentSpec) Reset() { *m = NetworkAttachmentSpec{} }
|
||||
func (*NetworkAttachmentSpec) ProtoMessage() {}
|
||||
func (*NetworkAttachmentSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{5} }
|
||||
func (*NetworkAttachmentSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{6} }
|
||||
|
||||
// Container specifies runtime parameters for a container.
|
||||
type ContainerSpec struct {
|
||||
|
@ -525,7 +566,7 @@ type ContainerSpec struct {
|
|||
|
||||
func (m *ContainerSpec) Reset() { *m = ContainerSpec{} }
|
||||
func (*ContainerSpec) ProtoMessage() {}
|
||||
func (*ContainerSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{6} }
|
||||
func (*ContainerSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{7} }
|
||||
|
||||
// PullOptions allows one to parameterize an image pull.
|
||||
type ContainerSpec_PullOptions struct {
|
||||
|
@ -539,7 +580,7 @@ type ContainerSpec_PullOptions struct {
|
|||
func (m *ContainerSpec_PullOptions) Reset() { *m = ContainerSpec_PullOptions{} }
|
||||
func (*ContainerSpec_PullOptions) ProtoMessage() {}
|
||||
func (*ContainerSpec_PullOptions) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptorSpecs, []int{6, 1}
|
||||
return fileDescriptorSpecs, []int{7, 1}
|
||||
}
|
||||
|
||||
// DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf)
|
||||
|
@ -557,7 +598,7 @@ type ContainerSpec_DNSConfig struct {
|
|||
|
||||
func (m *ContainerSpec_DNSConfig) Reset() { *m = ContainerSpec_DNSConfig{} }
|
||||
func (*ContainerSpec_DNSConfig) ProtoMessage() {}
|
||||
func (*ContainerSpec_DNSConfig) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{6, 2} }
|
||||
func (*ContainerSpec_DNSConfig) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{7, 2} }
|
||||
|
||||
// EndpointSpec defines the properties that can be configured to
|
||||
// access and loadbalance the service.
|
||||
|
@ -570,7 +611,7 @@ type EndpointSpec struct {
|
|||
|
||||
func (m *EndpointSpec) Reset() { *m = EndpointSpec{} }
|
||||
func (*EndpointSpec) ProtoMessage() {}
|
||||
func (*EndpointSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{7} }
|
||||
func (*EndpointSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{8} }
|
||||
|
||||
// NetworkSpec specifies user defined network parameters.
|
||||
type NetworkSpec struct {
|
||||
|
@ -600,7 +641,7 @@ type NetworkSpec struct {
|
|||
|
||||
func (m *NetworkSpec) Reset() { *m = NetworkSpec{} }
|
||||
func (*NetworkSpec) ProtoMessage() {}
|
||||
func (*NetworkSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{8} }
|
||||
func (*NetworkSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{9} }
|
||||
|
||||
// ClusterSpec specifies global cluster settings.
|
||||
type ClusterSpec struct {
|
||||
|
@ -625,7 +666,7 @@ type ClusterSpec struct {
|
|||
|
||||
func (m *ClusterSpec) Reset() { *m = ClusterSpec{} }
|
||||
func (*ClusterSpec) ProtoMessage() {}
|
||||
func (*ClusterSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{9} }
|
||||
func (*ClusterSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{10} }
|
||||
|
||||
// SecretSpec specifies a user-provided secret.
|
||||
type SecretSpec struct {
|
||||
|
@ -636,7 +677,7 @@ type SecretSpec struct {
|
|||
|
||||
func (m *SecretSpec) Reset() { *m = SecretSpec{} }
|
||||
func (*SecretSpec) ProtoMessage() {}
|
||||
func (*SecretSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{10} }
|
||||
func (*SecretSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{11} }
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*NodeSpec)(nil), "docker.swarmkit.v1.NodeSpec")
|
||||
|
@ -644,6 +685,7 @@ func init() {
|
|||
proto.RegisterType((*ReplicatedService)(nil), "docker.swarmkit.v1.ReplicatedService")
|
||||
proto.RegisterType((*GlobalService)(nil), "docker.swarmkit.v1.GlobalService")
|
||||
proto.RegisterType((*TaskSpec)(nil), "docker.swarmkit.v1.TaskSpec")
|
||||
proto.RegisterType((*GenericRuntimeSpec)(nil), "docker.swarmkit.v1.GenericRuntimeSpec")
|
||||
proto.RegisterType((*NetworkAttachmentSpec)(nil), "docker.swarmkit.v1.NetworkAttachmentSpec")
|
||||
proto.RegisterType((*ContainerSpec)(nil), "docker.swarmkit.v1.ContainerSpec")
|
||||
proto.RegisterType((*ContainerSpec_PullOptions)(nil), "docker.swarmkit.v1.ContainerSpec.PullOptions")
|
||||
|
@ -803,11 +845,36 @@ func (m *TaskSpec) CopyFrom(src interface{}) {
|
|||
}
|
||||
github_com_docker_swarmkit_api_deepcopy.Copy(v.Container, o.GetContainer())
|
||||
m.Runtime = &v
|
||||
case *TaskSpec_Generic:
|
||||
v := TaskSpec_Generic{
|
||||
Generic: &GenericRuntimeSpec{},
|
||||
}
|
||||
github_com_docker_swarmkit_api_deepcopy.Copy(v.Generic, o.GetGeneric())
|
||||
m.Runtime = &v
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (m *GenericRuntimeSpec) Copy() *GenericRuntimeSpec {
|
||||
if m == nil {
|
||||
return nil
|
||||
}
|
||||
o := &GenericRuntimeSpec{}
|
||||
o.CopyFrom(m)
|
||||
return o
|
||||
}
|
||||
|
||||
func (m *GenericRuntimeSpec) CopyFrom(src interface{}) {
|
||||
|
||||
o := src.(*GenericRuntimeSpec)
|
||||
*m = *o
|
||||
if o.Payload != nil {
|
||||
m.Payload = &google_protobuf3.Any{}
|
||||
github_com_docker_swarmkit_api_deepcopy.Copy(m.Payload, o.Payload)
|
||||
}
|
||||
}
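
GenericRuntimeSpec.CopyFrom allocates a fresh Any before copying the payload, so copies do not alias the source message. A sketch of the effect, assuming the vendored api and gogo types packages:

// Illustrative sketch: Copy duplicates the Any payload rather than sharing it.
package main

import (
	"fmt"

	api "github.com/docker/swarmkit/api"
	gogotypes "github.com/gogo/protobuf/types"
)

func main() {
	orig := &api.GenericRuntimeSpec{
		Kind:    "example-runtime", // placeholder
		Payload: &gogotypes.Any{TypeUrl: "example.com/Config"},
	}
	dup := orig.Copy()
	fmt.Println("payload shared:", dup.Payload == orig.Payload) // false
	fmt.Println("kind copied:   ", dup.Kind == orig.Kind)       // true
}
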
|
||||
|
||||
func (m *NetworkAttachmentSpec) Copy() *NetworkAttachmentSpec {
|
||||
if m == nil {
|
||||
return nil
|
||||
|
@ -1335,6 +1402,54 @@ func (m *TaskSpec_Attachment) MarshalTo(dAtA []byte) (int, error) {
|
|||
}
|
||||
return i, nil
|
||||
}
|
||||
func (m *TaskSpec_Generic) MarshalTo(dAtA []byte) (int, error) {
|
||||
i := 0
|
||||
if m.Generic != nil {
|
||||
dAtA[i] = 0x52
|
||||
i++
|
||||
i = encodeVarintSpecs(dAtA, i, uint64(m.Generic.Size()))
|
||||
n17, err := m.Generic.MarshalTo(dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n17
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
func (m *GenericRuntimeSpec) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalTo(dAtA)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *GenericRuntimeSpec) MarshalTo(dAtA []byte) (int, error) {
|
||||
var i int
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Kind) > 0 {
|
||||
dAtA[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintSpecs(dAtA, i, uint64(len(m.Kind)))
|
||||
i += copy(dAtA[i:], m.Kind)
|
||||
}
|
||||
if m.Payload != nil {
|
||||
dAtA[i] = 0x12
|
||||
i++
|
||||
i = encodeVarintSpecs(dAtA, i, uint64(m.Payload.Size()))
|
||||
n18, err := m.Payload.MarshalTo(dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n18
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func (m *NetworkAttachmentSpec) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
|
@ -1470,21 +1585,21 @@ func (m *ContainerSpec) MarshalTo(dAtA []byte) (int, error) {
|
|||
dAtA[i] = 0x4a
|
||||
i++
|
||||
i = encodeVarintSpecs(dAtA, i, uint64(m.StopGracePeriod.Size()))
|
||||
n17, err := m.StopGracePeriod.MarshalTo(dAtA[i:])
|
||||
n19, err := m.StopGracePeriod.MarshalTo(dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n17
|
||||
i += n19
|
||||
}
|
||||
if m.PullOptions != nil {
|
||||
dAtA[i] = 0x52
|
||||
i++
|
||||
i = encodeVarintSpecs(dAtA, i, uint64(m.PullOptions.Size()))
|
||||
n18, err := m.PullOptions.MarshalTo(dAtA[i:])
|
||||
n20, err := m.PullOptions.MarshalTo(dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n18
|
||||
i += n20
|
||||
}
|
||||
if len(m.Groups) > 0 {
|
||||
for _, s := range m.Groups {
|
||||
|
@ -1533,11 +1648,11 @@ func (m *ContainerSpec) MarshalTo(dAtA []byte) (int, error) {
|
|||
dAtA[i] = 0x7a
|
||||
i++
|
||||
i = encodeVarintSpecs(dAtA, i, uint64(m.DNSConfig.Size()))
|
||||
n19, err := m.DNSConfig.MarshalTo(dAtA[i:])
|
||||
n21, err := m.DNSConfig.MarshalTo(dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n19
|
||||
i += n21
|
||||
}
|
||||
if m.Healthcheck != nil {
|
||||
dAtA[i] = 0x82
|
||||
|
@ -1545,11 +1660,11 @@ func (m *ContainerSpec) MarshalTo(dAtA []byte) (int, error) {
|
|||
dAtA[i] = 0x1
|
||||
i++
|
||||
i = encodeVarintSpecs(dAtA, i, uint64(m.Healthcheck.Size()))
|
||||
n20, err := m.Healthcheck.MarshalTo(dAtA[i:])
|
||||
n22, err := m.Healthcheck.MarshalTo(dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n20
|
||||
i += n22
|
||||
}
|
||||
if len(m.Hosts) > 0 {
|
||||
for _, s := range m.Hosts {
|
||||
|
@ -1745,20 +1860,20 @@ func (m *NetworkSpec) MarshalTo(dAtA []byte) (int, error) {
|
|||
dAtA[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintSpecs(dAtA, i, uint64(m.Annotations.Size()))
|
||||
n21, err := m.Annotations.MarshalTo(dAtA[i:])
|
||||
n23, err := m.Annotations.MarshalTo(dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n21
|
||||
i += n23
|
||||
if m.DriverConfig != nil {
|
||||
dAtA[i] = 0x12
|
||||
i++
|
||||
i = encodeVarintSpecs(dAtA, i, uint64(m.DriverConfig.Size()))
|
||||
n22, err := m.DriverConfig.MarshalTo(dAtA[i:])
|
||||
n24, err := m.DriverConfig.MarshalTo(dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n22
|
||||
i += n24
|
||||
}
|
||||
if m.Ipv6Enabled {
|
||||
dAtA[i] = 0x18
|
||||
|
@ -1784,11 +1899,11 @@ func (m *NetworkSpec) MarshalTo(dAtA []byte) (int, error) {
|
|||
dAtA[i] = 0x2a
|
||||
i++
|
||||
i = encodeVarintSpecs(dAtA, i, uint64(m.IPAM.Size()))
|
||||
n23, err := m.IPAM.MarshalTo(dAtA[i:])
|
||||
n25, err := m.IPAM.MarshalTo(dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n23
|
||||
i += n25
|
||||
}
|
||||
if m.Attachable {
|
||||
dAtA[i] = 0x30
|
||||
|
@ -1831,67 +1946,67 @@ func (m *ClusterSpec) MarshalTo(dAtA []byte) (int, error) {
|
|||
dAtA[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintSpecs(dAtA, i, uint64(m.Annotations.Size()))
|
||||
n24, err := m.Annotations.MarshalTo(dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n24
|
||||
dAtA[i] = 0x12
|
||||
i++
|
||||
i = encodeVarintSpecs(dAtA, i, uint64(m.AcceptancePolicy.Size()))
|
||||
n25, err := m.AcceptancePolicy.MarshalTo(dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n25
|
||||
dAtA[i] = 0x1a
|
||||
i++
|
||||
i = encodeVarintSpecs(dAtA, i, uint64(m.Orchestration.Size()))
|
||||
n26, err := m.Orchestration.MarshalTo(dAtA[i:])
|
||||
n26, err := m.Annotations.MarshalTo(dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n26
|
||||
dAtA[i] = 0x22
|
||||
dAtA[i] = 0x12
|
||||
i++
|
||||
i = encodeVarintSpecs(dAtA, i, uint64(m.Raft.Size()))
|
||||
n27, err := m.Raft.MarshalTo(dAtA[i:])
|
||||
i = encodeVarintSpecs(dAtA, i, uint64(m.AcceptancePolicy.Size()))
|
||||
n27, err := m.AcceptancePolicy.MarshalTo(dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n27
|
||||
dAtA[i] = 0x2a
|
||||
dAtA[i] = 0x1a
|
||||
i++
|
||||
i = encodeVarintSpecs(dAtA, i, uint64(m.Dispatcher.Size()))
|
||||
n28, err := m.Dispatcher.MarshalTo(dAtA[i:])
|
||||
i = encodeVarintSpecs(dAtA, i, uint64(m.Orchestration.Size()))
|
||||
n28, err := m.Orchestration.MarshalTo(dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n28
|
||||
dAtA[i] = 0x32
|
||||
dAtA[i] = 0x22
|
||||
i++
|
||||
i = encodeVarintSpecs(dAtA, i, uint64(m.CAConfig.Size()))
|
||||
n29, err := m.CAConfig.MarshalTo(dAtA[i:])
|
||||
i = encodeVarintSpecs(dAtA, i, uint64(m.Raft.Size()))
|
||||
n29, err := m.Raft.MarshalTo(dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n29
|
||||
dAtA[i] = 0x3a
|
||||
dAtA[i] = 0x2a
|
||||
i++
|
||||
i = encodeVarintSpecs(dAtA, i, uint64(m.TaskDefaults.Size()))
|
||||
n30, err := m.TaskDefaults.MarshalTo(dAtA[i:])
|
||||
i = encodeVarintSpecs(dAtA, i, uint64(m.Dispatcher.Size()))
|
||||
n30, err := m.Dispatcher.MarshalTo(dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n30
|
||||
dAtA[i] = 0x42
|
||||
dAtA[i] = 0x32
|
||||
i++
|
||||
i = encodeVarintSpecs(dAtA, i, uint64(m.EncryptionConfig.Size()))
|
||||
n31, err := m.EncryptionConfig.MarshalTo(dAtA[i:])
|
||||
i = encodeVarintSpecs(dAtA, i, uint64(m.CAConfig.Size()))
|
||||
n31, err := m.CAConfig.MarshalTo(dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n31
|
||||
dAtA[i] = 0x3a
|
||||
i++
|
||||
i = encodeVarintSpecs(dAtA, i, uint64(m.TaskDefaults.Size()))
|
||||
n32, err := m.TaskDefaults.MarshalTo(dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n32
|
||||
dAtA[i] = 0x42
|
||||
i++
|
||||
i = encodeVarintSpecs(dAtA, i, uint64(m.EncryptionConfig.Size()))
|
||||
n33, err := m.EncryptionConfig.MarshalTo(dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n33
|
||||
return i, nil
|
||||
}
|
||||
|
||||
|
@ -1913,11 +2028,11 @@ func (m *SecretSpec) MarshalTo(dAtA []byte) (int, error) {
|
|||
dAtA[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintSpecs(dAtA, i, uint64(m.Annotations.Size()))
|
||||
n32, err := m.Annotations.MarshalTo(dAtA[i:])
|
||||
n34, err := m.Annotations.MarshalTo(dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n32
|
||||
i += n34
|
||||
if len(m.Data) > 0 {
|
||||
dAtA[i] = 0x12
|
||||
i++
|
||||
|
@ -2088,6 +2203,29 @@ func (m *TaskSpec_Attachment) Size() (n int) {
|
|||
}
|
||||
return n
|
||||
}
|
||||
func (m *TaskSpec_Generic) Size() (n int) {
|
||||
var l int
|
||||
_ = l
|
||||
if m.Generic != nil {
|
||||
l = m.Generic.Size()
|
||||
n += 1 + l + sovSpecs(uint64(l))
|
||||
}
|
||||
return n
|
||||
}
|
||||
func (m *GenericRuntimeSpec) Size() (n int) {
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.Kind)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovSpecs(uint64(l))
|
||||
}
|
||||
if m.Payload != nil {
|
||||
l = m.Payload.Size()
|
||||
n += 1 + l + sovSpecs(uint64(l))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *NetworkAttachmentSpec) Size() (n int) {
|
||||
var l int
|
||||
_ = l
|
||||
|
@ -2427,6 +2565,27 @@ func (this *TaskSpec_Attachment) String() string {
|
|||
}, "")
|
||||
return s
|
||||
}
|
||||
func (this *TaskSpec_Generic) String() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := strings.Join([]string{`&TaskSpec_Generic{`,
|
||||
`Generic:` + strings.Replace(fmt.Sprintf("%v", this.Generic), "GenericRuntimeSpec", "GenericRuntimeSpec", 1) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
}
|
||||
func (this *GenericRuntimeSpec) String() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := strings.Join([]string{`&GenericRuntimeSpec{`,
|
||||
`Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
|
||||
`Payload:` + strings.Replace(fmt.Sprintf("%v", this.Payload), "Any", "google_protobuf3.Any", 1) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
}
|
||||
func (this *NetworkAttachmentSpec) String() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
|
@ -3396,6 +3555,150 @@ func (m *TaskSpec) Unmarshal(dAtA []byte) error {
|
|||
break
|
||||
}
|
||||
}
|
||||
case 10:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Generic", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowSpecs
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthSpecs
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
v := &GenericRuntimeSpec{}
|
||||
if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
m.Runtime = &TaskSpec_Generic{v}
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipSpecs(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthSpecs
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *GenericRuntimeSpec) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowSpecs
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: GenericRuntimeSpec: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: GenericRuntimeSpec: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowSpecs
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthSpecs
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Kind = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowSpecs
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthSpecs
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if m.Payload == nil {
|
||||
m.Payload = &google_protobuf3.Any{}
|
||||
}
|
||||
if err := m.Payload.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipSpecs(dAtA[iNdEx:])
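
Field 1 (Kind) is decoded above as a length-delimited string: key byte 0x0a (1<<3 | 2), a varint length, then the bytes themselves. Feeding a hand-built buffer through Unmarshal shows that path, assuming the vendored api package:

// Illustrative sketch: 0x0a is the key for field 1 with wire type 2, 0x03 is the
// length, and the remaining bytes are the Kind string.
package main

import (
	"fmt"

	api "github.com/docker/swarmkit/api"
)

func main() {
	raw := []byte{0x0a, 0x03, 'f', 'o', 'o'}
	var spec api.GenericRuntimeSpec
	if err := spec.Unmarshal(raw); err != nil {
		panic(err)
	}
	fmt.Println(spec.Kind) // foo
}
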
|
||||
|
@ -5257,113 +5560,117 @@ var (
|
|||
func init() { proto.RegisterFile("specs.proto", fileDescriptorSpecs) }
|
||||
|
||||
var fileDescriptorSpecs = []byte{
|
||||
// 1717 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x57, 0x41, 0x73, 0x1b, 0xb7,
|
||||
0x15, 0x16, 0x25, 0x8a, 0x5a, 0xbe, 0xa5, 0x6c, 0x1a, 0x75, 0xd2, 0x35, 0xdd, 0x50, 0x34, 0xe3,
|
||||
0xa6, 0x4a, 0x33, 0xa5, 0xa6, 0x6a, 0x27, 0x75, 0xea, 0x66, 0x5a, 0x52, 0x64, 0x65, 0x55, 0x95,
|
||||
0xcc, 0x01, 0x15, 0x77, 0x7c, 0xe2, 0x80, 0xbb, 0x10, 0xb9, 0xa3, 0xe5, 0x62, 0x0b, 0x60, 0x99,
|
||||
0xe1, 0xad, 0xc7, 0x8c, 0x0f, 0x3d, 0xf5, 0xaa, 0xe9, 0xa1, 0x7f, 0xc6, 0xb7, 0xf6, 0xd8, 0x93,
|
||||
0xa6, 0xe1, 0x5f, 0xe8, 0x0f, 0x68, 0x07, 0x58, 0x2c, 0xb9, 0x4c, 0x56, 0xb1, 0x67, 0xe2, 0x1b,
|
||||
0xde, 0xdb, 0xef, 0x7b, 0x00, 0x1e, 0x3e, 0x3c, 0xbc, 0x05, 0x5b, 0x44, 0xd4, 0x15, 0xad, 0x88,
|
||||
0x33, 0xc9, 0x10, 0xf2, 0x98, 0x7b, 0x45, 0x79, 0x4b, 0x7c, 0x49, 0xf8, 0xf4, 0xca, 0x97, 0xad,
|
||||
0xd9, 0xcf, 0x6b, 0xb6, 0x9c, 0x47, 0xd4, 0x00, 0x6a, 0xf7, 0xc7, 0x6c, 0xcc, 0xf4, 0xf0, 0x40,
|
||||
0x8d, 0x8c, 0xb7, 0x3e, 0x66, 0x6c, 0x1c, 0xd0, 0x03, 0x6d, 0x8d, 0xe2, 0xcb, 0x03, 0x2f, 0xe6,
|
||||
0x44, 0xfa, 0x2c, 0x4c, 0xbe, 0x37, 0xaf, 0x8b, 0x60, 0x9d, 0x33, 0x8f, 0x0e, 0x22, 0xea, 0xa2,
|
||||
0x63, 0xb0, 0x49, 0x18, 0x32, 0xa9, 0x01, 0xc2, 0x29, 0x34, 0x0a, 0xfb, 0xf6, 0xe1, 0x5e, 0xeb,
|
||||
0xdb, 0x33, 0xb7, 0xda, 0x2b, 0x58, 0xa7, 0xf8, 0xfa, 0x66, 0x6f, 0x03, 0x67, 0x99, 0xe8, 0xb7,
|
||||
0x50, 0xf1, 0xa8, 0xf0, 0x39, 0xf5, 0x86, 0x9c, 0x05, 0xd4, 0xd9, 0x6c, 0x14, 0xf6, 0xef, 0x1c,
|
||||
0xfe, 0x28, 0x2f, 0x92, 0x9a, 0x1c, 0xb3, 0x80, 0x62, 0xdb, 0x30, 0x94, 0x81, 0x8e, 0x01, 0xa6,
|
||||
0x74, 0x3a, 0xa2, 0x5c, 0x4c, 0xfc, 0xc8, 0xd9, 0xd2, 0xf4, 0x9f, 0xdc, 0x46, 0x57, 0x6b, 0x6f,
|
||||
0x9d, 0x2d, 0xe1, 0x38, 0x43, 0x45, 0x67, 0x50, 0x21, 0x33, 0xe2, 0x07, 0x64, 0xe4, 0x07, 0xbe,
|
||||
0x9c, 0x3b, 0x45, 0x1d, 0xea, 0xe3, 0xef, 0x0c, 0xd5, 0xce, 0x10, 0xf0, 0x1a, 0xbd, 0xe9, 0x01,
|
||||
0xac, 0x26, 0x42, 0x1f, 0xc1, 0x4e, 0xbf, 0x77, 0xde, 0x3d, 0x39, 0x3f, 0xae, 0x6e, 0xd4, 0x1e,
|
||||
0xbc, 0xba, 0x6e, 0xbc, 0xa7, 0x62, 0xac, 0x00, 0x7d, 0x1a, 0x7a, 0x7e, 0x38, 0x46, 0xfb, 0x60,
|
||||
0xb5, 0x8f, 0x8e, 0x7a, 0xfd, 0x8b, 0x5e, 0xb7, 0x5a, 0xa8, 0xd5, 0x5e, 0x5d, 0x37, 0xde, 0x5f,
|
||||
0x07, 0xb6, 0x5d, 0x97, 0x46, 0x92, 0x7a, 0xb5, 0xe2, 0x57, 0xff, 0xa8, 0x6f, 0x34, 0xbf, 0x2a,
|
||||
0x40, 0x25, 0xbb, 0x08, 0xf4, 0x11, 0x94, 0xda, 0x47, 0x17, 0x27, 0x2f, 0x7a, 0xd5, 0x8d, 0x15,
|
||||
0x3d, 0x8b, 0x68, 0xbb, 0xd2, 0x9f, 0x51, 0xf4, 0x18, 0xb6, 0xfb, 0xed, 0x2f, 0x06, 0xbd, 0x6a,
|
||||
0x61, 0xb5, 0x9c, 0x2c, 0xac, 0x4f, 0x62, 0xa1, 0x51, 0x5d, 0xdc, 0x3e, 0x39, 0xaf, 0x6e, 0xe6,
|
||||
0xa3, 0xba, 0x9c, 0xf8, 0xa1, 0x59, 0xca, 0xdf, 0x8b, 0x60, 0x0f, 0x28, 0x9f, 0xf9, 0xee, 0x3b,
|
||||
0x96, 0xc8, 0xa7, 0x50, 0x94, 0x44, 0x5c, 0x69, 0x69, 0xd8, 0xf9, 0xd2, 0xb8, 0x20, 0xe2, 0x4a,
|
||||
0x4d, 0x6a, 0xe8, 0x1a, 0xaf, 0x94, 0xc1, 0x69, 0x14, 0xf8, 0x2e, 0x91, 0xd4, 0xd3, 0xca, 0xb0,
|
||||
0x0f, 0x7f, 0x9c, 0xc7, 0xc6, 0x4b, 0x94, 0x59, 0xff, 0xb3, 0x0d, 0x9c, 0xa1, 0xa2, 0xa7, 0x50,
|
||||
0x1a, 0x07, 0x6c, 0x44, 0x02, 0xad, 0x09, 0xfb, 0xf0, 0x51, 0x5e, 0x90, 0x63, 0x8d, 0x58, 0x05,
|
||||
0x30, 0x14, 0xf4, 0x04, 0x4a, 0x71, 0xe4, 0x11, 0x49, 0x9d, 0x92, 0x26, 0x37, 0xf2, 0xc8, 0x5f,
|
||||
0x68, 0xc4, 0x11, 0x0b, 0x2f, 0xfd, 0x31, 0x36, 0x78, 0x74, 0x0a, 0x56, 0x48, 0xe5, 0x97, 0x8c,
|
||||
0x5f, 0x09, 0x67, 0xa7, 0xb1, 0xb5, 0x6f, 0x1f, 0x7e, 0x92, 0x2b, 0xc6, 0x04, 0xd3, 0x96, 0x92,
|
||||
0xb8, 0x93, 0x29, 0x0d, 0x65, 0x12, 0xa6, 0xb3, 0xe9, 0x14, 0xf0, 0x32, 0x00, 0xfa, 0x0d, 0x58,
|
||||
0x34, 0xf4, 0x22, 0xe6, 0x87, 0xd2, 0xb1, 0x6e, 0x5f, 0x48, 0xcf, 0x60, 0x54, 0x32, 0xf1, 0x92,
|
||||
0xa1, 0xd8, 0x9c, 0x05, 0xc1, 0x88, 0xb8, 0x57, 0x4e, 0xf9, 0x2d, 0xb7, 0xb1, 0x64, 0x74, 0x4a,
|
||||
0x50, 0x9c, 0x32, 0x8f, 0x36, 0x0f, 0xe0, 0xde, 0xb7, 0x52, 0x8d, 0x6a, 0x60, 0x99, 0x54, 0x27,
|
||||
0x1a, 0x29, 0xe2, 0xa5, 0xdd, 0xbc, 0x0b, 0xbb, 0x6b, 0x69, 0x6d, 0xfe, 0xb5, 0x08, 0x56, 0x7a,
|
||||
0xd6, 0xa8, 0x0d, 0x65, 0x97, 0x85, 0x92, 0xf8, 0x21, 0xe5, 0x46, 0x5e, 0xb9, 0x27, 0x73, 0x94,
|
||||
0x82, 0x14, 0xeb, 0xd9, 0x06, 0x5e, 0xb1, 0xd0, 0xef, 0xa1, 0xcc, 0xa9, 0x60, 0x31, 0x77, 0xa9,
|
||||
0x30, 0xfa, 0xda, 0xcf, 0x57, 0x48, 0x02, 0xc2, 0xf4, 0xcf, 0xb1, 0xcf, 0xa9, 0xca, 0xb2, 0xc0,
|
||||
0x2b, 0x2a, 0x7a, 0x0a, 0x3b, 0x9c, 0x0a, 0x49, 0xb8, 0xfc, 0x2e, 0x89, 0xe0, 0x04, 0xd2, 0x67,
|
||||
0x81, 0xef, 0xce, 0x71, 0xca, 0x40, 0x4f, 0xa1, 0x1c, 0x05, 0xc4, 0xd5, 0x51, 0x9d, 0x6d, 0x4d,
// ... (remaining gzipped FileDescriptorProto bytes elided) ...
0x4f, 0xb4, 0x10, 0x00, 0x00,

// 1779 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x57, 0x41, 0x73, 0x1b, 0x49,
// ... (remaining gzipped FileDescriptorProto bytes elided) ...
0x11, 0x00, 0x00,
}
9
vendor/github.com/docker/swarmkit/api/specs.proto
generated
vendored
@ -5,6 +5,7 @@ package docker.swarmkit.v1;
import "types.proto";
import "gogoproto/gogo.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/any.proto";

// Specs are container objects for user provided input. All creations and
// updates are done through spec types. As a convention, user input from a spec

@ -101,6 +102,7 @@ message TaskSpec {
    oneof runtime {
        NetworkAttachmentSpec attachment = 8;
        ContainerSpec container = 1;
        GenericRuntimeSpec generic = 10;
    }

    // Resource requirements for the container.

@ -128,6 +130,11 @@ message TaskSpec {
    uint64 force_update = 9;
}

message GenericRuntimeSpec {
    string kind = 1;
    google.protobuf.Any payload = 2;
}

// NetworkAttachmentSpec specifies runtime parameters required to attach
// a container to a network.
message NetworkAttachmentSpec {

@ -321,7 +328,7 @@ message NetworkSpec {
    // enabled(default case) no manual attachment to this network
    // can happen.
    bool attachable = 6;


    // Ingress indicates this network will provide the routing-mesh.
    // In older versions, the network providing the routing mesh was
    // swarm internally created only and it was identified by the name
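The GenericRuntimeSpec message added above carries an opaque runtime definition as a kind string plus a protobuf Any payload, alongside the existing container and attachment runtimes in the TaskSpec oneof. A minimal sketch of packing a payload with the gogo helpers; the generated Go field names (Kind, Payload) are assumed from the usual codegen and the payload message itself is whatever the generic runtime expects:

package main

import (
    "github.com/docker/swarmkit/api"
    "github.com/gogo/protobuf/proto"
    gogotypes "github.com/gogo/protobuf/types"
)

// buildGenericRuntime wraps an arbitrary proto message into a GenericRuntimeSpec.
// The kind string names the runtime that will interpret the payload.
func buildGenericRuntime(kind string, payload proto.Message) (*api.GenericRuntimeSpec, error) {
    anyPayload, err := gogotypes.MarshalAny(payload)
    if err != nil {
        return nil, err
    }
    return &api.GenericRuntimeSpec{Kind: kind, Payload: anyPayload}, nil
}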
80
vendor/github.com/docker/swarmkit/api/storeobject.go
generated
vendored
Normal file
@ -0,0 +1,80 @@
package api

import (
    "errors"
    "fmt"

    "github.com/docker/go-events"
)

var errUnknownStoreAction = errors.New("unrecognized action type")

// StoreObject is an abstract object that can be handled by the store.
type StoreObject interface {
    GetID() string                // Get ID
    GetMeta() Meta                // Retrieve metadata
    SetMeta(Meta)                 // Set metadata
    CopyStoreObject() StoreObject // Return a copy of this object
    EventCreate() Event           // Return a creation event
    EventUpdate() Event           // Return an update event
    EventDelete() Event           // Return a deletion event
}

// Event is the type used for events passed over watcher channels, and also
// the type used to specify filtering in calls to Watch.
type Event interface {
    // TODO(stevvooe): Consider whether it makes sense to squish both the
    // matcher type and the primary type into the same type. It might be better
    // to build a matcher from an event prototype.

    // Matches checks if this item in a watch queue Matches the event
    // description.
    Matches(events.Event) bool
}

func customIndexer(kind string, annotations *Annotations) (bool, [][]byte, error) {
    var converted [][]byte

    for _, entry := range annotations.Indices {
        index := make([]byte, 0, len(kind)+1+len(entry.Key)+1+len(entry.Val)+1)
        if kind != "" {
            index = append(index, []byte(kind)...)
            index = append(index, '|')
        }
        index = append(index, []byte(entry.Key)...)
        index = append(index, '|')
        index = append(index, []byte(entry.Val)...)
        index = append(index, '\x00')
        converted = append(converted, index)
    }

    // Add the null character as a terminator
    return len(converted) != 0, converted, nil
}

func fromArgs(args ...interface{}) ([]byte, error) {
    if len(args) != 1 {
        return nil, fmt.Errorf("must provide only a single argument")
    }
    arg, ok := args[0].(string)
    if !ok {
        return nil, fmt.Errorf("argument must be a string: %#v", args[0])
    }
    // Add the null character as a terminator
    arg += "\x00"
    return []byte(arg), nil
}

func prefixFromArgs(args ...interface{}) ([]byte, error) {
    val, err := fromArgs(args...)
    if err != nil {
        return nil, err
    }

    // Strip the null terminator, the rest is a prefix
    n := len(val)
    if n > 0 {
        return val[:n-1], nil
    }
    return val, nil
}
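The indexing helpers in storeobject.go above build composite keys of the form kind|key|val terminated by a null byte; fromArgs appends the same terminator for exact lookups and prefixFromArgs strips it again for prefix scans. A standalone illustration of the resulting byte layout (the kind and annotation values are made up, and this mirrors rather than calls the unexported helpers):

package main

import "fmt"

// indexKey mirrors the layout produced by customIndexer: kind|key|val\x00.
func indexKey(kind, key, val string) []byte {
    out := make([]byte, 0, len(kind)+1+len(key)+1+len(val)+1)
    if kind != "" {
        out = append(out, kind...)
        out = append(out, '|')
    }
    out = append(out, key...)
    out = append(out, '|')
    out = append(out, val...)
    return append(out, '\x00')
}

func main() {
    // Exact lookups (fromArgs) carry the trailing \x00; prefix lookups
    // (prefixFromArgs) drop it so any value stored under the key matches.
    fmt.Printf("%q\n", indexKey("custom", "com.example.team", "ops"))
    // Prints: "custom|com.example.team|ops\x00"
}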
1393
vendor/github.com/docker/swarmkit/api/types.pb.go
generated
vendored
File diff suppressed because it is too large
40
vendor/github.com/docker/swarmkit/api/types.proto
generated
vendored
@ -14,11 +14,20 @@ message Version {
    uint64 index = 1;
}

message IndexEntry {
    string key = 1;
    string val = 2;
}

// Annotations provide useful information to identify API objects. They are
// common to all API specs.
message Annotations {
    string name = 1;
    map<string, string> labels = 2;

    // Indices provides keys and values for indexing this object.
    // A single key may have multiple values.
    repeated IndexEntry indices = 4 [(gogoproto.nullable) = false];
}

message Resources {

@ -315,6 +324,17 @@ message UpdateConfig {
    // roll back to the previous service spec. If the MaxFailureRatio
    // threshold is hit during the rollback, the rollback will pause.
    float max_failure_ratio = 5;

    // UpdateOrder controls the order of operations when rolling out an
    // updated task. Either the old task is shut down before the new task
    // is started, or the new task is started before the old task is shut
    // down.
    enum UpdateOrder {
        STOP_FIRST = 0;
        START_FIRST = 1;
    }

    UpdateOrder order = 6;
}

// UpdateStatus is the status of an update in progress.
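The UpdateOrder enum above lets services opt into start-first rollouts, where the replacement task is brought up before the old one is shut down. A hedged sketch of selecting it; the generated Go constant name (api.UpdateConfig_START_FIRST) follows the usual gogo naming and is an assumption:

package main

import "github.com/docker/swarmkit/api"

// startFirstUpdate returns an update config that starts the new task before
// stopping the old one. Parallelism of 1 is just an example value.
func startFirstUpdate() *api.UpdateConfig {
    return &api.UpdateConfig{
        Parallelism: 1,
        Order:       api.UpdateConfig_START_FIRST, // assumed generated constant name
    }
}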
@ -635,6 +655,9 @@ message ExternalCA {
    // Options is a set of additional key/value pairs whose interpretation
    // depends on the specified CA type.
    map<string, string> options = 3;

    // CACert specifies which root CA is used by this external CA
    bytes ca_cert = 4 [(gogoproto.customname) = "CACert"];
}

message CAConfig {

@ -744,6 +767,10 @@ message RootCA {

    // JoinTokens contains the join tokens for workers and managers.
    JoinTokens join_tokens = 4 [(gogoproto.nullable) = false];

    // RootRotation contains the new root cert and key we want to rotate to - if this is nil, we are not in the
    // middle of a root rotation
    RootRotation root_rotation = 5;
}

@ -872,6 +899,11 @@ message HealthConfig {
    // Retries is the number of consecutive failures needed to consider a
    // container as unhealthy. Zero means inherit.
    int32 retries = 4;

    // Start period is the period for container initialization during
    // which health check failures will note count towards the maximum
    // number of retries.
    google.protobuf.Duration start_period = 5;
}

message MaybeEncryptedRecord {
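The new start_period field is a protobuf Duration giving a grace window during which health-check failures do not count against retries. Callers convert from time.Duration with the gogo helpers, the same way NodeCertExpiry is handled later in this diff; the Go field names and the 30-second value below are assumptions:

package main

import (
    "time"

    "github.com/docker/swarmkit/api"
    gogotypes "github.com/gogo/protobuf/types"
)

// healthCheckWithGracePeriod builds a HealthConfig whose failures are ignored
// for the first 30 seconds after the container starts.
func healthCheckWithGracePeriod(test []string) *api.HealthConfig {
    return &api.HealthConfig{
        Test:        test,
        Retries:     3,
        StartPeriod: gogotypes.DurationProto(30 * time.Second),
    }
}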
@ -884,3 +916,11 @@ message MaybeEncryptedRecord {
    bytes data = 2;
    bytes nonce = 3;
}


message RootRotation {
    bytes ca_cert = 1 [(gogoproto.customname) = "CACert"];
    bytes ca_key = 2 [(gogoproto.customname) = "CAKey"];
    // cross-signed CA cert is the CACert that has been cross-signed by the previous root
    bytes cross_signed_ca_cert = 3 [(gogoproto.customname) = "CrossSignedCACert"];;
}
284
vendor/github.com/docker/swarmkit/ca/certificates.go
generated
vendored
@ -111,18 +111,25 @@ type LocalSigner struct {
|
|||
// Key will only be used by the original manager to put the private
|
||||
// key-material in raft, no signing operations depend on it.
|
||||
Key []byte
|
||||
|
||||
// Cert is one PEM encoded Certificate used as the signing CA. It must correspond to the key.
|
||||
Cert []byte
|
||||
|
||||
// just cached parsed values for validation, etc.
|
||||
parsedCert *x509.Certificate
|
||||
cryptoSigner crypto.Signer
|
||||
}
|
||||
|
||||
// RootCA is the representation of everything we need to sign certificates and/or to verify certificates
|
||||
//
|
||||
// RootCA.Cert: [signing CA cert][CA cert1][CA cert2]
|
||||
// RootCA.Cert: [CA cert1][CA cert2]
|
||||
// RootCA.Intermediates: [intermediate CA1][intermediate CA2][intermediate CA3]
|
||||
// RootCA.Signer.Key: [signing CA key]
|
||||
// RootCA.signer.Cert: [signing CA cert]
|
||||
// RootCA.signer.Key: [signing CA key]
|
||||
//
|
||||
// Requirements:
|
||||
//
|
||||
// - [signing CA key] must be the private key for [signing CA cert]
|
||||
// - [signing CA cert] must be the first cert in RootCA.Cert
|
||||
// - [signing CA key] must be the private key for [signing CA cert], and either both or none must be provided
|
||||
//
|
||||
// - [intermediate CA1] must have the same public key and subject as [signing CA cert], because otherwise when
|
||||
// appended to a leaf certificate, the intermediates will not form a chain (because [intermediate CA1] won't because
|
||||
|
@ -135,10 +142,29 @@ type LocalSigner struct {
|
|||
// valid chain from [leaf signed by signing CA cert] to one of the root certs ([signing CA cert], [CA cert1], [CA cert2])
|
||||
// using zero or more of the intermediate certs ([intermediate CA1][intermediate CA2][intermediate CA3]) as intermediates
|
||||
//
|
||||
// Example 1: Simple root rotation
|
||||
// - Initial state:
|
||||
// - RootCA.Cert: [Root CA1 self-signed]
|
||||
// - RootCA.Intermediates: []
|
||||
// - RootCA.signer.Cert: [Root CA1 self-signed]
|
||||
// - Issued TLS cert: [leaf signed by Root CA1]
|
||||
//
|
||||
// - Intermediate state (during root rotation):
|
||||
// - RootCA.Cert: [Root CA1 self-signed]
|
||||
// - RootCA.Intermediates: [Root CA2 signed by Root CA1]
|
||||
// - RootCA.signer.Cert: [Root CA2 signed by Root CA1]
|
||||
// - Issued TLS cert: [leaf signed by Root CA2][Root CA2 signed by Root CA1]
|
||||
//
|
||||
// - Final state:
|
||||
// - RootCA.Cert: [Root CA2 self-signed]
|
||||
// - RootCA.Intermediates: []
|
||||
// - RootCA.signer.Cert: [Root CA2 self-signed]
|
||||
// - Issued TLS cert: [leaf signed by Root CA2]
|
||||
//
|
||||
type RootCA struct {
|
||||
// Cert contains a bundle of PEM encoded Certificate for the Root CA, the first one of which
|
||||
// must correspond to the key in the local signer, if provided
|
||||
Cert []byte
|
||||
// Certs contains a bundle of self-signed, PEM encoded certificates for the Root CA to be used
|
||||
// as the root of trust.
|
||||
Certs []byte
|
||||
|
||||
// Intermediates contains a bundle of PEM encoded intermediate CA certificates to append to any
|
||||
// issued TLS (leaf) certificates. The first one must have the same public key and subject as the
|
||||
|
@ -153,16 +179,16 @@ type RootCA struct {
    Digest digest.Digest

    // This signer will be nil if the node doesn't have the appropriate key material
    Signer *LocalSigner
    signer *LocalSigner
}

// CanSign ensures that the signer has all three necessary elements needed to operate
func (rca *RootCA) CanSign() bool {
    if rca.Cert == nil || rca.Pool == nil || rca.Signer == nil {
        return false
// Signer is an accessor for the local signer that returns an error if this root cannot sign.
func (rca *RootCA) Signer() (*LocalSigner, error) {
    if rca.Pool == nil || rca.signer == nil || len(rca.signer.Cert) == 0 || rca.signer.Signer == nil {
        return nil, ErrNoValidSigner
    }

    return true
    return rca.signer, nil
}

// IssueAndSaveNewCertificates generates a new key-pair, signs it with the local root-ca, and returns a
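With CanSign replaced by the Signer accessor above, callers no longer pre-check a boolean; they ask for the signer and branch on ErrNoValidSigner. A sketch of the new caller pattern; the cn/ou/org arguments are placeholder values:

package main

import (
    "fmt"

    "github.com/docker/swarmkit/ca"
)

// signCSR signs locally when this root has key material, and tells the caller
// to fall back to a remote CA otherwise.
func signCSR(rootCA *ca.RootCA, csr []byte) ([]byte, error) {
    if _, err := rootCA.Signer(); err == ca.ErrNoValidSigner {
        return nil, fmt.Errorf("no local signer, request the certificate remotely instead")
    } else if err != nil {
        return nil, err
    }
    return rootCA.ParseValidateAndSignCSR(csr, "node-id", ca.WorkerRole, "org-id")
}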
@ -173,10 +199,6 @@ func (rca *RootCA) IssueAndSaveNewCertificates(kw KeyWriter, cn, ou, org string)
|
|||
return nil, errors.Wrap(err, "error when generating new node certs")
|
||||
}
|
||||
|
||||
if !rca.CanSign() {
|
||||
return nil, ErrNoValidSigner
|
||||
}
|
||||
|
||||
// Obtain a signed Certificate
|
||||
certChain, err := rca.ParseValidateAndSignCSR(csr, cn, ou, org)
|
||||
if err != nil {
|
||||
|
@ -322,13 +344,12 @@ func PrepareCSR(csrBytes []byte, cn, ou, org string) cfsigner.SignRequest {
|
|||
|
||||
// ParseValidateAndSignCSR returns a signed certificate from a particular rootCA and a CSR.
|
||||
func (rca *RootCA) ParseValidateAndSignCSR(csrBytes []byte, cn, ou, org string) ([]byte, error) {
|
||||
if !rca.CanSign() {
|
||||
return nil, ErrNoValidSigner
|
||||
}
|
||||
|
||||
signRequest := PrepareCSR(csrBytes, cn, ou, org)
|
||||
|
||||
cert, err := rca.Signer.Sign(signRequest)
|
||||
signer, err := rca.Signer()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cert, err := signer.Sign(signRequest)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to sign node certificate")
|
||||
}
|
||||
|
@ -338,20 +359,12 @@ func (rca *RootCA) ParseValidateAndSignCSR(csrBytes []byte, cn, ou, org string)
|
|||
|
||||
// CrossSignCACertificate takes a CA root certificate and generates an intermediate CA from it signed with the current root signer
|
||||
func (rca *RootCA) CrossSignCACertificate(otherCAPEM []byte) ([]byte, error) {
|
||||
if !rca.CanSign() {
|
||||
return nil, ErrNoValidSigner
|
||||
signer, err := rca.Signer()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// create a new cert with exactly the same parameters, including the public key and exact NotBefore and NotAfter
|
||||
rootCert, err := helpers.ParseCertificatePEM(rca.Cert)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not parse old CA certificate")
|
||||
}
|
||||
rootSigner, err := helpers.ParsePrivateKeyPEM(rca.Signer.Key)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not parse old CA key")
|
||||
}
|
||||
|
||||
newCert, err := helpers.ParseCertificatePEM(otherCAPEM)
|
||||
if err != nil {
|
||||
return nil, errors.New("could not parse new CA certificate")
|
||||
|
@ -361,7 +374,7 @@ func (rca *RootCA) CrossSignCACertificate(otherCAPEM []byte) ([]byte, error) {
|
|||
return nil, errors.New("certificate not a CA")
|
||||
}
|
||||
|
||||
derBytes, err := x509.CreateCertificate(cryptorand.Reader, newCert, rootCert, newCert.PublicKey, rootSigner)
|
||||
derBytes, err := x509.CreateCertificate(cryptorand.Reader, newCert, signer.parsedCert, newCert.PublicKey, signer.cryptoSigner)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not cross-sign new CA certificate using old CA material")
|
||||
}
|
||||
|
@ -372,28 +385,34 @@ func (rca *RootCA) CrossSignCACertificate(otherCAPEM []byte) ([]byte, error) {
|
|||
}), nil
|
||||
}
|
||||
|
||||
func validateSignatureAlgorithm(cert *x509.Certificate) error {
|
||||
switch cert.SignatureAlgorithm {
|
||||
case x509.SHA256WithRSA, x509.SHA384WithRSA, x509.SHA512WithRSA, x509.ECDSAWithSHA256, x509.ECDSAWithSHA384, x509.ECDSAWithSHA512:
|
||||
return nil
|
||||
default:
|
||||
return fmt.Errorf("unsupported signature algorithm: %s", cert.SignatureAlgorithm.String())
|
||||
}
|
||||
}
|
||||
|
||||
// NewRootCA creates a new RootCA object from unparsed PEM cert bundle and key byte
|
||||
// slices. key may be nil, and in this case NewRootCA will return a RootCA
|
||||
// without a signer.
|
||||
func NewRootCA(certBytes, keyBytes []byte, certExpiry time.Duration, intermediates []byte) (RootCA, error) {
|
||||
func NewRootCA(rootCertBytes, signCertBytes, signKeyBytes []byte, certExpiry time.Duration, intermediates []byte) (RootCA, error) {
|
||||
// Parse all the certificates in the cert bundle
|
||||
parsedCerts, err := helpers.ParseCertificatesPEM(certBytes)
|
||||
parsedCerts, err := helpers.ParseCertificatesPEM(rootCertBytes)
|
||||
if err != nil {
|
||||
return RootCA{}, err
|
||||
return RootCA{}, errors.Wrap(err, "invalid root certificates")
|
||||
}
|
||||
// Check to see if we have at least one valid cert
|
||||
if len(parsedCerts) < 1 {
|
||||
return RootCA{}, errors.New("no valid Root CA certificates found")
|
||||
return RootCA{}, errors.New("no valid root CA certificates found")
|
||||
}
|
||||
|
||||
// Create a Pool with all of the certificates found
|
||||
pool := x509.NewCertPool()
|
||||
for _, cert := range parsedCerts {
|
||||
switch cert.SignatureAlgorithm {
|
||||
case x509.SHA256WithRSA, x509.SHA384WithRSA, x509.SHA512WithRSA, x509.ECDSAWithSHA256, x509.ECDSAWithSHA384, x509.ECDSAWithSHA512:
|
||||
break
|
||||
default:
|
||||
return RootCA{}, fmt.Errorf("unsupported signature algorithm: %s", cert.SignatureAlgorithm.String())
|
||||
if err := validateSignatureAlgorithm(cert); err != nil {
|
||||
return RootCA{}, err
|
||||
}
|
||||
// Check to see if all of the certificates are valid, self-signed root CA certs
|
||||
selfpool := x509.NewCertPool()
|
||||
|
@ -405,81 +424,42 @@ func NewRootCA(certBytes, keyBytes []byte, certExpiry time.Duration, intermediat
|
|||
}
|
||||
|
||||
// Calculate the digest for our Root CA bundle
|
||||
digest := digest.FromBytes(certBytes)
|
||||
digest := digest.FromBytes(rootCertBytes)
|
||||
|
||||
// We do not yet support arbitrary chains of intermediates (e.g. the case of an offline root, and the swarm CA is an
|
||||
// intermediate CA). We currently only intermediates for which the first intermediate is cross-signed version of the
|
||||
// CA signing cert (the first cert of the root certs) for the purposes of root rotation. If we wanted to support
|
||||
// offline roots, we'd have to separate the CA signing cert from the self-signed root certs, but this intermediate
|
||||
// validation logic should remain the same. Either the first intermediate would BE the intermediate CA we sign with
|
||||
// (in which case it'd have the same subject and public key), or it would be a cross-signed intermediate with the
|
||||
// same subject and public key as our signing cert (which could be either an intermediate cert or a self-signed root
|
||||
// cert).
|
||||
// The intermediates supplied must be able to chain up to the root certificates, so that when they are appended to
|
||||
// a leaf certificate, the leaf certificate can be validated through the intermediates to the root certificates.
|
||||
var intermediatePool *x509.CertPool
|
||||
var parsedIntermediates []*x509.Certificate
|
||||
if len(intermediates) > 0 {
|
||||
parsedIntermediates, err := ValidateCertChain(pool, intermediates, false)
|
||||
parsedIntermediates, err = ValidateCertChain(pool, intermediates, false)
|
||||
if err != nil {
|
||||
return RootCA{}, errors.Wrap(err, "invalid intermediate chain")
|
||||
}
|
||||
if !bytes.Equal(parsedIntermediates[0].RawSubject, parsedCerts[0].RawSubject) ||
|
||||
!bytes.Equal(parsedIntermediates[0].RawSubjectPublicKeyInfo, parsedCerts[0].RawSubjectPublicKeyInfo) {
|
||||
return RootCA{}, errors.New("invalid intermediate chain - the first intermediate must have the same subject and public key as the root")
|
||||
intermediatePool = x509.NewCertPool()
|
||||
for _, cert := range parsedIntermediates {
|
||||
intermediatePool.AddCert(cert)
|
||||
}
|
||||
}
|
||||
|
||||
if len(keyBytes) == 0 {
|
||||
// This RootCA does not have a valid signer
|
||||
return RootCA{Cert: certBytes, Intermediates: intermediates, Digest: digest, Pool: pool}, nil
|
||||
}
|
||||
|
||||
var (
|
||||
passphraseStr string
|
||||
passphrase, passphrasePrev []byte
|
||||
priv crypto.Signer
|
||||
)
|
||||
|
||||
// Attempt two distinct passphrases, so we can do a hitless passphrase rotation
|
||||
if passphraseStr = os.Getenv(PassphraseENVVar); passphraseStr != "" {
|
||||
passphrase = []byte(passphraseStr)
|
||||
}
|
||||
|
||||
if p := os.Getenv(PassphraseENVVarPrev); p != "" {
|
||||
passphrasePrev = []byte(p)
|
||||
}
|
||||
|
||||
// Attempt to decrypt the current private-key with the passphrases provided
|
||||
priv, err = helpers.ParsePrivateKeyPEMWithPassword(keyBytes, passphrase)
|
||||
if err != nil {
|
||||
priv, err = helpers.ParsePrivateKeyPEMWithPassword(keyBytes, passphrasePrev)
|
||||
if err != nil {
|
||||
return RootCA{}, errors.Wrap(err, "malformed private key")
|
||||
}
|
||||
}
|
||||
|
||||
// We will always use the first certificate inside of the root bundle as the active one
|
||||
if err := ensureCertKeyMatch(parsedCerts[0], priv.Public()); err != nil {
|
||||
return RootCA{}, err
|
||||
}
|
||||
|
||||
signer, err := local.NewSigner(priv, parsedCerts[0], cfsigner.DefaultSigAlgo(priv), SigningPolicy(certExpiry))
|
||||
if err != nil {
|
||||
return RootCA{}, err
|
||||
}
|
||||
|
||||
// If the key was loaded from disk unencrypted, but there is a passphrase set,
|
||||
// ensure it is encrypted, so it doesn't hit raft in plain-text
|
||||
keyBlock, _ := pem.Decode(keyBytes)
|
||||
if keyBlock == nil {
|
||||
// This RootCA does not have a valid signer.
|
||||
return RootCA{Cert: certBytes, Digest: digest, Pool: pool}, nil
|
||||
}
|
||||
if passphraseStr != "" && !x509.IsEncryptedPEMBlock(keyBlock) {
|
||||
keyBytes, err = EncryptECPrivateKey(keyBytes, passphraseStr)
|
||||
var localSigner *LocalSigner
|
||||
if len(signKeyBytes) != 0 || len(signCertBytes) != 0 {
|
||||
localSigner, err = newLocalSigner(signKeyBytes, signCertBytes, certExpiry, pool, intermediatePool)
|
||||
if err != nil {
|
||||
return RootCA{}, err
|
||||
}
|
||||
|
||||
// If a signer is provided and there are intermediates, then either the first intermediate would be the signer CA
|
||||
// certificate (in which case it'd have the same subject and public key), or it would be a cross-signed
|
||||
// intermediate with the same subject and public key as our signing CA certificate (which could be either an
|
||||
// intermediate cert or a self-signed root cert).
|
||||
if len(parsedIntermediates) > 0 && (!bytes.Equal(parsedIntermediates[0].RawSubject, localSigner.parsedCert.RawSubject) ||
|
||||
!bytes.Equal(parsedIntermediates[0].RawSubjectPublicKeyInfo, localSigner.parsedCert.RawSubjectPublicKeyInfo)) {
|
||||
return RootCA{}, errors.New(
|
||||
"invalid intermediate chain - the first intermediate must have the same subject and public key as the signing cert")
|
||||
}
|
||||
}
|
||||
|
||||
return RootCA{Signer: &LocalSigner{Signer: signer, Key: keyBytes}, Intermediates: intermediates, Digest: digest, Cert: certBytes, Pool: pool}, nil
|
||||
return RootCA{signer: localSigner, Intermediates: intermediates, Digest: digest, Certs: rootCertBytes, Pool: pool}, nil
|
||||
}
|
||||
|
||||
// ValidateCertChain checks checks that the certificates provided chain up to the root pool provided. In addition
|
||||
|
@ -579,6 +559,78 @@ func ValidateCertChain(rootPool *x509.CertPool, certs []byte, allowExpired bool)
|
|||
return parsedCerts, nil
|
||||
}
|
||||
|
||||
// newLocalSigner validates the signing cert and signing key to create a local signer, which accepts a crypto signer and a cert
|
||||
func newLocalSigner(keyBytes, certBytes []byte, certExpiry time.Duration, rootPool, intermediatePool *x509.CertPool) (*LocalSigner, error) {
|
||||
if len(keyBytes) == 0 || len(certBytes) == 0 {
|
||||
return nil, errors.New("must provide both a signing key and a signing cert, or neither")
|
||||
}
|
||||
|
||||
parsedCerts, err := helpers.ParseCertificatesPEM(certBytes)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "invalid signing CA cert")
|
||||
}
|
||||
if len(parsedCerts) == 0 {
|
||||
return nil, errors.New("no valid signing CA certificates found")
|
||||
}
|
||||
if err := validateSignatureAlgorithm(parsedCerts[0]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
opts := x509.VerifyOptions{
|
||||
Roots: rootPool,
|
||||
Intermediates: intermediatePool,
|
||||
}
|
||||
if _, err := parsedCerts[0].Verify(opts); err != nil {
|
||||
return nil, errors.Wrap(err, "error while validating signing CA certificate against roots and intermediates")
|
||||
}
|
||||
|
||||
var (
|
||||
passphraseStr string
|
||||
passphrase, passphrasePrev []byte
|
||||
priv crypto.Signer
|
||||
)
|
||||
|
||||
// Attempt two distinct passphrases, so we can do a hitless passphrase rotation
|
||||
if passphraseStr = os.Getenv(PassphraseENVVar); passphraseStr != "" {
|
||||
passphrase = []byte(passphraseStr)
|
||||
}
|
||||
|
||||
if p := os.Getenv(PassphraseENVVarPrev); p != "" {
|
||||
passphrasePrev = []byte(p)
|
||||
}
|
||||
|
||||
// Attempt to decrypt the current private-key with the passphrases provided
|
||||
priv, err = helpers.ParsePrivateKeyPEMWithPassword(keyBytes, passphrase)
|
||||
if err != nil {
|
||||
priv, err = helpers.ParsePrivateKeyPEMWithPassword(keyBytes, passphrasePrev)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "malformed private key")
|
||||
}
|
||||
}
|
||||
|
||||
// We will always use the first certificate inside of the root bundle as the active one
|
||||
if err := ensureCertKeyMatch(parsedCerts[0], priv.Public()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
signer, err := local.NewSigner(priv, parsedCerts[0], cfsigner.DefaultSigAlgo(priv), SigningPolicy(certExpiry))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If the key was loaded from disk unencrypted, but there is a passphrase set,
|
||||
// ensure it is encrypted, so it doesn't hit raft in plain-text
|
||||
// we don't have to check for nil, because if we couldn't pem-decode the bytes, then parsing above would have failed
|
||||
keyBlock, _ := pem.Decode(keyBytes)
|
||||
if passphraseStr != "" && !x509.IsEncryptedPEMBlock(keyBlock) {
|
||||
keyBytes, err = EncryptECPrivateKey(keyBytes, passphraseStr)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to encrypt signing CA key material")
|
||||
}
|
||||
}
|
||||
|
||||
return &LocalSigner{Cert: certBytes, Key: keyBytes, Signer: signer, parsedCert: parsedCerts[0], cryptoSigner: priv}, nil
|
||||
}
|
||||
|
||||
func ensureCertKeyMatch(cert *x509.Certificate, key crypto.PublicKey) error {
|
||||
switch certPub := cert.PublicKey.(type) {
|
||||
case *rsa.PublicKey:
|
||||
|
@ -620,6 +672,7 @@ func GetLocalRootCA(paths CertPaths) (RootCA, error) {
|
|||
|
||||
return RootCA{}, err
|
||||
}
|
||||
signingCert := cert
|
||||
|
||||
key, err := ioutil.ReadFile(paths.Key)
|
||||
if err != nil {
|
||||
|
@ -629,9 +682,10 @@ func GetLocalRootCA(paths CertPaths) (RootCA, error) {
|
|||
// There may not be a local key. It's okay to pass in a nil
|
||||
// key. We'll get a root CA without a signer.
|
||||
key = nil
|
||||
signingCert = nil
|
||||
}
|
||||
|
||||
return NewRootCA(cert, key, DefaultNodeCertExpiration, nil)
|
||||
return NewRootCA(cert, signingCert, key, DefaultNodeCertExpiration, nil)
|
||||
}
|
||||
|
||||
func getGRPCConnection(creds credentials.TransportCredentials, connBroker *connectionbroker.Broker, forceRemote bool) (*connectionbroker.Conn, error) {
|
||||
|
@ -686,12 +740,12 @@ func GetRemoteCA(ctx context.Context, d digest.Digest, connBroker *connectionbro
|
|||
|
||||
// NewRootCA will validate that the certificates are otherwise valid and create a RootCA object.
|
||||
// Since there is no key, the certificate expiry does not matter and will not be used.
|
||||
return NewRootCA(response.Certificate, nil, DefaultNodeCertExpiration, nil)
|
||||
return NewRootCA(response.Certificate, nil, nil, DefaultNodeCertExpiration, nil)
|
||||
}
|
||||
|
||||
// CreateRootCA creates a Certificate authority for a new Swarm Cluster, potentially
|
||||
// overwriting any existing CAs.
|
||||
func CreateRootCA(rootCN string, paths CertPaths) (RootCA, error) {
|
||||
func CreateRootCA(rootCN string) (RootCA, error) {
|
||||
// Create a simple CSR for the CA using the default CA validator and policy
|
||||
req := cfcsr.CertificateRequest{
|
||||
CN: rootCN,
|
||||
|
@ -705,16 +759,11 @@ func CreateRootCA(rootCN string, paths CertPaths) (RootCA, error) {
        return RootCA{}, err
    }

    rootCA, err := NewRootCA(cert, key, DefaultNodeCertExpiration, nil)
    rootCA, err := NewRootCA(cert, cert, key, DefaultNodeCertExpiration, nil)
    if err != nil {
        return RootCA{}, err
    }

    // save the cert to disk
    if err := saveRootCA(rootCA, paths); err != nil {
        return RootCA{}, err
    }

    return rootCA, nil
}

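CreateRootCA above now passes the same PEM bundle as both the root bundle and the signing cert; NewRootCA's five arguments are root certs, signing cert, signing key, expiry, and intermediates. A sketch of loading a signing root and a verify-only root from disk (the file paths are placeholders):

package main

import (
    "io/ioutil"

    "github.com/docker/swarmkit/ca"
)

// loadRoots builds one RootCA that can sign (cert + key present) and one that
// can only verify (no signer material).
func loadRoots(certPath, keyPath string) (signing, verifyOnly ca.RootCA, err error) {
    certPEM, err := ioutil.ReadFile(certPath)
    if err != nil {
        return
    }
    keyPEM, err := ioutil.ReadFile(keyPath)
    if err != nil {
        return
    }
    // rootCertBytes, signCertBytes, signKeyBytes, certExpiry, intermediates
    signing, err = ca.NewRootCA(certPEM, certPEM, keyPEM, ca.DefaultNodeCertExpiration, nil)
    if err != nil {
        return
    }
    verifyOnly, err = ca.NewRootCA(certPEM, nil, nil, ca.DefaultNodeCertExpiration, nil)
    return
}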
@ -818,7 +867,8 @@ func readCertValidity(kr KeyReader) (time.Time, time.Time, error) {
|
|||
|
||||
}
|
||||
|
||||
func saveRootCA(rootCA RootCA, paths CertPaths) error {
|
||||
// SaveRootCA saves a RootCA object to disk
|
||||
func SaveRootCA(rootCA RootCA, paths CertPaths) error {
|
||||
// Make sure the necessary dirs exist and they are writable
|
||||
err := os.MkdirAll(filepath.Dir(paths.Cert), 0755)
|
||||
if err != nil {
|
||||
|
@ -826,7 +876,7 @@ func saveRootCA(rootCA RootCA, paths CertPaths) error {
|
|||
}
|
||||
|
||||
// If the root certificate got returned successfully, save the rootCA to disk.
|
||||
return ioutils.AtomicWriteFile(paths.Cert, rootCA.Cert, 0644)
|
||||
return ioutils.AtomicWriteFile(paths.Cert, rootCA.Certs, 0644)
|
||||
}
|
||||
|
||||
// GenerateNewCSR returns a newly generated key and CSR signed with said key
|
||||
|
|
73
vendor/github.com/docker/swarmkit/ca/config.go
generated
vendored
@ -71,6 +71,8 @@ type SecurityConfig struct {
|
|||
externalCA *ExternalCA
|
||||
keyReadWriter *KeyReadWriter
|
||||
|
||||
externalCAClientRootPool *x509.CertPool
|
||||
|
||||
ServerTLSCreds *MutableTLSCreds
|
||||
ClientTLSCreds *MutableTLSCreds
|
||||
}
|
||||
|
@ -95,11 +97,12 @@ func NewSecurityConfig(rootCA *RootCA, krw *KeyReadWriter, clientTLSCreds, serve
|
|||
}
|
||||
|
||||
return &SecurityConfig{
|
||||
rootCA: rootCA,
|
||||
keyReadWriter: krw,
|
||||
externalCA: NewExternalCA(rootCA, externalCATLSConfig),
|
||||
ClientTLSCreds: clientTLSCreds,
|
||||
ServerTLSCreds: serverTLSCreds,
|
||||
rootCA: rootCA,
|
||||
keyReadWriter: krw,
|
||||
externalCA: NewExternalCA(rootCA, externalCATLSConfig),
|
||||
ClientTLSCreds: clientTLSCreds,
|
||||
ServerTLSCreds: serverTLSCreds,
|
||||
externalCAClientRootPool: rootCA.Pool,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -126,18 +129,13 @@ func (s *SecurityConfig) KeyReader() KeyReader {
    return s.keyReadWriter
}

// UpdateRootCA replaces the root CA with a new root CA based on the specified
// certificate, key, and the number of hours the certificates issue should last.
func (s *SecurityConfig) UpdateRootCA(cert, key []byte, certExpiry time.Duration) error {
// UpdateRootCA replaces the root CA with a new root CA
func (s *SecurityConfig) UpdateRootCA(rootCA *RootCA, externalCARootPool *x509.CertPool) error {
    s.mu.Lock()
    defer s.mu.Unlock()

    rootCA, err := NewRootCA(cert, key, certExpiry, nil)
    if err != nil {
        return err
    }

    s.rootCA = &rootCA
    s.rootCA = rootCA
    s.externalCAClientRootPool = externalCARootPool
    clientTLSConfig := s.ClientTLSCreds.Config()
    return s.updateTLSCredentials(clientTLSConfig.Certificates)
}
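SecurityConfig.UpdateRootCA now takes a ready-made RootCA and the cert pool its external CA client should trust, instead of raw cert/key bytes and an expiry. A sketch of the caller side, mirroring what the CA server does later in this diff during a root rotation; the variable names are illustrative:

package main

import (
    "crypto/x509"
    "time"

    "github.com/docker/swarmkit/ca"
)

// applyNewRoot rebuilds the RootCA from cluster PEM material and swaps it into
// an existing SecurityConfig. extraRootPEM is the incoming root during a
// rotation and may be nil.
func applyNewRoot(secConfig *ca.SecurityConfig, rootPEM, signCertPEM, signKeyPEM, extraRootPEM []byte, expiry time.Duration) error {
    updated, err := ca.NewRootCA(rootPEM, signCertPEM, signKeyPEM, expiry, nil)
    if err != nil {
        return err
    }
    externalPool := updated.Pool
    if extraRootPEM != nil {
        // During a rotation the external CA must trust both the old and new roots.
        externalPool = x509.NewCertPool()
        externalPool.AppendCertsFromPEM(rootPEM)
        externalPool.AppendCertsFromPEM(extraRootPEM)
    }
    return secConfig.UpdateRootCA(&updated, externalPool)
}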
@ -163,7 +161,7 @@ func (s *SecurityConfig) updateTLSCredentials(certificates []tls.Certificate) er
|
|||
// config using a copy without a serverName specified.
|
||||
s.externalCA.UpdateTLSConfig(&tls.Config{
|
||||
Certificates: certificates,
|
||||
RootCAs: s.rootCA.Pool,
|
||||
RootCAs: s.externalCAClientRootPool,
|
||||
MinVersion: tls.VersionTLS12,
|
||||
})
|
||||
|
||||
|
@ -278,7 +276,7 @@ func DownloadRootCA(ctx context.Context, paths CertPaths, token string, connBrok
|
|||
}
|
||||
|
||||
// Save root CA certificate to disk
|
||||
if err = saveRootCA(rootCA, paths); err != nil {
|
||||
if err = SaveRootCA(rootCA, paths); err != nil {
|
||||
return RootCA{}, err
|
||||
}
|
||||
|
||||
|
@ -359,31 +357,14 @@ type CertificateRequestConfig struct {
|
|||
func (rootCA RootCA) CreateSecurityConfig(ctx context.Context, krw *KeyReadWriter, config CertificateRequestConfig) (*SecurityConfig, error) {
|
||||
ctx = log.WithModule(ctx, "tls")
|
||||
|
||||
var (
|
||||
tlsKeyPair *tls.Certificate
|
||||
err error
|
||||
)
|
||||
// Create a new random ID for this certificate
|
||||
cn := identity.NewID()
|
||||
org := identity.NewID()
|
||||
|
||||
if rootCA.CanSign() {
|
||||
// Create a new random ID for this certificate
|
||||
cn := identity.NewID()
|
||||
org := identity.NewID()
|
||||
|
||||
proposedRole := ManagerRole
|
||||
tlsKeyPair, err = rootCA.IssueAndSaveNewCertificates(krw, cn, proposedRole, org)
|
||||
if err != nil {
|
||||
log.G(ctx).WithFields(logrus.Fields{
|
||||
"node.id": cn,
|
||||
"node.role": proposedRole,
|
||||
}).WithError(err).Errorf("failed to issue and save new certificate")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.G(ctx).WithFields(logrus.Fields{
|
||||
"node.id": cn,
|
||||
"node.role": proposedRole,
|
||||
}).Debug("issued new TLS certificate")
|
||||
} else {
|
||||
proposedRole := ManagerRole
|
||||
tlsKeyPair, err := rootCA.IssueAndSaveNewCertificates(krw, cn, proposedRole, org)
|
||||
switch errors.Cause(err) {
|
||||
case ErrNoValidSigner:
|
||||
// Request certificate issuance from a remote CA.
|
||||
// Last argument is nil because at this point we don't have any valid TLS creds
|
||||
tlsKeyPair, err = rootCA.RequestAndSaveNewCertificates(ctx, krw, config)
|
||||
|
@ -391,7 +372,19 @@ func (rootCA RootCA) CreateSecurityConfig(ctx context.Context, krw *KeyReadWrite
|
|||
log.G(ctx).WithError(err).Error("failed to request save new certificate")
|
||||
return nil, err
|
||||
}
|
||||
case nil:
|
||||
log.G(ctx).WithFields(logrus.Fields{
|
||||
"node.id": cn,
|
||||
"node.role": proposedRole,
|
||||
}).Debug("issued new TLS certificate")
|
||||
default:
|
||||
log.G(ctx).WithFields(logrus.Fields{
|
||||
"node.id": cn,
|
||||
"node.role": proposedRole,
|
||||
}).WithError(err).Errorf("failed to issue and save new certificate")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Create the Server TLS Credentials for this node. These will not be used by workers.
|
||||
serverTLSCreds, err := rootCA.NewServerTLSCredentials(tlsKeyPair)
|
||||
if err != nil {
|
||||
|
|
19
vendor/github.com/docker/swarmkit/ca/external.go
generated
vendored
@ -16,7 +16,6 @@ import (
|
|||
"github.com/cloudflare/cfssl/api"
|
||||
"github.com/cloudflare/cfssl/config"
|
||||
"github.com/cloudflare/cfssl/csr"
|
||||
"github.com/cloudflare/cfssl/helpers"
|
||||
"github.com/cloudflare/cfssl/signer"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/context"
|
||||
|
@ -107,20 +106,14 @@ func (eca *ExternalCA) Sign(ctx context.Context, req signer.SignRequest) (cert [
|
|||
// CrossSignRootCA takes a RootCA object, generates a CA CSR, sends a signing request with the CA CSR to the external
|
||||
// CFSSL API server in order to obtain a cross-signed root
|
||||
func (eca *ExternalCA) CrossSignRootCA(ctx context.Context, rca RootCA) ([]byte, error) {
|
||||
if !rca.CanSign() {
|
||||
return nil, errors.Wrap(ErrNoValidSigner, "cannot generate CSR for a cross-signed root")
|
||||
}
|
||||
rootCert, err := helpers.ParseCertificatePEM(rca.Cert)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not parse CA certificate")
|
||||
}
|
||||
rootSigner, err := helpers.ParsePrivateKeyPEM(rca.Signer.Key)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not parse old CA key")
|
||||
}
|
||||
// ExtractCertificateRequest generates a new key request, and we want to continue to use the old
|
||||
// key. However, ExtractCertificateRequest will also convert the pkix.Name to csr.Name, which we
|
||||
// need in order to generate a signing request
|
||||
rcaSigner, err := rca.Signer()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rootCert := rcaSigner.parsedCert
|
||||
cfCSRObj := csr.ExtractCertificateRequest(rootCert)
|
||||
|
||||
der, err := x509.CreateCertificateRequest(cryptorand.Reader, &x509.CertificateRequest{
|
||||
|
@ -132,7 +125,7 @@ func (eca *ExternalCA) CrossSignRootCA(ctx context.Context, rca RootCA) ([]byte,
|
|||
DNSNames: rootCert.DNSNames,
|
||||
EmailAddresses: rootCert.EmailAddresses,
|
||||
IPAddresses: rootCert.IPAddresses,
|
||||
}, rootSigner)
|
||||
}, rcaSigner.cryptoSigner)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
150
vendor/github.com/docker/swarmkit/ca/server.go
generated
vendored
@ -1,12 +1,15 @@
package ca

import (
    "bytes"
    "crypto/subtle"
    "crypto/x509"
    "sync"
    "time"

    "github.com/Sirupsen/logrus"
    "github.com/docker/swarmkit/api"
    "github.com/docker/swarmkit/api/equality"
    "github.com/docker/swarmkit/identity"
    "github.com/docker/swarmkit/log"
    "github.com/docker/swarmkit/manager/state"

@ -22,6 +25,13 @@ const (
    defaultReconciliationRetryInterval = 10 * time.Second
)

// APISecurityConfigUpdater knows how to update a SecurityConfig from an api.Cluster object
type APISecurityConfigUpdater interface {
    UpdateRootCA(ctx context.Context, cluster *api.Cluster) error
}

var _ APISecurityConfigUpdater = &Server{}

// Server is the CA and NodeCA API gRPC server.
// TODO(aaronl): At some point we may want to have separate implementations of
// CA, NodeCA, and other hypothetical future CA services. At the moment,
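The APISecurityConfigUpdater interface above decouples components that watch cluster state from the CA server's concrete type: anything holding the interface can push a cluster object at it and let it refresh the security config. A sketch of a possible consumer; the updates channel is hypothetical and would be fed from a store watch in practice:

package main

import (
    "github.com/docker/swarmkit/api"
    "github.com/docker/swarmkit/ca"
    "github.com/docker/swarmkit/log"
    "golang.org/x/net/context"
)

// propagateClusterChanges forwards cluster updates to the CA server (or any
// other APISecurityConfigUpdater) until the context is cancelled.
func propagateClusterChanges(ctx context.Context, updater ca.APISecurityConfigUpdater, updates <-chan *api.Cluster) {
    for {
        select {
        case cluster := <-updates:
            if err := updater.UpdateRootCA(ctx, cluster); err != nil {
                log.G(ctx).WithError(err).Warn("failed to update root CA from cluster")
            }
        case <-ctx.Done():
            return
        }
    }
}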
@ -43,6 +53,14 @@ type Server struct {
|
|||
// started is a channel which gets closed once the server is running
|
||||
// and able to service RPCs.
|
||||
started chan struct{}
|
||||
|
||||
// these are cached values to ensure we only update the security config when
|
||||
// the cluster root CA and external CAs have changed - the cluster object
|
||||
// can change for other reasons, and it would not be necessary to update
|
||||
// the security config as a result
|
||||
lastSeenClusterRootCA *api.RootCA
|
||||
lastSeenExternalCAs []*api.ExternalCA
|
||||
secConfigMu sync.Mutex
|
||||
}
|
||||
|
||||
// DefaultCAConfig returns the default CA Config, with a default expiration.
|
||||
|
@ -109,9 +127,9 @@ func (s *Server) NodeCertificateStatus(ctx context.Context, request *api.NodeCer
|
|||
|
||||
var node *api.Node
|
||||
|
||||
event := state.EventUpdateNode{
|
||||
event := api.EventUpdateNode{
|
||||
Node: &api.Node{ID: request.NodeID},
|
||||
Checks: []state.NodeCheckFunc{state.NodeCheckID},
|
||||
Checks: []api.NodeCheckFunc{state.NodeCheckID},
|
||||
}
|
||||
|
||||
// Retrieve the current value of the certificate with this token, and create a watcher
|
||||
|
@ -158,7 +176,7 @@ func (s *Server) NodeCertificateStatus(ctx context.Context, request *api.NodeCer
|
|||
select {
|
||||
case event := <-updates:
|
||||
switch v := event.(type) {
|
||||
case state.EventUpdateNode:
|
||||
case api.EventUpdateNode:
|
||||
// We got an update on the certificate record. If the status is a final state,
|
||||
// return the certificate.
|
||||
if isFinalState(v.Node.Certificate.Status) {
|
||||
|
@ -361,7 +379,7 @@ func (s *Server) GetRootCACertificate(ctx context.Context, request *api.GetRootC
|
|||
})
|
||||
|
||||
return &api.GetRootCACertificateResponse{
|
||||
Certificate: s.securityConfig.RootCA().Cert,
|
||||
Certificate: s.securityConfig.RootCA().Certs,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
@ -396,8 +414,8 @@ func (s *Server) Run(ctx context.Context) error {
|
|||
nodes, err = store.FindNodes(readTx, store.All)
|
||||
return err
|
||||
},
|
||||
state.EventCreateNode{},
|
||||
state.EventUpdateNode{},
|
||||
api.EventCreateNode{},
|
||||
api.EventUpdateNode{},
|
||||
)
|
||||
|
||||
// Do this after updateCluster has been called, so isRunning never
|
||||
|
@ -441,9 +459,9 @@ func (s *Server) Run(ctx context.Context) error {
|
|||
select {
|
||||
case event := <-updates:
|
||||
switch v := event.(type) {
|
||||
case state.EventCreateNode:
|
||||
case api.EventCreateNode:
|
||||
s.evaluateAndSignNodeCert(ctx, v.Node)
|
||||
case state.EventUpdateNode:
|
||||
case api.EventUpdateNode:
|
||||
// If this certificate is already at a final state
|
||||
// no need to evaluate and sign it.
|
||||
if !isFinalState(v.Node.Certificate.Status) {
|
||||
|
@@ -517,65 +535,111 @@ func (s *Server) isRunning() bool {
// UpdateRootCA is called when there are cluster changes, and it ensures that the local RootCA is
// always aware of changes in clusterExpiry and the Root CA key material - this can be called by
// anything to update the root CA material
func (s *Server) UpdateRootCA(ctx context.Context, cluster *api.Cluster) {
func (s *Server) UpdateRootCA(ctx context.Context, cluster *api.Cluster) error {
s.mu.Lock()
s.joinTokens = cluster.RootCA.JoinTokens.Copy()
s.mu.Unlock()
var err error

// If the cluster has a RootCA, let's try to update our SecurityConfig to reflect the latest values
s.secConfigMu.Lock()
defer s.secConfigMu.Unlock()
var err error
rCA := cluster.RootCA
if len(rCA.CACert) != 0 && len(rCA.CAKey) != 0 {
rootCAChanged := len(rCA.CACert) != 0 && !equality.RootCAEqualStable(s.lastSeenClusterRootCA, &cluster.RootCA)
externalCAChanged := !equality.ExternalCAsEqualStable(s.lastSeenExternalCAs, cluster.Spec.CAConfig.ExternalCAs)
logger := log.G(ctx).WithFields(logrus.Fields{
"cluster.id": cluster.ID,
"method": "(*Server).UpdateRootCA",
})

if rootCAChanged {
logger.Debug("Updating security config due to change in cluster Root CA")
expiry := DefaultNodeCertExpiration
if cluster.Spec.CAConfig.NodeCertExpiry != nil {
// NodeCertExpiry exists, let's try to parse the duration out of it
clusterExpiry, err := gogotypes.DurationFromProto(cluster.Spec.CAConfig.NodeCertExpiry)
if err != nil {
log.G(ctx).WithFields(logrus.Fields{
"cluster.id": cluster.ID,
"method": "(*Server).updateCluster",
}).WithError(err).Warn("failed to parse certificate expiration, using default")
logger.WithError(err).Warn("failed to parse certificate expiration, using default")
} else {
// We were able to successfully parse the expiration out of the cluster.
expiry = clusterExpiry
}
} else {
// NodeCertExpiry seems to be nil
log.G(ctx).WithFields(logrus.Fields{
"cluster.id": cluster.ID,
"method": "(*Server).updateCluster",
}).WithError(err).Warn("failed to parse certificate expiration, using default")

logger.WithError(err).Warn("failed to parse certificate expiration, using default")
}
// Attempt to update our local RootCA with the new parameters
err = s.securityConfig.UpdateRootCA(rCA.CACert, rCA.CAKey, expiry)
var intermediates []byte
signingCert := rCA.CACert
signingKey := rCA.CAKey
if rCA.RootRotation != nil {
signingCert = rCA.RootRotation.CrossSignedCACert
signingKey = rCA.RootRotation.CAKey
intermediates = rCA.RootRotation.CrossSignedCACert
}
if signingKey == nil {
signingCert = nil
}

updatedRootCA, err := NewRootCA(rCA.CACert, signingCert, signingKey, expiry, intermediates)
if err != nil {
log.G(ctx).WithFields(logrus.Fields{
"cluster.id": cluster.ID,
"method": "(*Server).updateCluster",
}).WithError(err).Error("updating Root CA failed")
} else {
log.G(ctx).WithFields(logrus.Fields{
"cluster.id": cluster.ID,
"method": "(*Server).updateCluster",
}).Debugf("Root CA updated successfully")
return errors.Wrap(err, "invalid Root CA object in cluster")
}

externalCARootPool := updatedRootCA.Pool
if rCA.RootRotation != nil {
// the external CA has to trust the new CA cert
externalCARootPool = x509.NewCertPool()
externalCARootPool.AppendCertsFromPEM(rCA.CACert)
externalCARootPool.AppendCertsFromPEM(rCA.RootRotation.CACert)
}

// Attempt to update our local RootCA with the new parameters
if err := s.securityConfig.UpdateRootCA(&updatedRootCA, externalCARootPool); err != nil {
return errors.Wrap(err, "updating Root CA failed")
}
// only update the server cache if we've successfully updated the root CA
logger.Debug("Root CA updated successfully")
s.lastSeenClusterRootCA = cluster.RootCA.Copy()
}

// Update our security config with the list of External CA URLs
// from the new cluster state.

// TODO(aaronl): In the future, this will be abstracted with an
// ExternalCA interface that has different implementations for
// different CA types. At the moment, only CFSSL is supported.
var cfsslURLs []string
for _, ca := range cluster.Spec.CAConfig.ExternalCAs {
if ca.Protocol == api.ExternalCA_CAProtocolCFSSL {
cfsslURLs = append(cfsslURLs, ca.URL)
// we want to update if the external CA changed, or if the root CA changed because the root CA could affect what
// certificate for external CAs we want to filter by
if rootCAChanged || externalCAChanged {
logger.Debug("Updating security config due to change in cluster Root CA or cluster spec")
wantedExternalCACert := rCA.CACert // we want to only add external CA URLs that use this cert
if rCA.RootRotation != nil {
// we're rotating to a new root, so we only want external CAs with the new root cert
wantedExternalCACert = rCA.RootRotation.CACert
}
}
// Update our security config with the list of External CA URLs
// from the new cluster state.

s.securityConfig.externalCA.UpdateURLs(cfsslURLs...)
// TODO(aaronl): In the future, this will be abstracted with an
// ExternalCA interface that has different implementations for
// different CA types. At the moment, only CFSSL is supported.
var cfsslURLs []string
for i, extCA := range cluster.Spec.CAConfig.ExternalCAs {
// We want to support old external CA specifications which did not have a CA cert. If there is no cert specified,
// we assume it's the old cert
certForExtCA := extCA.CACert
if len(certForExtCA) == 0 {
certForExtCA = rCA.CACert
}
if extCA.Protocol != api.ExternalCA_CAProtocolCFSSL {
logger.Debugf("skipping external CA %d (url: %s) due to unknown protocol type", i, extCA.URL)
continue
}
if !bytes.Equal(certForExtCA, wantedExternalCACert) {
logger.Debugf("skipping external CA %d (url: %s) because it has the wrong CA cert", i, extCA.URL)
continue
}
cfsslURLs = append(cfsslURLs, extCA.URL)
}

s.securityConfig.externalCA.UpdateURLs(cfsslURLs...)
s.lastSeenExternalCAs = cluster.Spec.CAConfig.Copy().ExternalCAs
}
return nil
}

// evaluateAndSignNodeCert implements the logic of which certificates to sign

23 vendor/github.com/docker/swarmkit/manager/allocator/allocator.go generated vendored
@@ -5,6 +5,7 @@ import (
"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/go-events"
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/manager/state"
"github.com/docker/swarmkit/manager/state/store"
"golang.org/x/net/context"

@@ -99,17 +100,17 @@ func (a *Allocator) Run(ctx context.Context) error {
var actors []func() error
watch, watchCancel := state.Watch(a.store.WatchQueue(),
state.EventCreateNetwork{},
state.EventDeleteNetwork{},
state.EventCreateService{},
state.EventUpdateService{},
state.EventDeleteService{},
state.EventCreateTask{},
state.EventUpdateTask{},
state.EventDeleteTask{},
state.EventCreateNode{},
state.EventUpdateNode{},
state.EventDeleteNode{},
api.EventCreateNetwork{},
api.EventDeleteNetwork{},
api.EventCreateService{},
api.EventUpdateService{},
api.EventDeleteService{},
api.EventCreateTask{},
api.EventUpdateTask{},
api.EventDeleteTask{},
api.EventCreateNode{},
api.EventUpdateNode{},
api.EventDeleteNode{},
state.EventCommit{},
)

91 vendor/github.com/docker/swarmkit/manager/allocator/network.go generated vendored
@ -258,7 +258,7 @@ func (a *Allocator) doNetworkAlloc(ctx context.Context, ev events.Event) {
|
|||
nc := a.netCtx
|
||||
|
||||
switch v := ev.(type) {
|
||||
case state.EventCreateNetwork:
|
||||
case api.EventCreateNetwork:
|
||||
n := v.Network.Copy()
|
||||
if nc.nwkAllocator.IsAllocated(n) {
|
||||
break
|
||||
|
@ -288,7 +288,7 @@ func (a *Allocator) doNetworkAlloc(ctx context.Context, ev events.Event) {
|
|||
log.G(ctx).WithError(err).Error(err)
|
||||
}
|
||||
}
|
||||
case state.EventDeleteNetwork:
|
||||
case api.EventDeleteNetwork:
|
||||
n := v.Network.Copy()
|
||||
|
||||
if IsIngressNetwork(n) && nc.ingressNetwork.ID == n.ID {
|
||||
|
@ -307,8 +307,15 @@ func (a *Allocator) doNetworkAlloc(ctx context.Context, ev events.Event) {
|
|||
}
|
||||
|
||||
delete(nc.unallocatedNetworks, n.ID)
|
||||
case state.EventCreateService:
|
||||
s := v.Service.Copy()
|
||||
case api.EventCreateService:
|
||||
var s *api.Service
|
||||
a.store.View(func(tx store.ReadTx) {
|
||||
s = store.GetService(tx, v.Service.ID)
|
||||
})
|
||||
|
||||
if s == nil {
|
||||
break
|
||||
}
|
||||
|
||||
if nc.nwkAllocator.IsServiceAllocated(s) {
|
||||
break
|
||||
|
@ -324,8 +331,19 @@ func (a *Allocator) doNetworkAlloc(ctx context.Context, ev events.Event) {
|
|||
}); err != nil {
|
||||
log.G(ctx).WithError(err).Errorf("Failed to commit allocation for service %s", s.ID)
|
||||
}
|
||||
case state.EventUpdateService:
|
||||
s := v.Service.Copy()
|
||||
case api.EventUpdateService:
|
||||
// We may have already allocated this service. If a create or
|
||||
// update event is older than the current version in the store,
|
||||
// we run the risk of allocating the service a second time.
|
||||
// Only operate on the latest version of the service.
|
||||
var s *api.Service
|
||||
a.store.View(func(tx store.ReadTx) {
|
||||
s = store.GetService(tx, v.Service.ID)
|
||||
})
|
||||
|
||||
if s == nil {
|
||||
break
|
||||
}
|
||||
|
||||
if nc.nwkAllocator.IsServiceAllocated(s) {
|
||||
if nc.nwkAllocator.PortsAllocatedInHostPublishMode(s) {
|
||||
|
@ -347,7 +365,7 @@ func (a *Allocator) doNetworkAlloc(ctx context.Context, ev events.Event) {
|
|||
} else {
|
||||
delete(nc.unallocatedServices, s.ID)
|
||||
}
|
||||
case state.EventDeleteService:
|
||||
case api.EventDeleteService:
|
||||
s := v.Service.Copy()
|
||||
|
||||
if err := nc.nwkAllocator.ServiceDeallocate(s); err != nil {
|
||||
|
@ -357,9 +375,9 @@ func (a *Allocator) doNetworkAlloc(ctx context.Context, ev events.Event) {
|
|||
// Remove it from unallocatedServices just in case
|
||||
// it's still there.
|
||||
delete(nc.unallocatedServices, s.ID)
|
||||
case state.EventCreateNode, state.EventUpdateNode, state.EventDeleteNode:
|
||||
case api.EventCreateNode, api.EventUpdateNode, api.EventDeleteNode:
|
||||
a.doNodeAlloc(ctx, ev)
|
||||
case state.EventCreateTask, state.EventUpdateTask, state.EventDeleteTask:
|
||||
case api.EventCreateTask, api.EventUpdateTask, api.EventDeleteTask:
|
||||
a.doTaskAlloc(ctx, ev)
|
||||
case state.EventCommit:
|
||||
a.procTasksNetwork(ctx, false)
|
||||
|
@ -385,16 +403,28 @@ func (a *Allocator) doNodeAlloc(ctx context.Context, ev events.Event) {
|
|||
node *api.Node
|
||||
)
|
||||
|
||||
// We may have already allocated this node. If a create or update
|
||||
// event is older than the current version in the store, we run the
|
||||
// risk of allocating the node a second time. Only operate on the
|
||||
// latest version of the node.
|
||||
switch v := ev.(type) {
|
||||
case state.EventCreateNode:
|
||||
node = v.Node.Copy()
|
||||
case state.EventUpdateNode:
|
||||
node = v.Node.Copy()
|
||||
case state.EventDeleteNode:
|
||||
case api.EventCreateNode:
|
||||
a.store.View(func(tx store.ReadTx) {
|
||||
node = store.GetNode(tx, v.Node.ID)
|
||||
})
|
||||
case api.EventUpdateNode:
|
||||
a.store.View(func(tx store.ReadTx) {
|
||||
node = store.GetNode(tx, v.Node.ID)
|
||||
})
|
||||
case api.EventDeleteNode:
|
||||
isDelete = true
|
||||
node = v.Node.Copy()
|
||||
}
|
||||
|
||||
if node == nil {
|
||||
return
|
||||
}
|
||||
|
||||
nc := a.netCtx
|
||||
|
||||
if isDelete {
|
||||
|
@ -530,7 +560,8 @@ func taskUpdateEndpoint(t *api.Task, endpoint *api.Endpoint) {
|
|||
t.Endpoint = endpoint.Copy()
|
||||
}
|
||||
|
||||
func isIngressNetworkNeeded(s *api.Service) bool {
|
||||
// IsIngressNetworkNeeded checks whether the service requires the routing-mesh
|
||||
func IsIngressNetworkNeeded(s *api.Service) bool {
|
||||
if s == nil {
|
||||
return false
|
||||
}
|
||||
|
@ -560,7 +591,7 @@ func (a *Allocator) taskCreateNetworkAttachments(t *api.Task, s *api.Service) {
|
|||
}
|
||||
|
||||
var networks []*api.NetworkAttachment
|
||||
if isIngressNetworkNeeded(s) {
|
||||
if IsIngressNetworkNeeded(s) {
|
||||
networks = append(networks, &api.NetworkAttachment{Network: a.netCtx.ingressNetwork})
|
||||
}
|
||||
|
||||
|
@ -594,16 +625,28 @@ func (a *Allocator) doTaskAlloc(ctx context.Context, ev events.Event) {
|
|||
t *api.Task
|
||||
)
|
||||
|
||||
// We may have already allocated this task. If a create or update
|
||||
// event is older than the current version in the store, we run the
|
||||
// risk of allocating the task a second time. Only operate on the
|
||||
// latest version of the task.
|
||||
switch v := ev.(type) {
|
||||
case state.EventCreateTask:
|
||||
t = v.Task.Copy()
|
||||
case state.EventUpdateTask:
|
||||
t = v.Task.Copy()
|
||||
case state.EventDeleteTask:
|
||||
case api.EventCreateTask:
|
||||
a.store.View(func(tx store.ReadTx) {
|
||||
t = store.GetTask(tx, v.Task.ID)
|
||||
})
|
||||
case api.EventUpdateTask:
|
||||
a.store.View(func(tx store.ReadTx) {
|
||||
t = store.GetTask(tx, v.Task.ID)
|
||||
})
|
||||
case api.EventDeleteTask:
|
||||
isDelete = true
|
||||
t = v.Task.Copy()
|
||||
}
|
||||
|
||||
if t == nil {
|
||||
return
|
||||
}
|
||||
|
||||
nc := a.netCtx
|
||||
|
||||
// If the task has stopped running then we should free the network
|
||||
|
@ -728,7 +771,7 @@ func (a *Allocator) allocateService(ctx context.Context, s *api.Service) error {
|
|||
// The service is trying to expose ports to the external
|
||||
// world. Automatically attach the service to the ingress
|
||||
// network only if it is not already done.
|
||||
if isIngressNetworkNeeded(s) {
|
||||
if IsIngressNetworkNeeded(s) {
|
||||
if nc.ingressNetwork == nil {
|
||||
return fmt.Errorf("ingress network is missing")
|
||||
}
|
||||
|
@ -761,7 +804,7 @@ func (a *Allocator) allocateService(ctx context.Context, s *api.Service) error {
|
|||
// If the service doesn't expose ports any more and if we have
|
||||
// any lingering virtual IP references for ingress network
|
||||
// clean them up here.
|
||||
if !isIngressNetworkNeeded(s) {
|
||||
if !IsIngressNetworkNeeded(s) {
|
||||
if s.Endpoint != nil {
|
||||
for i, vip := range s.Endpoint.VirtualIPs {
|
||||
if vip.NetworkID == nc.ingressNetwork.ID {
|
||||
|
@ -870,7 +913,7 @@ func (a *Allocator) allocateTask(ctx context.Context, t *api.Task) (err error) {
|
|||
}
|
||||
|
||||
if err = nc.nwkAllocator.AllocateTask(t); err != nil {
|
||||
err = errors.Wrapf(err, "failed during networktask allocation for task %s", t.ID)
|
||||
err = errors.Wrapf(err, "failed during network allocation for task %s", t.ID)
|
||||
return
|
||||
}
|
||||
if nc.nwkAllocator.IsTaskAllocated(t) {

17 vendor/github.com/docker/swarmkit/manager/controlapi/cluster.go generated vendored
@ -6,6 +6,7 @@ import (
|
|||
|
||||
"github.com/docker/swarmkit/api"
|
||||
"github.com/docker/swarmkit/ca"
|
||||
"github.com/docker/swarmkit/log"
|
||||
"github.com/docker/swarmkit/manager/encryption"
|
||||
"github.com/docker/swarmkit/manager/state/store"
|
||||
gogotypes "github.com/gogo/protobuf/types"
|
||||
|
@ -104,16 +105,28 @@ func (s *Server) UpdateCluster(ctx context.Context, request *api.UpdateClusterRe
|
|||
return grpc.Errorf(codes.NotFound, "cluster %s not found", request.ClusterID)
|
||||
|
||||
}
|
||||
// This ensures that we always have the latest security config, so our ca.SecurityConfig.RootCA and
|
||||
// ca.SecurityConfig.externalCA objects are up-to-date with the current api.Cluster.RootCA and
|
||||
// api.Cluster.Spec.ExternalCA objects, respectively. Note that if, during this update, the cluster gets
|
||||
// updated again with different CA info and the security config gets changed under us, that's still fine because
|
||||
// this cluster update would fail anyway due to its version being too low on write.
|
||||
if err := s.scu.UpdateRootCA(ctx, cluster); err != nil {
|
||||
log.G(ctx).WithField(
|
||||
"method", "(*controlapi.Server).UpdateCluster").WithError(err).Error("could not update security config")
|
||||
return grpc.Errorf(codes.Internal, "could not update security config")
|
||||
}
|
||||
rootCA := s.securityConfig.RootCA()
|
||||
|
||||
cluster.Meta.Version = *request.ClusterVersion
|
||||
cluster.Spec = *request.Spec.Copy()
|
||||
|
||||
expireBlacklistedCerts(cluster)
|
||||
|
||||
if request.Rotation.WorkerJoinToken {
|
||||
cluster.RootCA.JoinTokens.Worker = ca.GenerateJoinToken(s.rootCA)
|
||||
cluster.RootCA.JoinTokens.Worker = ca.GenerateJoinToken(rootCA)
|
||||
}
|
||||
if request.Rotation.ManagerJoinToken {
|
||||
cluster.RootCA.JoinTokens.Manager = ca.GenerateJoinToken(s.rootCA)
|
||||
cluster.RootCA.JoinTokens.Manager = ca.GenerateJoinToken(rootCA)
|
||||
}
|
||||
|
||||
var unlockKeys []*api.EncryptionKey
|
||||

2 vendor/github.com/docker/swarmkit/manager/controlapi/network.go generated vendored
@@ -219,7 +219,7 @@ func (s *Server) removeIngressNetwork(id string) error {
return grpc.Errorf(codes.Internal, "could not find services using network %s: %v", id, err)
}
for _, srv := range services {
if doesServiceNeedIngress(srv) {
if allocator.IsIngressNetworkNeeded(srv) {
return grpc.Errorf(codes.FailedPrecondition, "ingress network cannot be removed because service %s depends on it", srv.ID)
}
}

21 vendor/github.com/docker/swarmkit/manager/controlapi/server.go generated vendored
@@ -16,18 +16,21 @@ var (
// Server is the Cluster API gRPC server.
type Server struct {
store *store.MemoryStore
raft *raft.Node
rootCA *ca.RootCA
pg plugingetter.PluginGetter
store *store.MemoryStore
raft *raft.Node
securityConfig *ca.SecurityConfig
scu ca.APISecurityConfigUpdater
pg plugingetter.PluginGetter
}

// NewServer creates a Cluster API server.
func NewServer(store *store.MemoryStore, raft *raft.Node, rootCA *ca.RootCA, pg plugingetter.PluginGetter) *Server {
func NewServer(store *store.MemoryStore, raft *raft.Node, securityConfig *ca.SecurityConfig,
scu ca.APISecurityConfigUpdater, pg plugingetter.PluginGetter) *Server {
return &Server{
store: store,
raft: raft,
rootCA: rootCA,
pg: pg,
store: store,
raft: raft,
securityConfig: securityConfig,
scu: scu,
pg: pg,
}
}

173 vendor/github.com/docker/swarmkit/manager/controlapi/service.go generated vendored
@ -10,6 +10,7 @@ import (
|
|||
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/swarmkit/api"
|
||||
"github.com/docker/swarmkit/api/naming"
|
||||
"github.com/docker/swarmkit/identity"
|
||||
"github.com/docker/swarmkit/manager/allocator"
|
||||
"github.com/docker/swarmkit/manager/constraint"
|
||||
|
@ -23,7 +24,7 @@ import (
|
|||
)
|
||||
|
||||
var (
|
||||
errNetworkUpdateNotSupported = errors.New("changing network in service is not supported")
|
||||
errNetworkUpdateNotSupported = errors.New("networks must be migrated to TaskSpec before being changed")
|
||||
errRenameNotSupported = errors.New("renaming services is not supported")
|
||||
errModeChangeNotAllowed = errors.New("service mode change is not allowed")
|
||||
)
|
||||
|
@ -118,9 +119,27 @@ func validateUpdate(uc *api.UpdateConfig) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func validateContainerSpec(container *api.ContainerSpec) error {
|
||||
if container == nil {
|
||||
return grpc.Errorf(codes.InvalidArgument, "ContainerSpec: missing in service spec")
|
||||
func validateContainerSpec(taskSpec api.TaskSpec) error {
|
||||
// Building a empty/dummy Task to validate the templating and
|
||||
// the resulting container spec as well. This is a *best effort*
|
||||
// validation.
|
||||
container, err := template.ExpandContainerSpec(&api.Task{
|
||||
Spec: taskSpec,
|
||||
ServiceID: "serviceid",
|
||||
Slot: 1,
|
||||
NodeID: "nodeid",
|
||||
Networks: []*api.NetworkAttachment{},
|
||||
Annotations: api.Annotations{
|
||||
Name: "taskname",
|
||||
},
|
||||
ServiceAnnotations: api.Annotations{
|
||||
Name: "servicename",
|
||||
},
|
||||
Endpoint: &api.Endpoint{},
|
||||
LogDriver: taskSpec.LogDriver,
|
||||
})
|
||||
if err != nil {
|
||||
return grpc.Errorf(codes.InvalidArgument, err.Error())
|
||||
}
|
||||
|
||||
if container.Image == "" {
|
||||
|
@ -142,6 +161,37 @@ func validateContainerSpec(container *api.ContainerSpec) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func validateGenericRuntimeSpec(taskSpec api.TaskSpec) error {
|
||||
generic := taskSpec.GetGeneric()
|
||||
|
||||
if len(generic.Kind) < 3 {
|
||||
return grpc.Errorf(codes.InvalidArgument, "Generic runtime: Invalid name %q", generic.Kind)
|
||||
}
|
||||
|
||||
reservedNames := []string{"container", "attachment"}
|
||||
for _, n := range reservedNames {
|
||||
if strings.ToLower(generic.Kind) == n {
|
||||
return grpc.Errorf(codes.InvalidArgument, "Generic runtime: %q is a reserved name", generic.Kind)
|
||||
}
|
||||
}
|
||||
|
||||
payload := generic.Payload
|
||||
|
||||
if payload == nil {
|
||||
return grpc.Errorf(codes.InvalidArgument, "Generic runtime is missing payload")
|
||||
}
|
||||
|
||||
if payload.TypeUrl == "" {
|
||||
return grpc.Errorf(codes.InvalidArgument, "Generic runtime is missing payload type")
|
||||
}
|
||||
|
||||
if len(payload.Value) == 0 {
|
||||
return grpc.Errorf(codes.InvalidArgument, "Generic runtime has an empty payload")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateTaskSpec(taskSpec api.TaskSpec) error {
|
||||
if err := validateResourceRequirements(taskSpec.Resources); err != nil {
|
||||
return err
|
||||
|
@ -164,36 +214,19 @@ func validateTaskSpec(taskSpec api.TaskSpec) error {
|
|||
return grpc.Errorf(codes.InvalidArgument, "TaskSpec: missing runtime")
|
||||
}
|
||||
|
||||
_, ok := taskSpec.GetRuntime().(*api.TaskSpec_Container)
|
||||
if !ok {
|
||||
switch taskSpec.GetRuntime().(type) {
|
||||
case *api.TaskSpec_Container:
|
||||
if err := validateContainerSpec(taskSpec); err != nil {
|
||||
return err
|
||||
}
|
||||
case *api.TaskSpec_Generic:
|
||||
if err := validateGenericRuntimeSpec(taskSpec); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
return grpc.Errorf(codes.Unimplemented, "RuntimeSpec: unimplemented runtime in service spec")
|
||||
}
|
||||
|
||||
// Building a empty/dummy Task to validate the templating and
|
||||
// the resulting container spec as well. This is a *best effort*
|
||||
// validation.
|
||||
preparedSpec, err := template.ExpandContainerSpec(&api.Task{
|
||||
Spec: taskSpec,
|
||||
ServiceID: "serviceid",
|
||||
Slot: 1,
|
||||
NodeID: "nodeid",
|
||||
Networks: []*api.NetworkAttachment{},
|
||||
Annotations: api.Annotations{
|
||||
Name: "taskname",
|
||||
},
|
||||
ServiceAnnotations: api.Annotations{
|
||||
Name: "servicename",
|
||||
},
|
||||
Endpoint: &api.Endpoint{},
|
||||
LogDriver: taskSpec.LogDriver,
|
||||
})
|
||||
if err != nil {
|
||||
return grpc.Errorf(codes.InvalidArgument, err.Error())
|
||||
}
|
||||
if err := validateContainerSpec(preparedSpec); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -425,36 +458,6 @@ func (s *Server) checkSecretExistence(tx store.Tx, spec *api.ServiceSpec) error
|
|||
return nil
|
||||
}
|
||||
|
||||
func doesServiceNeedIngress(srv *api.Service) bool {
|
||||
// Only VIP mode with target ports needs routing mesh.
|
||||
// If no endpoint is specified, it defaults to VIP mode but no target ports
|
||||
// are specified, so the service does not need the routing mesh.
|
||||
if srv.Spec.Endpoint == nil || srv.Spec.Endpoint.Mode != api.ResolutionModeVirtualIP {
|
||||
return false
|
||||
}
|
||||
// Go through the ports' config
|
||||
for _, p := range srv.Spec.Endpoint.Ports {
|
||||
if p.PublishMode != api.PublishModeIngress {
|
||||
continue
|
||||
}
|
||||
if p.PublishedPort != 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
// Go through the ports' state
|
||||
if srv.Endpoint != nil {
|
||||
for _, p := range srv.Endpoint.Ports {
|
||||
if p.PublishMode != api.PublishModeIngress {
|
||||
continue
|
||||
}
|
||||
if p.PublishedPort != 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// CreateService creates and returns a Service based on the provided ServiceSpec.
|
||||
// - Returns `InvalidArgument` if the ServiceSpec is malformed.
|
||||
// - Returns `Unimplemented` if the ServiceSpec references unimplemented features.
|
||||
|
@ -476,11 +479,12 @@ func (s *Server) CreateService(ctx context.Context, request *api.CreateServiceRe
|
|||
// TODO(aluzzardi): Consider using `Name` as a primary key to handle
|
||||
// duplicate creations. See #65
|
||||
service := &api.Service{
|
||||
ID: identity.NewID(),
|
||||
Spec: *request.Spec,
|
||||
ID: identity.NewID(),
|
||||
Spec: *request.Spec,
|
||||
SpecVersion: &api.Version{},
|
||||
}
|
||||
|
||||
if doesServiceNeedIngress(service) {
|
||||
if allocator.IsIngressNetworkNeeded(service) {
|
||||
if _, err := allocator.GetIngressNetwork(s.store); err == allocator.ErrNoIngress {
|
||||
return nil, grpc.Errorf(codes.FailedPrecondition, "service needs ingress network, but no ingress network is present")
|
||||
}
|
||||
|
@ -558,18 +562,14 @@ func (s *Server) UpdateService(ctx context.Context, request *api.UpdateServiceRe
|
|||
if service == nil {
|
||||
return grpc.Errorf(codes.NotFound, "service %s not found", request.ServiceID)
|
||||
}
|
||||
// temporary disable network update
|
||||
requestSpecNetworks := request.Spec.Task.Networks
|
||||
if len(requestSpecNetworks) == 0 {
|
||||
requestSpecNetworks = request.Spec.Networks
|
||||
}
|
||||
|
||||
specNetworks := service.Spec.Task.Networks
|
||||
if len(specNetworks) == 0 {
|
||||
specNetworks = service.Spec.Networks
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(requestSpecNetworks, specNetworks) {
|
||||
// It's not okay to update Service.Spec.Networks on its own.
|
||||
// However, if Service.Spec.Task.Networks is also being
|
||||
// updated, that's okay (for example when migrating from the
|
||||
// deprecated Spec.Networks field to Spec.Task.Networks).
|
||||
if (len(request.Spec.Networks) != 0 || len(service.Spec.Networks) != 0) &&
|
||||
!reflect.DeepEqual(request.Spec.Networks, service.Spec.Networks) &&
|
||||
reflect.DeepEqual(request.Spec.Task.Networks, service.Spec.Task.Networks) {
|
||||
return grpc.Errorf(codes.Unimplemented, errNetworkUpdateNotSupported.Error())
|
||||
}
|
||||
|
||||
|
@ -599,8 +599,11 @@ func (s *Server) UpdateService(ctx context.Context, request *api.UpdateServiceRe
|
|||
}
|
||||
|
||||
curSpec := service.Spec.Copy()
|
||||
curSpecVersion := service.SpecVersion
|
||||
service.Spec = *service.PreviousSpec.Copy()
|
||||
service.SpecVersion = service.PreviousSpecVersion.Copy()
|
||||
service.PreviousSpec = curSpec
|
||||
service.PreviousSpecVersion = curSpecVersion
|
||||
|
||||
service.UpdateStatus = &api.UpdateStatus{
|
||||
State: api.UpdateStatus_ROLLBACK_STARTED,
|
||||
|
@ -609,13 +612,19 @@ func (s *Server) UpdateService(ctx context.Context, request *api.UpdateServiceRe
|
|||
}
|
||||
} else {
|
||||
service.PreviousSpec = service.Spec.Copy()
|
||||
service.PreviousSpecVersion = service.SpecVersion
|
||||
service.Spec = *request.Spec.Copy()
|
||||
// Set spec version. Note that this will not match the
|
||||
// service's Meta.Version after the store update. The
|
||||
// versions for the spec and the service itself are not
|
||||
// meant to be directly comparable.
|
||||
service.SpecVersion = service.Meta.Version.Copy()
|
||||
|
||||
// Reset update status
|
||||
service.UpdateStatus = nil
|
||||
}
|
||||
|
||||
if doesServiceNeedIngress(service) {
|
||||
if allocator.IsIngressNetworkNeeded(service) {
|
||||
if _, err := allocator.GetIngressNetwork(s.store); err == allocator.ErrNoIngress {
|
||||
return grpc.Errorf(codes.FailedPrecondition, "service needs ingress network, but no ingress network is present")
|
||||
}
|
||||
|
@ -687,6 +696,8 @@ func (s *Server) ListServices(ctx context.Context, request *api.ListServicesRequ
|
|||
services, err = store.FindServices(tx, buildFilters(store.ByNamePrefix, request.Filters.NamePrefixes))
|
||||
case request.Filters != nil && len(request.Filters.IDPrefixes) > 0:
|
||||
services, err = store.FindServices(tx, buildFilters(store.ByIDPrefix, request.Filters.IDPrefixes))
|
||||
case request.Filters != nil && len(request.Filters.Runtimes) > 0:
|
||||
services, err = store.FindServices(tx, buildFilters(store.ByRuntime, request.Filters.Runtimes))
|
||||
default:
|
||||
services, err = store.FindServices(tx, store.All)
|
||||
}
|
||||
|
@ -709,6 +720,16 @@ func (s *Server) ListServices(ctx context.Context, request *api.ListServicesRequ
|
|||
func(e *api.Service) bool {
|
||||
return filterMatchLabels(e.Spec.Annotations.Labels, request.Filters.Labels)
|
||||
},
|
||||
func(e *api.Service) bool {
|
||||
if len(request.Filters.Runtimes) == 0 {
|
||||
return true
|
||||
}
|
||||
r, err := naming.Runtime(e.Spec.Task)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return filterContains(r, request.Filters.Runtimes)
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
|

12 vendor/github.com/docker/swarmkit/manager/controlapi/task.go generated vendored
@ -87,6 +87,8 @@ func (s *Server) ListTasks(ctx context.Context, request *api.ListTasksRequest) (
|
|||
tasks, err = store.FindTasks(tx, buildFilters(store.ByIDPrefix, request.Filters.IDPrefixes))
|
||||
case request.Filters != nil && len(request.Filters.ServiceIDs) > 0:
|
||||
tasks, err = store.FindTasks(tx, buildFilters(store.ByServiceID, request.Filters.ServiceIDs))
|
||||
case request.Filters != nil && len(request.Filters.Runtimes) > 0:
|
||||
tasks, err = store.FindTasks(tx, buildFilters(store.ByRuntime, request.Filters.Runtimes))
|
||||
case request.Filters != nil && len(request.Filters.NodeIDs) > 0:
|
||||
tasks, err = store.FindTasks(tx, buildFilters(store.ByNodeID, request.Filters.NodeIDs))
|
||||
case request.Filters != nil && len(request.Filters.DesiredStates) > 0:
|
||||
|
@ -122,6 +124,16 @@ func (s *Server) ListTasks(ctx context.Context, request *api.ListTasksRequest) (
|
|||
func(e *api.Task) bool {
|
||||
return filterContains(e.NodeID, request.Filters.NodeIDs)
|
||||
},
|
||||
func(e *api.Task) bool {
|
||||
if len(request.Filters.Runtimes) == 0 {
|
||||
return true
|
||||
}
|
||||
r, err := naming.Runtime(e.Spec)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return filterContains(r, request.Filters.Runtimes)
|
||||
},
|
||||
func(e *api.Task) bool {
|
||||
if len(request.Filters.DesiredStates) == 0 {
|
||||
return true
|
||||
|

67 vendor/github.com/docker/swarmkit/manager/dispatcher/dispatcher.go generated vendored
@ -200,7 +200,7 @@ func (d *Dispatcher) Run(ctx context.Context) error {
|
|||
}
|
||||
return nil
|
||||
},
|
||||
state.EventUpdateCluster{},
|
||||
api.EventUpdateCluster{},
|
||||
)
|
||||
if err != nil {
|
||||
d.mu.Unlock()
|
||||
|
@ -249,7 +249,7 @@ func (d *Dispatcher) Run(ctx context.Context) error {
|
|||
d.processUpdates(ctx)
|
||||
batchTimer.Reset(maxBatchInterval)
|
||||
case v := <-configWatcher:
|
||||
cluster := v.(state.EventUpdateCluster)
|
||||
cluster := v.(api.EventUpdateCluster)
|
||||
d.mu.Lock()
|
||||
if cluster.Cluster.Spec.Dispatcher.HeartbeatPeriod != nil {
|
||||
// ignore error, since Spec has passed validation before
|
||||
|
@ -346,9 +346,9 @@ func (d *Dispatcher) markNodesUnknown(ctx context.Context) error {
|
|||
|
||||
expireFunc := func() {
|
||||
log := log.WithField("node", nodeID)
|
||||
log.Debugf("heartbeat expiration for unknown node")
|
||||
log.Debug("heartbeat expiration for unknown node")
|
||||
if err := d.markNodeNotReady(nodeID, api.NodeStatus_DOWN, `heartbeat failure for node in "unknown" state`); err != nil {
|
||||
log.WithError(err).Errorf(`failed deregistering node after heartbeat expiration for node in "unknown" state`)
|
||||
log.WithError(err).Error(`failed deregistering node after heartbeat expiration for node in "unknown" state`)
|
||||
}
|
||||
}
|
||||
if err := d.nodes.AddUnknown(node, expireFunc); err != nil {
|
||||
|
@ -360,7 +360,7 @@ func (d *Dispatcher) markNodesUnknown(ctx context.Context) error {
|
|||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
log.WithField("node", n.ID).WithError(err).Errorf(`failed to move node to "unknown" state`)
|
||||
log.WithField("node", n.ID).WithError(err).Error(`failed to move node to "unknown" state`)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
@ -457,7 +457,7 @@ func (d *Dispatcher) register(ctx context.Context, nodeID string, description *a
|
|||
|
||||
addr, err := nodeIPFromContext(ctx)
|
||||
if err != nil {
|
||||
log.G(ctx).Debugf(err.Error())
|
||||
log.G(ctx).Debug(err.Error())
|
||||
}
|
||||
|
||||
if err := d.markNodeReady(dctx, nodeID, description, addr); err != nil {
|
||||
|
@ -701,12 +701,12 @@ func (d *Dispatcher) Tasks(r *api.TasksRequest, stream api.Dispatcher_TasksServe
|
|||
}
|
||||
return nil
|
||||
},
|
||||
state.EventCreateTask{Task: &api.Task{NodeID: nodeID},
|
||||
Checks: []state.TaskCheckFunc{state.TaskCheckNodeID}},
|
||||
state.EventUpdateTask{Task: &api.Task{NodeID: nodeID},
|
||||
Checks: []state.TaskCheckFunc{state.TaskCheckNodeID}},
|
||||
state.EventDeleteTask{Task: &api.Task{NodeID: nodeID},
|
||||
Checks: []state.TaskCheckFunc{state.TaskCheckNodeID}},
|
||||
api.EventCreateTask{Task: &api.Task{NodeID: nodeID},
|
||||
Checks: []api.TaskCheckFunc{state.TaskCheckNodeID}},
|
||||
api.EventUpdateTask{Task: &api.Task{NodeID: nodeID},
|
||||
Checks: []api.TaskCheckFunc{state.TaskCheckNodeID}},
|
||||
api.EventDeleteTask{Task: &api.Task{NodeID: nodeID},
|
||||
Checks: []api.TaskCheckFunc{state.TaskCheckNodeID}},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -742,10 +742,10 @@ func (d *Dispatcher) Tasks(r *api.TasksRequest, stream api.Dispatcher_TasksServe
|
|||
select {
|
||||
case event := <-nodeTasks:
|
||||
switch v := event.(type) {
|
||||
case state.EventCreateTask:
|
||||
case api.EventCreateTask:
|
||||
tasksMap[v.Task.ID] = v.Task
|
||||
modificationCnt++
|
||||
case state.EventUpdateTask:
|
||||
case api.EventUpdateTask:
|
||||
if oldTask, exists := tasksMap[v.Task.ID]; exists {
|
||||
// States ASSIGNED and below are set by the orchestrator/scheduler,
|
||||
// not the agent, so tasks in these states need to be sent to the
|
||||
|
@ -758,7 +758,7 @@ func (d *Dispatcher) Tasks(r *api.TasksRequest, stream api.Dispatcher_TasksServe
|
|||
}
|
||||
tasksMap[v.Task.ID] = v.Task
|
||||
modificationCnt++
|
||||
case state.EventDeleteTask:
|
||||
case api.EventDeleteTask:
|
||||
delete(tasksMap, v.Task.ID)
|
||||
modificationCnt++
|
||||
}
|
||||
|
@ -915,12 +915,12 @@ func (d *Dispatcher) Assignments(r *api.AssignmentsRequest, stream api.Dispatche
|
|||
}
|
||||
return nil
|
||||
},
|
||||
state.EventUpdateTask{Task: &api.Task{NodeID: nodeID},
|
||||
Checks: []state.TaskCheckFunc{state.TaskCheckNodeID}},
|
||||
state.EventDeleteTask{Task: &api.Task{NodeID: nodeID},
|
||||
Checks: []state.TaskCheckFunc{state.TaskCheckNodeID}},
|
||||
state.EventUpdateSecret{},
|
||||
state.EventDeleteSecret{},
|
||||
api.EventUpdateTask{Task: &api.Task{NodeID: nodeID},
|
||||
Checks: []api.TaskCheckFunc{state.TaskCheckNodeID}},
|
||||
api.EventDeleteTask{Task: &api.Task{NodeID: nodeID},
|
||||
Checks: []api.TaskCheckFunc{state.TaskCheckNodeID}},
|
||||
api.EventUpdateSecret{},
|
||||
api.EventDeleteSecret{},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -996,7 +996,7 @@ func (d *Dispatcher) Assignments(r *api.AssignmentsRequest, stream api.Dispatche
|
|||
// created by the orchestrator, then the scheduler moves
|
||||
// them to ASSIGNED. If this ever changes, we will need
|
||||
// to monitor task creations as well.
|
||||
case state.EventUpdateTask:
|
||||
case api.EventUpdateTask:
|
||||
// We only care about tasks that are ASSIGNED or
|
||||
// higher.
|
||||
if v.Task.Status.State < api.TaskStateAssigned {
|
||||
|
@ -1038,7 +1038,7 @@ func (d *Dispatcher) Assignments(r *api.AssignmentsRequest, stream api.Dispatche
|
|||
updateTasks[v.Task.ID] = v.Task
|
||||
|
||||
oneModification()
|
||||
case state.EventDeleteTask:
|
||||
case api.EventDeleteTask:
|
||||
if _, exists := tasksMap[v.Task.ID]; !exists {
|
||||
continue
|
||||
}
|
||||
|
@ -1056,14 +1056,14 @@ func (d *Dispatcher) Assignments(r *api.AssignmentsRequest, stream api.Dispatche
|
|||
oneModification()
|
||||
// TODO(aaronl): For node secrets, we'll need to handle
|
||||
// EventCreateSecret.
|
||||
case state.EventUpdateSecret:
|
||||
case api.EventUpdateSecret:
|
||||
if _, exists := tasksUsingSecret[v.Secret.ID]; !exists {
|
||||
continue
|
||||
}
|
||||
log.Debugf("Secret %s (ID: %d) was updated though it was still referenced by one or more tasks",
|
||||
v.Secret.Spec.Annotations.Name, v.Secret.ID)
|
||||
|
||||
case state.EventDeleteSecret:
|
||||
case api.EventDeleteSecret:
|
||||
if _, exists := tasksUsingSecret[v.Secret.ID]; !exists {
|
||||
continue
|
||||
}
|
||||
|
@ -1171,7 +1171,16 @@ func (d *Dispatcher) moveTasksToOrphaned(nodeID string) error {
|
|||
}
|
||||
|
||||
for _, task := range tasks {
|
||||
if task.Status.State < api.TaskStateOrphaned {
|
||||
// Tasks running on an unreachable node need to be marked as
|
||||
// orphaned since we have no idea whether the task is still running
|
||||
// or not.
|
||||
//
|
||||
// This only applies for tasks that could have made progress since
|
||||
// the agent became unreachable (assigned<->running)
|
||||
//
|
||||
// Tasks in a final state (e.g. rejected) *cannot* have made
|
||||
// progress, therefore there's no point in marking them as orphaned
|
||||
if task.Status.State >= api.TaskStateAssigned && task.Status.State <= api.TaskStateRunning {
|
||||
task.Status.State = api.TaskStateOrphaned
|
||||
}
|
||||
|
||||
|
@ -1327,8 +1336,8 @@ func (d *Dispatcher) Session(r *api.SessionRequest, stream api.Dispatcher_Sessio
|
|||
nodeUpdates, cancel, err := store.ViewAndWatch(d.store, func(readTx store.ReadTx) error {
|
||||
nodeObj = store.GetNode(readTx, nodeID)
|
||||
return nil
|
||||
}, state.EventUpdateNode{Node: &api.Node{ID: nodeID},
|
||||
Checks: []state.NodeCheckFunc{state.NodeCheckID}},
|
||||
}, api.EventUpdateNode{Node: &api.Node{ID: nodeID},
|
||||
Checks: []api.NodeCheckFunc{state.NodeCheckID}},
|
||||
)
|
||||
if cancel != nil {
|
||||
defer cancel()
|
||||
|
@ -1394,7 +1403,7 @@ func (d *Dispatcher) Session(r *api.SessionRequest, stream api.Dispatcher_Sessio
|
|||
case ev := <-managerUpdates:
|
||||
mgrs = ev.([]*api.WeightedPeer)
|
||||
case ev := <-nodeUpdates:
|
||||
nodeObj = ev.(state.EventUpdateNode).Node
|
||||
nodeObj = ev.(api.EventUpdateNode).Node
|
||||
case <-stream.Context().Done():
|
||||
return stream.Context().Err()
|
||||
case <-node.Disconnect:
|
||||
|

6 vendor/github.com/docker/swarmkit/manager/logbroker/subscription.go generated vendored
@@ -68,7 +68,7 @@ func (s *subscription) Run(ctx context.Context) {
if s.follow() {
wq := s.store.WatchQueue()
ch, cancel := state.Watch(wq, state.EventCreateTask{}, state.EventUpdateTask{})
ch, cancel := state.Watch(wq, api.EventCreateTask{}, api.EventUpdateTask{})
go func() {
defer cancel()
s.watch(ch)

@@ -227,9 +227,9 @@ func (s *subscription) watch(ch <-chan events.Event) error {
return s.ctx.Err()
case event := <-ch:
switch v := event.(type) {
case state.EventCreateTask:
case api.EventCreateTask:
t = v.Task
case state.EventUpdateTask:
case api.EventUpdateTask:
t = v.Task
}
}

147 vendor/github.com/docker/swarmkit/manager/manager.go generated vendored
@ -389,7 +389,7 @@ func (m *Manager) Run(parent context.Context) error {
|
|||
return err
|
||||
}
|
||||
|
||||
baseControlAPI := controlapi.NewServer(m.raftNode.MemoryStore(), m.raftNode, m.config.SecurityConfig.RootCA(), m.config.PluginGetter)
|
||||
baseControlAPI := controlapi.NewServer(m.raftNode.MemoryStore(), m.raftNode, m.config.SecurityConfig, m.caserver, m.config.PluginGetter)
|
||||
baseResourceAPI := resourceapi.New(m.raftNode.MemoryStore())
|
||||
healthServer := health.NewHealthServer()
|
||||
localHealthServer := health.NewHealthServer()
|
||||
|
@ -688,12 +688,14 @@ func (m *Manager) watchForClusterChanges(ctx context.Context) error {
|
|||
if cluster == nil {
|
||||
return fmt.Errorf("unable to get current cluster")
|
||||
}
|
||||
m.caserver.UpdateRootCA(ctx, cluster)
|
||||
if err := m.caserver.UpdateRootCA(ctx, cluster); err != nil {
|
||||
log.G(ctx).WithError(err).Error("could not update security config")
|
||||
}
|
||||
return m.updateKEK(ctx, cluster)
|
||||
},
|
||||
state.EventUpdateCluster{
|
||||
api.EventUpdateCluster{
|
||||
Cluster: &api.Cluster{ID: clusterID},
|
||||
Checks: []state.ClusterCheckFunc{state.ClusterCheckID},
|
||||
Checks: []api.ClusterCheckFunc{state.ClusterCheckID},
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
|
@ -703,8 +705,10 @@ func (m *Manager) watchForClusterChanges(ctx context.Context) error {
|
|||
for {
|
||||
select {
|
||||
case event := <-clusterWatch:
|
||||
clusterEvent := event.(state.EventUpdateCluster)
|
||||
m.caserver.UpdateRootCA(ctx, clusterEvent.Cluster)
|
||||
clusterEvent := event.(api.EventUpdateCluster)
|
||||
if err := m.caserver.UpdateRootCA(ctx, clusterEvent.Cluster); err != nil {
|
||||
log.G(ctx).WithError(err).Error("could not update security config")
|
||||
}
|
||||
m.updateKEK(ctx, clusterEvent.Cluster)
|
||||
case <-ctx.Done():
|
||||
clusterWatchCancel()
|
||||
|
@ -724,10 +728,15 @@ func (m *Manager) watchForClusterChanges(ctx context.Context) error {
|
|||
func (m *Manager) rotateRootCAKEK(ctx context.Context, clusterID string) error {
|
||||
// If we don't have a KEK, we won't ever be rotating anything
|
||||
strPassphrase := os.Getenv(ca.PassphraseENVVar)
|
||||
if strPassphrase == "" {
|
||||
strPassphrasePrev := os.Getenv(ca.PassphraseENVVarPrev)
|
||||
if strPassphrase == "" && strPassphrasePrev == "" {
|
||||
return nil
|
||||
}
|
||||
strPassphrasePrev := os.Getenv(ca.PassphraseENVVarPrev)
|
||||
if strPassphrase != "" {
|
||||
log.G(ctx).Warn("Encrypting the root CA key in swarm using environment variables is deprecated. " +
|
||||
"Support for decrypting or rotating the key will be removed in the future.")
|
||||
}
|
||||
|
||||
passphrase := []byte(strPassphrase)
|
||||
passphrasePrev := []byte(strPassphrasePrev)
|
||||
|
||||
|
@ -738,72 +747,72 @@ func (m *Manager) rotateRootCAKEK(ctx context.Context, clusterID string) error {
|
|||
finalKey []byte
|
||||
)
|
||||
// Retrieve the cluster identified by ClusterID
|
||||
s.View(func(readTx store.ReadTx) {
|
||||
cluster = store.GetCluster(readTx, clusterID)
|
||||
})
|
||||
if cluster == nil {
|
||||
return fmt.Errorf("cluster not found: %s", clusterID)
|
||||
}
|
||||
|
||||
// Try to get the private key from the cluster
|
||||
privKeyPEM := cluster.RootCA.CAKey
|
||||
if len(privKeyPEM) == 0 {
|
||||
// We have no PEM root private key in this cluster.
|
||||
log.G(ctx).Warnf("cluster %s does not have private key material", clusterID)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode the PEM private key
|
||||
keyBlock, _ := pem.Decode(privKeyPEM)
|
||||
if keyBlock == nil {
|
||||
return fmt.Errorf("invalid PEM-encoded private key inside of cluster %s", clusterID)
|
||||
}
|
||||
// If this key is not encrypted, then we have to encrypt it
|
||||
if !x509.IsEncryptedPEMBlock(keyBlock) {
|
||||
finalKey, err = ca.EncryptECPrivateKey(privKeyPEM, strPassphrase)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
// This key is already encrypted, let's try to decrypt with the current main passphrase
|
||||
_, err = x509.DecryptPEMBlock(keyBlock, []byte(passphrase))
|
||||
if err == nil {
|
||||
// The main key is the correct KEK, nothing to do here
|
||||
return nil
|
||||
}
|
||||
// This key is already encrypted, but failed with current main passphrase.
|
||||
// Let's try to decrypt with the previous passphrase
|
||||
unencryptedKey, err := x509.DecryptPEMBlock(keyBlock, []byte(passphrasePrev))
|
||||
if err != nil {
|
||||
// We were not able to decrypt either with the main or backup passphrase, error
|
||||
return err
|
||||
}
|
||||
unencryptedKeyBlock := &pem.Block{
|
||||
Type: keyBlock.Type,
|
||||
Bytes: unencryptedKey,
|
||||
Headers: keyBlock.Headers,
|
||||
}
|
||||
|
||||
// We were able to decrypt the key, but with the previous passphrase. Let's encrypt
|
||||
// with the new one and store it in raft
|
||||
finalKey, err = ca.EncryptECPrivateKey(pem.EncodeToMemory(unencryptedKeyBlock), strPassphrase)
|
||||
if err != nil {
|
||||
log.G(ctx).Debugf("failed to rotate the key-encrypting-key for the root key material of cluster %s", clusterID)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
log.G(ctx).Infof("Re-encrypting the root key material of cluster %s", clusterID)
|
||||
// Let's update the key in the cluster object
|
||||
return s.Update(func(tx store.Tx) error {
|
||||
cluster = store.GetCluster(tx, clusterID)
|
||||
if cluster == nil {
|
||||
return fmt.Errorf("cluster not found: %s", clusterID)
|
||||
}
|
||||
|
||||
// Try to get the private key from the cluster
|
||||
privKeyPEM := cluster.RootCA.CAKey
|
||||
if len(privKeyPEM) == 0 {
|
||||
// We have no PEM root private key in this cluster.
|
||||
log.G(ctx).Warnf("cluster %s does not have private key material", clusterID)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode the PEM private key
|
||||
keyBlock, _ := pem.Decode(privKeyPEM)
|
||||
if keyBlock == nil {
|
||||
return fmt.Errorf("invalid PEM-encoded private key inside of cluster %s", clusterID)
|
||||
}
|
||||
|
||||
if x509.IsEncryptedPEMBlock(keyBlock) {
|
||||
// This key is already encrypted, let's try to decrypt with the current main passphrase
|
||||
_, err = x509.DecryptPEMBlock(keyBlock, []byte(passphrase))
|
||||
if err == nil {
|
||||
// The main key is the correct KEK, nothing to do here
|
||||
return nil
|
||||
}
|
||||
// This key is already encrypted, but failed with current main passphrase.
|
||||
// Let's try to decrypt with the previous passphrase
|
||||
unencryptedKey, err := x509.DecryptPEMBlock(keyBlock, []byte(passphrasePrev))
|
||||
if err != nil {
|
||||
// We were not able to decrypt either with the main or backup passphrase, error
|
||||
return err
|
||||
}
|
||||
unencryptedKeyBlock := &pem.Block{
|
||||
Type: keyBlock.Type,
|
||||
Bytes: unencryptedKey,
|
||||
}
|
||||
|
||||
// we were able to decrypt the key with the previous passphrase - if the current passphrase is empty,
|
||||
// the we store the decrypted key in raft
|
||||
finalKey = pem.EncodeToMemory(unencryptedKeyBlock)
|
||||
|
||||
// the current passphrase is not empty, so let's encrypt with the new one and store it in raft
|
||||
if strPassphrase != "" {
|
||||
finalKey, err = ca.EncryptECPrivateKey(finalKey, strPassphrase)
|
||||
if err != nil {
|
||||
log.G(ctx).WithError(err).Debugf("failed to rotate the key-encrypting-key for the root key material of cluster %s", clusterID)
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else if strPassphrase != "" {
|
||||
// If this key is not encrypted, and the passphrase is not nil, then we have to encrypt it
|
||||
finalKey, err = ca.EncryptECPrivateKey(privKeyPEM, strPassphrase)
|
||||
if err != nil {
|
||||
log.G(ctx).WithError(err).Debugf("failed to rotate the key-encrypting-key for the root key material of cluster %s", clusterID)
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
return nil // don't update if it's not encrypted and we don't want it encrypted
|
||||
}
|
||||
|
||||
log.G(ctx).Infof("Updating the encryption on the root key material of cluster %s", clusterID)
|
||||
cluster.RootCA.CAKey = finalKey
|
||||
return store.UpdateCluster(tx, cluster)
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
// handleLeadershipEvents handles the is leader event or is follower event.
|
||||
|
@ -1045,8 +1054,8 @@ func defaultClusterObject(
|
|||
initialUnlockKeys []*api.EncryptionKey,
|
||||
rootCA *ca.RootCA) *api.Cluster {
|
||||
var caKey []byte
|
||||
if rootCA.Signer != nil {
|
||||
caKey = rootCA.Signer.Key
|
||||
if rcaSigner, err := rootCA.Signer(); err == nil {
|
||||
caKey = rcaSigner.Key
|
||||
}
|
||||
|
||||
return &api.Cluster{
|
||||
|
@ -1067,7 +1076,7 @@ func defaultClusterObject(
|
|||
},
|
||||
RootCA: api.RootCA{
|
||||
CAKey: caKey,
|
||||
CACert: rootCA.Cert,
|
||||
CACert: rootCA.Certs,
|
||||
CACertHash: rootCA.Digest.String(),
|
||||
JoinTokens: api.JoinTokens{
|
||||
Worker: ca.GenerateJoinToken(rootCA),
|
||||
|
|
|
@ -32,7 +32,7 @@ func New(store *store.MemoryStore) *ConstraintEnforcer {
|
|||
func (ce *ConstraintEnforcer) Run() {
|
||||
defer close(ce.doneChan)
|
||||
|
||||
watcher, cancelWatch := state.Watch(ce.store.WatchQueue(), state.EventUpdateNode{})
|
||||
watcher, cancelWatch := state.Watch(ce.store.WatchQueue(), api.EventUpdateNode{})
|
||||
defer cancelWatch()
|
||||
|
||||
var (
|
||||
|
@ -53,7 +53,7 @@ func (ce *ConstraintEnforcer) Run() {
|
|||
for {
|
||||
select {
|
||||
case event := <-watcher:
|
||||
node := event.(state.EventUpdateNode).Node
|
||||
node := event.(api.EventUpdateNode).Node
|
||||
ce.rejectNoncompliantTasks(node)
|
||||
case <-ce.stopChan:
|
||||
return
|
||||
|

19 vendor/github.com/docker/swarmkit/manager/orchestrator/global/global.go generated vendored
@ -8,7 +8,6 @@ import (
|
|||
"github.com/docker/swarmkit/manager/orchestrator/restart"
|
||||
"github.com/docker/swarmkit/manager/orchestrator/taskinit"
|
||||
"github.com/docker/swarmkit/manager/orchestrator/update"
|
||||
"github.com/docker/swarmkit/manager/state"
|
||||
"github.com/docker/swarmkit/manager/state/store"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
@ -130,21 +129,21 @@ func (g *Orchestrator) Run(ctx context.Context) error {
|
|||
case event := <-watcher:
|
||||
// TODO(stevvooe): Use ctx to limit running time of operation.
|
||||
switch v := event.(type) {
|
||||
case state.EventUpdateCluster:
|
||||
case api.EventUpdateCluster:
|
||||
g.cluster = v.Cluster
|
||||
case state.EventCreateService:
|
||||
case api.EventCreateService:
|
||||
if !orchestrator.IsGlobalService(v.Service) {
|
||||
continue
|
||||
}
|
||||
g.updateService(v.Service)
|
||||
g.reconcileServices(ctx, []string{v.Service.ID})
|
||||
case state.EventUpdateService:
|
||||
case api.EventUpdateService:
|
||||
if !orchestrator.IsGlobalService(v.Service) {
|
||||
continue
|
||||
}
|
||||
g.updateService(v.Service)
|
||||
g.reconcileServices(ctx, []string{v.Service.ID})
|
||||
case state.EventDeleteService:
|
||||
case api.EventDeleteService:
|
||||
if !orchestrator.IsGlobalService(v.Service) {
|
||||
continue
|
||||
}
|
||||
|
@ -152,10 +151,10 @@ func (g *Orchestrator) Run(ctx context.Context) error {
|
|||
// delete the service from service map
|
||||
delete(g.globalServices, v.Service.ID)
|
||||
g.restarts.ClearServiceHistory(v.Service.ID)
|
||||
case state.EventCreateNode:
|
||||
case api.EventCreateNode:
|
||||
g.updateNode(v.Node)
|
||||
g.reconcileOneNode(ctx, v.Node)
|
||||
case state.EventUpdateNode:
|
||||
case api.EventUpdateNode:
|
||||
g.updateNode(v.Node)
|
||||
switch v.Node.Status.State {
|
||||
// NodeStatus_DISCONNECTED is a transient state, no need to make any change
|
||||
|
@ -165,12 +164,12 @@ func (g *Orchestrator) Run(ctx context.Context) error {
|
|||
// node could come back to READY from DOWN or DISCONNECT
|
||||
g.reconcileOneNode(ctx, v.Node)
|
||||
}
|
||||
case state.EventDeleteNode:
|
||||
case api.EventDeleteNode:
|
||||
g.removeTasksFromNode(ctx, v.Node)
|
||||
delete(g.nodes, v.Node.ID)
|
||||
case state.EventUpdateTask:
|
||||
case api.EventUpdateTask:
|
||||
g.handleTaskChange(ctx, v.Task)
|
||||
case state.EventDeleteTask:
|
||||
case api.EventDeleteTask:
|
||||
// CLI allows deleting task
|
||||
if _, exists := g.globalServices[v.Task.ServiceID]; !exists {
|
||||
continue
|
||||
|

2 vendor/github.com/docker/swarmkit/manager/orchestrator/replicated/replicated.go generated vendored
@@ -83,7 +83,7 @@ func (r *Orchestrator) Run(ctx context.Context) error {
switch v := event.(type) {
case state.EventCommit:
r.tick(ctx)
case state.EventUpdateCluster:
case api.EventUpdateCluster:
r.cluster = v.Cluster
}
case <-r.stopChan:

7 vendor/github.com/docker/swarmkit/manager/orchestrator/replicated/services.go generated vendored
@ -7,7 +7,6 @@ import (
|
|||
"github.com/docker/swarmkit/api"
|
||||
"github.com/docker/swarmkit/log"
|
||||
"github.com/docker/swarmkit/manager/orchestrator"
|
||||
"github.com/docker/swarmkit/manager/state"
|
||||
"github.com/docker/swarmkit/manager/state/store"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
@ -47,19 +46,19 @@ func (r *Orchestrator) initServices(readTx store.ReadTx) error {
|
|||
|
||||
func (r *Orchestrator) handleServiceEvent(ctx context.Context, event events.Event) {
|
||||
switch v := event.(type) {
|
||||
case state.EventDeleteService:
|
||||
case api.EventDeleteService:
|
||||
if !orchestrator.IsReplicatedService(v.Service) {
|
||||
return
|
||||
}
|
||||
orchestrator.DeleteServiceTasks(ctx, r.store, v.Service)
|
||||
r.restarts.ClearServiceHistory(v.Service.ID)
|
||||
delete(r.reconcileServices, v.Service.ID)
|
||||
case state.EventCreateService:
|
||||
case api.EventCreateService:
|
||||
if !orchestrator.IsReplicatedService(v.Service) {
|
||||
return
|
||||
}
|
||||
r.reconcileServices[v.Service.ID] = v.Service
|
||||
case state.EventUpdateService:
|
||||
case api.EventUpdateService:
|
||||
if !orchestrator.IsReplicatedService(v.Service) {
|
||||
return
|
||||
}
|
||||
|

13 vendor/github.com/docker/swarmkit/manager/orchestrator/replicated/tasks.go generated vendored
@ -6,7 +6,6 @@ import (
|
|||
"github.com/docker/swarmkit/log"
|
||||
"github.com/docker/swarmkit/manager/orchestrator"
|
||||
"github.com/docker/swarmkit/manager/orchestrator/taskinit"
|
||||
"github.com/docker/swarmkit/manager/state"
|
||||
"github.com/docker/swarmkit/manager/state/store"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
@ -22,13 +21,13 @@ func (r *Orchestrator) initTasks(ctx context.Context, readTx store.ReadTx) error
|
|||
|
||||
func (r *Orchestrator) handleTaskEvent(ctx context.Context, event events.Event) {
|
||||
switch v := event.(type) {
|
||||
case state.EventDeleteNode:
|
||||
case api.EventDeleteNode:
|
||||
r.restartTasksByNodeID(ctx, v.Node.ID)
|
||||
case state.EventCreateNode:
|
||||
case api.EventCreateNode:
|
||||
r.handleNodeChange(ctx, v.Node)
|
||||
case state.EventUpdateNode:
|
||||
case api.EventUpdateNode:
|
||||
r.handleNodeChange(ctx, v.Node)
|
||||
case state.EventDeleteTask:
|
||||
case api.EventDeleteTask:
|
||||
if v.Task.DesiredState <= api.TaskStateRunning {
|
||||
service := r.resolveService(ctx, v.Task)
|
||||
if !orchestrator.IsReplicatedService(service) {
|
||||
|
@ -37,9 +36,9 @@ func (r *Orchestrator) handleTaskEvent(ctx context.Context, event events.Event)
|
|||
r.reconcileServices[service.ID] = service
|
||||
}
|
||||
r.restarts.Cancel(v.Task.ID)
|
||||
case state.EventUpdateTask:
|
||||
case api.EventUpdateTask:
|
||||
r.handleTaskChange(ctx, v.Task)
|
||||
case state.EventCreateTask:
|
||||
case api.EventCreateTask:
|
||||
r.handleTaskChange(ctx, v.Task)
|
||||
}
|
||||
}
|
||||
|

12 vendor/github.com/docker/swarmkit/manager/orchestrator/restart/restart.go generated vendored
@@ -327,17 +327,17 @@ func (r *Supervisor) DelayStart(ctx context.Context, _ store.Tx, oldTask *api.Ta
 		// node to become unavailable.
 		watch, cancelWatch = state.Watch(
 			r.store.WatchQueue(),
-			state.EventUpdateTask{
+			api.EventUpdateTask{
 				Task:   &api.Task{ID: oldTask.ID, Status: api.TaskStatus{State: api.TaskStateRunning}},
-				Checks: []state.TaskCheckFunc{state.TaskCheckID, state.TaskCheckStateGreaterThan},
+				Checks: []api.TaskCheckFunc{state.TaskCheckID, state.TaskCheckStateGreaterThan},
 			},
-			state.EventUpdateNode{
+			api.EventUpdateNode{
 				Node:   &api.Node{ID: oldTask.NodeID, Status: api.NodeStatus{State: api.NodeStatus_DOWN}},
-				Checks: []state.NodeCheckFunc{state.NodeCheckID, state.NodeCheckState},
+				Checks: []api.NodeCheckFunc{state.NodeCheckID, state.NodeCheckState},
 			},
-			state.EventDeleteNode{
+			api.EventDeleteNode{
 				Node:   &api.Node{ID: oldTask.NodeID},
-				Checks: []state.NodeCheckFunc{state.NodeCheckID},
+				Checks: []api.NodeCheckFunc{state.NodeCheckID},
 			},
 		)
 	}
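A minimal usage sketch of the watch pattern above (not vendored code; it assumes state.Watch keeps its queue-plus-specifiers signature and that the TaskCheck/NodeCheck helpers still live in manager/state, while the event specifiers and check-func types now come from the api package; the helper name watchTaskShutdown is illustrative only):

package example

import (
	events "github.com/docker/go-events"

	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/manager/state"
	"github.com/docker/swarmkit/manager/state/store"
)

// watchTaskShutdown subscribes to updates for one task leaving the RUNNING
// state and to its node going down, mirroring the specifiers used by
// Supervisor.DelayStart after this change.
func watchTaskShutdown(s *store.MemoryStore, taskID, nodeID string) (chan events.Event, func()) {
	return state.Watch(
		s.WatchQueue(),
		api.EventUpdateTask{
			Task:   &api.Task{ID: taskID, Status: api.TaskStatus{State: api.TaskStateRunning}},
			Checks: []api.TaskCheckFunc{state.TaskCheckID, state.TaskCheckStateGreaterThan},
		},
		api.EventUpdateNode{
			Node:   &api.Node{ID: nodeID, Status: api.NodeStatus{State: api.NodeStatus_DOWN}},
			Checks: []api.NodeCheckFunc{state.NodeCheckID, state.NodeCheckState},
		},
	)
}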
8  vendor/github.com/docker/swarmkit/manager/orchestrator/task.go  generated  vendored
@@ -29,6 +29,7 @@ func NewTask(cluster *api.Cluster, service *api.Service, slot uint64, nodeID str
 		ID:                 taskID,
 		ServiceAnnotations: service.Spec.Annotations,
 		Spec:               service.Spec.Task,
+		SpecVersion:        service.SpecVersion,
 		ServiceID:          service.ID,
 		Slot:               slot,
 		Status: api.TaskStatus{
@@ -62,6 +63,13 @@ func RestartCondition(task *api.Task) api.RestartPolicy_RestartCondition {
 
 // IsTaskDirty determines whether a task matches the given service's spec.
 func IsTaskDirty(s *api.Service, t *api.Task) bool {
+	// If the spec version matches, we know the task is not dirty. However,
+	// if it does not match, that doesn't mean the task is dirty, since
+	// only a portion of the spec is included in the comparison.
+	if t.SpecVersion != nil && *s.SpecVersion == *t.SpecVersion {
+		return false
+	}
+
 	return !reflect.DeepEqual(s.Spec.Task, t.Spec) ||
 		(t.Endpoint != nil && !reflect.DeepEqual(s.Spec.Endpoint, t.Endpoint.Spec))
 }
8  vendor/github.com/docker/swarmkit/manager/orchestrator/taskreaper/task_reaper.go  generated  vendored
@@ -41,7 +41,7 @@ type TaskReaper struct {
 
 // New creates a new TaskReaper.
 func New(store *store.MemoryStore) *TaskReaper {
-	watcher, cancel := state.Watch(store.WatchQueue(), state.EventCreateTask{}, state.EventUpdateTask{}, state.EventUpdateCluster{})
+	watcher, cancel := state.Watch(store.WatchQueue(), api.EventCreateTask{}, api.EventUpdateTask{}, api.EventUpdateCluster{})
 
 	return &TaskReaper{
 		store: store,
@@ -93,19 +93,19 @@ func (tr *TaskReaper) Run() {
 		select {
 		case event := <-tr.watcher:
 			switch v := event.(type) {
-			case state.EventCreateTask:
+			case api.EventCreateTask:
 				t := v.Task
 				tr.dirty[instanceTuple{
 					instance:  t.Slot,
 					serviceID: t.ServiceID,
 					nodeID:    t.NodeID,
 				}] = struct{}{}
-			case state.EventUpdateTask:
+			case api.EventUpdateTask:
 				t := v.Task
 				if t.Status.State >= api.TaskStateOrphaned && t.ServiceID == "" {
 					tr.orphaned = append(tr.orphaned, t.ID)
 				}
-			case state.EventUpdateCluster:
+			case api.EventUpdateCluster:
 				tr.taskHistory = v.Cluster.Spec.Orchestration.TaskHistoryRetentionLimit
 			}
 
54  vendor/github.com/docker/swarmkit/manager/orchestrator/update/updater.go  generated  vendored
@@ -162,6 +162,7 @@ func (u *Updater) Run(ctx context.Context, slots []orchestrator.Slot) {
 		failureAction          = api.UpdateConfig_PAUSE
 		allowedFailureFraction = float32(0)
 		monitoringPeriod       = defaultMonitor
+		order                  = api.UpdateConfig_STOP_FIRST
 	)
 
 	updateConfig := service.Spec.Update
@@ -174,6 +175,7 @@ func (u *Updater) Run(ctx context.Context, slots []orchestrator.Slot) {
 		allowedFailureFraction = updateConfig.MaxFailureRatio
 		parallelism = int(updateConfig.Parallelism)
 		delay = updateConfig.Delay
+		order = updateConfig.Order
 
 		var err error
 		if updateConfig.Monitor != nil {
@@ -196,7 +198,7 @@ func (u *Updater) Run(ctx context.Context, slots []orchestrator.Slot) {
 	wg.Add(parallelism)
 	for i := 0; i < parallelism; i++ {
 		go func() {
-			u.worker(ctx, slotQueue, delay)
+			u.worker(ctx, slotQueue, delay, order)
 			wg.Done()
 		}()
 	}
@@ -207,9 +209,9 @@ func (u *Updater) Run(ctx context.Context, slots []orchestrator.Slot) {
 		var cancelWatch func()
 		failedTaskWatch, cancelWatch = state.Watch(
 			u.store.WatchQueue(),
-			state.EventUpdateTask{
+			api.EventUpdateTask{
 				Task:   &api.Task{ServiceID: service.ID, Status: api.TaskStatus{State: api.TaskStateRunning}},
-				Checks: []state.TaskCheckFunc{state.TaskCheckServiceID, state.TaskCheckStateGreaterThan},
+				Checks: []api.TaskCheckFunc{state.TaskCheckServiceID, state.TaskCheckStateGreaterThan},
 			},
 		)
 		defer cancelWatch()
@@ -270,7 +272,7 @@ slotsLoop:
 			stopped = true
 			break slotsLoop
 		case ev := <-failedTaskWatch:
-			if failureTriggersAction(ev.(state.EventUpdateTask).Task) {
+			if failureTriggersAction(ev.(api.EventUpdateTask).Task) {
 				break slotsLoop
 			}
 		case slotQueue <- slot:
@@ -295,7 +297,7 @@ slotsLoop:
 		case <-doneMonitoring:
 			break monitorLoop
 		case ev := <-failedTaskWatch:
-			if failureTriggersAction(ev.(state.EventUpdateTask).Task) {
+			if failureTriggersAction(ev.(api.EventUpdateTask).Task) {
 				break monitorLoop
 			}
 		}
@@ -310,7 +312,7 @@ slotsLoop:
 	}
 }
 
-func (u *Updater) worker(ctx context.Context, queue <-chan orchestrator.Slot, delay time.Duration) {
+func (u *Updater) worker(ctx context.Context, queue <-chan orchestrator.Slot, delay time.Duration, order api.UpdateConfig_UpdateOrder) {
 	for slot := range queue {
 		// Do we have a task with the new spec in desired state = RUNNING?
 		// If so, all we have to do to complete the update is remove the
@@ -347,7 +349,7 @@ func (u *Updater) worker(ctx context.Context, queue <-chan orchestrator.Slot, de
 			}
 			updated.DesiredState = api.TaskStateReady
 
-			if err := u.updateTask(ctx, slot, updated); err != nil {
+			if err := u.updateTask(ctx, slot, updated, order); err != nil {
 				log.G(ctx).WithError(err).WithField("task.id", updated.ID).Error("update failed")
 			}
 		}
@@ -362,11 +364,11 @@ func (u *Updater) worker(ctx context.Context, queue <-chan orchestrator.Slot, de
 	}
 }
 
-func (u *Updater) updateTask(ctx context.Context, slot orchestrator.Slot, updated *api.Task) error {
+func (u *Updater) updateTask(ctx context.Context, slot orchestrator.Slot, updated *api.Task, order api.UpdateConfig_UpdateOrder) error {
 	// Kick off the watch before even creating the updated task. This is in order to avoid missing any event.
-	taskUpdates, cancel := state.Watch(u.watchQueue, state.EventUpdateTask{
+	taskUpdates, cancel := state.Watch(u.watchQueue, api.EventUpdateTask{
 		Task:   &api.Task{ID: updated.ID},
-		Checks: []state.TaskCheckFunc{state.TaskCheckID},
+		Checks: []api.TaskCheckFunc{state.TaskCheckID},
 	})
 	defer cancel()
 
@@ -377,15 +379,11 @@ func (u *Updater) updateTask(ctx context.Context, slot orchestrator.Slot, update
 	u.updatedTasks[updated.ID] = time.Time{}
 	u.updatedTasksMu.Unlock()
 
+	startThenStop := false
 	var delayStartCh <-chan struct{}
 	// Atomically create the updated task and bring down the old one.
 	_, err := u.store.Batch(func(batch *store.Batch) error {
-		oldTask, err := u.removeOldTasks(ctx, batch, slot)
-		if err != nil {
-			return err
-		}
-
-		err = batch.Update(func(tx store.Tx) error {
+		err := batch.Update(func(tx store.Tx) error {
 			if store.GetService(tx, updated.ServiceID) == nil {
 				return errors.New("service was deleted")
 			}
@@ -399,7 +397,16 @@ func (u *Updater) updateTask(ctx context.Context, slot orchestrator.Slot, update
 			return err
 		}
 
-		delayStartCh = u.restarts.DelayStart(ctx, nil, oldTask, updated.ID, 0, true)
+		if order == api.UpdateConfig_START_FIRST {
+			delayStartCh = u.restarts.DelayStart(ctx, nil, nil, updated.ID, 0, false)
+			startThenStop = true
+		} else {
+			oldTask, err := u.removeOldTasks(ctx, batch, slot)
+			if err != nil {
+				return err
+			}
+			delayStartCh = u.restarts.DelayStart(ctx, nil, oldTask, updated.ID, 0, true)
+		}
 
 		return nil
 
@@ -421,11 +428,22 @@ func (u *Updater) updateTask(ctx context.Context, slot orchestrator.Slot, update
 	for {
 		select {
 		case e := <-taskUpdates:
-			updated = e.(state.EventUpdateTask).Task
+			updated = e.(api.EventUpdateTask).Task
 			if updated.Status.State >= api.TaskStateRunning {
 				u.updatedTasksMu.Lock()
 				u.updatedTasks[updated.ID] = time.Now()
 				u.updatedTasksMu.Unlock()
+
+				if startThenStop {
+					_, err := u.store.Batch(func(batch *store.Batch) error {
+						_, err := u.removeOldTasks(ctx, batch, slot)
+						if err != nil {
+							log.G(ctx).WithError(err).WithField("task.id", updated.ID).Warning("failed to remove old task after starting replacement")
+						}
+						return nil
+					})
+					return err
+				}
 				return nil
 			}
 		case <-u.stopChan:
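The ordering logic added above can be reduced to a small illustrative sketch (not the vendored implementation, which works through store.Batch and the restart supervisor; the names here are placeholders):

package example

// replaceSlot shows the two rollout orders the updater now supports: with
// START_FIRST the replacement task is started and, once it is up, the old
// task is removed; with STOP_FIRST (the default) the old task is taken down
// before the replacement is started.
func replaceSlot(startFirst bool, startNew func() error, stopOld func() error) error {
	if startFirst {
		// START_FIRST: bring the replacement up before removing the old task.
		if err := startNew(); err != nil {
			return err
		}
		return stopOld()
	}
	// STOP_FIRST: shut the old task down first, then start the replacement.
	if err := stopOld(); err != nil {
		return err
	}
	return startNew()
}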
5  vendor/github.com/docker/swarmkit/manager/role_manager.go  generated  vendored
@@ -5,7 +5,6 @@ import (
 
 	"github.com/docker/swarmkit/api"
 	"github.com/docker/swarmkit/log"
-	"github.com/docker/swarmkit/manager/state"
 	"github.com/docker/swarmkit/manager/state/raft"
 	"github.com/docker/swarmkit/manager/state/store"
 	"golang.org/x/net/context"
@@ -57,7 +56,7 @@ func (rm *roleManager) Run(ctx context.Context) {
 			nodes, err = store.FindNodes(readTx, store.All)
 			return err
 		},
-		state.EventUpdateNode{})
+		api.EventUpdateNode{})
 	defer cancelWatch()
 
 	if err != nil {
@@ -76,7 +75,7 @@ func (rm *roleManager) Run(ctx context.Context) {
 	for {
 		select {
 		case event := <-watcher:
-			node := event.(state.EventUpdateNode).Node
+			node := event.(api.EventUpdateNode).Node
 			rm.pending[node.ID] = node
 			rm.reconcileRole(ctx, node)
 			if len(rm.pending) != 0 && ticker == nil {
52  vendor/github.com/docker/swarmkit/manager/scheduler/scheduler.go  generated  vendored
@@ -142,19 +142,19 @@ func (s *Scheduler) Run(ctx context.Context) error {
 		select {
 		case event := <-updates:
 			switch v := event.(type) {
-			case state.EventCreateTask:
+			case api.EventCreateTask:
 				pendingChanges += s.createTask(ctx, v.Task)
-			case state.EventUpdateTask:
+			case api.EventUpdateTask:
 				pendingChanges += s.updateTask(ctx, v.Task)
-			case state.EventDeleteTask:
+			case api.EventDeleteTask:
 				s.deleteTask(ctx, v.Task)
-			case state.EventCreateNode:
+			case api.EventCreateNode:
 				s.createOrUpdateNode(v.Node)
 				pendingChanges++
-			case state.EventUpdateNode:
+			case api.EventUpdateNode:
 				s.createOrUpdateNode(v.Node)
 				pendingChanges++
-			case state.EventDeleteNode:
+			case api.EventDeleteNode:
 				s.nodeSet.remove(v.Node.ID)
 			case state.EventCommit:
 				if commitDebounceTimer != nil {
@@ -331,7 +331,12 @@ func (s *Scheduler) processPreassignedTasks(ctx context.Context) {
 
 // tick attempts to schedule the queue.
 func (s *Scheduler) tick(ctx context.Context) {
-	tasksByCommonSpec := make(map[string]map[string]*api.Task)
+	type commonSpecKey struct {
+		serviceID   string
+		specVersion api.Version
+	}
+	tasksByCommonSpec := make(map[commonSpecKey]map[string]*api.Task)
+	var oneOffTasks []*api.Task
 	schedulingDecisions := make(map[string]schedulingDecision, len(s.unassignedTasks))
 
 	for taskID, t := range s.unassignedTasks {
@@ -341,30 +346,31 @@ func (s *Scheduler) tick(ctx context.Context) {
 			continue
 		}
 
-		// Group common tasks with common specs by marshalling the spec
-		// into taskKey and using it as a map key.
-		// TODO(aaronl): Once specs are versioned, this will allow a
-		// much more efficient fast path.
-		fieldsToMarshal := api.Task{
-			ServiceID: t.ServiceID,
-			Spec:      t.Spec,
-		}
-		marshalled, err := fieldsToMarshal.Marshal()
-		if err != nil {
-			panic(err)
-		}
-		taskGroupKey := string(marshalled)
+		// Group tasks with common specs
+		if t.SpecVersion != nil {
+			taskGroupKey := commonSpecKey{
+				serviceID:   t.ServiceID,
+				specVersion: *t.SpecVersion,
+			}
 
-		if tasksByCommonSpec[taskGroupKey] == nil {
-			tasksByCommonSpec[taskGroupKey] = make(map[string]*api.Task)
+			if tasksByCommonSpec[taskGroupKey] == nil {
+				tasksByCommonSpec[taskGroupKey] = make(map[string]*api.Task)
+			}
+			tasksByCommonSpec[taskGroupKey][taskID] = t
+		} else {
+			// This task doesn't have a spec version. We have to
+			// schedule it as a one-off.
+			oneOffTasks = append(oneOffTasks, t)
 		}
-		tasksByCommonSpec[taskGroupKey][taskID] = t
 		delete(s.unassignedTasks, taskID)
 	}
 
 	for _, taskGroup := range tasksByCommonSpec {
 		s.scheduleTaskGroup(ctx, taskGroup, schedulingDecisions)
 	}
+	for _, t := range oneOffTasks {
+		s.scheduleTaskGroup(ctx, map[string]*api.Task{t.ID: t}, schedulingDecisions)
+	}
 
 	_, failed := s.applySchedulingDecisions(ctx, schedulingDecisions)
 	for _, decision := range failed {
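The grouping fast path above can be summarized with a small sketch (illustrative only; it assumes the unassigned-task map is keyed by task ID, as in the scheduler):

package example

import "github.com/docker/swarmkit/api"

// commonSpecKey mirrors the grouping key introduced in tick(): tasks sharing
// a service ID and spec version can be scheduled as one group.
type commonSpecKey struct {
	serviceID   string
	specVersion api.Version
}

// groupTasks splits unassigned tasks into spec-version groups plus a list of
// one-off tasks that carry no SpecVersion and must be scheduled individually.
func groupTasks(unassigned map[string]*api.Task) (map[commonSpecKey]map[string]*api.Task, []*api.Task) {
	groups := make(map[commonSpecKey]map[string]*api.Task)
	var oneOff []*api.Task
	for id, t := range unassigned {
		if t.SpecVersion == nil {
			oneOff = append(oneOff, t)
			continue
		}
		key := commonSpecKey{serviceID: t.ServiceID, specVersion: *t.SpecVersion}
		if groups[key] == nil {
			groups[key] = make(map[string]*api.Task)
		}
		groups[key][id] = t
	}
	return groups, oneOff
}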
2  vendor/github.com/docker/swarmkit/manager/state/proposer.go  generated  vendored
@@ -12,6 +12,6 @@ type Proposer interface {
 	// proposed changes. The callback is necessary for the Proposer to make
 	// sure that the changes are committed before it interacts further
 	// with the store.
-	ProposeValue(ctx context.Context, storeAction []*api.StoreAction, cb func()) error
+	ProposeValue(ctx context.Context, storeAction []api.StoreAction, cb func()) error
 	GetVersion() *api.Version
 }
6  vendor/github.com/docker/swarmkit/manager/state/raft/raft.go  generated  vendored
@@ -1411,7 +1411,7 @@ func (n *Node) registerNode(node *api.RaftMember) error {
 
 // ProposeValue calls Propose on the raft and waits
 // on the commit log action before returning a result
-func (n *Node) ProposeValue(ctx context.Context, storeAction []*api.StoreAction, cb func()) error {
+func (n *Node) ProposeValue(ctx context.Context, storeAction []api.StoreAction, cb func()) error {
 	ctx, cancel := n.WithContext(ctx)
 	defer cancel()
 	_, err := n.processInternalRaftRequest(ctx, &api.InternalRaftRequest{Action: storeAction}, cb)
@@ -1663,10 +1663,6 @@ func (n *Node) processEntry(ctx context.Context, entry raftpb.Entry) error {
 		return err
 	}
 
-	if r.Action == nil {
-		return nil
-	}
-
 	if !n.wait.trigger(r.ID, r) {
 		// There was no wait on this ID, meaning we don't have a
 		// transaction in progress that would be committed to the
4  vendor/github.com/docker/swarmkit/manager/state/raft/util.go  generated  vendored
@@ -59,7 +59,7 @@ func WaitForLeader(ctx context.Context, n *Node) error {
 // committed to raft. This ensures that we can see and serve informations
 // related to the cluster.
 func WaitForCluster(ctx context.Context, n *Node) (cluster *api.Cluster, err error) {
-	watch, cancel := state.Watch(n.MemoryStore().WatchQueue(), state.EventCreateCluster{})
+	watch, cancel := state.Watch(n.MemoryStore().WatchQueue(), api.EventCreateCluster{})
 	defer cancel()
 
 	var clusters []*api.Cluster
@@ -76,7 +76,7 @@ func WaitForCluster(ctx context.Context, n *Node) (cluster *api.Cluster, err err
 	} else {
 		select {
 		case e := <-watch:
-			cluster = e.(state.EventCreateCluster).Cluster
+			cluster = e.(api.EventCreateCluster).Cluster
 		case <-ctx.Done():
 			return nil, ctx.Err()
 		}
25  vendor/github.com/docker/swarmkit/manager/state/store/apply.go  generated  vendored
@@ -4,6 +4,7 @@ import (
 	"errors"
 
 	"github.com/docker/go-events"
+	"github.com/docker/swarmkit/api"
 	"github.com/docker/swarmkit/manager/state"
 )
 
@@ -12,32 +13,32 @@
 func Apply(store *MemoryStore, item events.Event) (err error) {
 	return store.Update(func(tx Tx) error {
 		switch v := item.(type) {
-		case state.EventCreateTask:
+		case api.EventCreateTask:
 			return CreateTask(tx, v.Task)
-		case state.EventUpdateTask:
+		case api.EventUpdateTask:
 			return UpdateTask(tx, v.Task)
-		case state.EventDeleteTask:
+		case api.EventDeleteTask:
 			return DeleteTask(tx, v.Task.ID)
 
-		case state.EventCreateService:
+		case api.EventCreateService:
 			return CreateService(tx, v.Service)
-		case state.EventUpdateService:
+		case api.EventUpdateService:
 			return UpdateService(tx, v.Service)
-		case state.EventDeleteService:
+		case api.EventDeleteService:
 			return DeleteService(tx, v.Service.ID)
 
-		case state.EventCreateNetwork:
+		case api.EventCreateNetwork:
 			return CreateNetwork(tx, v.Network)
-		case state.EventUpdateNetwork:
+		case api.EventUpdateNetwork:
 			return UpdateNetwork(tx, v.Network)
-		case state.EventDeleteNetwork:
+		case api.EventDeleteNetwork:
 			return DeleteNetwork(tx, v.Network.ID)
 
-		case state.EventCreateNode:
+		case api.EventCreateNode:
 			return CreateNode(tx, v.Node)
-		case state.EventUpdateNode:
+		case api.EventUpdateNode:
 			return UpdateNode(tx, v.Node)
-		case state.EventDeleteNode:
+		case api.EventDeleteNode:
 			return DeleteNode(tx, v.Node.ID)
 
 		case state.EventCommit:
58  vendor/github.com/docker/swarmkit/manager/state/store/by.go  generated  vendored
@@ -54,6 +54,16 @@ type byService string
 func (b byService) isBy() {
 }
 
+type byRuntime string
+
+func (b byRuntime) isBy() {
+}
+
+// ByRuntime creates an object to pass to Find to select by runtime.
+func ByRuntime(runtime string) By {
+	return byRuntime(runtime)
+}
+
 // ByServiceID creates an object to pass to Find to select by service.
 func ByServiceID(serviceID string) By {
 	return byService(serviceID)
@@ -143,3 +153,51 @@ func (b byReferencedSecretID) isBy() {
 func ByReferencedSecretID(secretID string) By {
 	return byReferencedSecretID(secretID)
 }
+
+type byKind string
+
+func (b byKind) isBy() {
+}
+
+// ByKind creates an object to pass to Find to search for a Resource of a
+// particular kind.
+func ByKind(kind string) By {
+	return byKind(kind)
+}
+
+type byCustom struct {
+	objType string
+	index   string
+	value   string
+}
+
+func (b byCustom) isBy() {
+}
+
+// ByCustom creates an object to pass to Find to search a custom index.
+func ByCustom(objType, index, value string) By {
+	return byCustom{
+		objType: objType,
+		index:   index,
+		value:   value,
+	}
+}
+
+type byCustomPrefix struct {
+	objType string
+	index   string
+	value   string
+}
+
+func (b byCustomPrefix) isBy() {
+}
+
+// ByCustomPrefix creates an object to pass to Find to search a custom index by
+// a value prefix.
+func ByCustomPrefix(objType, index, value string) By {
+	return byCustomPrefix{
+		objType: objType,
+		index:   index,
+		value:   value,
+	}
+}
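A short usage sketch of the new selectors (not vendored code; the index name "org.example/index" and its value are placeholders, and the example relies only on FindNodes accepting byCustom/byCustomPrefix as shown later in this diff):

package example

import (
	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/manager/state/store"
)

// findNodesByCustomIndex reads nodes whose custom index value starts with the
// given prefix, inside a consistent read transaction.
func findNodesByCustomIndex(s *store.MemoryStore) ([]*api.Node, error) {
	var (
		nodes []*api.Node
		err   error
	)
	s.View(func(tx store.ReadTx) {
		nodes, err = store.FindNodes(tx, store.ByCustomPrefix("", "org.example/index", "val"))
	})
	return nodes, err
}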
123  vendor/github.com/docker/swarmkit/manager/state/store/clusters.go  generated  vendored
@@ -4,7 +4,6 @@ import (
 	"strings"
 
 	"github.com/docker/swarmkit/api"
-	"github.com/docker/swarmkit/manager/state"
 	memdb "github.com/hashicorp/go-memdb"
 )
 
@@ -18,19 +17,23 @@ const (
 
 func init() {
 	register(ObjectStoreConfig{
-		Name: tableCluster,
 		Table: &memdb.TableSchema{
 			Name: tableCluster,
 			Indexes: map[string]*memdb.IndexSchema{
 				indexID: {
 					Name:    indexID,
 					Unique:  true,
-					Indexer: clusterIndexerByID{},
+					Indexer: api.ClusterIndexerByID{},
 				},
 				indexName: {
 					Name:    indexName,
 					Unique:  true,
-					Indexer: clusterIndexerByName{},
+					Indexer: api.ClusterIndexerByName{},
 				},
+				indexCustom: {
+					Name:         indexCustom,
+					Indexer:      api.ClusterCustomIndexer{},
+					AllowMissing: true,
+				},
 			},
 		},
@@ -56,7 +59,7 @@ func init() {
 			}
 			return nil
 		},
-		ApplyStoreAction: func(tx Tx, sa *api.StoreAction) error {
+		ApplyStoreAction: func(tx Tx, sa api.StoreAction) error {
 			switch v := sa.Target.(type) {
 			case *api.StoreAction_Cluster:
 				obj := v.Cluster
@@ -71,64 +74,9 @@ func init() {
 			}
 			return errUnknownStoreAction
 		},
-		NewStoreAction: func(c state.Event) (api.StoreAction, error) {
-			var sa api.StoreAction
-			switch v := c.(type) {
-			case state.EventCreateCluster:
-				sa.Action = api.StoreActionKindCreate
-				sa.Target = &api.StoreAction_Cluster{
-					Cluster: v.Cluster,
-				}
-			case state.EventUpdateCluster:
-				sa.Action = api.StoreActionKindUpdate
-				sa.Target = &api.StoreAction_Cluster{
-					Cluster: v.Cluster,
-				}
-			case state.EventDeleteCluster:
-				sa.Action = api.StoreActionKindRemove
-				sa.Target = &api.StoreAction_Cluster{
-					Cluster: v.Cluster,
-				}
-			default:
-				return api.StoreAction{}, errUnknownStoreAction
-			}
-			return sa, nil
-		},
 	})
 }
 
-type clusterEntry struct {
-	*api.Cluster
-}
-
-func (c clusterEntry) ID() string {
-	return c.Cluster.ID
-}
-
-func (c clusterEntry) Meta() api.Meta {
-	return c.Cluster.Meta
-}
-
-func (c clusterEntry) SetMeta(meta api.Meta) {
-	c.Cluster.Meta = meta
-}
-
-func (c clusterEntry) Copy() Object {
-	return clusterEntry{c.Cluster.Copy()}
-}
-
-func (c clusterEntry) EventCreate() state.Event {
-	return state.EventCreateCluster{Cluster: c.Cluster}
-}
-
-func (c clusterEntry) EventUpdate() state.Event {
-	return state.EventUpdateCluster{Cluster: c.Cluster}
-}
-
-func (c clusterEntry) EventDelete() state.Event {
-	return state.EventDeleteCluster{Cluster: c.Cluster}
-}
-
 // CreateCluster adds a new cluster to the store.
 // Returns ErrExist if the ID is already taken.
 func CreateCluster(tx Tx, c *api.Cluster) error {
@@ -137,7 +85,7 @@ func CreateCluster(tx Tx, c *api.Cluster) error {
 		return ErrNameConflict
 	}
 
-	return tx.create(tableCluster, clusterEntry{c})
+	return tx.create(tableCluster, c)
 }
 
 // UpdateCluster updates an existing cluster in the store.
@@ -145,12 +93,12 @@ func CreateCluster(tx Tx, c *api.Cluster) error {
 func UpdateCluster(tx Tx, c *api.Cluster) error {
 	// Ensure the name is either not in use or already used by this same Cluster.
 	if existing := tx.lookup(tableCluster, indexName, strings.ToLower(c.Spec.Annotations.Name)); existing != nil {
-		if existing.ID() != c.ID {
+		if existing.GetID() != c.ID {
 			return ErrNameConflict
 		}
 	}
 
-	return tx.update(tableCluster, clusterEntry{c})
+	return tx.update(tableCluster, c)
 }
 
 // DeleteCluster removes a cluster from the store.
@@ -166,14 +114,14 @@ func GetCluster(tx ReadTx, id string) *api.Cluster {
 	if n == nil {
 		return nil
 	}
-	return n.(clusterEntry).Cluster
+	return n.(*api.Cluster)
 }
 
 // FindClusters selects a set of clusters and returns them.
 func FindClusters(tx ReadTx, by By) ([]*api.Cluster, error) {
 	checkType := func(by By) error {
 		switch by.(type) {
-		case byName, byNamePrefix, byIDPrefix:
+		case byName, byNamePrefix, byIDPrefix, byCustom, byCustomPrefix:
 			return nil
 		default:
 			return ErrInvalidFindBy
@@ -181,51 +129,10 @@ func FindClusters(tx ReadTx, by By) ([]*api.Cluster, error) {
 	}
 
 	clusterList := []*api.Cluster{}
-	appendResult := func(o Object) {
-		clusterList = append(clusterList, o.(clusterEntry).Cluster)
+	appendResult := func(o api.StoreObject) {
+		clusterList = append(clusterList, o.(*api.Cluster))
 	}
 
 	err := tx.find(tableCluster, by, checkType, appendResult)
 	return clusterList, err
 }
-
-type clusterIndexerByID struct{}
-
-func (ci clusterIndexerByID) FromArgs(args ...interface{}) ([]byte, error) {
-	return fromArgs(args...)
-}
-
-func (ci clusterIndexerByID) FromObject(obj interface{}) (bool, []byte, error) {
-	c, ok := obj.(clusterEntry)
-	if !ok {
-		panic("unexpected type passed to FromObject")
-	}
-
-	// Add the null character as a terminator
-	val := c.Cluster.ID + "\x00"
-	return true, []byte(val), nil
-}
-
-func (ci clusterIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) {
-	return prefixFromArgs(args...)
-}
-
-type clusterIndexerByName struct{}
-
-func (ci clusterIndexerByName) FromArgs(args ...interface{}) ([]byte, error) {
-	return fromArgs(args...)
-}
-
-func (ci clusterIndexerByName) FromObject(obj interface{}) (bool, []byte, error) {
-	c, ok := obj.(clusterEntry)
-	if !ok {
-		panic("unexpected type passed to FromObject")
-	}
-
-	// Add the null character as a terminator
-	return true, []byte(strings.ToLower(c.Spec.Annotations.Name) + "\x00"), nil
-}
-
-func (ci clusterIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) {
-	return prefixFromArgs(args...)
-}
150  vendor/github.com/docker/swarmkit/manager/state/store/extensions.go  generated  vendored  Normal file
@@ -0,0 +1,150 @@
package store

import (
	"errors"
	"strings"

	"github.com/docker/swarmkit/api"
	memdb "github.com/hashicorp/go-memdb"
)

const tableExtension = "extension"

func init() {
	register(ObjectStoreConfig{
		Table: &memdb.TableSchema{
			Name: tableExtension,
			Indexes: map[string]*memdb.IndexSchema{
				indexID: {
					Name:    indexID,
					Unique:  true,
					Indexer: api.ExtensionIndexerByID{},
				},
				indexName: {
					Name:    indexName,
					Unique:  true,
					Indexer: api.ExtensionIndexerByName{},
				},
				indexCustom: {
					Name:         indexCustom,
					Indexer:      api.ExtensionCustomIndexer{},
					AllowMissing: true,
				},
			},
		},
		Save: func(tx ReadTx, snapshot *api.StoreSnapshot) error {
			var err error
			snapshot.Extensions, err = FindExtensions(tx, All)
			return err
		},
		Restore: func(tx Tx, snapshot *api.StoreSnapshot) error {
			extensions, err := FindExtensions(tx, All)
			if err != nil {
				return err
			}
			for _, e := range extensions {
				if err := DeleteExtension(tx, e.ID); err != nil {
					return err
				}
			}
			for _, e := range snapshot.Extensions {
				if err := CreateExtension(tx, e); err != nil {
					return err
				}
			}
			return nil
		},
		ApplyStoreAction: func(tx Tx, sa api.StoreAction) error {
			switch v := sa.Target.(type) {
			case *api.StoreAction_Extension:
				obj := v.Extension
				switch sa.Action {
				case api.StoreActionKindCreate:
					return CreateExtension(tx, obj)
				case api.StoreActionKindUpdate:
					return UpdateExtension(tx, obj)
				case api.StoreActionKindRemove:
					return DeleteExtension(tx, obj.ID)
				}
			}
			return errUnknownStoreAction
		},
	})
}

type extensionEntry struct {
	*api.Extension
}

// CreateExtension adds a new extension to the store.
// Returns ErrExist if the ID is already taken.
func CreateExtension(tx Tx, e *api.Extension) error {
	// Ensure the name is not already in use.
	if tx.lookup(tableExtension, indexName, strings.ToLower(e.Annotations.Name)) != nil {
		return ErrNameConflict
	}

	// It can't conflict with built-in kinds either.
	if _, ok := schema.Tables[e.Annotations.Name]; ok {
		return ErrNameConflict
	}

	return tx.create(tableExtension, extensionEntry{e})
}

// UpdateExtension updates an existing extension in the store.
// Returns ErrNotExist if the object doesn't exist.
func UpdateExtension(tx Tx, e *api.Extension) error {
	// TODO(aaronl): For the moment, extensions are immutable
	return errors.New("extensions are immutable")
}

// DeleteExtension removes an extension from the store.
// Returns ErrNotExist if the object doesn't exist.
func DeleteExtension(tx Tx, id string) error {
	e := tx.get(tableExtension, id)
	if e == nil {
		return ErrNotExist
	}

	resources, err := FindResources(tx, ByKind(e.(extensionEntry).Annotations.Name))
	if err != nil {
		return err
	}

	if len(resources) != 0 {
		return errors.New("cannot delete extension because objects of this type exist in the data store")
	}

	return tx.delete(tableExtension, id)
}

// GetExtension looks up an extension by ID.
// Returns nil if the object doesn't exist.
func GetExtension(tx ReadTx, id string) *api.Extension {
	e := tx.get(tableExtension, id)
	if e == nil {
		return nil
	}
	return e.(extensionEntry).Extension
}

// FindExtensions selects a set of extensions and returns them.
func FindExtensions(tx ReadTx, by By) ([]*api.Extension, error) {
	checkType := func(by By) error {
		switch by.(type) {
		case byIDPrefix, byName, byCustom, byCustomPrefix:
			return nil
		default:
			return ErrInvalidFindBy
		}
	}

	extensionList := []*api.Extension{}
	appendResult := func(o api.StoreObject) {
		extensionList = append(extensionList, o.(extensionEntry).Extension)
	}

	err := tx.find(tableExtension, by, checkType, appendResult)
	return extensionList, err
}
119  vendor/github.com/docker/swarmkit/manager/state/store/memory.go  generated  vendored
@@ -22,6 +22,7 @@ import (
 const (
 	indexID        = "id"
 	indexName      = "name"
+	indexRuntime   = "runtime"
 	indexServiceID = "serviceid"
 	indexNodeID    = "nodeid"
 	indexSlot      = "slot"
@@ -31,6 +32,8 @@ const (
 	indexMembership = "membership"
 	indexNetwork    = "network"
 	indexSecret     = "secret"
+	indexKind       = "kind"
+	indexCustom     = "custom"
 
 	prefix = "_prefix"
 
@@ -71,7 +74,7 @@
 
 func register(os ObjectStoreConfig) {
 	objectStorers = append(objectStorers, os)
-	schema.Tables[os.Name] = os.Table
+	schema.Tables[os.Table.Name] = os.Table
 }
 
 // MemoryStore is a concurrency-safe, in-memory implementation of the Store
@@ -140,9 +143,9 @@ func prefixFromArgs(args ...interface{}) ([]byte, error) {
 // consistent view of the data that cannot be affected by other
 // transactions.
 type ReadTx interface {
-	lookup(table, index, id string) Object
-	get(table, id string) Object
-	find(table string, by By, checkType func(By) error, appendResult func(Object)) error
+	lookup(table, index, id string) api.StoreObject
+	get(table, id string) api.StoreObject
+	find(table string, by By, checkType func(By) error, appendResult func(api.StoreObject)) error
 }
 
 type readTx struct {
@@ -166,19 +169,19 @@ func (s *MemoryStore) View(cb func(ReadTx)) {
 // until the transaction is over.
 type Tx interface {
 	ReadTx
-	create(table string, o Object) error
-	update(table string, o Object) error
+	create(table string, o api.StoreObject) error
+	update(table string, o api.StoreObject) error
 	delete(table, id string) error
 }
 
 type tx struct {
 	readTx
 	curVersion *api.Version
-	changelist []state.Event
+	changelist []api.Event
 }
 
 // ApplyStoreActions updates a store based on StoreAction messages.
-func (s *MemoryStore) ApplyStoreActions(actions []*api.StoreAction) error {
+func (s *MemoryStore) ApplyStoreActions(actions []api.StoreAction) error {
 	s.updateLock.Lock()
 	memDBTx := s.memDB.Txn(true)
 
@@ -208,7 +211,7 @@ func (s *MemoryStore) ApplyStoreActions(actions []*api.StoreAction) error {
 	return nil
 }
 
-func applyStoreAction(tx Tx, sa *api.StoreAction) error {
+func applyStoreAction(tx Tx, sa api.StoreAction) error {
 	for _, os := range objectStorers {
 		err := os.ApplyStoreAction(tx, sa)
 		if err != errUnknownStoreAction {
@@ -238,7 +241,7 @@ func (s *MemoryStore) update(proposer state.Proposer, cb func(Tx) error) error {
 	if proposer == nil {
 		memDBTx.Commit()
 	} else {
-		var sa []*api.StoreAction
+		var sa []api.StoreAction
 		sa, err = tx.changelistStoreActions()
 
 		if err == nil {
@@ -310,7 +313,7 @@ func (batch *Batch) Update(cb func(Tx) error) error {
 	batch.applied++
 
 	for batch.changelistLen < len(batch.tx.changelist) {
-		sa, err := newStoreAction(batch.tx.changelist[batch.changelistLen])
+		sa, err := api.NewStoreAction(batch.tx.changelist[batch.changelistLen])
 		if err != nil {
 			return err
 		}
@@ -348,7 +351,7 @@ func (batch *Batch) newTx() {
 
 func (batch *Batch) commit() error {
 	if batch.store.proposer != nil {
-		var sa []*api.StoreAction
+		var sa []api.StoreAction
 		sa, batch.err = batch.tx.changelistStoreActions()
 
 		if batch.err == nil {
@@ -423,24 +426,11 @@ func (tx *tx) init(memDBTx *memdb.Txn, curVersion *api.Version) {
 	tx.changelist = nil
 }
 
-func newStoreAction(c state.Event) (*api.StoreAction, error) {
-	for _, os := range objectStorers {
-		sa, err := os.NewStoreAction(c)
-		if err == nil {
-			return &sa, nil
-		} else if err != errUnknownStoreAction {
-			return nil, err
-		}
-	}
-
-	return nil, errors.New("unrecognized event type")
-}
-
-func (tx tx) changelistStoreActions() ([]*api.StoreAction, error) {
-	var actions []*api.StoreAction
+func (tx tx) changelistStoreActions() ([]api.StoreAction, error) {
+	var actions []api.StoreAction
 
 	for _, c := range tx.changelist {
-		sa, err := newStoreAction(c)
+		sa, err := api.NewStoreAction(c)
 		if err != nil {
 			return nil, err
 		}
@@ -451,26 +441,26 @@ func (tx tx) changelistStoreActions() ([]*api.StoreAction, error) {
 }
 
 // lookup is an internal typed wrapper around memdb.
-func (tx readTx) lookup(table, index, id string) Object {
+func (tx readTx) lookup(table, index, id string) api.StoreObject {
 	j, err := tx.memDBTx.First(table, index, id)
 	if err != nil {
 		return nil
 	}
 	if j != nil {
-		return j.(Object)
+		return j.(api.StoreObject)
 	}
 	return nil
 }
 
 // create adds a new object to the store.
 // Returns ErrExist if the ID is already taken.
-func (tx *tx) create(table string, o Object) error {
-	if tx.lookup(table, indexID, o.ID()) != nil {
+func (tx *tx) create(table string, o api.StoreObject) error {
+	if tx.lookup(table, indexID, o.GetID()) != nil {
 		return ErrExist
 	}
 
-	copy := o.Copy()
-	meta := copy.Meta()
+	copy := o.CopyStoreObject()
+	meta := copy.GetMeta()
 	if err := touchMeta(&meta, tx.curVersion); err != nil {
 		return err
 	}
@@ -486,20 +476,21 @@ func (tx *tx) create(table string, o Object) error {
 
 // Update updates an existing object in the store.
 // Returns ErrNotExist if the object doesn't exist.
-func (tx *tx) update(table string, o Object) error {
-	oldN := tx.lookup(table, indexID, o.ID())
+func (tx *tx) update(table string, o api.StoreObject) error {
+	oldN := tx.lookup(table, indexID, o.GetID())
 	if oldN == nil {
 		return ErrNotExist
 	}
 
+	meta := o.GetMeta()
+
 	if tx.curVersion != nil {
-		if oldN.(Object).Meta().Version != o.Meta().Version {
+		if oldN.GetMeta().Version != meta.Version {
 			return ErrSequenceConflict
 		}
 	}
 
-	copy := o.Copy()
-	meta := copy.Meta()
+	copy := o.CopyStoreObject()
 	if err := touchMeta(&meta, tx.curVersion); err != nil {
 		return err
 	}
@@ -530,12 +521,12 @@ func (tx *tx) delete(table, id string) error {
 
 // Get looks up an object by ID.
 // Returns nil if the object doesn't exist.
-func (tx readTx) get(table, id string) Object {
+func (tx readTx) get(table, id string) api.StoreObject {
 	o := tx.lookup(table, indexID, id)
 	if o == nil {
 		return nil
 	}
-	return o.Copy()
+	return o.CopyStoreObject()
 }
 
 // findIterators returns a slice of iterators. The union of items from these
@@ -584,6 +575,12 @@ func (tx readTx) findIterators(table string, by By, checkType func(By) error) ([
 			return nil, err
 		}
 		return []memdb.ResultIterator{it}, nil
+	case byRuntime:
+		it, err := tx.memDBTx.Get(table, indexRuntime, string(v))
+		if err != nil {
+			return nil, err
+		}
+		return []memdb.ResultIterator{it}, nil
 	case byNode:
 		it, err := tx.memDBTx.Get(table, indexNodeID, string(v))
 		if err != nil {
@@ -638,13 +635,43 @@ func (tx readTx) findIterators(table string, by By, checkType func(By) error) ([
 			return nil, err
 		}
 		return []memdb.ResultIterator{it}, nil
+	case byKind:
+		it, err := tx.memDBTx.Get(table, indexKind, string(v))
+		if err != nil {
+			return nil, err
+		}
+		return []memdb.ResultIterator{it}, nil
+	case byCustom:
+		var key string
+		if v.objType != "" {
+			key = v.objType + "|" + v.index + "|" + v.value
+		} else {
+			key = v.index + "|" + v.value
+		}
+		it, err := tx.memDBTx.Get(table, indexCustom, key)
+		if err != nil {
+			return nil, err
+		}
+		return []memdb.ResultIterator{it}, nil
+	case byCustomPrefix:
+		var key string
+		if v.objType != "" {
+			key = v.objType + "|" + v.index + "|" + v.value
+		} else {
+			key = v.index + "|" + v.value
+		}
+		it, err := tx.memDBTx.Get(table, indexCustom+prefix, key)
+		if err != nil {
+			return nil, err
+		}
+		return []memdb.ResultIterator{it}, nil
 	default:
 		return nil, ErrInvalidFindBy
 	}
 }
 
 // find selects a set of objects calls a callback for each matching object.
-func (tx readTx) find(table string, by By, checkType func(By) error, appendResult func(Object)) error {
+func (tx readTx) find(table string, by By, checkType func(By) error, appendResult func(api.StoreObject)) error {
 	fromResultIterators := func(its ...memdb.ResultIterator) {
 		ids := make(map[string]struct{})
 		for _, it := range its {
@@ -653,10 +680,10 @@ func (tx readTx) find(table string, by By, checkType func(By) error, appendResul
 			if obj == nil {
 				break
 			}
-			o := obj.(Object)
-			id := o.ID()
+			o := obj.(api.StoreObject)
+			id := o.GetID()
 			if _, exists := ids[id]; !exists {
-				appendResult(o.Copy())
+				appendResult(o.CopyStoreObject())
 				ids[id] = struct{}{}
 			}
 		}
@@ -709,7 +736,7 @@ func (s *MemoryStore) WatchQueue() *watch.Queue {
 // released with watch.StopWatch when it is no longer needed. The channel is
 // guaranteed to get all events after the moment of the snapshot, and only
 // those events.
-func ViewAndWatch(store *MemoryStore, cb func(ReadTx) error, specifiers ...state.Event) (watch chan events.Event, cancel func(), err error) {
+func ViewAndWatch(store *MemoryStore, cb func(ReadTx) error, specifiers ...api.Event) (watch chan events.Event, cancel func(), err error) {
 	// Using Update to lock the store and guarantee consistency between
 	// the watcher and the the state seen by the callback. snapshotReadTx
 	// exposes this Tx as a ReadTx so the callback can't modify it.
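A brief sketch of ViewAndWatch with the api-package event specifiers (not vendored code; it relies only on the ViewAndWatch signature and the store.All / FindNodes helpers visible in this diff):

package example

import (
	events "github.com/docker/go-events"

	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/manager/state/store"
)

// snapshotNodesAndWatch reads a consistent snapshot of all nodes and returns a
// channel that delivers every node update that happens after that snapshot.
func snapshotNodesAndWatch(s *store.MemoryStore) ([]*api.Node, chan events.Event, func(), error) {
	var nodes []*api.Node
	watch, cancel, err := store.ViewAndWatch(s, func(readTx store.ReadTx) error {
		var err error
		nodes, err = store.FindNodes(readTx, store.All)
		return err
	}, api.EventUpdateNode{})
	return nodes, watch, cancel, err
}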
123  vendor/github.com/docker/swarmkit/manager/state/store/networks.go  generated  vendored
@@ -4,7 +4,6 @@ import (
 	"strings"
 
 	"github.com/docker/swarmkit/api"
-	"github.com/docker/swarmkit/manager/state"
 	memdb "github.com/hashicorp/go-memdb"
 )
 
@@ -12,19 +11,23 @@ const tableNetwork = "network"
 
 func init() {
 	register(ObjectStoreConfig{
-		Name: tableNetwork,
 		Table: &memdb.TableSchema{
 			Name: tableNetwork,
 			Indexes: map[string]*memdb.IndexSchema{
 				indexID: {
 					Name:    indexID,
 					Unique:  true,
-					Indexer: networkIndexerByID{},
+					Indexer: api.NetworkIndexerByID{},
 				},
 				indexName: {
 					Name:    indexName,
 					Unique:  true,
-					Indexer: networkIndexerByName{},
+					Indexer: api.NetworkIndexerByName{},
 				},
+				indexCustom: {
+					Name:         indexCustom,
+					Indexer:      api.NetworkCustomIndexer{},
+					AllowMissing: true,
+				},
 			},
 		},
@@ -50,7 +53,7 @@ func init() {
 			}
 			return nil
 		},
-		ApplyStoreAction: func(tx Tx, sa *api.StoreAction) error {
+		ApplyStoreAction: func(tx Tx, sa api.StoreAction) error {
 			switch v := sa.Target.(type) {
 			case *api.StoreAction_Network:
 				obj := v.Network
@@ -65,64 +68,9 @@ func init() {
 			}
 			return errUnknownStoreAction
 		},
-		NewStoreAction: func(c state.Event) (api.StoreAction, error) {
-			var sa api.StoreAction
-			switch v := c.(type) {
-			case state.EventCreateNetwork:
-				sa.Action = api.StoreActionKindCreate
-				sa.Target = &api.StoreAction_Network{
-					Network: v.Network,
-				}
-			case state.EventUpdateNetwork:
-				sa.Action = api.StoreActionKindUpdate
-				sa.Target = &api.StoreAction_Network{
-					Network: v.Network,
-				}
-			case state.EventDeleteNetwork:
-				sa.Action = api.StoreActionKindRemove
-				sa.Target = &api.StoreAction_Network{
-					Network: v.Network,
-				}
-			default:
-				return api.StoreAction{}, errUnknownStoreAction
-			}
-			return sa, nil
-		},
 	})
 }
 
-type networkEntry struct {
-	*api.Network
-}
-
-func (n networkEntry) ID() string {
-	return n.Network.ID
-}
-
-func (n networkEntry) Meta() api.Meta {
-	return n.Network.Meta
-}
-
-func (n networkEntry) SetMeta(meta api.Meta) {
-	n.Network.Meta = meta
-}
-
-func (n networkEntry) Copy() Object {
-	return networkEntry{n.Network.Copy()}
-}
-
-func (n networkEntry) EventCreate() state.Event {
-	return state.EventCreateNetwork{Network: n.Network}
-}
-
-func (n networkEntry) EventUpdate() state.Event {
-	return state.EventUpdateNetwork{Network: n.Network}
-}
-
-func (n networkEntry) EventDelete() state.Event {
-	return state.EventDeleteNetwork{Network: n.Network}
-}
-
 // CreateNetwork adds a new network to the store.
 // Returns ErrExist if the ID is already taken.
 func CreateNetwork(tx Tx, n *api.Network) error {
@@ -131,7 +79,7 @@ func CreateNetwork(tx Tx, n *api.Network) error {
 		return ErrNameConflict
 	}
 
-	return tx.create(tableNetwork, networkEntry{n})
+	return tx.create(tableNetwork, n)
 }
 
 // UpdateNetwork updates an existing network in the store.
@@ -139,12 +87,12 @@ func CreateNetwork(tx Tx, n *api.Network) error {
 func UpdateNetwork(tx Tx, n *api.Network) error {
 	// Ensure the name is either not in use or already used by this same Network.
 	if existing := tx.lookup(tableNetwork, indexName, strings.ToLower(n.Spec.Annotations.Name)); existing != nil {
-		if existing.ID() != n.ID {
+		if existing.GetID() != n.ID {
 			return ErrNameConflict
 		}
 	}
 
-	return tx.update(tableNetwork, networkEntry{n})
+	return tx.update(tableNetwork, n)
 }
 
 // DeleteNetwork removes a network from the store.
@@ -160,14 +108,14 @@ func GetNetwork(tx ReadTx, id string) *api.Network {
 	if n == nil {
 		return nil
 	}
-	return n.(networkEntry).Network
+	return n.(*api.Network)
 }
 
 // FindNetworks selects a set of networks and returns them.
 func FindNetworks(tx ReadTx, by By) ([]*api.Network, error) {
 	checkType := func(by By) error {
 		switch by.(type) {
-		case byName, byNamePrefix, byIDPrefix:
+		case byName, byNamePrefix, byIDPrefix, byCustom, byCustomPrefix:
 			return nil
 		default:
 			return ErrInvalidFindBy
@@ -175,51 +123,10 @@ func FindNetworks(tx ReadTx, by By) ([]*api.Network, error) {
 	}
 
 	networkList := []*api.Network{}
-	appendResult := func(o Object) {
-		networkList = append(networkList, o.(networkEntry).Network)
+	appendResult := func(o api.StoreObject) {
+		networkList = append(networkList, o.(*api.Network))
 	}
 
 	err := tx.find(tableNetwork, by, checkType, appendResult)
 	return networkList, err
 }
-
-type networkIndexerByID struct{}
-
-func (ni networkIndexerByID) FromArgs(args ...interface{}) ([]byte, error) {
-	return fromArgs(args...)
-}
-
-func (ni networkIndexerByID) FromObject(obj interface{}) (bool, []byte, error) {
-	n, ok := obj.(networkEntry)
-	if !ok {
-		panic("unexpected type passed to FromObject")
-	}
-
-	// Add the null character as a terminator
-	val := n.Network.ID + "\x00"
-	return true, []byte(val), nil
-}
-
-func (ni networkIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) {
-	return prefixFromArgs(args...)
-}
-
-type networkIndexerByName struct{}
-
-func (ni networkIndexerByName) FromArgs(args ...interface{}) ([]byte, error) {
-	return fromArgs(args...)
-}
-
-func (ni networkIndexerByName) FromObject(obj interface{}) (bool, []byte, error) {
-	n, ok := obj.(networkEntry)
-	if !ok {
-		panic("unexpected type passed to FromObject")
-	}
-
-	// Add the null character as a terminator
-	return true, []byte(strings.ToLower(n.Spec.Annotations.Name) + "\x00"), nil
-}
-
-func (ni networkIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) {
-	return prefixFromArgs(args...)
-}
114  vendor/github.com/docker/swarmkit/manager/state/store/nodes.go  generated  vendored
@@ -5,7 +5,6 @@ import (
 	"strings"
 
 	"github.com/docker/swarmkit/api"
-	"github.com/docker/swarmkit/manager/state"
 	memdb "github.com/hashicorp/go-memdb"
 )
 
@@ -13,14 +12,13 @@ const tableNode = "node"
 
 func init() {
 	register(ObjectStoreConfig{
-		Name: tableNode,
 		Table: &memdb.TableSchema{
 			Name: tableNode,
 			Indexes: map[string]*memdb.IndexSchema{
 				indexID: {
 					Name:    indexID,
 					Unique:  true,
-					Indexer: nodeIndexerByID{},
+					Indexer: api.NodeIndexerByID{},
 				},
 				// TODO(aluzzardi): Use `indexHostname` instead.
 				indexName: {
@@ -36,6 +34,11 @@ func init() {
 					Name:    indexMembership,
 					Indexer: nodeIndexerByMembership{},
 				},
+				indexCustom: {
+					Name:         indexCustom,
+					Indexer:      api.NodeCustomIndexer{},
+					AllowMissing: true,
+				},
 			},
 		},
 		Save: func(tx ReadTx, snapshot *api.StoreSnapshot) error {
@@ -60,7 +63,7 @@ func init() {
 			}
 			return nil
 		},
-		ApplyStoreAction: func(tx Tx, sa *api.StoreAction) error {
+		ApplyStoreAction: func(tx Tx, sa api.StoreAction) error {
 			switch v := sa.Target.(type) {
 			case *api.StoreAction_Node:
 				obj := v.Node
@@ -75,74 +78,19 @@ func init() {
 			}
 			return errUnknownStoreAction
 		},
-		NewStoreAction: func(c state.Event) (api.StoreAction, error) {
-			var sa api.StoreAction
-			switch v := c.(type) {
-			case state.EventCreateNode:
-				sa.Action = api.StoreActionKindCreate
-				sa.Target = &api.StoreAction_Node{
-					Node: v.Node,
-				}
-			case state.EventUpdateNode:
-				sa.Action = api.StoreActionKindUpdate
-				sa.Target = &api.StoreAction_Node{
-					Node: v.Node,
-				}
-			case state.EventDeleteNode:
-				sa.Action = api.StoreActionKindRemove
-				sa.Target = &api.StoreAction_Node{
-					Node: v.Node,
-				}
-			default:
-				return api.StoreAction{}, errUnknownStoreAction
-			}
-			return sa, nil
-		},
 	})
 }
 
-type nodeEntry struct {
-	*api.Node
-}
-
-func (n nodeEntry) ID() string {
-	return n.Node.ID
-}
-
-func (n nodeEntry) Meta() api.Meta {
-	return n.Node.Meta
-}
-
-func (n nodeEntry) SetMeta(meta api.Meta) {
-	n.Node.Meta = meta
-}
-
-func (n nodeEntry) Copy() Object {
-	return nodeEntry{n.Node.Copy()}
-}
-
-func (n nodeEntry) EventCreate() state.Event {
-	return state.EventCreateNode{Node: n.Node}
-}
-
-func (n nodeEntry) EventUpdate() state.Event {
-	return state.EventUpdateNode{Node: n.Node}
-}
-
-func (n nodeEntry) EventDelete() state.Event {
-	return state.EventDeleteNode{Node: n.Node}
-}
-
 // CreateNode adds a new node to the store.
 // Returns ErrExist if the ID is already taken.
 func CreateNode(tx Tx, n *api.Node) error {
-	return tx.create(tableNode, nodeEntry{n})
+	return tx.create(tableNode, n)
 }
 
 // UpdateNode updates an existing node in the store.
 // Returns ErrNotExist if the node doesn't exist.
 func UpdateNode(tx Tx, n *api.Node) error {
-	return tx.update(tableNode, nodeEntry{n})
+	return tx.update(tableNode, n)
 }
 
 // DeleteNode removes a node from the store.
@@ -158,14 +106,14 @@ func GetNode(tx ReadTx, id string) *api.Node {
 	if n == nil {
 		return nil
 	}
-	return n.(nodeEntry).Node
+	return n.(*api.Node)
 }
 
 // FindNodes selects a set of nodes and returns them.
 func FindNodes(tx ReadTx, by By) ([]*api.Node, error) {
 	checkType := func(by By) error {
 		switch by.(type) {
-		case byName, byNamePrefix, byIDPrefix, byRole, byMembership:
+		case byName, byNamePrefix, byIDPrefix, byRole, byMembership, byCustom, byCustomPrefix:
 			return nil
 		default:
 			return ErrInvalidFindBy
@@ -173,35 +121,14 @@ func FindNodes(tx ReadTx, by By) ([]*api.Node, error) {
 	}
 
 	nodeList := []*api.Node{}
-	appendResult := func(o Object) {
-		nodeList = append(nodeList, o.(nodeEntry).Node)
+	appendResult := func(o api.StoreObject) {
+		nodeList = append(nodeList, o.(*api.Node))
 	}
 
 	err := tx.find(tableNode, by, checkType, appendResult)
 	return nodeList, err
 }
 
-type nodeIndexerByID struct{}
-
-func (ni nodeIndexerByID) FromArgs(args ...interface{}) ([]byte, error) {
-	return fromArgs(args...)
-}
-
-func (ni nodeIndexerByID) FromObject(obj interface{}) (bool, []byte, error) {
-	n, ok := obj.(nodeEntry)
-	if !ok {
-		panic("unexpected type passed to FromObject")
-	}
-
-	// Add the null character as a terminator
-	val := n.Node.ID + "\x00"
-	return true, []byte(val), nil
-}
-
-func (ni nodeIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) {
-	return prefixFromArgs(args...)
-}
-
 type nodeIndexerByHostname struct{}
 
 func (ni nodeIndexerByHostname) FromArgs(args ...interface{}) ([]byte, error) {
@@ -209,10 +136,7 @@ func (ni nodeIndexerByHostname) FromArgs(args ...interface{}) ([]byte, error) {
 }
 
 func (ni nodeIndexerByHostname) FromObject(obj interface{}) (bool, []byte, error) {
-	n, ok := obj.(nodeEntry)
-	if !ok {
-		panic("unexpected type passed to FromObject")
-	}
+	n := obj.(*api.Node)
 
 	if n.Description == nil {
 		return false, nil, nil
@@ -232,10 +156,7 @@ func (ni nodeIndexerByRole) FromArgs(args ...interface{}) ([]byte, error) {
 }
 
 func (ni nodeIndexerByRole) FromObject(obj interface{}) (bool, []byte, error) {
-	n, ok := obj.(nodeEntry)
-	if !ok {
-		panic("unexpected type passed to FromObject")
-	}
+	n := obj.(*api.Node)
 
 	// Add the null character as a terminator
 	return true, []byte(strconv.FormatInt(int64(n.Role), 10) + "\x00"), nil
@@ -248,10 +169,7 @@ func (ni nodeIndexerByMembership) FromArgs(args ...interface{}) ([]byte, error)
 }
 
 func (ni nodeIndexerByMembership) FromObject(obj interface{}) (bool, []byte, error) {
-	n, ok := obj.(nodeEntry)
-	if !ok {
-		panic("unexpected type passed to FromObject")
-	}
+	n := obj.(*api.Node)
 
 	// Add the null character as a terminator
 	return true, []byte(strconv.FormatInt(int64(n.Spec.Membership), 10) + "\x00"), nil
16  vendor/github.com/docker/swarmkit/manager/state/store/object.go  generated  vendored
@@ -2,28 +2,14 @@ package store

import (
	"github.com/docker/swarmkit/api"
-	"github.com/docker/swarmkit/manager/state"
	memdb "github.com/hashicorp/go-memdb"
)

-// Object is a generic object that can be handled by the store.
-type Object interface {
-	ID() string               // Get ID
-	Meta() api.Meta           // Retrieve metadata
-	SetMeta(api.Meta)         // Set metadata
-	Copy() Object             // Return a copy of this object
-	EventCreate() state.Event // Return a creation event
-	EventUpdate() state.Event // Return an update event
-	EventDelete() state.Event // Return a deletion event
-}
-
// ObjectStoreConfig provides the necessary methods to store a particular object
// type inside MemoryStore.
type ObjectStoreConfig struct {
-	Name             string
	Table            *memdb.TableSchema
	Save             func(ReadTx, *api.StoreSnapshot) error
	Restore          func(Tx, *api.StoreSnapshot) error
-	ApplyStoreAction func(Tx, *api.StoreAction) error
-	NewStoreAction   func(state.Event) (api.StoreAction, error)
+	ApplyStoreAction func(Tx, api.StoreAction) error
}

159 vendor/github.com/docker/swarmkit/manager/state/store/resources.go generated vendored Normal file
@@ -0,0 +1,159 @@
package store

import (
	"github.com/docker/swarmkit/api"
	memdb "github.com/hashicorp/go-memdb"
	"github.com/pkg/errors"
)

const tableResource = "resource"

func init() {
	register(ObjectStoreConfig{
		Table: &memdb.TableSchema{
			Name: tableResource,
			Indexes: map[string]*memdb.IndexSchema{
				indexID: {
					Name:    indexID,
					Unique:  true,
					Indexer: api.ResourceIndexerByID{},
				},
				indexName: {
					Name:    indexName,
					Unique:  true,
					Indexer: api.ResourceIndexerByName{},
				},
				indexKind: {
					Name:    indexKind,
					Indexer: resourceIndexerByKind{},
				},
				indexCustom: {
					Name:         indexCustom,
					Indexer:      api.ResourceCustomIndexer{},
					AllowMissing: true,
				},
			},
		},
		Save: func(tx ReadTx, snapshot *api.StoreSnapshot) error {
			var err error
			snapshot.Resources, err = FindResources(tx, All)
			return err
		},
		Restore: func(tx Tx, snapshot *api.StoreSnapshot) error {
			resources, err := FindResources(tx, All)
			if err != nil {
				return err
			}
			for _, r := range resources {
				if err := DeleteResource(tx, r.ID); err != nil {
					return err
				}
			}
			for _, r := range snapshot.Resources {
				if err := CreateResource(tx, r); err != nil {
					return err
				}
			}
			return nil
		},
		ApplyStoreAction: func(tx Tx, sa api.StoreAction) error {
			switch v := sa.Target.(type) {
			case *api.StoreAction_Resource:
				obj := v.Resource
				switch sa.Action {
				case api.StoreActionKindCreate:
					return CreateResource(tx, obj)
				case api.StoreActionKindUpdate:
					return UpdateResource(tx, obj)
				case api.StoreActionKindRemove:
					return DeleteResource(tx, obj.ID)
				}
			}
			return errUnknownStoreAction
		},
	})
}

type resourceEntry struct {
	*api.Resource
}

func confirmExtension(tx Tx, r *api.Resource) error {
	// There must be an extension corresponding to the Kind field.
	extensions, err := FindExtensions(tx, ByName(r.Kind))
	if err != nil {
		return errors.Wrap(err, "failed to query extensions")
	}
	if len(extensions) == 0 {
		return errors.Errorf("object kind %s is unregistered", r.Kind)
	}
	return nil
}

// CreateResource adds a new resource object to the store.
// Returns ErrExist if the ID is already taken.
func CreateResource(tx Tx, r *api.Resource) error {
	if err := confirmExtension(tx, r); err != nil {
		return err
	}
	return tx.create(tableResource, resourceEntry{r})
}

// UpdateResource updates an existing resource object in the store.
// Returns ErrNotExist if the object doesn't exist.
func UpdateResource(tx Tx, r *api.Resource) error {
	if err := confirmExtension(tx, r); err != nil {
		return err
	}
	return tx.update(tableResource, resourceEntry{r})
}

// DeleteResource removes a resource object from the store.
// Returns ErrNotExist if the object doesn't exist.
func DeleteResource(tx Tx, id string) error {
	return tx.delete(tableResource, id)
}

// GetResource looks up a resource object by ID.
// Returns nil if the object doesn't exist.
func GetResource(tx ReadTx, id string) *api.Resource {
	r := tx.get(tableResource, id)
	if r == nil {
		return nil
	}
	return r.(resourceEntry).Resource
}

// FindResources selects a set of resource objects and returns them.
func FindResources(tx ReadTx, by By) ([]*api.Resource, error) {
	checkType := func(by By) error {
		switch by.(type) {
		case byIDPrefix, byName, byKind, byCustom, byCustomPrefix:
			return nil
		default:
			return ErrInvalidFindBy
		}
	}

	resourceList := []*api.Resource{}
	appendResult := func(o api.StoreObject) {
		resourceList = append(resourceList, o.(resourceEntry).Resource)
	}

	err := tx.find(tableResource, by, checkType, appendResult)
	return resourceList, err
}

type resourceIndexerByKind struct{}

func (ri resourceIndexerByKind) FromArgs(args ...interface{}) ([]byte, error) {
	return fromArgs(args...)
}

func (ri resourceIndexerByKind) FromObject(obj interface{}) (bool, []byte, error) {
	r := obj.(resourceEntry)

	// Add the null character as a terminator
	val := r.Resource.Kind + "\x00"
	return true, []byte(val), nil
}
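CreateResource and UpdateResource both go through confirmExtension, so a resource can only be written once an api.Extension whose name equals the resource's Kind exists in the same store. A rough usage sketch, assuming the MemoryStore.Update transaction helper from this package and a CreateExtension function analogous to CreateResource above; the IDs and names are made up and the api/store imports are elided.

func createWidget(s *store.MemoryStore) error {
	return s.Update(func(tx store.Tx) error {
		// Register the kind first; confirmExtension looks it up by name.
		if err := store.CreateExtension(tx, &api.Extension{
			ID:          "ext-1",
			Annotations: api.Annotations{Name: "widget"},
		}); err != nil {
			return err
		}
		return store.CreateResource(tx, &api.Resource{
			ID:          "res-1",
			Annotations: api.Annotations{Name: "widget-1"},
			Kind:        "widget", // must match the extension's name
		})
	})
}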

123 vendor/github.com/docker/swarmkit/manager/state/store/secrets.go generated vendored
@ -4,7 +4,6 @@ import (
|
|||
"strings"
|
||||
|
||||
"github.com/docker/swarmkit/api"
|
||||
"github.com/docker/swarmkit/manager/state"
|
||||
memdb "github.com/hashicorp/go-memdb"
|
||||
)
|
||||
|
||||
|
@ -12,19 +11,23 @@ const tableSecret = "secret"
|
|||
|
||||
func init() {
|
||||
register(ObjectStoreConfig{
|
||||
Name: tableSecret,
|
||||
Table: &memdb.TableSchema{
|
||||
Name: tableSecret,
|
||||
Indexes: map[string]*memdb.IndexSchema{
|
||||
indexID: {
|
||||
Name: indexID,
|
||||
Unique: true,
|
||||
Indexer: secretIndexerByID{},
|
||||
Indexer: api.SecretIndexerByID{},
|
||||
},
|
||||
indexName: {
|
||||
Name: indexName,
|
||||
Unique: true,
|
||||
Indexer: secretIndexerByName{},
|
||||
Indexer: api.SecretIndexerByName{},
|
||||
},
|
||||
indexCustom: {
|
||||
Name: indexCustom,
|
||||
Indexer: api.SecretCustomIndexer{},
|
||||
AllowMissing: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -50,7 +53,7 @@ func init() {
|
|||
}
|
||||
return nil
|
||||
},
|
||||
ApplyStoreAction: func(tx Tx, sa *api.StoreAction) error {
|
||||
ApplyStoreAction: func(tx Tx, sa api.StoreAction) error {
|
||||
switch v := sa.Target.(type) {
|
||||
case *api.StoreAction_Secret:
|
||||
obj := v.Secret
|
||||
|
@ -65,64 +68,9 @@ func init() {
|
|||
}
|
||||
return errUnknownStoreAction
|
||||
},
|
||||
NewStoreAction: func(c state.Event) (api.StoreAction, error) {
|
||||
var sa api.StoreAction
|
||||
switch v := c.(type) {
|
||||
case state.EventCreateSecret:
|
||||
sa.Action = api.StoreActionKindCreate
|
||||
sa.Target = &api.StoreAction_Secret{
|
||||
Secret: v.Secret,
|
||||
}
|
||||
case state.EventUpdateSecret:
|
||||
sa.Action = api.StoreActionKindUpdate
|
||||
sa.Target = &api.StoreAction_Secret{
|
||||
Secret: v.Secret,
|
||||
}
|
||||
case state.EventDeleteSecret:
|
||||
sa.Action = api.StoreActionKindRemove
|
||||
sa.Target = &api.StoreAction_Secret{
|
||||
Secret: v.Secret,
|
||||
}
|
||||
default:
|
||||
return api.StoreAction{}, errUnknownStoreAction
|
||||
}
|
||||
return sa, nil
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
type secretEntry struct {
|
||||
*api.Secret
|
||||
}
|
||||
|
||||
func (s secretEntry) ID() string {
|
||||
return s.Secret.ID
|
||||
}
|
||||
|
||||
func (s secretEntry) Meta() api.Meta {
|
||||
return s.Secret.Meta
|
||||
}
|
||||
|
||||
func (s secretEntry) SetMeta(meta api.Meta) {
|
||||
s.Secret.Meta = meta
|
||||
}
|
||||
|
||||
func (s secretEntry) Copy() Object {
|
||||
return secretEntry{s.Secret.Copy()}
|
||||
}
|
||||
|
||||
func (s secretEntry) EventCreate() state.Event {
|
||||
return state.EventCreateSecret{Secret: s.Secret}
|
||||
}
|
||||
|
||||
func (s secretEntry) EventUpdate() state.Event {
|
||||
return state.EventUpdateSecret{Secret: s.Secret}
|
||||
}
|
||||
|
||||
func (s secretEntry) EventDelete() state.Event {
|
||||
return state.EventDeleteSecret{Secret: s.Secret}
|
||||
}
|
||||
|
||||
// CreateSecret adds a new secret to the store.
|
||||
// Returns ErrExist if the ID is already taken.
|
||||
func CreateSecret(tx Tx, s *api.Secret) error {
|
||||
|
@ -131,7 +79,7 @@ func CreateSecret(tx Tx, s *api.Secret) error {
|
|||
return ErrNameConflict
|
||||
}
|
||||
|
||||
return tx.create(tableSecret, secretEntry{s})
|
||||
return tx.create(tableSecret, s)
|
||||
}
|
||||
|
||||
// UpdateSecret updates an existing secret in the store.
|
||||
|
@ -139,12 +87,12 @@ func CreateSecret(tx Tx, s *api.Secret) error {
|
|||
func UpdateSecret(tx Tx, s *api.Secret) error {
|
||||
// Ensure the name is either not in use or already used by this same Secret.
|
||||
if existing := tx.lookup(tableSecret, indexName, strings.ToLower(s.Spec.Annotations.Name)); existing != nil {
|
||||
if existing.ID() != s.ID {
|
||||
if existing.GetID() != s.ID {
|
||||
return ErrNameConflict
|
||||
}
|
||||
}
|
||||
|
||||
return tx.update(tableSecret, secretEntry{s})
|
||||
return tx.update(tableSecret, s)
|
||||
}
|
||||
|
||||
// DeleteSecret removes a secret from the store.
|
||||
|
@ -160,14 +108,14 @@ func GetSecret(tx ReadTx, id string) *api.Secret {
|
|||
if n == nil {
|
||||
return nil
|
||||
}
|
||||
return n.(secretEntry).Secret
|
||||
return n.(*api.Secret)
|
||||
}
|
||||
|
||||
// FindSecrets selects a set of secrets and returns them.
|
||||
func FindSecrets(tx ReadTx, by By) ([]*api.Secret, error) {
|
||||
checkType := func(by By) error {
|
||||
switch by.(type) {
|
||||
case byName, byNamePrefix, byIDPrefix:
|
||||
case byName, byNamePrefix, byIDPrefix, byCustom, byCustomPrefix:
|
||||
return nil
|
||||
default:
|
||||
return ErrInvalidFindBy
|
||||
|
@ -175,51 +123,10 @@ func FindSecrets(tx ReadTx, by By) ([]*api.Secret, error) {
|
|||
}
|
||||
|
||||
secretList := []*api.Secret{}
|
||||
appendResult := func(o Object) {
|
||||
secretList = append(secretList, o.(secretEntry).Secret)
|
||||
appendResult := func(o api.StoreObject) {
|
||||
secretList = append(secretList, o.(*api.Secret))
|
||||
}
|
||||
|
||||
err := tx.find(tableSecret, by, checkType, appendResult)
|
||||
return secretList, err
|
||||
}
|
||||
|
||||
type secretIndexerByID struct{}
|
||||
|
||||
func (ci secretIndexerByID) FromArgs(args ...interface{}) ([]byte, error) {
|
||||
return fromArgs(args...)
|
||||
}
|
||||
|
||||
func (ci secretIndexerByID) FromObject(obj interface{}) (bool, []byte, error) {
|
||||
s, ok := obj.(secretEntry)
|
||||
if !ok {
|
||||
panic("unexpected type passed to FromObject")
|
||||
}
|
||||
|
||||
// Add the null character as a terminator
|
||||
val := s.Secret.ID + "\x00"
|
||||
return true, []byte(val), nil
|
||||
}
|
||||
|
||||
func (ci secretIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) {
|
||||
return prefixFromArgs(args...)
|
||||
}
|
||||
|
||||
type secretIndexerByName struct{}
|
||||
|
||||
func (ci secretIndexerByName) FromArgs(args ...interface{}) ([]byte, error) {
|
||||
return fromArgs(args...)
|
||||
}
|
||||
|
||||
func (ci secretIndexerByName) FromObject(obj interface{}) (bool, []byte, error) {
|
||||
s, ok := obj.(secretEntry)
|
||||
if !ok {
|
||||
panic("unexpected type passed to FromObject")
|
||||
}
|
||||
|
||||
// Add the null character as a terminator
|
||||
return true, []byte(strings.ToLower(s.Spec.Annotations.Name) + "\x00"), nil
|
||||
}
|
||||
|
||||
func (ci secretIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) {
|
||||
return prefixFromArgs(args...)
|
||||
}
|
||||
|
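With the secret entry wrapper gone, lookups return *api.Secret directly and the name index still keys on the lowercased annotation name. A hedged sketch of a name lookup (strings/api/store imports elided), assuming the MemoryStore.View read helper and passing the name lowercased to match how the index keys are built above:

func getSecretByName(s *store.MemoryStore, name string) *api.Secret {
	var secret *api.Secret
	s.View(func(tx store.ReadTx) {
		secrets, err := store.FindSecrets(tx, store.ByName(strings.ToLower(name)))
		if err == nil && len(secrets) > 0 {
			secret = secrets[0]
		}
	})
	return secret
}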

138 vendor/github.com/docker/swarmkit/manager/state/store/services.go generated vendored
@ -4,7 +4,7 @@ import (
|
|||
"strings"
|
||||
|
||||
"github.com/docker/swarmkit/api"
|
||||
"github.com/docker/swarmkit/manager/state"
|
||||
"github.com/docker/swarmkit/api/naming"
|
||||
memdb "github.com/hashicorp/go-memdb"
|
||||
)
|
||||
|
||||
|
@ -12,19 +12,23 @@ const tableService = "service"
|
|||
|
||||
func init() {
|
||||
register(ObjectStoreConfig{
|
||||
Name: tableService,
|
||||
Table: &memdb.TableSchema{
|
||||
Name: tableService,
|
||||
Indexes: map[string]*memdb.IndexSchema{
|
||||
indexID: {
|
||||
Name: indexID,
|
||||
Unique: true,
|
||||
Indexer: serviceIndexerByID{},
|
||||
Indexer: api.ServiceIndexerByID{},
|
||||
},
|
||||
indexName: {
|
||||
Name: indexName,
|
||||
Unique: true,
|
||||
Indexer: serviceIndexerByName{},
|
||||
Indexer: api.ServiceIndexerByName{},
|
||||
},
|
||||
indexRuntime: {
|
||||
Name: indexRuntime,
|
||||
AllowMissing: true,
|
||||
Indexer: serviceIndexerByRuntime{},
|
||||
},
|
||||
indexNetwork: {
|
||||
Name: indexNetwork,
|
||||
|
@ -36,6 +40,11 @@ func init() {
|
|||
AllowMissing: true,
|
||||
Indexer: serviceIndexerBySecret{},
|
||||
},
|
||||
indexCustom: {
|
||||
Name: indexCustom,
|
||||
Indexer: api.ServiceCustomIndexer{},
|
||||
AllowMissing: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
Save: func(tx ReadTx, snapshot *api.StoreSnapshot) error {
|
||||
|
@ -60,7 +69,7 @@ func init() {
|
|||
}
|
||||
return nil
|
||||
},
|
||||
ApplyStoreAction: func(tx Tx, sa *api.StoreAction) error {
|
||||
ApplyStoreAction: func(tx Tx, sa api.StoreAction) error {
|
||||
switch v := sa.Target.(type) {
|
||||
case *api.StoreAction_Service:
|
||||
obj := v.Service
|
||||
|
@ -75,64 +84,9 @@ func init() {
|
|||
}
|
||||
return errUnknownStoreAction
|
||||
},
|
||||
NewStoreAction: func(c state.Event) (api.StoreAction, error) {
|
||||
var sa api.StoreAction
|
||||
switch v := c.(type) {
|
||||
case state.EventCreateService:
|
||||
sa.Action = api.StoreActionKindCreate
|
||||
sa.Target = &api.StoreAction_Service{
|
||||
Service: v.Service,
|
||||
}
|
||||
case state.EventUpdateService:
|
||||
sa.Action = api.StoreActionKindUpdate
|
||||
sa.Target = &api.StoreAction_Service{
|
||||
Service: v.Service,
|
||||
}
|
||||
case state.EventDeleteService:
|
||||
sa.Action = api.StoreActionKindRemove
|
||||
sa.Target = &api.StoreAction_Service{
|
||||
Service: v.Service,
|
||||
}
|
||||
default:
|
||||
return api.StoreAction{}, errUnknownStoreAction
|
||||
}
|
||||
return sa, nil
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
type serviceEntry struct {
|
||||
*api.Service
|
||||
}
|
||||
|
||||
func (s serviceEntry) ID() string {
|
||||
return s.Service.ID
|
||||
}
|
||||
|
||||
func (s serviceEntry) Meta() api.Meta {
|
||||
return s.Service.Meta
|
||||
}
|
||||
|
||||
func (s serviceEntry) SetMeta(meta api.Meta) {
|
||||
s.Service.Meta = meta
|
||||
}
|
||||
|
||||
func (s serviceEntry) Copy() Object {
|
||||
return serviceEntry{s.Service.Copy()}
|
||||
}
|
||||
|
||||
func (s serviceEntry) EventCreate() state.Event {
|
||||
return state.EventCreateService{Service: s.Service}
|
||||
}
|
||||
|
||||
func (s serviceEntry) EventUpdate() state.Event {
|
||||
return state.EventUpdateService{Service: s.Service}
|
||||
}
|
||||
|
||||
func (s serviceEntry) EventDelete() state.Event {
|
||||
return state.EventDeleteService{Service: s.Service}
|
||||
}
|
||||
|
||||
// CreateService adds a new service to the store.
|
||||
// Returns ErrExist if the ID is already taken.
|
||||
func CreateService(tx Tx, s *api.Service) error {
|
||||
|
@ -141,7 +95,7 @@ func CreateService(tx Tx, s *api.Service) error {
|
|||
return ErrNameConflict
|
||||
}
|
||||
|
||||
return tx.create(tableService, serviceEntry{s})
|
||||
return tx.create(tableService, s)
|
||||
}
|
||||
|
||||
// UpdateService updates an existing service in the store.
|
||||
|
@ -149,12 +103,12 @@ func CreateService(tx Tx, s *api.Service) error {
|
|||
func UpdateService(tx Tx, s *api.Service) error {
|
||||
// Ensure the name is either not in use or already used by this same Service.
|
||||
if existing := tx.lookup(tableService, indexName, strings.ToLower(s.Spec.Annotations.Name)); existing != nil {
|
||||
if existing.ID() != s.ID {
|
||||
if existing.GetID() != s.ID {
|
||||
return ErrNameConflict
|
||||
}
|
||||
}
|
||||
|
||||
return tx.update(tableService, serviceEntry{s})
|
||||
return tx.update(tableService, s)
|
||||
}
|
||||
|
||||
// DeleteService removes a service from the store.
|
||||
|
@ -170,14 +124,14 @@ func GetService(tx ReadTx, id string) *api.Service {
|
|||
if s == nil {
|
||||
return nil
|
||||
}
|
||||
return s.(serviceEntry).Service
|
||||
return s.(*api.Service)
|
||||
}
|
||||
|
||||
// FindServices selects a set of services and returns them.
|
||||
func FindServices(tx ReadTx, by By) ([]*api.Service, error) {
|
||||
checkType := func(by By) error {
|
||||
switch by.(type) {
|
||||
case byName, byNamePrefix, byIDPrefix, byReferencedNetworkID, byReferencedSecretID:
|
||||
case byName, byNamePrefix, byIDPrefix, byRuntime, byReferencedNetworkID, byReferencedSecretID, byCustom, byCustomPrefix:
|
||||
return nil
|
||||
default:
|
||||
return ErrInvalidFindBy
|
||||
|
@ -185,52 +139,30 @@ func FindServices(tx ReadTx, by By) ([]*api.Service, error) {
|
|||
}
|
||||
|
||||
serviceList := []*api.Service{}
|
||||
appendResult := func(o Object) {
|
||||
serviceList = append(serviceList, o.(serviceEntry).Service)
|
||||
appendResult := func(o api.StoreObject) {
|
||||
serviceList = append(serviceList, o.(*api.Service))
|
||||
}
|
||||
|
||||
err := tx.find(tableService, by, checkType, appendResult)
|
||||
return serviceList, err
|
||||
}
|
||||
|
||||
type serviceIndexerByID struct{}
|
||||
type serviceIndexerByRuntime struct{}
|
||||
|
||||
func (si serviceIndexerByID) FromArgs(args ...interface{}) ([]byte, error) {
|
||||
func (si serviceIndexerByRuntime) FromArgs(args ...interface{}) ([]byte, error) {
|
||||
return fromArgs(args...)
|
||||
}
|
||||
|
||||
func (si serviceIndexerByID) FromObject(obj interface{}) (bool, []byte, error) {
|
||||
s, ok := obj.(serviceEntry)
|
||||
if !ok {
|
||||
panic("unexpected type passed to FromObject")
|
||||
func (si serviceIndexerByRuntime) FromObject(obj interface{}) (bool, []byte, error) {
|
||||
s := obj.(*api.Service)
|
||||
r, err := naming.Runtime(s.Spec.Task)
|
||||
if err != nil {
|
||||
return false, nil, nil
|
||||
}
|
||||
|
||||
// Add the null character as a terminator
|
||||
val := s.Service.ID + "\x00"
|
||||
return true, []byte(val), nil
|
||||
return true, []byte(r + "\x00"), nil
|
||||
}
|
||||
|
||||
func (si serviceIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) {
|
||||
return prefixFromArgs(args...)
|
||||
}
|
||||
|
||||
type serviceIndexerByName struct{}
|
||||
|
||||
func (si serviceIndexerByName) FromArgs(args ...interface{}) ([]byte, error) {
|
||||
return fromArgs(args...)
|
||||
}
|
||||
|
||||
func (si serviceIndexerByName) FromObject(obj interface{}) (bool, []byte, error) {
|
||||
s, ok := obj.(serviceEntry)
|
||||
if !ok {
|
||||
panic("unexpected type passed to FromObject")
|
||||
}
|
||||
|
||||
// Add the null character as a terminator
|
||||
return true, []byte(strings.ToLower(s.Spec.Annotations.Name) + "\x00"), nil
|
||||
}
|
||||
|
||||
func (si serviceIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) {
|
||||
func (si serviceIndexerByRuntime) PrefixFromArgs(args ...interface{}) ([]byte, error) {
|
||||
return prefixFromArgs(args...)
|
||||
}
|
||||
|
||||
|
@ -241,10 +173,7 @@ func (si serviceIndexerByNetwork) FromArgs(args ...interface{}) ([]byte, error)
|
|||
}
|
||||
|
||||
func (si serviceIndexerByNetwork) FromObject(obj interface{}) (bool, [][]byte, error) {
|
||||
s, ok := obj.(serviceEntry)
|
||||
if !ok {
|
||||
panic("unexpected type passed to FromObject")
|
||||
}
|
||||
s := obj.(*api.Service)
|
||||
|
||||
var networkIDs [][]byte
|
||||
|
||||
|
@@ -269,10 +198,7 @@ func (si serviceIndexerBySecret) FromArgs(args ...interface{}) ([]byte, error) {
}

func (si serviceIndexerBySecret) FromObject(obj interface{}) (bool, [][]byte, error) {
-	s, ok := obj.(serviceEntry)
-	if !ok {
-		panic("unexpected type passed to FromObject")
-	}
+	s := obj.(*api.Service)

	container := s.Spec.Task.GetContainer()
	if container == nil {
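Service lookups can now also go through the new runtime index, keyed by whatever naming.Runtime derives from the task spec. A hedged sketch (api/store imports elided); the ByRuntime selector name and the "container" runtime string are assumptions based on the byRuntime case accepted by FindServices above:

func containerServices(s *store.MemoryStore) ([]*api.Service, error) {
	var (
		services []*api.Service
		err      error
	)
	s.View(func(tx store.ReadTx) {
		services, err = store.FindServices(tx, store.ByRuntime("container"))
	})
	return services, err
}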

165 vendor/github.com/docker/swarmkit/manager/state/store/tasks.go generated vendored
@ -6,7 +6,6 @@ import (
|
|||
|
||||
"github.com/docker/swarmkit/api"
|
||||
"github.com/docker/swarmkit/api/naming"
|
||||
"github.com/docker/swarmkit/manager/state"
|
||||
memdb "github.com/hashicorp/go-memdb"
|
||||
)
|
||||
|
||||
|
@ -14,20 +13,24 @@ const tableTask = "task"
|
|||
|
||||
func init() {
|
||||
register(ObjectStoreConfig{
|
||||
Name: tableTask,
|
||||
Table: &memdb.TableSchema{
|
||||
Name: tableTask,
|
||||
Indexes: map[string]*memdb.IndexSchema{
|
||||
indexID: {
|
||||
Name: indexID,
|
||||
Unique: true,
|
||||
Indexer: taskIndexerByID{},
|
||||
Indexer: api.TaskIndexerByID{},
|
||||
},
|
||||
indexName: {
|
||||
Name: indexName,
|
||||
AllowMissing: true,
|
||||
Indexer: taskIndexerByName{},
|
||||
},
|
||||
indexRuntime: {
|
||||
Name: indexRuntime,
|
||||
AllowMissing: true,
|
||||
Indexer: taskIndexerByRuntime{},
|
||||
},
|
||||
indexServiceID: {
|
||||
Name: indexServiceID,
|
||||
AllowMissing: true,
|
||||
|
@ -61,6 +64,11 @@ func init() {
|
|||
AllowMissing: true,
|
||||
Indexer: taskIndexerBySecret{},
|
||||
},
|
||||
indexCustom: {
|
||||
Name: indexCustom,
|
||||
Indexer: api.TaskCustomIndexer{},
|
||||
AllowMissing: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
Save: func(tx ReadTx, snapshot *api.StoreSnapshot) error {
|
||||
|
@ -85,7 +93,7 @@ func init() {
|
|||
}
|
||||
return nil
|
||||
},
|
||||
ApplyStoreAction: func(tx Tx, sa *api.StoreAction) error {
|
||||
ApplyStoreAction: func(tx Tx, sa api.StoreAction) error {
|
||||
switch v := sa.Target.(type) {
|
||||
case *api.StoreAction_Task:
|
||||
obj := v.Task
|
||||
|
@ -100,74 +108,19 @@ func init() {
|
|||
}
|
||||
return errUnknownStoreAction
|
||||
},
|
||||
NewStoreAction: func(c state.Event) (api.StoreAction, error) {
|
||||
var sa api.StoreAction
|
||||
switch v := c.(type) {
|
||||
case state.EventCreateTask:
|
||||
sa.Action = api.StoreActionKindCreate
|
||||
sa.Target = &api.StoreAction_Task{
|
||||
Task: v.Task,
|
||||
}
|
||||
case state.EventUpdateTask:
|
||||
sa.Action = api.StoreActionKindUpdate
|
||||
sa.Target = &api.StoreAction_Task{
|
||||
Task: v.Task,
|
||||
}
|
||||
case state.EventDeleteTask:
|
||||
sa.Action = api.StoreActionKindRemove
|
||||
sa.Target = &api.StoreAction_Task{
|
||||
Task: v.Task,
|
||||
}
|
||||
default:
|
||||
return api.StoreAction{}, errUnknownStoreAction
|
||||
}
|
||||
return sa, nil
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
type taskEntry struct {
|
||||
*api.Task
|
||||
}
|
||||
|
||||
func (t taskEntry) ID() string {
|
||||
return t.Task.ID
|
||||
}
|
||||
|
||||
func (t taskEntry) Meta() api.Meta {
|
||||
return t.Task.Meta
|
||||
}
|
||||
|
||||
func (t taskEntry) SetMeta(meta api.Meta) {
|
||||
t.Task.Meta = meta
|
||||
}
|
||||
|
||||
func (t taskEntry) Copy() Object {
|
||||
return taskEntry{t.Task.Copy()}
|
||||
}
|
||||
|
||||
func (t taskEntry) EventCreate() state.Event {
|
||||
return state.EventCreateTask{Task: t.Task}
|
||||
}
|
||||
|
||||
func (t taskEntry) EventUpdate() state.Event {
|
||||
return state.EventUpdateTask{Task: t.Task}
|
||||
}
|
||||
|
||||
func (t taskEntry) EventDelete() state.Event {
|
||||
return state.EventDeleteTask{Task: t.Task}
|
||||
}
|
||||
|
||||
// CreateTask adds a new task to the store.
|
||||
// Returns ErrExist if the ID is already taken.
|
||||
func CreateTask(tx Tx, t *api.Task) error {
|
||||
return tx.create(tableTask, taskEntry{t})
|
||||
return tx.create(tableTask, t)
|
||||
}
|
||||
|
||||
// UpdateTask updates an existing task in the store.
|
||||
// Returns ErrNotExist if the node doesn't exist.
|
||||
func UpdateTask(tx Tx, t *api.Task) error {
|
||||
return tx.update(tableTask, taskEntry{t})
|
||||
return tx.update(tableTask, t)
|
||||
}
|
||||
|
||||
// DeleteTask removes a task from the store.
|
||||
|
@ -183,14 +136,14 @@ func GetTask(tx ReadTx, id string) *api.Task {
|
|||
if t == nil {
|
||||
return nil
|
||||
}
|
||||
return t.(taskEntry).Task
|
||||
return t.(*api.Task)
|
||||
}
|
||||
|
||||
// FindTasks selects a set of tasks and returns them.
|
||||
func FindTasks(tx ReadTx, by By) ([]*api.Task, error) {
|
||||
checkType := func(by By) error {
|
||||
switch by.(type) {
|
||||
case byName, byNamePrefix, byIDPrefix, byDesiredState, byTaskState, byNode, byService, bySlot, byReferencedNetworkID, byReferencedSecretID:
|
||||
case byName, byNamePrefix, byIDPrefix, byRuntime, byDesiredState, byTaskState, byNode, byService, bySlot, byReferencedNetworkID, byReferencedSecretID, byCustom, byCustomPrefix:
|
||||
return nil
|
||||
default:
|
||||
return ErrInvalidFindBy
|
||||
|
@ -198,35 +151,14 @@ func FindTasks(tx ReadTx, by By) ([]*api.Task, error) {
|
|||
}
|
||||
|
||||
taskList := []*api.Task{}
|
||||
appendResult := func(o Object) {
|
||||
taskList = append(taskList, o.(taskEntry).Task)
|
||||
appendResult := func(o api.StoreObject) {
|
||||
taskList = append(taskList, o.(*api.Task))
|
||||
}
|
||||
|
||||
err := tx.find(tableTask, by, checkType, appendResult)
|
||||
return taskList, err
|
||||
}
|
||||
|
||||
type taskIndexerByID struct{}
|
||||
|
||||
func (ti taskIndexerByID) FromArgs(args ...interface{}) ([]byte, error) {
|
||||
return fromArgs(args...)
|
||||
}
|
||||
|
||||
func (ti taskIndexerByID) FromObject(obj interface{}) (bool, []byte, error) {
|
||||
t, ok := obj.(taskEntry)
|
||||
if !ok {
|
||||
panic("unexpected type passed to FromObject")
|
||||
}
|
||||
|
||||
// Add the null character as a terminator
|
||||
val := t.Task.ID + "\x00"
|
||||
return true, []byte(val), nil
|
||||
}
|
||||
|
||||
func (ti taskIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) {
|
||||
return prefixFromArgs(args...)
|
||||
}
|
||||
|
||||
type taskIndexerByName struct{}
|
||||
|
||||
func (ti taskIndexerByName) FromArgs(args ...interface{}) ([]byte, error) {
|
||||
|
@ -234,12 +166,9 @@ func (ti taskIndexerByName) FromArgs(args ...interface{}) ([]byte, error) {
|
|||
}
|
||||
|
||||
func (ti taskIndexerByName) FromObject(obj interface{}) (bool, []byte, error) {
|
||||
t, ok := obj.(taskEntry)
|
||||
if !ok {
|
||||
panic("unexpected type passed to FromObject")
|
||||
}
|
||||
t := obj.(*api.Task)
|
||||
|
||||
name := naming.Task(t.Task)
|
||||
name := naming.Task(t)
|
||||
|
||||
// Add the null character as a terminator
|
||||
return true, []byte(strings.ToLower(name) + "\x00"), nil
|
||||
|
@ -249,6 +178,25 @@ func (ti taskIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error)
|
|||
return prefixFromArgs(args...)
|
||||
}
|
||||
|
||||
type taskIndexerByRuntime struct{}
|
||||
|
||||
func (ti taskIndexerByRuntime) FromArgs(args ...interface{}) ([]byte, error) {
|
||||
return fromArgs(args...)
|
||||
}
|
||||
|
||||
func (ti taskIndexerByRuntime) FromObject(obj interface{}) (bool, []byte, error) {
|
||||
t := obj.(*api.Task)
|
||||
r, err := naming.Runtime(t.Spec)
|
||||
if err != nil {
|
||||
return false, nil, nil
|
||||
}
|
||||
return true, []byte(r + "\x00"), nil
|
||||
}
|
||||
|
||||
func (ti taskIndexerByRuntime) PrefixFromArgs(args ...interface{}) ([]byte, error) {
|
||||
return prefixFromArgs(args...)
|
||||
}
|
||||
|
||||
type taskIndexerByServiceID struct{}
|
||||
|
||||
func (ti taskIndexerByServiceID) FromArgs(args ...interface{}) ([]byte, error) {
|
||||
|
@ -256,10 +204,7 @@ func (ti taskIndexerByServiceID) FromArgs(args ...interface{}) ([]byte, error) {
|
|||
}
|
||||
|
||||
func (ti taskIndexerByServiceID) FromObject(obj interface{}) (bool, []byte, error) {
|
||||
t, ok := obj.(taskEntry)
|
||||
if !ok {
|
||||
panic("unexpected type passed to FromObject")
|
||||
}
|
||||
t := obj.(*api.Task)
|
||||
|
||||
// Add the null character as a terminator
|
||||
val := t.ServiceID + "\x00"
|
||||
|
@ -273,10 +218,7 @@ func (ti taskIndexerByNodeID) FromArgs(args ...interface{}) ([]byte, error) {
|
|||
}
|
||||
|
||||
func (ti taskIndexerByNodeID) FromObject(obj interface{}) (bool, []byte, error) {
|
||||
t, ok := obj.(taskEntry)
|
||||
if !ok {
|
||||
panic("unexpected type passed to FromObject")
|
||||
}
|
||||
t := obj.(*api.Task)
|
||||
|
||||
// Add the null character as a terminator
|
||||
val := t.NodeID + "\x00"
|
||||
|
@ -290,10 +232,7 @@ func (ti taskIndexerBySlot) FromArgs(args ...interface{}) ([]byte, error) {
|
|||
}
|
||||
|
||||
func (ti taskIndexerBySlot) FromObject(obj interface{}) (bool, []byte, error) {
|
||||
t, ok := obj.(taskEntry)
|
||||
if !ok {
|
||||
panic("unexpected type passed to FromObject")
|
||||
}
|
||||
t := obj.(*api.Task)
|
||||
|
||||
// Add the null character as a terminator
|
||||
val := t.ServiceID + "\x00" + strconv.FormatUint(t.Slot, 10) + "\x00"
|
||||
|
@ -307,10 +246,7 @@ func (ti taskIndexerByDesiredState) FromArgs(args ...interface{}) ([]byte, error
|
|||
}
|
||||
|
||||
func (ti taskIndexerByDesiredState) FromObject(obj interface{}) (bool, []byte, error) {
|
||||
t, ok := obj.(taskEntry)
|
||||
if !ok {
|
||||
panic("unexpected type passed to FromObject")
|
||||
}
|
||||
t := obj.(*api.Task)
|
||||
|
||||
// Add the null character as a terminator
|
||||
return true, []byte(strconv.FormatInt(int64(t.DesiredState), 10) + "\x00"), nil
|
||||
|
@ -323,10 +259,7 @@ func (ti taskIndexerByNetwork) FromArgs(args ...interface{}) ([]byte, error) {
|
|||
}
|
||||
|
||||
func (ti taskIndexerByNetwork) FromObject(obj interface{}) (bool, [][]byte, error) {
|
||||
t, ok := obj.(taskEntry)
|
||||
if !ok {
|
||||
panic("unexpected type passed to FromObject")
|
||||
}
|
||||
t := obj.(*api.Task)
|
||||
|
||||
var networkIDs [][]byte
|
||||
|
||||
|
@ -345,10 +278,7 @@ func (ti taskIndexerBySecret) FromArgs(args ...interface{}) ([]byte, error) {
|
|||
}
|
||||
|
||||
func (ti taskIndexerBySecret) FromObject(obj interface{}) (bool, [][]byte, error) {
|
||||
t, ok := obj.(taskEntry)
|
||||
if !ok {
|
||||
panic("unexpected type passed to FromObject")
|
||||
}
|
||||
t := obj.(*api.Task)
|
||||
|
||||
container := t.Spec.GetContainer()
|
||||
if container == nil {
|
||||
|
@@ -372,10 +302,7 @@ func (ts taskIndexerByTaskState) FromArgs(args ...interface{}) ([]byte, error) {
}

func (ts taskIndexerByTaskState) FromObject(obj interface{}) (bool, []byte, error) {
-	t, ok := obj.(taskEntry)
-	if !ok {
-		panic("unexpected type passed to FromObject")
-	}
+	t := obj.(*api.Task)

	// Add the null character as a terminator
	return true, []byte(strconv.FormatInt(int64(t.Status.State), 10) + "\x00"), nil
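Tasks gain the same runtime and custom indexes, and FindTasks keeps the familiar selector pattern. A hedged sketch (imports elided) that lists a service's tasks and keeps the ones whose desired state is running; ByServiceID is assumed to be the exported constructor behind the byService case above:

func runningTasks(s *store.MemoryStore, serviceID string) []*api.Task {
	var out []*api.Task
	s.View(func(tx store.ReadTx) {
		tasks, err := store.FindTasks(tx, store.ByServiceID(serviceID))
		if err != nil {
			return
		}
		for _, t := range tasks {
			if t.DesiredState == api.TaskStateRunning {
				out = append(out, t)
			}
		}
	})
	return out
}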

485 vendor/github.com/docker/swarmkit/manager/state/watch.go generated vendored
@ -6,30 +6,15 @@ import (
|
|||
"github.com/docker/swarmkit/watch"
|
||||
)
|
||||
|
||||
// Event is the type used for events passed over watcher channels, and also
|
||||
// the type used to specify filtering in calls to Watch.
|
||||
type Event interface {
|
||||
// TODO(stevvooe): Consider whether it makes sense to squish both the
|
||||
// matcher type and the primary type into the same type. It might be better
|
||||
// to build a matcher from an event prototype.
|
||||
|
||||
// matches checks if this item in a watch queue matches the event
|
||||
// description.
|
||||
matches(events.Event) bool
|
||||
}
|
||||
|
||||
// EventCommit delineates a transaction boundary.
|
||||
type EventCommit struct{}
|
||||
|
||||
func (e EventCommit) matches(watchEvent events.Event) bool {
|
||||
// Matches returns true if this event is a commit event.
|
||||
func (e EventCommit) Matches(watchEvent events.Event) bool {
|
||||
_, ok := watchEvent.(EventCommit)
|
||||
return ok
|
||||
}
|
||||
|
||||
// TaskCheckFunc is the type of function used to perform filtering checks on
|
||||
// api.Task structures.
|
||||
type TaskCheckFunc func(t1, t2 *api.Task) bool
|
||||
|
||||
// TaskCheckID is a TaskCheckFunc for matching task IDs.
|
||||
func TaskCheckID(t1, t2 *api.Task) bool {
|
||||
return t1.ID == t2.ID
|
||||
|
@ -50,244 +35,16 @@ func TaskCheckStateGreaterThan(t1, t2 *api.Task) bool {
|
|||
return t2.Status.State > t1.Status.State
|
||||
}
|
||||
|
||||
// EventCreateTask is the type used to put CreateTask events on the
|
||||
// publish/subscribe queue and filter these events in calls to Watch.
|
||||
type EventCreateTask struct {
|
||||
Task *api.Task
|
||||
// Checks is a list of functions to call to filter events for a watch
|
||||
// stream. They are applied with AND logic. They are only applicable for
|
||||
// calls to Watch.
|
||||
Checks []TaskCheckFunc
|
||||
}
|
||||
|
||||
func (e EventCreateTask) matches(watchEvent events.Event) bool {
|
||||
typedEvent, ok := watchEvent.(EventCreateTask)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, check := range e.Checks {
|
||||
if !check(e.Task, typedEvent.Task) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// EventUpdateTask is the type used to put UpdateTask events on the
|
||||
// publish/subscribe queue and filter these events in calls to Watch.
|
||||
type EventUpdateTask struct {
|
||||
Task *api.Task
|
||||
// Checks is a list of functions to call to filter events for a watch
|
||||
// stream. They are applied with AND logic. They are only applicable for
|
||||
// calls to Watch.
|
||||
Checks []TaskCheckFunc
|
||||
}
|
||||
|
||||
func (e EventUpdateTask) matches(watchEvent events.Event) bool {
|
||||
typedEvent, ok := watchEvent.(EventUpdateTask)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, check := range e.Checks {
|
||||
if !check(e.Task, typedEvent.Task) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// EventDeleteTask is the type used to put DeleteTask events on the
|
||||
// publish/subscribe queue and filter these events in calls to Watch.
|
||||
type EventDeleteTask struct {
|
||||
Task *api.Task
|
||||
// Checks is a list of functions to call to filter events for a watch
|
||||
// stream. They are applied with AND logic. They are only applicable for
|
||||
// calls to Watch.
|
||||
Checks []TaskCheckFunc
|
||||
}
|
||||
|
||||
func (e EventDeleteTask) matches(watchEvent events.Event) bool {
|
||||
typedEvent, ok := watchEvent.(EventDeleteTask)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, check := range e.Checks {
|
||||
if !check(e.Task, typedEvent.Task) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// ServiceCheckFunc is the type of function used to perform filtering checks on
|
||||
// api.Service structures.
|
||||
type ServiceCheckFunc func(j1, j2 *api.Service) bool
|
||||
|
||||
// ServiceCheckID is a ServiceCheckFunc for matching service IDs.
|
||||
func ServiceCheckID(j1, j2 *api.Service) bool {
|
||||
return j1.ID == j2.ID
|
||||
}
|
||||
|
||||
// EventCreateService is the type used to put CreateService events on the
|
||||
// publish/subscribe queue and filter these events in calls to Watch.
|
||||
type EventCreateService struct {
|
||||
Service *api.Service
|
||||
// Checks is a list of functions to call to filter events for a watch
|
||||
// stream. They are applied with AND logic. They are only applicable for
|
||||
// calls to Watch.
|
||||
Checks []ServiceCheckFunc
|
||||
}
|
||||
|
||||
func (e EventCreateService) matches(watchEvent events.Event) bool {
|
||||
typedEvent, ok := watchEvent.(EventCreateService)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, check := range e.Checks {
|
||||
if !check(e.Service, typedEvent.Service) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// EventUpdateService is the type used to put UpdateService events on the
|
||||
// publish/subscribe queue and filter these events in calls to Watch.
|
||||
type EventUpdateService struct {
|
||||
Service *api.Service
|
||||
// Checks is a list of functions to call to filter events for a watch
|
||||
// stream. They are applied with AND logic. They are only applicable for
|
||||
// calls to Watch.
|
||||
Checks []ServiceCheckFunc
|
||||
}
|
||||
|
||||
func (e EventUpdateService) matches(watchEvent events.Event) bool {
|
||||
typedEvent, ok := watchEvent.(EventUpdateService)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, check := range e.Checks {
|
||||
if !check(e.Service, typedEvent.Service) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// EventDeleteService is the type used to put DeleteService events on the
|
||||
// publish/subscribe queue and filter these events in calls to Watch.
|
||||
type EventDeleteService struct {
|
||||
Service *api.Service
|
||||
// Checks is a list of functions to call to filter events for a watch
|
||||
// stream. They are applied with AND logic. They are only applicable for
|
||||
// calls to Watch.
|
||||
Checks []ServiceCheckFunc
|
||||
}
|
||||
|
||||
func (e EventDeleteService) matches(watchEvent events.Event) bool {
|
||||
typedEvent, ok := watchEvent.(EventDeleteService)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, check := range e.Checks {
|
||||
if !check(e.Service, typedEvent.Service) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// NetworkCheckFunc is the type of function used to perform filtering checks on
|
||||
// api.Service structures.
|
||||
type NetworkCheckFunc func(n1, n2 *api.Network) bool
|
||||
|
||||
// NetworkCheckID is a NetworkCheckFunc for matching network IDs.
|
||||
func NetworkCheckID(n1, n2 *api.Network) bool {
|
||||
return n1.ID == n2.ID
|
||||
}
|
||||
|
||||
// EventCreateNetwork is the type used to put CreateNetwork events on the
|
||||
// publish/subscribe queue and filter these events in calls to Watch.
|
||||
type EventCreateNetwork struct {
|
||||
Network *api.Network
|
||||
// Checks is a list of functions to call to filter events for a watch
|
||||
// stream. They are applied with AND logic. They are only applicable for
|
||||
// calls to Watch.
|
||||
Checks []NetworkCheckFunc
|
||||
}
|
||||
|
||||
func (e EventCreateNetwork) matches(watchEvent events.Event) bool {
|
||||
typedEvent, ok := watchEvent.(EventCreateNetwork)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, check := range e.Checks {
|
||||
if !check(e.Network, typedEvent.Network) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// EventUpdateNetwork is the type used to put UpdateNetwork events on the
|
||||
// publish/subscribe queue and filter these events in calls to Watch.
|
||||
type EventUpdateNetwork struct {
|
||||
Network *api.Network
|
||||
// Checks is a list of functions to call to filter events for a watch
|
||||
// stream. They are applied with AND logic. They are only applicable for
|
||||
// calls to Watch.
|
||||
Checks []NetworkCheckFunc
|
||||
}
|
||||
|
||||
func (e EventUpdateNetwork) matches(watchEvent events.Event) bool {
|
||||
typedEvent, ok := watchEvent.(EventUpdateNetwork)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, check := range e.Checks {
|
||||
if !check(e.Network, typedEvent.Network) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// EventDeleteNetwork is the type used to put DeleteNetwork events on the
|
||||
// publish/subscribe queue and filter these events in calls to Watch.
|
||||
type EventDeleteNetwork struct {
|
||||
Network *api.Network
|
||||
// Checks is a list of functions to call to filter events for a watch
|
||||
// stream. They are applied with AND logic. They are only applicable for
|
||||
// calls to Watch.
|
||||
Checks []NetworkCheckFunc
|
||||
}
|
||||
|
||||
func (e EventDeleteNetwork) matches(watchEvent events.Event) bool {
|
||||
typedEvent, ok := watchEvent.(EventDeleteNetwork)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, check := range e.Checks {
|
||||
if !check(e.Network, typedEvent.Network) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// NodeCheckFunc is the type of function used to perform filtering checks on
|
||||
// api.Service structures.
|
||||
type NodeCheckFunc func(n1, n2 *api.Node) bool
|
||||
|
||||
// NodeCheckID is a NodeCheckFunc for matching node IDs.
|
||||
func NodeCheckID(n1, n2 *api.Node) bool {
|
||||
return n1.ID == n2.ID
|
||||
|
@ -298,238 +55,34 @@ func NodeCheckState(n1, n2 *api.Node) bool {
|
|||
return n1.Status.State == n2.Status.State
|
||||
}
|
||||
|
||||
// EventCreateNode is the type used to put CreateNode events on the
|
||||
// publish/subscribe queue and filter these events in calls to Watch.
|
||||
type EventCreateNode struct {
|
||||
Node *api.Node
|
||||
// Checks is a list of functions to call to filter events for a watch
|
||||
// stream. They are applied with AND logic. They are only applicable for
|
||||
// calls to Watch.
|
||||
Checks []NodeCheckFunc
|
||||
}
|
||||
|
||||
func (e EventCreateNode) matches(watchEvent events.Event) bool {
|
||||
typedEvent, ok := watchEvent.(EventCreateNode)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, check := range e.Checks {
|
||||
if !check(e.Node, typedEvent.Node) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// EventUpdateNode is the type used to put DeleteNode events on the
|
||||
// publish/subscribe queue and filter these events in calls to Watch.
|
||||
type EventUpdateNode struct {
|
||||
Node *api.Node
|
||||
// Checks is a list of functions to call to filter events for a watch
|
||||
// stream. They are applied with AND logic. They are only applicable for
|
||||
// calls to Watch.
|
||||
Checks []NodeCheckFunc
|
||||
}
|
||||
|
||||
func (e EventUpdateNode) matches(watchEvent events.Event) bool {
|
||||
typedEvent, ok := watchEvent.(EventUpdateNode)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, check := range e.Checks {
|
||||
if !check(e.Node, typedEvent.Node) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// EventDeleteNode is the type used to put DeleteNode events on the
|
||||
// publish/subscribe queue and filter these events in calls to Watch.
|
||||
type EventDeleteNode struct {
|
||||
Node *api.Node
|
||||
// Checks is a list of functions to call to filter events for a watch
|
||||
// stream. They are applied with AND logic. They are only applicable for
|
||||
// calls to Watch.
|
||||
Checks []NodeCheckFunc
|
||||
}
|
||||
|
||||
func (e EventDeleteNode) matches(watchEvent events.Event) bool {
|
||||
typedEvent, ok := watchEvent.(EventDeleteNode)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, check := range e.Checks {
|
||||
if !check(e.Node, typedEvent.Node) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// ClusterCheckFunc is the type of function used to perform filtering checks on
|
||||
// api.Cluster structures.
|
||||
type ClusterCheckFunc func(v1, v2 *api.Cluster) bool
|
||||
|
||||
// ClusterCheckID is a ClusterCheckFunc for matching volume IDs.
|
||||
func ClusterCheckID(v1, v2 *api.Cluster) bool {
|
||||
return v1.ID == v2.ID
|
||||
}
|
||||
|
||||
// EventCreateCluster is the type used to put CreateCluster events on the
|
||||
// publish/subscribe queue and filter these events in calls to Watch.
|
||||
type EventCreateCluster struct {
|
||||
Cluster *api.Cluster
|
||||
// Checks is a list of functions to call to filter events for a watch
|
||||
// stream. They are applied with AND logic. They are only applicable for
|
||||
// calls to Watch.
|
||||
Checks []ClusterCheckFunc
|
||||
}
|
||||
|
||||
func (e EventCreateCluster) matches(watchEvent events.Event) bool {
|
||||
typedEvent, ok := watchEvent.(EventCreateCluster)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, check := range e.Checks {
|
||||
if !check(e.Cluster, typedEvent.Cluster) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// EventUpdateCluster is the type used to put UpdateCluster events on the
|
||||
// publish/subscribe queue and filter these events in calls to Watch.
|
||||
type EventUpdateCluster struct {
|
||||
Cluster *api.Cluster
|
||||
// Checks is a list of functions to call to filter events for a watch
|
||||
// stream. They are applied with AND logic. They are only applicable for
|
||||
// calls to Watch.
|
||||
Checks []ClusterCheckFunc
|
||||
}
|
||||
|
||||
func (e EventUpdateCluster) matches(watchEvent events.Event) bool {
|
||||
typedEvent, ok := watchEvent.(EventUpdateCluster)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, check := range e.Checks {
|
||||
if !check(e.Cluster, typedEvent.Cluster) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// EventDeleteCluster is the type used to put DeleteCluster events on the
|
||||
// publish/subscribe queue and filter these events in calls to Watch.
|
||||
type EventDeleteCluster struct {
|
||||
Cluster *api.Cluster
|
||||
// Checks is a list of functions to call to filter events for a watch
|
||||
// stream. They are applied with AND logic. They are only applicable for
|
||||
// calls to Watch.
|
||||
Checks []ClusterCheckFunc
|
||||
}
|
||||
|
||||
func (e EventDeleteCluster) matches(watchEvent events.Event) bool {
|
||||
typedEvent, ok := watchEvent.(EventDeleteCluster)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, check := range e.Checks {
|
||||
if !check(e.Cluster, typedEvent.Cluster) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// SecretCheckFunc is the type of function used to perform filtering checks on
|
||||
// api.Secret structures.
|
||||
type SecretCheckFunc func(v1, v2 *api.Secret) bool
|
||||
|
||||
// SecretCheckID is a SecretCheckFunc for matching volume IDs.
|
||||
// SecretCheckID is a SecretCheckFunc for matching secret IDs.
|
||||
func SecretCheckID(v1, v2 *api.Secret) bool {
|
||||
return v1.ID == v2.ID
|
||||
}
|
||||
|
||||
// EventCreateSecret is the type used to put CreateSecret events on the
|
||||
// publish/subscribe queue and filter these events in calls to Watch.
|
||||
type EventCreateSecret struct {
|
||||
Secret *api.Secret
|
||||
// Checks is a list of functions to call to filter events for a watch
|
||||
// stream. They are applied with AND logic. They are only applicable for
|
||||
// calls to Watch.
|
||||
Checks []SecretCheckFunc
|
||||
// ResourceCheckID is a ResourceCheckFunc for matching resource IDs.
|
||||
func ResourceCheckID(v1, v2 *api.Resource) bool {
|
||||
return v1.ID == v2.ID
|
||||
}
|
||||
|
||||
func (e EventCreateSecret) matches(watchEvent events.Event) bool {
|
||||
typedEvent, ok := watchEvent.(EventCreateSecret)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, check := range e.Checks {
|
||||
if !check(e.Secret, typedEvent.Secret) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
// ResourceCheckKind is a ResourceCheckFunc for matching resource kinds.
|
||||
func ResourceCheckKind(v1, v2 *api.Resource) bool {
|
||||
return v1.Kind == v2.Kind
|
||||
}
|
||||
|
||||
// EventUpdateSecret is the type used to put UpdateSecret events on the
|
||||
// publish/subscribe queue and filter these events in calls to Watch.
|
||||
type EventUpdateSecret struct {
|
||||
Secret *api.Secret
|
||||
// Checks is a list of functions to call to filter events for a watch
|
||||
// stream. They are applied with AND logic. They are only applicable for
|
||||
// calls to Watch.
|
||||
Checks []SecretCheckFunc
|
||||
// ExtensionCheckID is a ExtensionCheckFunc for matching extension IDs.
|
||||
func ExtensionCheckID(v1, v2 *api.Extension) bool {
|
||||
return v1.ID == v2.ID
|
||||
}
|
||||
|
||||
func (e EventUpdateSecret) matches(watchEvent events.Event) bool {
|
||||
typedEvent, ok := watchEvent.(EventUpdateSecret)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, check := range e.Checks {
|
||||
if !check(e.Secret, typedEvent.Secret) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// EventDeleteSecret is the type used to put DeleteSecret events on the
|
||||
// publish/subscribe queue and filter these events in calls to Watch.
|
||||
type EventDeleteSecret struct {
|
||||
Secret *api.Secret
|
||||
// Checks is a list of functions to call to filter events for a watch
|
||||
// stream. They are applied with AND logic. They are only applicable for
|
||||
// calls to Watch.
|
||||
Checks []SecretCheckFunc
|
||||
}
|
||||
|
||||
func (e EventDeleteSecret) matches(watchEvent events.Event) bool {
|
||||
typedEvent, ok := watchEvent.(EventDeleteSecret)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, check := range e.Checks {
|
||||
if !check(e.Secret, typedEvent.Secret) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
// ExtensionCheckName is an ExtensionCheckFunc for matching extension names.
|
||||
func ExtensionCheckName(v1, v2 *api.Extension) bool {
|
||||
return v1.Annotations.Name == v2.Annotations.Name
|
||||
}
|
||||
|
||||
// Watch takes a variable number of events to match against. The subscriber
|
||||
|
@@ -559,18 +112,18 @@ func (e EventDeleteSecret) matches(watchEvent events.Event) bool {
// func(t1, t2 *api.Task) bool {
// 	return t1.ServiceID == t2.ServiceID
// }}})
-func Watch(queue *watch.Queue, specifiers ...Event) (eventq chan events.Event, cancel func()) {
+func Watch(queue *watch.Queue, specifiers ...api.Event) (eventq chan events.Event, cancel func()) {
	if len(specifiers) == 0 {
		return queue.Watch()
	}
	return queue.CallbackWatch(Matcher(specifiers...))
}

-// Matcher returns an events.Matcher that matches the specifiers with OR logic.
-func Matcher(specifiers ...Event) events.MatcherFunc {
+// Matcher returns an events.Matcher that Matches the specifiers with OR logic.
+func Matcher(specifiers ...api.Event) events.MatcherFunc {
	return events.MatcherFunc(func(event events.Event) bool {
		for _, s := range specifiers {
-			if s.matches(event) {
+			if s.Matches(event) {
				return true
			}
		}
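Watch and Matcher now accept the generated api.Event type, and the event structs expose an exported Matches method, so matchers can be composed outside this package as well. A minimal sketch using only EventCommit, which is defined in this file; obtaining the queue from the memory store's watch-queue accessor is assumed, and imports are elided.

func waitForCommit(q *watch.Queue) {
	eventq, cancel := state.Watch(q, state.EventCommit{})
	defer cancel()

	for ev := range eventq {
		if _, ok := ev.(state.EventCommit); ok {
			// One batch of store updates has been fully applied.
			return
		}
	}
}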

5 vendor/github.com/docker/swarmkit/node/node.go generated vendored
@@ -600,10 +600,13 @@ func (n *Node) loadSecurityConfig(ctx context.Context) (*ca.SecurityConfig, erro
			n.unlockKey = encryption.GenerateSecretKey()
		}
		krw = ca.NewKeyReadWriter(paths.Node, n.unlockKey, &manager.RaftDEKData{})
-		rootCA, err = ca.CreateRootCA(ca.DefaultRootCN, paths.RootCA)
+		rootCA, err = ca.CreateRootCA(ca.DefaultRootCN)
		if err != nil {
			return nil, err
		}
+		if err := ca.SaveRootCA(rootCA, paths.RootCA); err != nil {
+			return nil, err
+		}
		log.G(ctx).Debug("generated CA key and certificate")
	} else if err == ca.ErrNoLocalRootCA { // from previous error loading the root CA from disk
		rootCA, err = ca.DownloadRootCA(ctx, paths.RootCA, n.config.JoinToken, n.connBroker)
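CreateRootCA no longer takes certificate paths: generating the root CA is now purely in-memory, and persisting it is the separate SaveRootCA call added above, each with its own error path. A condensed sketch of that split, assuming the value types used in this hunk; the helper name is illustrative.

func newPersistedRootCA(paths ca.CertPaths) (ca.RootCA, error) {
	rootCA, err := ca.CreateRootCA(ca.DefaultRootCN)
	if err != nil {
		return ca.RootCA{}, err
	}
	// Persist separately; callers that only need an in-memory CA can skip this.
	if err := ca.SaveRootCA(rootCA, paths); err != nil {
		return ca.RootCA{}, err
	}
	return rootCA, nil
}

Keeping generation separate from persistence lets callers such as tests or external-CA flows build a root CA without touching disk.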

141 vendor/github.com/docker/swarmkit/protobuf/plugin/plugin.pb.go generated vendored
@ -9,6 +9,7 @@
|
|||
plugin.proto
|
||||
|
||||
It has these top-level messages:
|
||||
StoreObject
|
||||
TLSAuthorization
|
||||
*/
|
||||
package plugin
|
||||
|
@ -34,6 +35,14 @@ var _ = math.Inf
|
|||
// proto package needs to be updated.
|
||||
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
type StoreObject struct {
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *StoreObject) Reset() { *m = StoreObject{} }
|
||||
func (*StoreObject) ProtoMessage() {}
|
||||
func (*StoreObject) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} }
|
||||
|
||||
type TLSAuthorization struct {
|
||||
// Roles contains the acceptable TLS OU roles for the handler.
|
||||
Roles []string `protobuf:"bytes,1,rep,name=roles" json:"roles,omitempty"`
|
||||
|
@ -46,7 +55,7 @@ type TLSAuthorization struct {
|
|||
|
||||
func (m *TLSAuthorization) Reset() { *m = TLSAuthorization{} }
|
||||
func (*TLSAuthorization) ProtoMessage() {}
|
||||
func (*TLSAuthorization) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} }
|
||||
func (*TLSAuthorization) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{1} }
|
||||
|
||||
var E_Deepcopy = &proto.ExtensionDesc{
|
||||
ExtendedType: (*google_protobuf.MessageOptions)(nil),
|
||||
|
@ -56,6 +65,14 @@ var E_Deepcopy = &proto.ExtensionDesc{
|
|||
Tag: "varint,70000,opt,name=deepcopy,def=1",
|
||||
}
|
||||
|
||||
var E_StoreObject = &proto.ExtensionDesc{
|
||||
ExtendedType: (*google_protobuf.MessageOptions)(nil),
|
||||
ExtensionType: (*StoreObject)(nil),
|
||||
Field: 70001,
|
||||
Name: "docker.protobuf.plugin.store_object",
|
||||
Tag: "bytes,70001,opt,name=store_object,json=storeObject",
|
||||
}
|
||||
|
||||
var E_TlsAuthorization = &proto.ExtensionDesc{
|
||||
ExtendedType: (*google_protobuf.MethodOptions)(nil),
|
||||
ExtensionType: (*TLSAuthorization)(nil),
|
||||
|
@ -65,10 +82,33 @@ var E_TlsAuthorization = &proto.ExtensionDesc{
|
|||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*StoreObject)(nil), "docker.protobuf.plugin.StoreObject")
|
||||
proto.RegisterType((*TLSAuthorization)(nil), "docker.protobuf.plugin.TLSAuthorization")
|
||||
proto.RegisterExtension(E_Deepcopy)
|
||||
proto.RegisterExtension(E_StoreObject)
|
||||
proto.RegisterExtension(E_TlsAuthorization)
|
||||
}
|
||||
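E_StoreObject hangs the new store_object option off google.protobuf.MessageOptions, so the protobuf code generator can see which messages are store objects. A hedged sketch of reading it back with the gogo/protobuf extension helpers, written as if it lived in this plugin package; how the MessageOptions value is obtained inside a protoc plugin is out of scope and assumed here.

func isStoreObject(opts *google_protobuf.MessageOptions) (*StoreObject, bool) {
	if opts == nil || !proto.HasExtension(opts, E_StoreObject) {
		return nil, false
	}
	ext, err := proto.GetExtension(opts, E_StoreObject)
	if err != nil {
		return nil, false
	}
	// The extension payload drives store/event code generation downstream.
	return ext.(*StoreObject), true
}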
func (m *StoreObject) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalTo(dAtA)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *StoreObject) MarshalTo(dAtA []byte) (int, error) {
|
||||
var i int
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.XXX_unrecognized != nil {
|
||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func (m *TLSAuthorization) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
|
@ -142,6 +182,15 @@ func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int {
|
|||
dAtA[offset] = uint8(v)
|
||||
return offset + 1
|
||||
}
|
||||
func (m *StoreObject) Size() (n int) {
|
||||
var l int
|
||||
_ = l
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *TLSAuthorization) Size() (n int) {
|
||||
var l int
|
||||
_ = l
|
||||
|
@ -173,6 +222,16 @@ func sovPlugin(x uint64) (n int) {
|
|||
func sozPlugin(x uint64) (n int) {
|
||||
return sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func (this *StoreObject) String() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := strings.Join([]string{`&StoreObject{`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
}
|
||||
func (this *TLSAuthorization) String() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
|
@@ -193,6 +252,57 @@ func valueToStringPlugin(v interface{}) string {
	pv := reflect.Indirect(rv).Interface()
	return fmt.Sprintf("*%v", pv)
}
func (m *StoreObject) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPlugin
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: StoreObject: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: StoreObject: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		default:
			iNdEx = preIndex
			skippy, err := skipPlugin(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPlugin
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
func (m *TLSAuthorization) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
@@ -402,21 +512,24 @@ var (
func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) }

var fileDescriptorPlugin = []byte{
// 254 bytes of a gzipped FileDescriptorProto
// 296 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0xc8, 0x29, 0x4d,
0xcf, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x4b, 0xc9, 0x4f, 0xce, 0x4e, 0x2d,
0x82, 0xf0, 0x92, 0x4a, 0xd3, 0xf4, 0x20, 0xb2, 0x52, 0x0a, 0xe9, 0xf9, 0xf9, 0xe9, 0x39, 0xa9,
0xfa, 0x30, 0x71, 0xfd, 0x94, 0xd4, 0xe2, 0xe4, 0xa2, 0xcc, 0x82, 0x92, 0x7c, 0xa8, 0x5a, 0x25,
0x17, 0x2e, 0x81, 0x10, 0x9f, 0x60, 0xc7, 0xd2, 0x92, 0x8c, 0xfc, 0xa2, 0xcc, 0xaa, 0xc4, 0x92,
0xcc, 0xfc, 0x3c, 0x21, 0x11, 0x2e, 0xd6, 0xa2, 0xfc, 0x9c, 0xd4, 0x62, 0x09, 0x46, 0x05, 0x66,
0x0d, 0xce, 0x20, 0x08, 0x47, 0x48, 0x8a, 0x8b, 0x23, 0x33, 0xaf, 0x38, 0x35, 0xb9, 0xb4, 0x28,
0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x23, 0x08, 0xce, 0xb7, 0x72, 0xe6, 0xe2, 0x48, 0x49, 0x4d,
0x2d, 0x48, 0xce, 0x2f, 0xa8, 0x14, 0x92, 0xd7, 0x83, 0x58, 0x8a, 0x70, 0x8c, 0x6f, 0x6a, 0x71,
0x71, 0x62, 0x7a, 0xaa, 0x7f, 0x01, 0xc8, 0xf4, 0x62, 0x89, 0x0f, 0x8b, 0x58, 0x40, 0xda, 0xad,
0x58, 0x4a, 0x8a, 0x4a, 0x53, 0x83, 0xe0, 0x1a, 0xad, 0x2a, 0xb8, 0x04, 0x4b, 0x72, 0x8a, 0xe3,
0x13, 0x51, 0xdc, 0x22, 0x87, 0xc5, 0xb4, 0x92, 0x8c, 0xfc, 0x14, 0x98, 0x61, 0x2f, 0x9f, 0xf6,
0x2a, 0x2b, 0x30, 0x6a, 0x70, 0x1b, 0x69, 0xe8, 0x61, 0x0f, 0x03, 0x3d, 0x74, 0xef, 0x05, 0x09,
0x94, 0xe4, 0x14, 0xa3, 0x88, 0x38, 0x49, 0x9c, 0x78, 0x28, 0xc7, 0x70, 0xe3, 0xa1, 0x1c, 0x43,
0xc3, 0x23, 0x39, 0xc6, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e,
0x11, 0x10, 0x00, 0x00, 0xff, 0xff, 0xe7, 0x4c, 0x2c, 0xf3, 0x67, 0x01, 0x00, 0x00,
0x5e, 0x2e, 0xee, 0xe0, 0x92, 0xfc, 0xa2, 0x54, 0xff, 0xa4, 0xac, 0xd4, 0xe4, 0x12, 0x25, 0x17,
0x2e, 0x81, 0x10, 0x9f, 0x60, 0xc7, 0xd2, 0x92, 0x8c, 0xfc, 0xa2, 0xcc, 0xaa, 0xc4, 0x92, 0xcc,
0xfc, 0x3c, 0x21, 0x11, 0x2e, 0xd6, 0xa2, 0xfc, 0x9c, 0xd4, 0x62, 0x09, 0x46, 0x05, 0x66, 0x0d,
0xce, 0x20, 0x08, 0x47, 0x48, 0x8a, 0x8b, 0x23, 0x33, 0xaf, 0x38, 0x35, 0xb9, 0xb4, 0x28, 0x55,
0x82, 0x49, 0x81, 0x51, 0x83, 0x23, 0x08, 0xce, 0xb7, 0x72, 0xe6, 0xe2, 0x48, 0x49, 0x4d, 0x2d,
0x48, 0xce, 0x2f, 0xa8, 0x14, 0x92, 0xd7, 0x83, 0xb8, 0x01, 0xe1, 0x36, 0xdf, 0xd4, 0xe2, 0xe2,
0xc4, 0xf4, 0x54, 0xff, 0x02, 0x90, 0xe9, 0xc5, 0x12, 0x1f, 0x16, 0xb1, 0x80, 0xb4, 0x5b, 0xb1,
0x94, 0x14, 0x95, 0xa6, 0x06, 0xc1, 0x35, 0x5a, 0x65, 0x72, 0xf1, 0x14, 0x83, 0x5c, 0x16, 0x9f,
0x0f, 0x76, 0x1a, 0x61, 0x83, 0x3e, 0x82, 0x0d, 0xe2, 0x36, 0x52, 0xd6, 0xc3, 0x1e, 0x1a, 0x7a,
0x48, 0x1e, 0x0d, 0xe2, 0x2e, 0x46, 0x70, 0xac, 0x2a, 0xb8, 0x04, 0x4b, 0x72, 0x8a, 0xe3, 0x13,
0x51, 0xbc, 0x2d, 0x87, 0xc5, 0xbe, 0x92, 0x8c, 0xfc, 0x14, 0x98, 0x75, 0x2f, 0x9f, 0xf6, 0x2a,
0x83, 0xed, 0xd3, 0xc0, 0x65, 0x1f, 0x7a, 0x48, 0x06, 0x09, 0x94, 0xe4, 0x14, 0xa3, 0x88, 0x38,
0x49, 0x9c, 0x78, 0x28, 0xc7, 0x70, 0xe3, 0xa1, 0x1c, 0x43, 0xc3, 0x23, 0x39, 0xc6, 0x13, 0x8f,
0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x11, 0x10, 0x00, 0x00, 0xff, 0xff,
0xc2, 0x49, 0xd6, 0x3b, 0xe1, 0x01, 0x00, 0x00,
}
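
A minimal sketch, assuming the gogo/protobuf runtime that swarmkit vendors, of how generator or tooling code could consult the message options registered above; the helper names wantsStoreObject and deepcopyEnabled and the descriptor import alias are illustrative, not part of the vendored package.

package main

import (
	"fmt"

	"github.com/docker/swarmkit/protobuf/plugin"
	"github.com/gogo/protobuf/proto"
	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

// wantsStoreObject reports whether a message's options carry the
// (docker.protobuf.plugin.store_object) extension registered above.
func wantsStoreObject(opts *descriptor.MessageOptions) bool {
	return opts != nil && proto.HasExtension(opts, plugin.E_StoreObject)
}

// deepcopyEnabled reads the (docker.protobuf.plugin.deepcopy) extension,
// falling back to its declared default of true when the option is absent
// or unreadable.
func deepcopyEnabled(opts *descriptor.MessageOptions) bool {
	if opts == nil || !proto.HasExtension(opts, plugin.E_Deepcopy) {
		return true
	}
	v, err := proto.GetExtension(opts, plugin.E_Deepcopy)
	if err != nil {
		return true
	}
	return *(v.(*bool))
}

func main() {
	// With nil options both helpers fall back to their defaults.
	fmt.Println(wantsStoreObject(nil), deepcopyEnabled(nil))
}
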
4 vendor/github.com/docker/swarmkit/protobuf/plugin/plugin.proto generated vendored
@@ -4,8 +4,12 @@ package docker.protobuf.plugin;

import "google/protobuf/descriptor.proto";

message StoreObject {
}

extend google.protobuf.MessageOptions {
	optional bool deepcopy = 70000 [default=true];
	optional StoreObject store_object = 70001;
}

message TLSAuthorization {
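
The TLSAuthorization message above is attached to individual RPC methods through the tls_authorization extension on google.protobuf.MethodOptions (see E_TlsAuthorization earlier in this diff). A hedged sketch, again assuming the gogo/protobuf runtime and a hypothetical helper name, of reading those roles back out of a method's options:

package main

import (
	"fmt"

	"github.com/docker/swarmkit/protobuf/plugin"
	"github.com/gogo/protobuf/proto"
	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

// allowedRoles returns the OU roles attached to an RPC method through the
// tls_authorization option, or nil when the method carries no such option.
func allowedRoles(opts *descriptor.MethodOptions) []string {
	if opts == nil || !proto.HasExtension(opts, plugin.E_TlsAuthorization) {
		return nil
	}
	v, err := proto.GetExtension(opts, plugin.E_TlsAuthorization)
	if err != nil {
		return nil
	}
	return v.(*plugin.TLSAuthorization).Roles
}

func main() {
	// nil options: no tls_authorization recorded, so no role list is returned.
	fmt.Println(allowedRoles(nil))
}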