Update swarmkit to 310f1119

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
Tonis Tiigi 2016-06-15 22:41:30 -07:00
parent ef9c058b67
commit 2783568284
23 changed files with 907 additions and 268 deletions

View file

@ -139,7 +139,7 @@ clone git github.com/docker/docker-credential-helpers v0.3.0
clone git github.com/docker/containerd 860f3a94940894ac0a106eff4bd1616a67407ee2
# cluster
clone git github.com/docker/swarmkit 682e0b69be208176d6055cba855a5e9cf15c7cb4
clone git github.com/docker/swarmkit 310f1119bc81f22e60b5670d9d4731bc12d7be87
clone git github.com/golang/mock bd3c8e81be01eef76d4b503f5e687d2d1354d2d9
clone git github.com/gogo/protobuf 43a2e0b1c32252bfbbdf81f7faa7a88fb3fa4028
clone git github.com/cloudflare/cfssl 92f037e39eb103fb30f9151be40d9ed267fc4ae2
@ -158,6 +158,7 @@ clone git github.com/prometheus/common ffe929a3f4c4faeaa10f2b9535c2b1be3ad15650
clone git github.com/prometheus/procfs 454a56f35412459b5e684fd5ec0f9211b94f002a
clone hg bitbucket.org/ww/goautoneg 75cd24fc2f2c2a2088577d12123ddee5f54e0675
clone git github.com/matttproud/golang_protobuf_extensions fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a
clone git github.com/pkg/errors 01fa4104b9c248c8945d14d9f128454d5b28d595
# cli
clone git github.com/spf13/cobra 75205f23b3ea70dc7ae5e900d074e010c23c37e9 https://github.com/dnephin/cobra.git

View file

@ -6,16 +6,10 @@ import (
"github.com/Sirupsen/logrus"
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/log"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
// ContainerController controls execution of container tasks.
type ContainerController interface {
// ContainerStatus returns the status of the target container, if
// available. When the container is not available, the status will be nil.
ContainerStatus(ctx context.Context) (*api.ContainerStatus, error)
}
// Controller controls execution of a task.
type Controller interface {
// Update the task definition seen by the controller. Will return
@ -48,6 +42,15 @@ type Controller interface {
Close() error
}
// ContainerStatuser reports status of a container.
//
// This can be implemented by controllers or error types.
type ContainerStatuser interface {
// ContainerStatus returns the status of the target container, if
// available. When the container is not available, the status will be nil.
ContainerStatus(ctx context.Context) (*api.ContainerStatus, error)
}
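
The controller-only ContainerController interface is gone; container status can now come from any value, including errors. Below is a minimal sketch of an error type implementing both the new ContainerStatuser and the existing ExitCoder interface; the exitError type is hypothetical and not part of the commit:

```go
package exec

import (
	"fmt"

	"github.com/docker/swarmkit/api"
	"golang.org/x/net/context"
)

// exitError is a hypothetical error type showing why the interface
// moved: Do's fatal path can now pull the container status and exit
// code straight off the error value instead of the controller.
type exitError struct {
	code   int
	status *api.ContainerStatus
}

func (e *exitError) Error() string {
	return fmt.Sprintf("container exited with code %d", e.code)
}

// ExitCode satisfies ExitCoder.
func (e *exitError) ExitCode() int { return e.code }

// ContainerStatus satisfies ContainerStatuser; nil is a legal return
// when the container is no longer available.
func (e *exitError) ContainerStatus(ctx context.Context) (*api.ContainerStatus, error) {
	return e.status, nil
}
```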
// Resolve attempts to get a controller from the executor and reports the
// correct status depending on the task's current state according to the result.
//
@ -121,6 +124,13 @@ func Do(ctx context.Context, task *api.Task, ctlr Controller) (*api.TaskStatus,
return status, nil
}
// containerStatus and exitCode track whether we've set them in this
// particular method. Eventually, we assemble this as part of a defer.
var (
containerStatus *api.ContainerStatus
exitCode int
)
// returned when execution of the task is fatal. In this case, we
// proceed to a terminal error state and set the appropriate fields.
//
@ -131,28 +141,37 @@ func Do(ctx context.Context, task *api.Task, ctlr Controller) (*api.TaskStatus,
panic("err must not be nil when fatal")
}
if IsTemporary(err) {
switch Cause(err) {
case context.DeadlineExceeded, context.Canceled:
// no need to set these errors, since these will be more common.
default:
status.Err = err.Error()
if cs, ok := err.(ContainerStatuser); ok {
var err error
containerStatus, err = cs.ContainerStatus(ctx)
if err != nil {
log.G(ctx).WithError(err).Error("error resolving container status on fatal")
}
}
// make sure we've set the *correct* exit code
if ec, ok := err.(ExitCoder); ok {
exitCode = ec.ExitCode()
}
if cause := errors.Cause(err); cause == context.DeadlineExceeded || cause == context.Canceled {
return retry()
}
if cause := Cause(err); cause == context.DeadlineExceeded || cause == context.Canceled {
status.Err = err.Error() // still reported on temporary
if IsTemporary(err) {
return retry()
}
// only at this point do we consider the error fatal to the task.
log.G(ctx).WithError(err).Error("fatal task error")
status.Err = err.Error()
// NOTE(stevvooe): The following switch dictates the terminal failure
// state based on the state in which the failure was encountered.
switch {
case status.State < api.TaskStateStarting:
status.State = api.TaskStateRejected
case status.State > api.TaskStateStarting:
case status.State >= api.TaskStateStarting:
status.State = api.TaskStateFailed
}
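
One behavioral change hides in this hunk: the second case now uses >= instead of >, so a failure that happens exactly at STARTING is marked FAILED rather than falling through the switch untouched. The rule restated as a tiny helper, a sketch assuming swarmkit's api.TaskState constants:

```go
package exec

import "github.com/docker/swarmkit/api"

// terminalState restates the switch above: a task that failed before
// it ever reached STARTING is rejected outright; a task that failed
// at or after STARTING (the >= is the fix in this hunk) failed at
// runtime.
func terminalState(current api.TaskState) api.TaskState {
	if current < api.TaskStateStarting {
		return api.TaskStateRejected
	}
	return api.TaskStateFailed
}
```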
@ -172,21 +191,37 @@ func Do(ctx context.Context, task *api.Task, ctlr Controller) (*api.TaskStatus,
return
}
cctlr, ok := ctlr.(ContainerController)
if !ok {
return
}
cstatus, err := cctlr.ContainerStatus(ctx)
if err != nil {
log.G(ctx).WithError(err).Error("container status unavailable")
return
}
if cstatus != nil {
status.RuntimeStatus = &api.TaskStatus_Container{
Container: cstatus,
if containerStatus == nil {
// collect this, if we haven't
cctlr, ok := ctlr.(ContainerStatuser)
if !ok {
return
}
var err error
containerStatus, err = cctlr.ContainerStatus(ctx)
if err != nil {
log.G(ctx).WithError(err).Error("container status unavailable")
}
// at this point, things have gone fairly wrong. Remain positive
// and let's get something out the door.
if containerStatus == nil {
containerStatus = new(api.ContainerStatus)
containerStatusTask := task.Status.GetContainer()
if containerStatusTask != nil {
*containerStatus = *containerStatusTask // copy it over.
}
}
}
// at this point, we *must* have a containerStatus.
if exitCode != 0 {
containerStatus.ExitCode = int32(exitCode)
}
status.RuntimeStatus = &api.TaskStatus_Container{
Container: containerStatus,
}
}()
@ -222,17 +257,7 @@ func Do(ctx context.Context, task *api.Task, ctlr Controller) (*api.TaskStatus,
return transition(api.TaskStateRunning, "started")
case api.TaskStateRunning:
if err := ctlr.Wait(ctx); err != nil {
// Wait should only proceed to failed if there is a terminal
// error. The only two conditions when this happens are when we
// get an exit code or when the container doesn't exist.
switch err := err.(type) {
case ExitCoder:
return transition(api.TaskStateFailed, "failed")
default:
// pursuant to the above comment, report fatal, but wrap as
// temporary.
return fatal(MakeTemporary(err))
}
return fatal(err)
}
return transition(api.TaskStateCompleted, "finished")

View file

@ -9,38 +9,6 @@ import (
context "golang.org/x/net/context"
)
// Mock of ContainerController interface
type MockContainerController struct {
ctrl *gomock.Controller
recorder *_MockContainerControllerRecorder
}
// Recorder for MockContainerController (not exported)
type _MockContainerControllerRecorder struct {
mock *MockContainerController
}
func NewMockContainerController(ctrl *gomock.Controller) *MockContainerController {
mock := &MockContainerController{ctrl: ctrl}
mock.recorder = &_MockContainerControllerRecorder{mock}
return mock
}
func (_m *MockContainerController) EXPECT() *_MockContainerControllerRecorder {
return _m.recorder
}
func (_m *MockContainerController) ContainerStatus(ctx context.Context) (*api.ContainerStatus, error) {
ret := _m.ctrl.Call(_m, "ContainerStatus", ctx)
ret0, _ := ret[0].(*api.ContainerStatus)
ret1, _ := ret[1].(error)
return ret0, ret1
}
func (_mr *_MockContainerControllerRecorder) ContainerStatus(arg0 interface{}) *gomock.Call {
return _mr.mock.ctrl.RecordCall(_mr.mock, "ContainerStatus", arg0)
}
// Mock of Controller interface
type MockController struct {
ctrl *gomock.Controller
@ -141,3 +109,35 @@ func (_m *MockController) Close() error {
func (_mr *_MockControllerRecorder) Close() *gomock.Call {
return _mr.mock.ctrl.RecordCall(_mr.mock, "Close")
}
// Mock of ContainerStatuser interface
type MockContainerStatuser struct {
ctrl *gomock.Controller
recorder *_MockContainerStatuserRecorder
}
// Recorder for MockContainerStatuser (not exported)
type _MockContainerStatuserRecorder struct {
mock *MockContainerStatuser
}
func NewMockContainerStatuser(ctrl *gomock.Controller) *MockContainerStatuser {
mock := &MockContainerStatuser{ctrl: ctrl}
mock.recorder = &_MockContainerStatuserRecorder{mock}
return mock
}
func (_m *MockContainerStatuser) EXPECT() *_MockContainerStatuserRecorder {
return _m.recorder
}
func (_m *MockContainerStatuser) ContainerStatus(ctx context.Context) (*api.ContainerStatus, error) {
ret := _m.ctrl.Call(_m, "ContainerStatus", ctx)
ret0, _ := ret[0].(*api.ContainerStatus)
ret1, _ := ret[1].(error)
return ret0, ret1
}
func (_mr *_MockContainerStatuserRecorder) ContainerStatus(arg0 interface{}) *gomock.Call {
return _mr.mock.ctrl.RecordCall(_mr.mock, "ContainerStatus", arg0)
}
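
A sketch of how the regenerated mock might be exercised in a test, assuming it is generated into the same package as the exec interfaces; the test body is illustrative:

```go
package exec

import (
	"testing"

	"github.com/docker/swarmkit/api"
	"github.com/golang/mock/gomock"
	"golang.org/x/net/context"
)

func TestContainerStatuserMock(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	cs := NewMockContainerStatuser(ctrl)

	// Expect exactly one ContainerStatus call and hand back a canned
	// status, as the fatal path in Do would receive it.
	cs.EXPECT().ContainerStatus(gomock.Any()).
		Return(&api.ContainerStatus{ExitCode: 1}, nil)

	status, err := cs.ContainerStatus(context.Background())
	if err != nil {
		t.Fatal(err)
	}
	if status.ExitCode != 1 {
		t.Fatalf("unexpected exit code: %d", status.ExitCode)
	}
}
```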

View file

@ -1,6 +1,6 @@
package exec
import "errors"
import "github.com/pkg/errors"
var (
// ErrRuntimeUnsupported encountered when a task requires a runtime
@ -37,23 +37,6 @@ type ExitCoder interface {
ExitCode() int
}
type causal interface {
Cause() error
}
// Cause returns the cause of the error, recursively.
func Cause(err error) error {
for err != nil {
if causal, ok := err.(causal); ok {
err = causal.Cause()
} else {
break
}
}
return err
}
// Temporary indicates whether or not the error condition is temporary.
//
// If this is encountered in the controller, the failing operation will be
@ -65,15 +48,19 @@ type Temporary interface {
// MakeTemporary makes the error temporary.
func MakeTemporary(err error) error {
return &temporary{error: err}
if IsTemporary(err) {
return err
}
return temporary{err}
}
type temporary struct {
error
}
func (t *temporary) Cause() error { return t.error }
func (t *temporary) Temporary() bool { return true }
func (t temporary) Cause() error { return t.error }
func (t temporary) Temporary() bool { return true }
// IsTemporary returns true if the error or a recursive cause returns true for
// temporary.
@ -85,11 +72,12 @@ func IsTemporary(err error) bool {
}
}
if causal, ok := err.(causal); !ok {
cause := errors.Cause(err)
if cause == err {
break
} else {
err = causal.Cause()
}
err = cause
}
return false
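
Two properties fall out of the rewritten helpers: MakeTemporary no longer double-wraps an already-temporary error, and IsTemporary follows errors.Cause instead of a private causal interface. A small illustration, as a sketch; note that errors.Cause is fully recursive, so the loop only ever inspects the head of a chain and its root cause:

```go
package main

import (
	"fmt"

	"github.com/docker/swarmkit/agent/exec"
	"github.com/pkg/errors"
)

func main() {
	base := errors.New("connection reset")
	tmp := exec.MakeTemporary(base)

	fmt.Println(exec.IsTemporary(base))         // false: plain error
	fmt.Println(exec.IsTemporary(tmp))          // true
	fmt.Println(exec.MakeTemporary(tmp) == tmp) // true: already temporary, returned as-is
}
```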

View file

@ -78,21 +78,23 @@ type NodeConfig struct {
// cluster. Node handles workloads and may also run as a manager.
type Node struct {
sync.RWMutex
config *NodeConfig
remotes *persistentRemotes
role string
roleCond *sync.Cond
conn *grpc.ClientConn
connCond *sync.Cond
nodeID string
started chan struct{}
stopped chan struct{}
ready chan struct{}
closed chan struct{}
err error
agent *Agent
manager *manager.Manager
roleChangeReq chan api.NodeRole
config *NodeConfig
remotes *persistentRemotes
role string
roleCond *sync.Cond
conn *grpc.ClientConn
connCond *sync.Cond
nodeID string
nodeMembership api.NodeSpec_Membership
started chan struct{}
stopped chan struct{}
ready chan struct{} // closed when agent has completed registration and manager (if enabled) is ready to receive control requests
certificateRequested chan struct{} // closed when certificate issue request has been sent by node
closed chan struct{}
err error
agent *Agent
manager *manager.Manager
roleChangeReq chan api.NodeRole // used to send role updates from the dispatcher api on promotion/demotion
}
// NewNode returns new Node instance.
@ -113,14 +115,15 @@ func NewNode(c *NodeConfig) (*Node, error) {
}
n := &Node{
remotes: newPersistentRemotes(stateFile, p...),
role: ca.AgentRole,
config: c,
started: make(chan struct{}),
stopped: make(chan struct{}),
closed: make(chan struct{}),
ready: make(chan struct{}),
roleChangeReq: make(chan api.NodeRole, 1),
remotes: newPersistentRemotes(stateFile, p...),
role: ca.AgentRole,
config: c,
started: make(chan struct{}),
stopped: make(chan struct{}),
closed: make(chan struct{}),
ready: make(chan struct{}),
certificateRequested: make(chan struct{}),
roleChangeReq: make(chan api.NodeRole, 1),
}
n.roleCond = sync.NewCond(n.RLocker())
n.connCond = sync.NewCond(n.RLocker())
@ -171,14 +174,17 @@ func (n *Node) run(ctx context.Context) (err error) {
}
}()
if (n.config.JoinAddr == "" && n.nodeID == "") || n.config.ForceNewCluster {
if n.config.JoinAddr == "" && n.nodeID == "" {
if err := n.bootstrapCA(); err != nil {
return err
}
}
if n.config.JoinAddr != "" || n.config.ForceNewCluster {
n.remotes = newPersistentRemotes(filepath.Join(n.config.StateDir, stateFilename), api.Peer{Addr: n.config.JoinAddr})
n.remotes = newPersistentRemotes(filepath.Join(n.config.StateDir, stateFilename))
if n.config.JoinAddr != "" {
n.remotes.Observe(api.Peer{Addr: n.config.JoinAddr}, 1)
}
}
csrRole := n.role
@ -193,23 +199,22 @@ func (n *Node) run(ctx context.Context) (err error) {
// - We wait for LoadOrCreateSecurityConfig to finish since we need a certificate to operate.
// - Given a valid certificate, spin a renewal go-routine that will ensure that certificates stay
// up to date.
nodeIDChan := make(chan string, 1)
caLoadDone := make(chan struct{})
issueResponseChan := make(chan api.IssueNodeCertificateResponse, 1)
go func() {
select {
case <-ctx.Done():
case <-caLoadDone:
case nodeID := <-nodeIDChan:
logrus.Debugf("Requesting certificate for NodeID: %v", nodeID)
case resp := <-issueResponseChan:
logrus.Debugf("Requesting certificate for NodeID: %v", resp.NodeID)
n.Lock()
n.nodeID = nodeID
n.nodeID = resp.NodeID
n.nodeMembership = resp.NodeMembership
n.Unlock()
close(n.certificateRequested)
}
}()
certDir := filepath.Join(n.config.StateDir, "certificates")
securityConfig, err := ca.LoadOrCreateSecurityConfig(ctx, certDir, n.config.CAHash, n.config.Secret, csrRole, picker.NewPicker(n.remotes), nodeIDChan)
close(caLoadDone)
securityConfig, err := ca.LoadOrCreateSecurityConfig(ctx, certDir, n.config.CAHash, n.config.Secret, csrRole, picker.NewPicker(n.remotes), issueResponseChan)
if err != nil {
return err
}
@ -223,6 +228,7 @@ func (n *Node) run(ctx context.Context) (err error) {
if err != nil {
return err
}
defer db.Close()
if err := n.loadCertificates(); err != nil {
return err
@ -402,10 +408,17 @@ func (n *Node) runAgent(ctx context.Context, db *bolt.DB, creds credentials.Tran
// Ready returns a channel that is closed after the node's initialization
// has completed for the first time.
func (n *Node) Ready(ctx context.Context) <-chan struct{} {
func (n *Node) Ready() <-chan struct{} {
return n.ready
}
// CertificateRequested returns a channel that is closed after the node has
// requested a certificate. Once this channel is closed, callers can expect
// calls to NodeID() and NodeMembership() to succeed.
func (n *Node) CertificateRequested() <-chan struct{} {
return n.certificateRequested
}
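
A consumer-side sketch of the two readiness signals, assuming the Node type lives in swarmkit's agent package as in this diff:

```go
package nodewait

import (
	"fmt"

	"github.com/docker/swarmkit/agent"
	"golang.org/x/net/context"
)

// waitForNode blocks until the node has sent its certificate signing
// request; after CertificateRequested closes, NodeID() and
// NodeMembership() return meaningful values even while the node is
// still pending acceptance. Ready() closes later, once registration
// (and the manager, if enabled) has come up.
func waitForNode(ctx context.Context, n *agent.Node) error {
	select {
	case <-n.CertificateRequested():
		fmt.Println("node:", n.NodeID(), "membership:", n.NodeMembership())
	case <-ctx.Done():
		return ctx.Err()
	}

	select {
	case <-n.Ready():
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}
```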
func (n *Node) waitRole(ctx context.Context, role string) <-chan struct{} {
c := make(chan struct{})
n.roleCond.L.Lock()
@ -482,6 +495,13 @@ func (n *Node) NodeID() string {
return n.nodeID
}
// NodeMembership returns current node's membership. May be empty if not set.
func (n *Node) NodeMembership() api.NodeSpec_Membership {
n.RLock()
defer n.RUnlock()
return n.nodeMembership
}
// Manager return manager instance started by node. May be nil.
func (n *Node) Manager() *manager.Manager {
n.RLock()
@ -528,6 +548,7 @@ func (n *Node) loadCertificates() error {
n.Lock()
n.role = clientTLSCreds.Role()
n.nodeID = clientTLSCreds.NodeID()
n.nodeMembership = api.NodeMembershipAccepted
n.roleCond.Broadcast()
n.Unlock()
@ -623,11 +644,11 @@ func (n *Node) runManager(ctx context.Context, securityConfig *ca.SecurityConfig
select {
case <-ctx.Done():
m.Stop(context.Background()) // todo: this should be sync like other components
case <-n.waitRole(ctx, ca.AgentRole):
<-done
// in case of demotion manager will stop itself
case <-done:
}
<-done
ready = nil // ready event happens once, even on multiple starts
n.Lock()
n.manager = nil

View file

@ -63,7 +63,8 @@ func (*IssueNodeCertificateRequest) ProtoMessage() {}
func (*IssueNodeCertificateRequest) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{2} }
type IssueNodeCertificateResponse struct {
NodeID string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
NodeID string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
NodeMembership NodeSpec_Membership `protobuf:"varint,2,opt,name=node_membership,json=nodeMembership,proto3,enum=docker.swarmkit.v1.NodeSpec_Membership" json:"node_membership,omitempty"`
}
func (m *IssueNodeCertificateResponse) Reset() { *m = IssueNodeCertificateResponse{} }
@ -178,7 +179,8 @@ func (m *IssueNodeCertificateResponse) Copy() *IssueNodeCertificateResponse {
}
o := &IssueNodeCertificateResponse{
NodeID: m.NodeID,
NodeID: m.NodeID,
NodeMembership: m.NodeMembership,
}
return o
@ -247,9 +249,10 @@ func (this *IssueNodeCertificateResponse) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 5)
s := make([]string, 0, 6)
s = append(s, "&api.IssueNodeCertificateResponse{")
s = append(s, "NodeID: "+fmt.Sprintf("%#v", this.NodeID)+",\n")
s = append(s, "NodeMembership: "+fmt.Sprintf("%#v", this.NodeMembership)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
@ -583,6 +586,11 @@ func (m *IssueNodeCertificateResponse) MarshalTo(data []byte) (int, error) {
i = encodeVarintCa(data, i, uint64(len(m.NodeID)))
i += copy(data[i:], m.NodeID)
}
if m.NodeMembership != 0 {
data[i] = 0x10
i++
i = encodeVarintCa(data, i, uint64(m.NodeMembership))
}
return i, nil
}
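
The hand-written marshalling emits the new field with tag byte 0x10 because a protobuf key is (field_number << 3) | wire_type, here (2 << 3) | 0 for a varint in field 2. A standalone sketch of that encoding; the enum value is illustrative:

```go
package main

import "fmt"

// encodeVarint appends x in protobuf base-128 varint form, mirroring
// what encodeVarintCa does in the generated file.
func encodeVarint(buf []byte, x uint64) []byte {
	for x >= 0x80 {
		buf = append(buf, byte(x)|0x80)
		x >>= 7
	}
	return append(buf, byte(x))
}

func main() {
	const fieldNumber = 2 // node_membership
	const wireType = 0    // varint
	key := byte(fieldNumber<<3 | wireType)

	membership := uint64(1) // illustrative enum value
	buf := encodeVarint([]byte{key}, membership)

	fmt.Printf("key byte: %#x, encoded field: %x\n", key, buf) // key byte: 0x10
}
```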
@ -842,6 +850,9 @@ func (m *IssueNodeCertificateResponse) Size() (n int) {
if l > 0 {
n += 1 + l + sovCa(uint64(l))
}
if m.NodeMembership != 0 {
n += 1 + sovCa(uint64(m.NodeMembership))
}
return n
}
@ -913,6 +924,7 @@ func (this *IssueNodeCertificateResponse) String() string {
}
s := strings.Join([]string{`&IssueNodeCertificateResponse{`,
`NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`,
`NodeMembership:` + fmt.Sprintf("%v", this.NodeMembership) + `,`,
`}`,
}, "")
return s
@ -1326,6 +1338,25 @@ func (m *IssueNodeCertificateResponse) Unmarshal(data []byte) error {
}
m.NodeID = string(data[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field NodeMembership", wireType)
}
m.NodeMembership = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCa
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.NodeMembership |= (NodeSpec_Membership(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipCa(data[iNdEx:])
@ -1584,33 +1615,36 @@ var (
)
var fileDescriptorCa = []byte{
// 442 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x48, 0x4e, 0xd4, 0x2b,
0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x4a, 0xc9, 0x4f, 0xce, 0x4e, 0x2d, 0xd2, 0x2b, 0x2e, 0x4f,
0x2c, 0xca, 0xcd, 0xce, 0x2c, 0xd1, 0x2b, 0x33, 0x94, 0xe2, 0x2e, 0xa9, 0x2c, 0x48, 0x2d, 0x86,
0x28, 0x90, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x33, 0xf5, 0x41, 0x2c, 0xa8, 0xa8, 0x70, 0x41,
0x4e, 0x69, 0x7a, 0x66, 0x9e, 0x3e, 0x84, 0x82, 0x08, 0x2a, 0x39, 0x73, 0xc9, 0xf8, 0xe5, 0xa7,
0xa4, 0x3a, 0xa7, 0x16, 0x95, 0x64, 0xa6, 0x65, 0x26, 0x27, 0x96, 0xa4, 0x06, 0x97, 0x24, 0x96,
0x94, 0x16, 0x07, 0xa5, 0x16, 0x96, 0xa6, 0x16, 0x97, 0x08, 0x29, 0x73, 0xb1, 0xe7, 0x01, 0xe5,
0xe3, 0x33, 0x53, 0x24, 0x18, 0x15, 0x18, 0x35, 0x38, 0x9d, 0xb8, 0x1e, 0xdd, 0x93, 0x67, 0x03,
0x69, 0xf1, 0x74, 0x09, 0x62, 0x03, 0x49, 0x79, 0xa6, 0x28, 0xcd, 0x63, 0xe4, 0x92, 0xc5, 0x61,
0x4a, 0x71, 0x41, 0x7e, 0x5e, 0x71, 0xaa, 0x90, 0x15, 0x17, 0x5b, 0x31, 0x58, 0x04, 0x6c, 0x0a,
0xb7, 0x91, 0x92, 0x1e, 0xa6, 0x1f, 0xf4, 0x3c, 0x8b, 0x8b, 0x4b, 0x13, 0xf3, 0x92, 0x61, 0x7a,
0xa1, 0x3a, 0x84, 0x1c, 0xb9, 0xb8, 0x93, 0x11, 0x06, 0x4b, 0x30, 0x81, 0x0d, 0x90, 0xc7, 0x66,
0x00, 0x92, 0xfd, 0x41, 0xc8, 0x7a, 0x94, 0x9a, 0x18, 0xb9, 0xa4, 0x41, 0xa6, 0xa7, 0xa2, 0xb9,
0x12, 0xe6, 0x4b, 0x03, 0x2e, 0x96, 0xa2, 0xfc, 0x9c, 0x54, 0xb0, 0xe3, 0xf8, 0x8c, 0x64, 0xb0,
0x99, 0x0d, 0xd2, 0x19, 0x04, 0x54, 0x13, 0x04, 0x56, 0x29, 0x24, 0xc9, 0xc5, 0x9c, 0x5c, 0x5c,
0x04, 0x76, 0x0c, 0x8f, 0x13, 0x3b, 0x30, 0x4c, 0x98, 0x9d, 0x83, 0x83, 0x82, 0x40, 0x62, 0x42,
0x62, 0x40, 0xbf, 0xa6, 0x26, 0x17, 0xa5, 0x96, 0x48, 0x30, 0x83, 0x42, 0x2c, 0x08, 0xca, 0x03,
0x05, 0x35, 0x76, 0x37, 0x40, 0xc3, 0x88, 0xa8, 0xa0, 0x96, 0xe5, 0x92, 0x76, 0x4f, 0x2d, 0x09,
0xca, 0xcf, 0x2f, 0x71, 0x76, 0xc4, 0xf4, 0x88, 0x92, 0x03, 0x97, 0x0c, 0x76, 0x69, 0xa8, 0x1d,
0x0a, 0xa8, 0x61, 0x09, 0xb2, 0x87, 0x07, 0x25, 0xa8, 0x8c, 0xba, 0x18, 0xb9, 0x98, 0x9c, 0x1d,
0x85, 0x9a, 0x19, 0xb9, 0x44, 0xb0, 0x99, 0x24, 0xa4, 0x8f, 0x2d, 0x70, 0xf0, 0x38, 0x49, 0xca,
0x80, 0x78, 0x0d, 0x10, 0x47, 0x2a, 0x71, 0x9c, 0x5a, 0xf7, 0x6e, 0x06, 0x13, 0x93, 0x00, 0xa3,
0xd1, 0x74, 0x26, 0x2e, 0x70, 0x00, 0x40, 0x1d, 0x84, 0x2d, 0xf8, 0xb0, 0x3b, 0x08, 0x4f, 0x64,
0x63, 0x77, 0x10, 0xbe, 0x98, 0x41, 0x38, 0x48, 0xa8, 0x8d, 0x91, 0x4b, 0x14, 0x6b, 0x4a, 0x17,
0x32, 0xc0, 0x95, 0x68, 0x70, 0x65, 0x2d, 0x29, 0x43, 0x12, 0x74, 0xa0, 0x3b, 0xc4, 0x49, 0xe6,
0xc4, 0x43, 0x39, 0x86, 0x1b, 0x40, 0xfc, 0xe1, 0xa1, 0x1c, 0x63, 0xc3, 0x23, 0x39, 0xc6, 0x13,
0x40, 0x7c, 0x01, 0x88, 0x1f, 0x00, 0x71, 0x12, 0x1b, 0x38, 0x73, 0x1b, 0x03, 0x02, 0x00, 0x00,
0xff, 0xff, 0x42, 0x13, 0xc9, 0x2a, 0x34, 0x04, 0x00, 0x00,
// 487 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x94, 0xcf, 0x6e, 0xd3, 0x40,
0x10, 0xc6, 0x59, 0x07, 0xa5, 0x65, 0x52, 0x05, 0xb4, 0x14, 0x14, 0x52, 0x37, 0xad, 0xcc, 0x01,
0x4e, 0x4e, 0x6a, 0x6e, 0x9c, 0x48, 0x8c, 0x84, 0x72, 0x00, 0xa1, 0xcd, 0x03, 0x20, 0xd7, 0x19,
0x82, 0xd5, 0x26, 0x6b, 0x76, 0x37, 0x20, 0x6e, 0x08, 0x24, 0x0e, 0xdc, 0x11, 0x9c, 0x78, 0x04,
0x9e, 0xa3, 0xe2, 0xc4, 0x91, 0x13, 0xa2, 0x7d, 0x00, 0xc4, 0x23, 0xb0, 0xbb, 0x71, 0x48, 0xff,
0xac, 0xa3, 0xf6, 0x30, 0x8a, 0x77, 0x76, 0xbe, 0x2f, 0xbf, 0x9d, 0xf1, 0x1a, 0x56, 0xd3, 0x24,
0xcc, 0x05, 0x57, 0x9c, 0xd2, 0x21, 0x4f, 0xf7, 0x50, 0x84, 0xf2, 0x75, 0x22, 0xc6, 0x7b, 0x99,
0x0a, 0x5f, 0xed, 0x34, 0x6b, 0xea, 0x4d, 0x8e, 0x72, 0x56, 0xd0, 0xac, 0xc9, 0x1c, 0xd3, 0xf9,
0x62, 0x7d, 0xc4, 0x47, 0xdc, 0x3e, 0xb6, 0xcd, 0x53, 0x91, 0xbd, 0x9e, 0xef, 0x4f, 0x47, 0xd9,
0xa4, 0x3d, 0xfb, 0x99, 0x25, 0x83, 0x18, 0xfc, 0x27, 0x7c, 0x88, 0x31, 0x0a, 0x95, 0x3d, 0xcf,
0xd2, 0x44, 0xe1, 0x40, 0x25, 0x6a, 0x2a, 0x19, 0xbe, 0x9c, 0xa2, 0x54, 0xf4, 0x36, 0xac, 0x4c,
0xf4, 0xfe, 0xb3, 0x6c, 0xd8, 0x20, 0xdb, 0xe4, 0xee, 0x95, 0x1e, 0x1c, 0xfd, 0xda, 0xaa, 0x1a,
0x49, 0xff, 0x21, 0xab, 0x9a, 0xad, 0xfe, 0x30, 0xf8, 0x4a, 0x60, 0xb3, 0xc4, 0x45, 0xe6, 0x7c,
0x22, 0x91, 0xde, 0x87, 0xaa, 0xb4, 0x19, 0xeb, 0x52, 0x8b, 0x82, 0xf0, 0xec, 0x81, 0xc2, 0xbe,
0x94, 0xd3, 0x64, 0x92, 0xce, 0xb5, 0x85, 0x82, 0x76, 0xa1, 0x96, 0x2e, 0x8c, 0x1b, 0x9e, 0x35,
0xd8, 0x72, 0x19, 0x1c, 0xfb, 0x7f, 0x76, 0x5c, 0x13, 0xbc, 0x23, 0xb0, 0x61, 0xdc, 0xf1, 0x14,
0xe5, 0xfc, 0x94, 0x1d, 0xb8, 0x2c, 0xf8, 0x3e, 0x5a, 0xb8, 0x7a, 0xe4, 0xbb, 0xbc, 0x8d, 0x92,
0xe9, 0x1a, 0x66, 0x2b, 0xe9, 0x2d, 0xa8, 0xa4, 0x52, 0x58, 0x98, 0xb5, 0xde, 0x8a, 0xee, 0x49,
0x25, 0x1e, 0x30, 0x66, 0x72, 0xf4, 0xa6, 0x3e, 0x2b, 0xa6, 0x02, 0x55, 0xa3, 0x62, 0x3a, 0xc6,
0x8a, 0x55, 0xf0, 0x89, 0x80, 0xef, 0x86, 0x28, 0x9a, 0x74, 0x9e, 0x5e, 0xd3, 0xa7, 0x70, 0xd5,
0x16, 0x8d, 0x71, 0xbc, 0x8b, 0x42, 0xbe, 0xc8, 0x72, 0x0b, 0x51, 0x8f, 0xee, 0x94, 0x51, 0x0f,
0xf4, 0x9b, 0x11, 0x3e, 0xfe, 0x5f, 0xce, 0xea, 0x46, 0xbf, 0x58, 0x07, 0x9b, 0xb0, 0xf1, 0x08,
0x15, 0xe3, 0x5c, 0xc5, 0xdd, 0xb3, 0xbd, 0x09, 0x1e, 0x80, 0xef, 0xde, 0x2e, 0xa8, 0xb7, 0x4f,
0x8e, 0xc7, 0x90, 0xaf, 0x9d, 0xe8, 0x7e, 0xf4, 0x91, 0x80, 0x17, 0x77, 0xe9, 0x7b, 0x02, 0xeb,
0x2e, 0x27, 0xda, 0x76, 0x91, 0x2f, 0x41, 0x6a, 0x76, 0xce, 0x2f, 0x98, 0x41, 0x06, 0xab, 0xdf,
0xbf, 0xfd, 0xf9, 0xe2, 0x79, 0xd7, 0x48, 0xf4, 0xd9, 0x03, 0xdb, 0xd2, 0x02, 0xc8, 0x35, 0x10,
0x37, 0xd0, 0x92, 0xf7, 0xc7, 0x0d, 0xb4, 0x6c, 0xd6, 0x0b, 0x20, 0xfa, 0x81, 0xc0, 0x0d, 0xe7,
0xe5, 0xa1, 0x9d, 0xb2, 0x89, 0x96, 0xdd, 0xd6, 0xe6, 0xce, 0x05, 0x14, 0xa7, 0x41, 0x7a, 0xfe,
0xc1, 0x61, 0xeb, 0xd2, 0x4f, 0x1d, 0x7f, 0x0f, 0x5b, 0xe4, 0xed, 0x51, 0x8b, 0x1c, 0xe8, 0xf8,
0xa1, 0xe3, 0xb7, 0x8e, 0xdd, 0xaa, 0xfd, 0x5e, 0xdc, 0xfb, 0x17, 0x00, 0x00, 0xff, 0xff, 0x72,
0xd0, 0xad, 0xdf, 0x94, 0x04, 0x00, 0x00,
}

View file

@ -3,6 +3,7 @@ syntax = "proto3";
package docker.swarmkit.v1;
import "types.proto";
import "specs.proto";
import "gogoproto/gogo.proto";
import "plugin/plugin.proto";
@ -42,6 +43,7 @@ message IssueNodeCertificateRequest {
message IssueNodeCertificateResponse {
string node_id = 1 [(gogoproto.customname) = "NodeID"];
NodeSpec.Membership node_membership = 2;
}
message GetRootCACertificateRequest {}

View file

@ -122,13 +122,12 @@ func (rca *RootCA) IssueAndSaveNewCertificates(paths CertPaths, cn, ou, org stri
return nil, err
}
var signedCert []byte
if !rca.CanSign() {
return nil, ErrNoValidSigner
}
// Obtain a signed Certificate
signedCert, err = rca.ParseValidateAndSignCSR(csr, cn, ou, org)
certChain, err := rca.ParseValidateAndSignCSR(csr, cn, ou, org)
if err != nil {
log.Debugf("failed to sign node certificate: %v", err)
return nil, err
@ -141,12 +140,12 @@ func (rca *RootCA) IssueAndSaveNewCertificates(paths CertPaths, cn, ou, org stri
}
// Write the chain to disk
if err := ioutils.AtomicWriteFile(paths.Cert, signedCert, 0644); err != nil {
if err := ioutils.AtomicWriteFile(paths.Cert, certChain, 0644); err != nil {
return nil, err
}
// Create a valid TLSKeyPair out of the PEM encoded private key and certificate
tlsKeyPair, err := tls.X509KeyPair(signedCert, key)
tlsKeyPair, err := tls.X509KeyPair(certChain, key)
if err != nil {
return nil, err
}
@ -157,7 +156,7 @@ func (rca *RootCA) IssueAndSaveNewCertificates(paths CertPaths, cn, ou, org stri
// RequestAndSaveNewCertificates gets new certificates issued, either by signing them locally if a signer is
// available, or by requesting them from the remote server at remoteAddr.
func (rca *RootCA) RequestAndSaveNewCertificates(ctx context.Context, paths CertPaths, role, secret string, picker *picker.Picker, transport credentials.TransportAuthenticator, nodeInfo chan<- string) (*tls.Certificate, error) {
func (rca *RootCA) RequestAndSaveNewCertificates(ctx context.Context, paths CertPaths, role, secret string, picker *picker.Picker, transport credentials.TransportAuthenticator, nodeInfo chan<- api.IssueNodeCertificateResponse) (*tls.Certificate, error) {
// Create a new key/pair and CSR for the new manager
// Write the new CSR and the new key to a temporary location so we can survive crashes on rotation
tempPaths := genTempPaths(paths)
@ -245,31 +244,56 @@ func (rca *RootCA) ParseValidateAndSignCSR(csrBytes []byte, cn, ou, org string)
return nil, err
}
return cert, nil
// Append the first root CA Cert to the certificate, to create a valid chain
// Get the first Root CA Cert on the bundle
firstRootCA, _, err := helpers.ParseOneCertificateFromPEM(rca.Cert)
if err != nil {
return nil, err
}
if len(firstRootCA) < 1 {
return nil, fmt.Errorf("no valid Root CA certificates found")
}
// Convert the first root CA back to PEM
firstRootCAPEM := helpers.EncodeCertificatePEM(firstRootCA[0])
if firstRootCAPEM == nil {
return nil, fmt.Errorf("error while encoding the Root CA certificate")
}
// Append this Root CA to the certificate to make [Cert PEM]\n[Root PEM][EOF]
certChain := append(cert, firstRootCAPEM...)
return certChain, nil
}
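
Since ParseValidateAndSignCSR now returns [Cert PEM]\n[Root PEM], a recipient can split and verify the bundle with the standard library alone. A rough sketch, not taken from the commit:

```go
package cautil

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
)

// verifyChain checks that the first certificate in a PEM bundle is
// signed by a root found in the remainder of the bundle, the
// [Cert PEM]\n[Root PEM] shape ParseValidateAndSignCSR now produces.
func verifyChain(chainPEM []byte) error {
	leafBlock, rest := pem.Decode(chainPEM)
	if leafBlock == nil {
		return fmt.Errorf("no leaf certificate in bundle")
	}
	leaf, err := x509.ParseCertificate(leafBlock.Bytes)
	if err != nil {
		return err
	}

	roots := x509.NewCertPool()
	if !roots.AppendCertsFromPEM(rest) {
		return fmt.Errorf("no root certificate in bundle")
	}

	_, err = leaf.Verify(x509.VerifyOptions{Roots: roots})
	return err
}
```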
// NewRootCA creates a new RootCA object from unparsed cert and key byte
// NewRootCA creates a new RootCA object from unparsed PEM cert bundle and key byte
// slices. key may be nil, and in this case NewRootCA will return a RootCA
// without a signer.
func NewRootCA(cert, key []byte, certExpiry time.Duration) (RootCA, error) {
// Check to see if the Certificate file is a valid, self-signed Cert
parsedCA, err := helpers.ParseSelfSignedCertificatePEM(cert)
func NewRootCA(certBytes, keyBytes []byte, certExpiry time.Duration) (RootCA, error) {
// Parse all the certificates in the cert bundle
parsedCerts, err := helpers.ParseCertificatesPEM(certBytes)
if err != nil {
return RootCA{}, err
}
// Calculate the digest for our RootCACertificate
digest := digest.FromBytes(cert)
// Create a Pool with our RootCACertificate
pool := x509.NewCertPool()
if !pool.AppendCertsFromPEM(cert) {
return RootCA{}, fmt.Errorf("error while adding root CA cert to Cert Pool")
// Check to see if we have at least one valid cert
if len(parsedCerts) < 1 {
return RootCA{}, fmt.Errorf("no valid Root CA certificates found")
}
if len(key) == 0 {
// Create a Pool with all of the certificates found
pool := x509.NewCertPool()
for _, cert := range parsedCerts {
// Check to see if all of the certificates are valid, self-signed root CA certs
if err := cert.CheckSignature(cert.SignatureAlgorithm, cert.RawTBSCertificate, cert.Signature); err != nil {
return RootCA{}, fmt.Errorf("error while validating Root CA Certificate: %v", err)
}
pool.AddCert(cert)
}
// Calculate the digest for our Root CA bundle
digest := digest.FromBytes(certBytes)
if len(keyBytes) == 0 {
// This RootCA does not have a valid signer.
return RootCA{Cert: cert, Digest: digest, Pool: pool}, nil
return RootCA{Cert: certBytes, Digest: digest, Pool: pool}, nil
}
var (
@ -288,39 +312,40 @@ func NewRootCA(cert, key []byte, certExpiry time.Duration) (RootCA, error) {
}
// Attempt to decrypt the current private-key with the passphrases provided
priv, err = helpers.ParsePrivateKeyPEMWithPassword(key, passphrase)
priv, err = helpers.ParsePrivateKeyPEMWithPassword(keyBytes, passphrase)
if err != nil {
priv, err = helpers.ParsePrivateKeyPEMWithPassword(key, passphrasePrev)
priv, err = helpers.ParsePrivateKeyPEMWithPassword(keyBytes, passphrasePrev)
if err != nil {
log.Debug("Malformed private key %v", err)
return RootCA{}, err
}
}
if err := ensureCertKeyMatch(parsedCA, priv.Public()); err != nil {
// We will always use the first certificate inside of the root bundle as the active one
if err := ensureCertKeyMatch(parsedCerts[0], priv.Public()); err != nil {
return RootCA{}, err
}
signer, err := local.NewSigner(priv, parsedCA, cfsigner.DefaultSigAlgo(priv), SigningPolicy(certExpiry))
signer, err := local.NewSigner(priv, parsedCerts[0], cfsigner.DefaultSigAlgo(priv), SigningPolicy(certExpiry))
if err != nil {
return RootCA{}, err
}
// If the key was loaded from disk unencrypted, but there is a passphrase set,
// ensure it is encrypted, so it doesn't hit raft in plain-text
keyBlock, _ := pem.Decode(key)
keyBlock, _ := pem.Decode(keyBytes)
if keyBlock == nil {
// This RootCA does not have a valid signer.
return RootCA{Cert: cert, Digest: digest, Pool: pool}, nil
return RootCA{Cert: certBytes, Digest: digest, Pool: pool}, nil
}
if passphraseStr != "" && !x509.IsEncryptedPEMBlock(keyBlock) {
key, err = EncryptECPrivateKey(key, passphraseStr)
keyBytes, err = EncryptECPrivateKey(keyBytes, passphraseStr)
if err != nil {
return RootCA{}, err
}
}
return RootCA{Signer: signer, Key: key, Digest: digest, Cert: cert, Pool: pool}, nil
return RootCA{Signer: signer, Key: keyBytes, Digest: digest, Cert: certBytes, Pool: pool}, nil
}
func ensureCertKeyMatch(cert *x509.Certificate, key crypto.PublicKey) error {
@ -494,15 +519,12 @@ func GenerateAndSignNewTLSCert(rootCA RootCA, cn, ou, org string, paths CertPath
}
// Obtain a signed Certificate
cert, err := rootCA.ParseValidateAndSignCSR(csr, cn, ou, org)
certChain, err := rootCA.ParseValidateAndSignCSR(csr, cn, ou, org)
if err != nil {
log.Debugf("failed to sign node certificate: %v", err)
return nil, err
}
// Append the root CA Key to the certificate, to create a valid chain
certChain := append(cert, rootCA.Cert...)
// Ensure directory exists
err = os.MkdirAll(filepath.Dir(paths.Cert), 0755)
if err != nil {
@ -550,7 +572,7 @@ func GenerateAndWriteNewKey(paths CertPaths) (csr, key []byte, err error) {
// GetRemoteSignedCertificate submits a CSR together with the intended role to a remote CA server address
// available through a picker, and that is part of a CA identified by a specific certificate pool.
func GetRemoteSignedCertificate(ctx context.Context, csr []byte, role, secret string, rootCAPool *x509.CertPool, picker *picker.Picker, creds credentials.TransportAuthenticator, nodeInfo chan<- string) ([]byte, error) {
func GetRemoteSignedCertificate(ctx context.Context, csr []byte, role, secret string, rootCAPool *x509.CertPool, picker *picker.Picker, creds credentials.TransportAuthenticator, nodeInfo chan<- api.IssueNodeCertificateResponse) ([]byte, error) {
if rootCAPool == nil {
return nil, fmt.Errorf("valid root CA pool required")
}
@ -596,13 +618,12 @@ func GetRemoteSignedCertificate(ctx context.Context, csr []byte, role, secret st
return nil, err
}
nodeID := issueResponse.NodeID
// Send back the NodeID on the nodeInfo, so the caller can know what ID was assigned by the CA
if nodeInfo != nil {
nodeInfo <- nodeID
nodeInfo <- *issueResponse
}
statusRequest := &api.NodeCertificateStatusRequest{NodeID: nodeID}
statusRequest := &api.NodeCertificateStatusRequest{NodeID: issueResponse.NodeID}
expBackoff := events.NewExponentialBackoff(events.ExponentialBackoffConfig{
Base: time.Second,
Factor: time.Second,

View file

@ -139,7 +139,7 @@ func NewConfigPaths(baseCertDir string) *SecurityConfigPaths {
// LoadOrCreateSecurityConfig encapsulates the security logic behind joining a cluster.
// Every node requires at least a set of TLS certificates with which to join the cluster with.
// In the case of a manager, these certificates will be used both for client and server credentials.
func LoadOrCreateSecurityConfig(ctx context.Context, baseCertDir, caHash, secret, proposedRole string, picker *picker.Picker, nodeInfo chan<- string) (*SecurityConfig, error) {
func LoadOrCreateSecurityConfig(ctx context.Context, baseCertDir, caHash, secret, proposedRole string, picker *picker.Picker, nodeInfo chan<- api.IssueNodeCertificateResponse) (*SecurityConfig, error) {
paths := NewConfigPaths(baseCertDir)
var (
@ -198,7 +198,10 @@ func LoadOrCreateSecurityConfig(ctx context.Context, baseCertDir, caHash, secret
org := identity.NewID()
if nodeInfo != nil {
nodeInfo <- cn
nodeInfo <- api.IssueNodeCertificateResponse{
NodeID: cn,
NodeMembership: api.NodeMembershipAccepted,
}
}
tlsKeyPair, err = rootCA.IssueAndSaveNewCertificates(paths.Node, cn, proposedRole, org)
} else {
@ -225,7 +228,10 @@ func LoadOrCreateSecurityConfig(ctx context.Context, baseCertDir, caHash, secret
log.Debugf("new TLS credentials generated: %s.", paths.Node.Cert)
} else {
if nodeInfo != nil {
nodeInfo <- clientTLSCreds.NodeID()
nodeInfo <- api.IssueNodeCertificateResponse{
NodeID: clientTLSCreds.NodeID(),
NodeMembership: api.NodeMembershipAccepted,
}
}
log.Debugf("loaded local TLS credentials: %s.", paths.Node.Cert)
}
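
The nodeInfo channel now carries the full IssueNodeCertificateResponse rather than a bare node ID string, so the consumer receives the assigned ID and the membership decision in one message. The handoff in miniature; the node ID is illustrative:

```go
package main

import (
	"fmt"

	"github.com/docker/swarmkit/api"
)

func main() {
	// Buffered with capacity 1, mirroring node.run: the certificate
	// path can report the issued identity without blocking.
	ch := make(chan api.IssueNodeCertificateResponse, 1)

	ch <- api.IssueNodeCertificateResponse{
		NodeID:         "24ifsmvkjbyhk", // illustrative ID
		NodeMembership: api.NodeMembershipAccepted,
	}

	resp := <-ch
	fmt.Println("node:", resp.NodeID, "membership:", resp.NodeMembership)
}
```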

View file

@ -33,7 +33,7 @@ type Server struct {
// Started is a channel which gets closed once the server is running
// and able to service RPCs.
Started chan struct{}
started chan struct{}
}
// DefaultAcceptancePolicy returns the default acceptance policy.
@ -64,7 +64,7 @@ func NewServer(store *store.MemoryStore, securityConfig *SecurityConfig) *Server
return &Server{
store: store,
securityConfig: securityConfig,
Started: make(chan struct{}),
started: make(chan struct{}),
}
}
@ -249,7 +249,8 @@ func (s *Server) IssueNodeCertificate(ctx context.Context, request *api.IssueNod
}
return &api.IssueNodeCertificateResponse{
NodeID: nodeID,
NodeID: nodeID,
NodeMembership: nodeMembership,
}, nil
}
@ -289,11 +290,14 @@ func (s *Server) getRolePolicy(role api.NodeRole) *api.AcceptancePolicy_RoleAdmi
// issueRenewCertificate receives a nodeID and a CSR and modifies the node's certificate entry with the new CSR
// and changes the state to RENEW, so it can be picked up and signed by the signing reconciliation loop
func (s *Server) issueRenewCertificate(ctx context.Context, nodeID string, csr []byte) (*api.IssueNodeCertificateResponse, error) {
var cert api.Certificate
var (
cert api.Certificate
node *api.Node
)
err := s.store.Update(func(tx store.Tx) error {
// Attempt to retrieve the node with nodeID
node := store.GetNode(tx, nodeID)
node = store.GetNode(tx, nodeID)
if node == nil {
log.G(ctx).WithFields(logrus.Fields{
"node.id": nodeID,
@ -325,8 +329,10 @@ func (s *Server) issueRenewCertificate(ctx context.Context, nodeID string, csr [
"cert.role": cert.Role,
"method": "issueRenewCertificate",
}).Debugf("node certificate updated")
return &api.IssueNodeCertificateResponse{
NodeID: nodeID,
NodeID: nodeID,
NodeMembership: node.Spec.Membership,
}, nil
}
@ -358,7 +364,14 @@ func (s *Server) Run(ctx context.Context) error {
s.ctx, s.cancel = context.WithCancel(ctx)
s.mu.Unlock()
close(s.Started)
// Run() should never be called twice, but just in case, we're
// attempting to close the started channel in a safe way
select {
case <-s.started:
return fmt.Errorf("CA server cannot be started more than once")
default:
close(s.started)
}
// Retrieve the channels to keep track of changes in the cluster
// Retrieve all the currently registered nodes
@ -439,6 +452,11 @@ func (s *Server) Stop() error {
return nil
}
// Ready returns a channel that is closed when the server is ready to serve.
func (s *Server) Ready() <-chan struct{} {
return s.started
}
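
The unexported started channel now does double duty: a run-once guard inside Run and a readiness signal via Ready. The idiom extracted into a standalone sketch; like the patched server, it guards against sequential double-starts:

```go
package main

import "fmt"

type server struct {
	started chan struct{}
}

// run refuses a second start by checking whether started was already
// closed, the same select the CA server now performs.
func (s *server) run() error {
	select {
	case <-s.started:
		return fmt.Errorf("server cannot be started more than once")
	default:
		close(s.started)
	}
	// ... serve ...
	return nil
}

// ready lets callers block until run has begun.
func (s *server) ready() <-chan struct{} { return s.started }

func main() {
	s := &server{started: make(chan struct{})}
	fmt.Println(s.run()) // <nil>
	<-s.ready()          // returns immediately: channel is closed
	fmt.Println(s.run()) // server cannot be started more than once
}
```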
func (s *Server) addTask() error {
s.mu.Lock()
if !s.isRunning() {
@ -600,8 +618,7 @@ func (s *Server) signNodeCert(ctx context.Context, node *api.Node) {
// We were able to successfully sign the new CSR. Let's try to update the nodeStore
for {
err = s.store.Update(func(tx store.Tx) error {
// Remote nodes are expecting a full certificate chain, not just a signed certificate
node.Certificate.Certificate = append(cert, s.securityConfig.RootCA().Cert...)
node.Certificate.Certificate = cert
node.Certificate.Status = api.IssuanceStatus{
State: api.IssuanceStateIssued,
}

View file

@ -95,7 +95,6 @@ type Dispatcher struct {
cluster Cluster
ctx context.Context
cancel context.CancelFunc
wg sync.WaitGroup
taskUpdates map[string]*api.TaskStatus // indexed by task ID
taskUpdatesLock sync.Mutex
@ -152,8 +151,6 @@ func (d *Dispatcher) Run(ctx context.Context) error {
d.mu.Unlock()
return fmt.Errorf("dispatcher is stopped")
}
d.wg.Add(1)
defer d.wg.Done()
logger := log.G(ctx).WithField("module", "dispatcher")
ctx = log.WithLogger(ctx, logger)
if err := d.markNodesUnknown(ctx); err != nil {
@ -236,27 +233,19 @@ func (d *Dispatcher) Stop() error {
d.cancel()
d.mu.Unlock()
d.nodes.Clean()
// wait for all handlers to finish their raft deals, because manager will
// set raftNode to nil
d.wg.Wait()
return nil
}
func (d *Dispatcher) addTask() error {
func (d *Dispatcher) isRunningLocked() error {
d.mu.Lock()
if !d.isRunning() {
d.mu.Unlock()
return grpc.Errorf(codes.Aborted, "dispatcher is stopped")
}
d.wg.Add(1)
d.mu.Unlock()
return nil
}
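
With the WaitGroup gone, every RPC entry point reduces to a locked liveness check. A sketch of the pattern in isolation; isRunning is reconstructed from context, only isRunningLocked appears in this hunk:

```go
package dispatch

import (
	"context"
	"errors"
	"sync"
)

type dispatcher struct {
	mu     sync.Mutex
	ctx    context.Context // set once Run has started
	cancel context.CancelFunc
}

func (d *dispatcher) isRunning() bool {
	if d.ctx == nil {
		return false
	}
	select {
	case <-d.ctx.Done():
		return false
	default:
	}
	return true
}

// isRunningLocked is the guard every RPC entry point now calls: take
// the lock, check liveness, and fail fast instead of registering the
// call with a WaitGroup.
func (d *dispatcher) isRunningLocked() error {
	d.mu.Lock()
	defer d.mu.Unlock()
	if !d.isRunning() {
		return errors.New("dispatcher is stopped")
	}
	return nil
}
```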
func (d *Dispatcher) doneTask() {
d.wg.Done()
}
func (d *Dispatcher) markNodesUnknown(ctx context.Context) error {
log := log.G(ctx).WithField("method", "(*Dispatcher).markNodesUnknown")
var nodes []*api.Node
@ -325,10 +314,9 @@ func (d *Dispatcher) isRunning() bool {
// register is used for registration of node with particular dispatcher.
func (d *Dispatcher) register(ctx context.Context, nodeID string, description *api.NodeDescription) (string, string, error) {
// prevent register until we're ready to accept it
if err := d.addTask(); err != nil {
if err := d.isRunningLocked(); err != nil {
return "", "", err
}
defer d.doneTask()
// create or update node in store
// TODO(stevvooe): Validate node specification.
@ -390,10 +378,9 @@ func (d *Dispatcher) UpdateTaskStatus(ctx context.Context, r *api.UpdateTaskStat
}
log := log.G(ctx).WithFields(fields)
if err := d.addTask(); err != nil {
if err := d.isRunningLocked(); err != nil {
return nil, err
}
defer d.doneTask()
if _, err := d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
return nil, err
@ -505,10 +492,9 @@ func (d *Dispatcher) Tasks(r *api.TasksRequest, stream api.Dispatcher_TasksServe
}
nodeID := nodeInfo.NodeID
if err := d.addTask(); err != nil {
if err := d.isRunningLocked(); err != nil {
return err
}
defer d.doneTask()
fields := logrus.Fields{
"node.id": nodeID,
@ -585,10 +571,9 @@ func (d *Dispatcher) Tasks(r *api.TasksRequest, stream api.Dispatcher_TasksServe
}
func (d *Dispatcher) nodeRemove(id string, status api.NodeStatus) error {
if err := d.addTask(); err != nil {
if err := d.isRunningLocked(); err != nil {
return err
}
defer d.doneTask()
// TODO(aaronl): Is it worth batching node removals?
err := d.store.Update(func(tx store.Tx) error {
node := store.GetNode(tx, id)
@ -640,10 +625,9 @@ func (d *Dispatcher) Session(r *api.SessionRequest, stream api.Dispatcher_Sessio
}
nodeID := nodeInfo.NodeID
if err := d.addTask(); err != nil {
if err := d.isRunningLocked(); err != nil {
return err
}
defer d.doneTask()
// register the node.
nodeID, sessionID, err := d.register(stream.Context(), nodeID, r.Description)

View file

@ -3,6 +3,7 @@ package manager
import (
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
"net"
"os"
@ -101,6 +102,10 @@ func New(config *Config) (*Manager, error) {
tcpAddr := config.ProtoAddr["tcp"]
if tcpAddr == "" {
return nil, errors.New("no tcp listen address or listener provided")
}
listenHost, listenPort, err := net.SplitHostPort(tcpAddr)
if err == nil {
ip := net.ParseIP(listenHost)
@ -664,7 +669,7 @@ func (m *Manager) rotateRootCAKEK(ctx context.Context, clusterID string) error {
return s.Update(func(tx store.Tx) error {
cluster = store.GetCluster(tx, clusterID)
if cluster == nil {
return fmt.Errorf("cluster not found")
return fmt.Errorf("cluster not found: %s", clusterID)
}
cluster.RootCA.CAKey = finalKey
return store.UpdateCluster(tx, cluster)

View file

@ -53,9 +53,6 @@ func (f *ResourceFilter) SetTask(t *api.Task) bool {
// Check returns true if the task can be scheduled into the given node.
func (f *ResourceFilter) Check(n *NodeInfo) bool {
if n.AvailableResources == nil {
return false
}
if f.reservations.NanoCPUs > n.AvailableResources.NanoCPUs {
return false
}

View file

@ -6,10 +6,10 @@ import "github.com/docker/swarmkit/api"
type NodeInfo struct {
*api.Node
Tasks map[string]*api.Task
AvailableResources *api.Resources
AvailableResources api.Resources
}
func newNodeInfo(n *api.Node, tasks map[string]*api.Task, availableResources *api.Resources) NodeInfo {
func newNodeInfo(n *api.Node, tasks map[string]*api.Task, availableResources api.Resources) NodeInfo {
nodeInfo := NodeInfo{
Node: n,
Tasks: make(map[string]*api.Task),
@ -31,11 +31,9 @@ func (nodeInfo *NodeInfo) removeTask(t *api.Task) bool {
}
delete(nodeInfo.Tasks, t.ID)
if nodeInfo.AvailableResources != nil {
reservations := taskReservations(t.Spec)
nodeInfo.AvailableResources.MemoryBytes += reservations.MemoryBytes
nodeInfo.AvailableResources.NanoCPUs += reservations.NanoCPUs
}
reservations := taskReservations(t.Spec)
nodeInfo.AvailableResources.MemoryBytes += reservations.MemoryBytes
nodeInfo.AvailableResources.NanoCPUs += reservations.NanoCPUs
return true
}
@ -49,11 +47,9 @@ func (nodeInfo *NodeInfo) addTask(t *api.Task) bool {
}
if _, ok := nodeInfo.Tasks[t.ID]; !ok {
nodeInfo.Tasks[t.ID] = t
if nodeInfo.AvailableResources != nil {
reservations := taskReservations(t.Spec)
nodeInfo.AvailableResources.MemoryBytes -= reservations.MemoryBytes
nodeInfo.AvailableResources.NanoCPUs -= reservations.NanoCPUs
}
reservations := taskReservations(t.Spec)
nodeInfo.AvailableResources.MemoryBytes -= reservations.MemoryBytes
nodeInfo.AvailableResources.NanoCPUs -= reservations.NanoCPUs
return true
}

View file

@ -243,16 +243,18 @@ func (s *Scheduler) deleteTask(ctx context.Context, t *api.Task) {
func (s *Scheduler) createOrUpdateNode(n *api.Node) {
nodeInfo := s.nodeHeap.nodeInfo(n.ID)
var resources api.Resources
if n.Description != nil && n.Description.Resources != nil {
if nodeInfo.AvailableResources == nil {
// if nodeInfo.AvailableResources hasn't been initialized
// we copy resources information from node description and
// pass it to nodeInfo
resources := *n.Description.Resources
nodeInfo.AvailableResources = &resources
resources = *n.Description.Resources
// reconcile resources by looping over all tasks in this node
for _, task := range nodeInfo.Tasks {
reservations := taskReservations(task.Spec)
resources.MemoryBytes -= reservations.MemoryBytes
resources.NanoCPUs -= reservations.NanoCPUs
}
}
nodeInfo.Node = n
nodeInfo.AvailableResources = resources
s.nodeHeap.addOrUpdateNode(nodeInfo)
}
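
Because AvailableResources is now a value rather than a pointer, createOrUpdateNode always rebuilds it from the node description and subtracts the reservations of tasks already assigned. The arithmetic in isolation, with stand-in types:

```go
package main

import "fmt"

// resources is a stand-in for api.Resources.
type resources struct {
	NanoCPUs    int64
	MemoryBytes int64
}

// reconcile mirrors createOrUpdateNode: copy the node's declared
// resources by value, then subtract what every known task reserves.
func reconcile(declared resources, reservations []resources) resources {
	avail := declared // value copy; no shared pointer to mutate
	for _, r := range reservations {
		avail.NanoCPUs -= r.NanoCPUs
		avail.MemoryBytes -= r.MemoryBytes
	}
	return avail
}

func main() {
	node := resources{NanoCPUs: 4e9, MemoryBytes: 8 << 30} // 4 CPUs, 8 GiB
	tasks := []resources{
		{NanoCPUs: 1e9, MemoryBytes: 1 << 30},
		{NanoCPUs: 5e8, MemoryBytes: 2 << 30},
	}
	fmt.Printf("%+v\n", reconcile(node, tasks))
	// {NanoCPUs:2500000000 MemoryBytes:5368709120}
}
```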
@ -422,10 +424,9 @@ func (s *Scheduler) buildNodeHeap(tx store.ReadTx, tasksByNode map[string]map[st
i := 0
for _, n := range nodes {
var resources *api.Resources
var resources api.Resources
if n.Description != nil && n.Description.Resources != nil {
resources = &api.Resources{NanoCPUs: n.Description.Resources.NanoCPUs,
MemoryBytes: n.Description.Resources.MemoryBytes}
resources = *n.Description.Resources
}
s.nodeHeap.heap = append(s.nodeHeap.heap, newNodeInfo(n, tasksByNode[n.ID], resources))
s.nodeHeap.index[n.ID] = i

View file

@ -91,6 +91,10 @@ type Node struct {
removed uint32
joinAddr string
// waitProp waits for all the proposals to be terminated before
// shutting down the node.
waitProp sync.WaitGroup
// forceNewCluster is a special flag used to recover from disaster
// scenario by pointing to an existing or backed up data directory.
forceNewCluster bool
@ -420,6 +424,7 @@ func (n *Node) stop() {
defer n.stopMu.Unlock()
n.cancel()
n.waitProp.Wait()
n.asyncTasks.Wait()
members := n.cluster.Members()
@ -762,6 +767,17 @@ func (n *Node) mustStop() bool {
return atomic.LoadUint32(&n.removed) == 1
}
// canSubmitProposal reports whether any more proposals
// can be submitted and processed.
func (n *Node) canSubmitProposal() bool {
select {
case <-n.Ctx.Done():
return false
default:
return true
}
}
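
canSubmitProposal and waitProp together form a drain gate: Add happens before the context check, matching processInternalRaftRequest, so stop cannot cancel and return while a proposal that already passed the gate is still in flight. The pattern in miniature:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
)

var errStopped = errors.New("raft: stopped")

type node struct {
	ctx      context.Context
	cancel   context.CancelFunc
	waitProp sync.WaitGroup
}

// propose registers with the WaitGroup before checking the context,
// the same order used by processInternalRaftRequest, so stop cannot
// miss a proposal that has already passed the gate.
func (n *node) propose() error {
	n.waitProp.Add(1)
	defer n.waitProp.Done()

	select {
	case <-n.ctx.Done():
		return errStopped
	default:
	}

	// ... submit to consensus and wait for apply ...
	return nil
}

// stop cancels new work and waits for in-flight proposals to finish.
func (n *node) stop() {
	n.cancel()
	n.waitProp.Wait()
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	n := &node{ctx: ctx, cancel: cancel}

	fmt.Println(n.propose()) // <nil>
	n.stop()
	fmt.Println(n.propose()) // raft: stopped
}
```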
// Saves a log entry to our Store
func (n *Node) saveToStorage(raftConfig *api.RaftConfig, hardState raftpb.HardState, entries []raftpb.Entry, snapshot raftpb.Snapshot) (err error) {
if !raft.IsEmptySnap(snapshot) {
@ -842,7 +858,7 @@ func (n *Node) sendToMember(members map[uint64]*membership.Member, m raftpb.Mess
}
}
if queryMember == nil {
if queryMember == nil || queryMember.RaftID == n.Config.ID {
n.Config.Logger.Error("could not find cluster member to query for leader address")
return
}
@ -885,10 +901,19 @@ type applyResult struct {
err error
}
// processInternalRaftRequest sends a message through consensus
// and then waits for it to be applies to the server. It will
// block until the change is performed or there is an error
// processInternalRaftRequest sends a message to nodes participating
// in the raft to apply a log entry and then waits for it to be applied
// on the server. It will block until the update is performed, there is
// an error or until the raft node finalizes all the proposals on node
// shutdown.
func (n *Node) processInternalRaftRequest(ctx context.Context, r *api.InternalRaftRequest, cb func()) (proto.Message, error) {
n.waitProp.Add(1)
defer n.waitProp.Done()
if !n.canSubmitProposal() {
return nil, ErrStopped
}
r.ID = n.reqIDGen.Next()
ch := n.wait.register(r.ID, cb)
@ -923,7 +948,7 @@ func (n *Node) processInternalRaftRequest(ctx context.Context, r *api.InternalRa
return res.resp, res.err
}
return nil, ErrLostLeadership
case <-n.stopCh:
case <-n.Ctx.Done():
n.wait.cancel(r.ID)
return nil, ErrStopped
case <-ctx.Done():
@ -956,7 +981,7 @@ func (n *Node) configure(ctx context.Context, cc raftpb.ConfChange) error {
case <-ctx.Done():
n.wait.trigger(cc.ID, nil)
return ctx.Err()
case <-n.stopCh:
case <-n.Ctx.Done():
return ErrStopped
}
}

View file

@ -0,0 +1,24 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof

View file

@ -0,0 +1,10 @@
language: go
go_import_path: github.com/pkg/errors
go:
- 1.4.3
- 1.5.4
- 1.6.2
- tip
script:
- go test -v ./...

View file

@ -0,0 +1,24 @@
Copyright (c) 2015, Dave Cheney <dave@cheney.net>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@ -0,0 +1,50 @@
# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors)
Package errors provides simple error handling primitives.
The traditional error handling idiom in Go is roughly akin to
```go
if err != nil {
return err
}
```
which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error.
## Adding context to an error
The errors.Wrap function returns a new error that adds context to the original error. For example
```go
_, err := ioutil.ReadAll(r)
if err != nil {
return errors.Wrap(err, "read failed")
}
```
## Retrieving the cause of an error
Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`.
```go
type causer interface {
Cause() error
}
```
`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example:
```go
switch err := errors.Cause(err).(type) {
case *MyError:
// handle specifically
default:
// unknown error
}
```
[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors).
## Contributing
We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high.
Before proposing a change, please discuss your change by raising an issue.
## Licence
BSD-2-Clause
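
As vendored, the package supplies the Wrap/Cause pair that the exec error helpers above now rely on. A quick end-to-end illustration:

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func readConfig() error {
	return errors.New("open config: no such file") // records a stack here
}

func main() {
	err := errors.Wrap(readConfig(), "starting agent")

	fmt.Println(err)               // starting agent: open config: no such file
	fmt.Println(errors.Cause(err)) // open config: no such file
	fmt.Printf("%+v\n", err)       // cause plus the recorded call sites
}
```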

View file

@ -0,0 +1,32 @@
version: build-{build}.{branch}
clone_folder: C:\gopath\src\github.com\pkg\errors
shallow_clone: true # for startup speed
environment:
GOPATH: C:\gopath
platform:
- x64
# http://www.appveyor.com/docs/installed-software
install:
# some helpful output for debugging builds
- go version
- go env
# pre-installed MinGW at C:\MinGW is 32bit only
# but MSYS2 at C:\msys64 has mingw64
- set PATH=C:\msys64\mingw64\bin;%PATH%
- gcc --version
- g++ --version
build_script:
- go install -v ./...
test_script:
- set PATH=C:\gopath\bin;%PATH%
- go test -v ./...
#artifacts:
# - path: '%GOPATH%\bin\*.exe'
deploy: off

View file

@ -0,0 +1,211 @@
// Package errors provides simple error handling primitives.
//
// The traditional error handling idiom in Go is roughly akin to
//
// if err != nil {
// return err
// }
//
// which applied recursively up the call stack results in error reports
// without context or debugging information. The errors package allows
// programmers to add context to the failure path in their code in a way
// that does not destroy the original value of the error.
//
// Adding context to an error
//
// The errors.Wrap function returns a new error that adds context to the
// original error. For example
//
// _, err := ioutil.ReadAll(r)
// if err != nil {
// return errors.Wrap(err, "read failed")
// }
//
// Retrieving the cause of an error
//
// Using errors.Wrap constructs a stack of errors, adding context to the
// preceding error. Depending on the nature of the error it may be necessary
// to reverse the operation of errors.Wrap to retrieve the original error
// for inspection. Any error value which implements this interface
//
// type Causer interface {
// Cause() error
// }
//
// can be inspected by errors.Cause. errors.Cause will recursively retrieve
// the topmost error which does not implement causer, which is assumed to be
// the original cause. For example:
//
// switch err := errors.Cause(err).(type) {
// case *MyError:
// // handle specifically
// default:
// // unknown error
// }
//
// Formatted printing of errors
//
// All error values returned from this package implement fmt.Formatter and can
// be formatted by the fmt package. The following verbs are supported
//
// %s print the error. If the error has a Cause it will be
// printed recursively
// %v see %s
// %+v extended format. Each Frame of the error's StackTrace will
// be printed in detail.
//
// Retrieving the stack trace of an error or wrapper
//
// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are
// invoked. This information can be retrieved with the following interface.
//
// type StackTrace interface {
// StackTrace() errors.StackTrace
// }
//
// Where errors.StackTrace is defined as
//
// type StackTrace []Frame
//
// The Frame type represents a call site in the stacktrace. Frame supports
// the fmt.Formatter interface that can be used for printing information about
// the stacktrace of this error. For example:
//
// if err, ok := err.(StackTrace); ok {
// for _, f := range err.StackTrace() {
// fmt.Printf("%+s:%d", f)
// }
// }
//
// See the documentation for Frame.Format for more details.
package errors
import (
"fmt"
"io"
)
// _error is an error implementation returned by New and Errorf
// that implements its own fmt.Formatter.
type _error struct {
msg string
*stack
}
func (e _error) Error() string { return e.msg }
func (e _error) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
if s.Flag('+') {
io.WriteString(s, e.msg)
fmt.Fprintf(s, "%+v", e.StackTrace())
return
}
fallthrough
case 's':
io.WriteString(s, e.msg)
}
}
// New returns an error with the supplied message.
func New(message string) error {
return _error{
message,
callers(),
}
}
// Errorf formats according to a format specifier and returns the string
// as a value that satisfies error.
func Errorf(format string, args ...interface{}) error {
return _error{
fmt.Sprintf(format, args...),
callers(),
}
}
type cause struct {
cause error
msg string
}
func (c cause) Error() string { return fmt.Sprintf("%s: %v", c.msg, c.Cause()) }
func (c cause) Cause() error { return c.cause }
// wrapper is an error implementation returned by Wrap and Wrapf
// that implements its own fmt.Formatter.
type wrapper struct {
cause
*stack
}
func (w wrapper) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
if s.Flag('+') {
fmt.Fprintf(s, "%+v\n", w.Cause())
fmt.Fprintf(s, "%+v: %s", w.StackTrace()[0], w.msg)
return
}
fallthrough
case 's':
io.WriteString(s, w.Error())
}
}
// Wrap returns an error annotating err with message.
// If err is nil, Wrap returns nil.
func Wrap(err error, message string) error {
if err == nil {
return nil
}
return wrapper{
cause: cause{
cause: err,
msg: message,
},
stack: callers(),
}
}
// Wrapf returns an error annotating err with the format specifier.
// If err is nil, Wrapf returns nil.
func Wrapf(err error, format string, args ...interface{}) error {
if err == nil {
return nil
}
return wrapper{
cause: cause{
cause: err,
msg: fmt.Sprintf(format, args...),
},
stack: callers(),
}
}
// Cause returns the underlying cause of the error, if possible.
// An error value has a cause if it implements the following
// interface:
//
// type Causer interface {
// Cause() error
// }
//
// If the error does not implement Cause, the original error will
// be returned. If the error is nil, nil will be returned without further
// investigation.
func Cause(err error) error {
type causer interface {
Cause() error
}
for err != nil {
cause, ok := err.(causer)
if !ok {
break
}
err = cause.Cause()
}
return err
}

View file

@ -0,0 +1,165 @@
package errors
import (
"fmt"
"io"
"path"
"runtime"
"strings"
)
// Frame represents a program counter inside a stack frame.
type Frame uintptr
// pc returns the program counter for this frame;
// multiple frames may have the same PC value.
func (f Frame) pc() uintptr { return uintptr(f) - 1 }
// file returns the full path to the file that contains the
// function for this Frame's pc.
func (f Frame) file() string {
fn := runtime.FuncForPC(f.pc())
if fn == nil {
return "unknown"
}
file, _ := fn.FileLine(f.pc())
return file
}
// line returns the line number of source code of the
// function for this Frame's pc.
func (f Frame) line() int {
fn := runtime.FuncForPC(f.pc())
if fn == nil {
return 0
}
_, line := fn.FileLine(f.pc())
return line
}
// Format formats the frame according to the fmt.Formatter interface.
//
// %s source file
// %d source line
// %n function name
// %v equivalent to %s:%d
//
// Format accepts flags that alter the printing of some verbs, as follows:
//
// %+s path of source file relative to the compile time GOPATH
// %+v equivalent to %+s:%d
func (f Frame) Format(s fmt.State, verb rune) {
switch verb {
case 's':
switch {
case s.Flag('+'):
pc := f.pc()
fn := runtime.FuncForPC(pc)
if fn == nil {
io.WriteString(s, "unknown")
} else {
file, _ := fn.FileLine(pc)
fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file)
}
default:
io.WriteString(s, path.Base(f.file()))
}
case 'd':
fmt.Fprintf(s, "%d", f.line())
case 'n':
name := runtime.FuncForPC(f.pc()).Name()
io.WriteString(s, funcname(name))
case 'v':
f.Format(s, 's')
io.WriteString(s, ":")
f.Format(s, 'd')
}
}
// StackTrace is stack of Frames from innermost (newest) to outermost (oldest).
type StackTrace []Frame
func (st StackTrace) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
switch {
case s.Flag('+'):
for _, f := range st {
fmt.Fprintf(s, "\n%+v", f)
}
case s.Flag('#'):
fmt.Fprintf(s, "%#v", []Frame(st))
default:
fmt.Fprintf(s, "%v", []Frame(st))
}
case 's':
fmt.Fprintf(s, "%s", []Frame(st))
}
}
// stack represents a stack of program counters.
type stack []uintptr
func (s *stack) StackTrace() StackTrace {
f := make([]Frame, len(*s))
for i := 0; i < len(f); i++ {
f[i] = Frame((*s)[i])
}
return f
}
func callers() *stack {
const depth = 32
var pcs [depth]uintptr
n := runtime.Callers(3, pcs[:])
var st stack = pcs[0:n]
return &st
}
// funcname removes the path prefix component of a function's name reported by func.Name().
func funcname(name string) string {
i := strings.LastIndex(name, "/")
name = name[i+1:]
i = strings.Index(name, ".")
return name[i+1:]
}
func trimGOPATH(name, file string) string {
// Here we want to get the source file path relative to the compile time
// GOPATH. As of Go 1.6.x there is no direct way to know the compiled
// GOPATH at runtime, but we can infer the number of path segments in the
// GOPATH. We note that fn.Name() returns the function name qualified by
// the import path, which does not include the GOPATH. Thus we can trim
// segments from the beginning of the file path until the number of path
// separators remaining is one more than the number of path separators in
// the function name. For example, given:
//
// GOPATH /home/user
// file /home/user/src/pkg/sub/file.go
// fn.Name() pkg/sub.Type.Method
//
// We want to produce:
//
// pkg/sub/file.go
//
// From this we can easily see that fn.Name() has one less path separator
// than our desired output. We count separators from the end of the file
// path until it finds two more than in the function name and then move
// one character forward to preserve the initial path segment without a
// leading separator.
const sep = "/"
goal := strings.Count(name, sep) + 2
i := len(file)
for n := 0; n < goal; n++ {
i = strings.LastIndex(file[:i], sep)
if i == -1 {
// not enough separators found, set i so that the slice expression
// below leaves file unmodified
i = -len(sep)
break
}
}
// get back to 0 or trim the leading separator
file = file[i+len(sep):]
return file
}