
Merge pull request #36848 from cpuguy83/libcontainerd_client_locking

Relax some libcontainerd client locking
Sebastiaan van Stijn 7 years ago
parent
commit
69a5611dde
1 changed file with 32 additions and 12 deletions
      libcontainerd/client_daemon.go
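
The patch narrows the scope of the client-wide lock in Restore: the lock is now held only long enough to reserve the container ID in the containers map, the long-running restore work runs under a per-container mutex, and a deferred cleanup drops the reservation again if the restore fails. Below is a minimal, self-contained sketch of that pattern (hypothetical types and names, not the actual moby/libcontainerd code):

package main

import (
	"errors"
	"fmt"
	"sync"
)

type container struct {
	mu   sync.Mutex
	task string // stand-in for the restored containerd task
}

type client struct {
	sync.Mutex
	containers map[string]*container
}

// Restore reserves the ID under the client lock, then does the slow work
// while holding only the per-container mutex, so operations on other
// containers are not blocked. On failure the reservation is rolled back.
func (c *client) Restore(id string) (err error) {
	c.Lock()
	if _, ok := c.containers[id]; ok {
		c.Unlock()
		return errors.New("id already in use")
	}

	cntr := &container{}
	c.containers[id] = cntr // reserve the ID
	cntr.mu.Lock()
	defer cntr.mu.Unlock()
	c.Unlock() // client lock is released before the slow part

	defer func() {
		if err != nil {
			c.Lock()
			delete(c.containers, id) // drop the reservation on error
			c.Unlock()
		}
	}()

	// Slow work (loading the container, attaching IO, ...) happens here
	// without holding the client-wide lock.
	cntr.task = "loaded-task"
	return nil
}

func main() {
	c := &client{containers: map[string]*container{}}
	fmt.Println(c.Restore("abc")) // <nil>
	fmt.Println(c.Restore("abc")) // id already in use
}

The same ordering appears in the diff below: a second caller for an already-reserved ID fails fast with a conflict error instead of waiting on the client lock for the whole restore.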

libcontainerd/client_daemon.go (+32 −12)

@@ -34,7 +34,7 @@ import (
 	"github.com/docker/docker/errdefs"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/opencontainers/image-spec/specs-go/v1"
-	specs "github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )
@@ -131,9 +131,30 @@ func (c *client) Version(ctx context.Context) (containerd.Version, error) {
 	return c.getRemote().Version(ctx)
 }
 
+// Restore loads the containerd container.
+// It should not be called concurrently with any other operation for the given ID.
 func (c *client) Restore(ctx context.Context, id string, attachStdio StdioCallback) (alive bool, pid int, err error) {
 	c.Lock()
-	defer c.Unlock()
+	_, ok := c.containers[id]
+	if ok {
+		c.Unlock()
+		return false, 0, errors.WithStack(newConflictError("id already in use"))
+	}
+
+	cntr := &container{}
+	c.containers[id] = cntr
+	cntr.mu.Lock()
+	defer cntr.mu.Unlock()
+
+	c.Unlock()
+
+	defer func() {
+		if err != nil {
+			c.Lock()
+			delete(c.containers, id)
+			c.Unlock()
+		}
+	}()
 
 	var dio *cio.DirectIO
 	defer func() {
@@ -144,9 +165,9 @@ func (c *client) Restore(ctx context.Context, id string, attachStdio StdioCallba
 		err = wrapError(err)
 	}()
 
-	ctr, err := c.remote.LoadContainer(ctx, id)
+	ctr, err := c.getRemote().LoadContainer(ctx, id)
 	if err != nil {
-		return false, -1, errors.WithStack(err)
+		return false, -1, errors.WithStack(wrapError(err))
 	}
 
 	attachIO := func(fifos *cio.FIFOSet) (cio.IO, error) {
@@ -160,24 +181,23 @@ func (c *client) Restore(ctx context.Context, id string, attachStdio StdioCallba
 	}
 	t, err := ctr.Task(ctx, attachIO)
 	if err != nil && !containerderrors.IsNotFound(err) {
-		return false, -1, err
+		return false, -1, errors.Wrap(wrapError(err), "error getting containerd task for container")
 	}
 
 	if t != nil {
 		s, err := t.Status(ctx)
 		if err != nil {
-			return false, -1, err
+			return false, -1, errors.Wrap(wrapError(err), "error getting task status")
 		}
 
 		alive = s.Status != containerd.Stopped
 		pid = int(t.Pid())
 	}
-	c.containers[id] = &container{
-		bundleDir: filepath.Join(c.stateDir, id),
-		ctr:       ctr,
-		task:      t,
-		// TODO(mlaventure): load execs
-	}
+
+	cntr.bundleDir = filepath.Join(c.stateDir, id)
+	cntr.ctr = ctr
+	cntr.task = t
+	// TODO(mlaventure): load execs
 
 	c.logger.WithFields(logrus.Fields{
 		"container": id,