
Use SELinux labels for volumes

Fixes a regression from the volumes refactor where the vfs graphdriver
was setting labels for volumes to `s0` so that they could both be written
to by the container and shared with other containers.
When moving away from vfs, this was never re-introduced.
Since this needs to happen regardless of the volume driver, it is
implemented outside of the driver.

Fixes an issue where `z` and `Z` labels are not set for bind-mounts.

Don't lock while creating volumes

Signed-off-by: Brian Goff <cpuguy83@gmail.com>
Brian Goff, 10 years ago
commit b2a43baf2e
4 changed files with 27 additions and 28 deletions:

  1. daemon/container.go (+3 -0)
  2. daemon/create.go (+3 -0)
  3. daemon/daemon.go (+9 -4)
  4. daemon/volumes.go (+12 -24)
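For readers unfamiliar with the `z`/`Z` modes the commit message refers to: `z` requests a shared SELinux relabel (the content becomes usable by multiple containers), while `Z` requests a private relabel (only this container's label). A minimal, self-contained sketch of that decision, using a hypothetical `relabelRequest` helper rather than Docker's actual parser (the real parsing lives in `parseBindMount` in the daemon/volumes.go diff below):

```go
package main

import (
	"fmt"
	"strings"
)

// relabelRequest is illustrative only. It reports whether a volume mode
// string asks for an SELinux relabel and, if so, whether the result
// should be shared across containers.
func relabelRequest(mode string) string {
	switch {
	case strings.Contains(mode, "Z"):
		return "private relabel: only this container's SELinux label"
	case strings.Contains(mode, "z"):
		return "shared relabel: usable by multiple containers"
	default:
		return "no relabel requested"
	}
}

func main() {
	for _, m := range []string{"rw", "z", "Z"} {
		fmt.Printf("%-2s -> %s\n", m, relabelRequest(m))
	}
}
```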

+ 3 - 0
daemon/container.go

@@ -1009,6 +1009,7 @@ func copyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error)
 func (container *Container) networkMounts() []execdriver.Mount {
 	var mounts []execdriver.Mount
 	if container.ResolvConfPath != "" {
+		label.SetFileLabel(container.ResolvConfPath, container.MountLabel)
 		mounts = append(mounts, execdriver.Mount{
 			Source:      container.ResolvConfPath,
 			Destination: "/etc/resolv.conf",
@@ -1017,6 +1018,7 @@ func (container *Container) networkMounts() []execdriver.Mount {
 		})
 	}
 	if container.HostnamePath != "" {
+		label.SetFileLabel(container.HostnamePath, container.MountLabel)
 		mounts = append(mounts, execdriver.Mount{
 			Source:      container.HostnamePath,
 			Destination: "/etc/hostname",
@@ -1025,6 +1027,7 @@ func (container *Container) networkMounts() []execdriver.Mount {
 		})
 	}
 	if container.HostsPath != "" {
+		label.SetFileLabel(container.HostsPath, container.MountLabel)
 		mounts = append(mounts, execdriver.Mount{
 			Source:      container.HostsPath,
 			Destination: "/etc/hosts",
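The three additions above follow the same pattern: the daemon-generated network files (resolv.conf, hostname, hosts) are bind-mounted into the container, so they need the container's SELinux mount label or the container cannot read them once labeling is enforced. A minimal sketch of that step in isolation; the import path for the `label` package is an assumption (the hunk does not show container.go's import block), `SetFileLabel(path, label)` is the call added above, and the error handling here is illustrative (the hunk ignores the return value):

```go
package main

import (
	"log"

	// Assumed import path for the SELinux label helpers used in the hunk;
	// the diff itself does not show container.go's import block.
	"github.com/docker/libcontainer/label"
)

func main() {
	// Hypothetical mount label and container ID, for illustration only.
	mountLabel := "system_u:object_r:svirt_sandbox_file_t:s0:c57,c272"
	for _, p := range []string{
		"/var/lib/docker/containers/abc123/resolv.conf",
		"/var/lib/docker/containers/abc123/hostname",
		"/var/lib/docker/containers/abc123/hosts",
	} {
		// Give each generated file the container's label before it is
		// bind-mounted to /etc/resolv.conf, /etc/hostname and /etc/hosts.
		if err := label.SetFileLabel(p, mountLabel); err != nil {
			log.Printf("labeling %s: %v", p, err)
		}
	}
}
```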

+ 3 - 0
daemon/create.go

@@ -129,6 +129,9 @@ func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.Hos
 		if err != nil {
 			return nil, nil, err
 		}
+		if err := label.Relabel(v.Path(), container.MountLabel, "z"); err != nil {
+			return nil, nil, err
+		}
 
 		if err := container.copyImagePathContent(v, destination); err != nil {
 			return nil, nil, err
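Here the volume just created for an image's `VOLUME` entry is relabeled with the shared `z` mode before image content is copied into it, so the container can write to it and other containers can still share it. A rough, self-contained sketch of the same step; the import path, paths, and label value are assumptions, while `Relabel(path, mountLabel, mode)` is the same three-argument call the hunk adds:

```go
package main

import (
	"log"
	"os"

	// Assumed import path for the label helpers; create.go's import block
	// is not part of the diff.
	"github.com/docker/libcontainer/label"
)

func main() {
	// Hypothetical volume path and mount label, for illustration only.
	volPath := "/var/lib/docker/volumes/example/_data"
	mountLabel := "system_u:object_r:svirt_sandbox_file_t:s0:c57,c272"

	if err := os.MkdirAll(volPath, 0755); err != nil {
		log.Fatal(err)
	}
	// "z" relabels the path so that any container can share the volume,
	// mirroring the label.Relabel(v.Path(), container.MountLabel, "z")
	// call shown in the hunk above.
	if err := label.Relabel(volPath, mountLabel, "z"); err != nil {
		log.Fatal(err)
	}
}
```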

+ 9 - 4
daemon/daemon.go

@@ -1221,16 +1221,21 @@ func (daemon *Daemon) verifyHostConfig(hostConfig *runconfig.HostConfig) ([]stri
 }
 
 func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.HostConfig) error {
-	if err := daemon.registerMountPoints(container, hostConfig); err != nil {
+	container.Lock()
+	if err := parseSecurityOpt(container, hostConfig); err != nil {
+		container.Unlock()
 		return err
 	}
+	container.Unlock()
 
-	container.Lock()
-	defer container.Unlock()
-	if err := parseSecurityOpt(container, hostConfig); err != nil {
+	// Do not lock while creating volumes since this could be calling out to external plugins
+	// Don't want to block other actions, like `docker ps` because we're waiting on an external plugin
+	if err := daemon.registerMountPoints(container, hostConfig); err != nil {
 		return err
 	}
 
+	container.Lock()
+	defer container.Unlock()
 	// Register any links from the host config before starting the container
 	if err := daemon.RegisterLinks(container, hostConfig); err != nil {
 		return err
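The reordering above is about lock scope: `registerMountPoints` may call out to an external volume plugin, and holding the container lock during that call would block unrelated operations such as `docker ps` until the plugin responds. The general pattern, sketched with a plain `sync.Mutex` and a stand-in for the slow plugin call (all names here are hypothetical):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type container struct {
	mu     sync.Mutex
	config string
	mounts []string
}

// slowPluginCall stands in for volume creation hitting an external driver.
func slowPluginCall() []string {
	time.Sleep(100 * time.Millisecond)
	return []string{"/var/lib/docker/volumes/example/_data"}
}

func setup(c *container) {
	// Quick, in-memory work: hold the lock only as long as needed.
	c.mu.Lock()
	c.config = "parsed security options"
	c.mu.Unlock()

	// Slow external work: deliberately done without the lock so that
	// readers (a "docker ps" equivalent) are not blocked in the meantime.
	mounts := slowPluginCall()

	// Re-acquire the lock just to publish the result.
	c.mu.Lock()
	c.mounts = mounts
	c.mu.Unlock()
}

func main() {
	c := &container{}
	setup(c)
	fmt.Println(c.config, c.mounts)
}
```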

+ 12 - 24
daemon/volumes.go

@@ -6,10 +6,8 @@ import (
 	"io/ioutil"
 	"os"
 	"path/filepath"
-	"runtime"
 	"strings"
 
-	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/pkg/chrootarchive"
 	"github.com/docker/docker/runconfig"
 	"github.com/docker/docker/volume"
@@ -23,6 +21,7 @@ type mountPoint struct {
 	RW          bool
 	Volume      volume.Volume `json:"-"`
 	Source      string
+	Relabel     string
 }
 
 func (m *mountPoint) Setup() (string, error) {
@@ -69,12 +68,8 @@ func parseBindMount(spec string, mountLabel string, config *runconfig.Config) (*
 			return nil, fmt.Errorf("invalid mode for volumes-from: %s", mode)
 		}
 		bind.RW = rwModes[mode]
-		// check if we need to apply a SELinux label
-		if strings.ContainsAny(mode, "zZ") {
-			if err := label.Relabel(bind.Source, mountLabel, mode); err != nil {
-				return nil, err
-			}
-		}
+		// Relabel will apply a SELinux label, if necessary
+		bind.Relabel = mode
 	default:
 		return nil, fmt.Errorf("Invalid volume specification: %s", spec)
 	}
@@ -203,9 +198,6 @@ func (daemon *Daemon) registerMountPoints(container *Container, hostConfig *runc
 		}
 	}
 
-	// lock for labels
-	runtime.LockOSThread()
-	defer runtime.UnlockOSThread()
 	// 3. Read bind mounts
 	for _, b := range hostConfig.Binds {
 		// #10618
@@ -219,33 +211,29 @@ func (daemon *Daemon) registerMountPoints(container *Container, hostConfig *runc
 		}
 
 		if len(bind.Name) > 0 && len(bind.Driver) > 0 {
-			// set the label
-			if err := label.SetFileCreateLabel(container.MountLabel); err != nil {
-				return fmt.Errorf("Unable to setup default labeling for volume creation %s: %v", bind.Source, err)
-			}
-
 			// create the volume
 			v, err := createVolume(bind.Name, bind.Driver)
 			if err != nil {
-				// reset the label
-				if e := label.SetFileCreateLabel(""); e != nil {
-					logrus.Errorf("Unable to reset labeling for volume creation %s: %v", bind.Source, e)
-				}
 				return err
 			}
 			bind.Volume = v
-
-			// reset the label
-			if err := label.SetFileCreateLabel(""); err != nil {
-				return fmt.Errorf("Unable to reset labeling for volume creation %s: %v", bind.Source, err)
+			bind.Source = v.Path()
+			// Since this is just a named volume and not a typical bind, set to shared mode `z`
+			if bind.Relabel == "" {
+				bind.Relabel = "z"
 			}
 		}
 
+		if err := label.Relabel(bind.Source, container.MountLabel, bind.Relabel); err != nil {
+			return err
+		}
		binds[bind.Destination] = true
 		mountPoints[bind.Destination] = bind
 	}
 
+	container.Lock()
 	container.MountPoints = mountPoints
+	container.Unlock()
 
 	return nil
 }
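Net effect of this file: instead of relabeling during parsing (and juggling `SetFileCreateLabel` plus an OS-thread lock around volume creation), the requested mode is remembered on the mount point and a single `label.Relabel` call is made once the real source path is known, with named volumes defaulting to the shared `z` mode. A condensed sketch of that bookkeeping; the struct and helper names are hypothetical, and the actual relabeling is the `label.Relabel` call shown above:

```go
package main

import "fmt"

// mount mirrors the relevant fields of daemon/volumes.go's mountPoint.
type mount struct {
	Name    string // non-empty for named, driver-backed volumes
	Source  string // filled in from v.Path() for named volumes
	Relabel string // "z", "Z", or "" as captured by parseBindMount
}

// effectiveRelabel returns the mode that should be passed to label.Relabel:
// a named volume with no explicit mode is shared across containers ("z").
func effectiveRelabel(m mount) string {
	if m.Name != "" && m.Relabel == "" {
		return "z"
	}
	return m.Relabel
}

func main() {
	fmt.Println(effectiveRelabel(mount{Name: "data"}))                  // "z"
	fmt.Println(effectiveRelabel(mount{Source: "/host", Relabel: "Z"})) // "Z"
	fmt.Println(effectiveRelabel(mount{Source: "/host"}))               // "" (no relabel)
}
```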