package daemon

import (
	"fmt"
	"io"
	"os"
	"os/exec"
	"path/filepath"
	"sort"
	"strconv"
	"strings"

	"github.com/Sirupsen/logrus"
	containertypes "github.com/docker/docker/api/types/container"
	"github.com/docker/docker/container"
	"github.com/docker/docker/daemon/caps"
	"github.com/docker/docker/oci"
	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/mount"
	"github.com/docker/docker/pkg/stringutils"
	"github.com/docker/docker/pkg/symlink"
	"github.com/docker/docker/volume"
	"github.com/opencontainers/runc/libcontainer/apparmor"
	"github.com/opencontainers/runc/libcontainer/cgroups"
	"github.com/opencontainers/runc/libcontainer/devices"
	"github.com/opencontainers/runc/libcontainer/user"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

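// setResources translates the container's resource settings (block I/O,
// memory, CPU, pids limit, and OOM kill behavior) into the Linux resources
// section of the spec, preserving any device cgroup rules already present.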
func setResources(s *specs.Spec, r containertypes.Resources) error {
	weightDevices, err := getBlkioWeightDevices(r)
	if err != nil {
		return err
	}
	readBpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceReadBps)
	if err != nil {
		return err
	}
	writeBpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceWriteBps)
	if err != nil {
		return err
	}
	readIOpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceReadIOps)
	if err != nil {
		return err
	}
	writeIOpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceWriteIOps)
	if err != nil {
		return err
	}

	memoryRes := getMemoryResources(r)
	cpuRes := getCPUResources(r)
	blkioWeight := r.BlkioWeight

	specResources := &specs.Resources{
		Memory: memoryRes,
		CPU:    cpuRes,
		BlockIO: &specs.BlockIO{
			Weight:                  &blkioWeight,
			WeightDevice:            weightDevices,
			ThrottleReadBpsDevice:   readBpsDevice,
			ThrottleWriteBpsDevice:  writeBpsDevice,
			ThrottleReadIOPSDevice:  readIOpsDevice,
			ThrottleWriteIOPSDevice: writeIOpsDevice,
		},
		DisableOOMKiller: r.OomKillDisable,
		Pids: &specs.Pids{
			Limit: &r.PidsLimit,
		},
	}

	if s.Linux.Resources != nil && len(s.Linux.Resources.Devices) > 0 {
		specResources.Devices = s.Linux.Resources.Devices
	}

	s.Linux.Resources = specResources
	return nil
}

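// setDevices populates the spec with the devices available inside the
// container: all host devices when the container is privileged, otherwise
// only the explicitly requested device mappings.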
func setDevices(s *specs.Spec, c *container.Container) error {
	// Build lists of devices allowed and created within the container.
	var devs []specs.Device
	devPermissions := s.Linux.Resources.Devices
	if c.HostConfig.Privileged {
		hostDevices, err := devices.HostDevices()
		if err != nil {
			return err
		}
		for _, d := range hostDevices {
			devs = append(devs, oci.Device(d))
		}
		rwm := "rwm"
		devPermissions = []specs.DeviceCgroup{
			{
				Allow:  true,
				Access: &rwm,
			},
		}
	} else {
		for _, deviceMapping := range c.HostConfig.Devices {
			d, dPermissions, err := oci.DevicesFromPath(deviceMapping.PathOnHost, deviceMapping.PathInContainer, deviceMapping.CgroupPermissions)
			if err != nil {
				return err
			}
			devs = append(devs, d...)
			devPermissions = append(devPermissions, dPermissions...)
		}
	}

	s.Linux.Devices = append(s.Linux.Devices, devs...)
	s.Linux.Resources.Devices = devPermissions
	return nil
}

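// setRlimits merges the container's ulimits with the daemon defaults and sets
// them as rlimits on the spec process.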
func setRlimits(daemon *Daemon, s *specs.Spec, c *container.Container) error {
	var rlimits []specs.Rlimit

	// We want to leave the original HostConfig alone so make a copy here
	hostConfig := *c.HostConfig
	// Merge with the daemon defaults
	daemon.mergeUlimits(&hostConfig)
	for _, ul := range hostConfig.Ulimits {
		rlimits = append(rlimits, specs.Rlimit{
			Type: "RLIMIT_" + strings.ToUpper(ul.Name),
			Soft: uint64(ul.Soft),
			Hard: uint64(ul.Hard),
		})
	}

	s.Process.Rlimits = rlimits
	return nil
}

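// setUser resolves the user configured for the container and sets the UID,
// GID, and additional GIDs on the spec process.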
func setUser(s *specs.Spec, c *container.Container) error {
	uid, gid, additionalGids, err := getUser(c, c.Config.User)
	if err != nil {
		return err
	}
	s.Process.User.UID = uid
	s.Process.User.GID = gid
	s.Process.User.AdditionalGids = additionalGids
	return nil
}

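// readUserFile opens the file at path p inside the container's root
// filesystem, resolving any symlinks within the scope of that rootfs.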
func readUserFile(c *container.Container, p string) (io.ReadCloser, error) {
	fp, err := symlink.FollowSymlinkInScope(filepath.Join(c.BaseFS, p), c.BaseFS)
	if err != nil {
		return nil, err
	}
	return os.Open(fp)
}

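// getUser resolves username against the container's passwd and group files
// and returns the UID, GID, and additional GIDs the process should run with.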
func getUser(c *container.Container, username string) (uint32, uint32, []uint32, error) {
	passwdPath, err := user.GetPasswdPath()
	if err != nil {
		return 0, 0, nil, err
	}
	groupPath, err := user.GetGroupPath()
	if err != nil {
		return 0, 0, nil, err
	}
	passwdFile, err := readUserFile(c, passwdPath)
	if err == nil {
		defer passwdFile.Close()
	}
	groupFile, err := readUserFile(c, groupPath)
	if err == nil {
		defer groupFile.Close()
	}

	execUser, err := user.GetExecUser(username, nil, passwdFile, groupFile)
	if err != nil {
		return 0, 0, nil, err
	}

	// TODO: fix this double read by a change to libcontainer/user pkg
	groupFile, err = readUserFile(c, groupPath)
	if err == nil {
		defer groupFile.Close()
	}
	var addGroups []int
	if len(c.HostConfig.GroupAdd) > 0 {
		addGroups, err = user.GetAdditionalGroups(c.HostConfig.GroupAdd, groupFile)
		if err != nil {
			return 0, 0, nil, err
		}
	}
	uid := uint32(execUser.Uid)
	gid := uint32(execUser.Gid)
	sgids := append(execUser.Sgids, addGroups...)
	var additionalGids []uint32
	for _, g := range sgids {
		additionalGids = append(additionalGids, uint32(g))
	}
	return uid, gid, additionalGids, nil
}

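// setNamespace adds ns to the spec, replacing any existing namespace of the
// same type.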
func setNamespace(s *specs.Spec, ns specs.Namespace) {
	for i, n := range s.Linux.Namespaces {
		if n.Type == ns.Type {
			s.Linux.Namespaces[i] = ns
			return
		}
	}
	s.Linux.Namespaces = append(s.Linux.Namespaces, ns)
}

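// setCapabilities sets the process capabilities: the full set for privileged
// containers, otherwise the default set adjusted by CapAdd and CapDrop.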
func setCapabilities(s *specs.Spec, c *container.Container) error {
	var caplist []string
	var err error
	if c.HostConfig.Privileged {
		caplist = caps.GetAllCapabilities()
	} else {
		caplist, err = caps.TweakCapabilities(s.Process.Capabilities, c.HostConfig.CapAdd, c.HostConfig.CapDrop)
		if err != nil {
			return err
		}
	}
	s.Process.Capabilities = caplist
	return nil
}

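// setNamespaces configures the user, network, ipc, pid, and uts namespaces of
// the spec according to the container's HostConfig.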
func setNamespaces(daemon *Daemon, s *specs.Spec, c *container.Container) error {
	userNS := false
	// user
	if c.HostConfig.UsernsMode.IsPrivate() {
		uidMap, gidMap := daemon.GetUIDGIDMaps()
		if uidMap != nil {
			userNS = true
			ns := specs.Namespace{Type: "user"}
			setNamespace(s, ns)
			s.Linux.UIDMappings = specMapping(uidMap)
			s.Linux.GIDMappings = specMapping(gidMap)
		}
	}
	// network
	if !c.Config.NetworkDisabled {
		ns := specs.Namespace{Type: "network"}
		parts := strings.SplitN(string(c.HostConfig.NetworkMode), ":", 2)
		if parts[0] == "container" {
			nc, err := daemon.getNetworkedContainer(c.ID, c.HostConfig.NetworkMode.ConnectedContainer())
			if err != nil {
				return err
			}
			ns.Path = fmt.Sprintf("/proc/%d/ns/net", nc.State.GetPID())
			if userNS {
				// to share a net namespace, they must also share a user namespace
				nsUser := specs.Namespace{Type: "user"}
				nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", nc.State.GetPID())
				setNamespace(s, nsUser)
			}
		} else if c.HostConfig.NetworkMode.IsHost() {
			ns.Path = c.NetworkSettings.SandboxKey
		}
		setNamespace(s, ns)
	}
	// ipc
	if c.HostConfig.IpcMode.IsContainer() {
		ns := specs.Namespace{Type: "ipc"}
		ic, err := daemon.getIpcContainer(c)
		if err != nil {
			return err
		}
		ns.Path = fmt.Sprintf("/proc/%d/ns/ipc", ic.State.GetPID())
		setNamespace(s, ns)
		if userNS {
			// to share an IPC namespace, they must also share a user namespace
			nsUser := specs.Namespace{Type: "user"}
			nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", ic.State.GetPID())
			setNamespace(s, nsUser)
		}
	} else if c.HostConfig.IpcMode.IsHost() {
		oci.RemoveNamespace(s, specs.NamespaceType("ipc"))
	} else {
		ns := specs.Namespace{Type: "ipc"}
		setNamespace(s, ns)
	}
	// pid
	if c.HostConfig.PidMode.IsContainer() {
		ns := specs.Namespace{Type: "pid"}
		pc, err := daemon.getPidContainer(c)
		if err != nil {
			return err
		}
		ns.Path = fmt.Sprintf("/proc/%d/ns/pid", pc.State.GetPID())
		setNamespace(s, ns)
		if userNS {
			// to share a PID namespace, they must also share a user namespace
			nsUser := specs.Namespace{Type: "user"}
			nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", pc.State.GetPID())
			setNamespace(s, nsUser)
		}
	} else if c.HostConfig.PidMode.IsHost() {
		oci.RemoveNamespace(s, specs.NamespaceType("pid"))
	} else {
		ns := specs.Namespace{Type: "pid"}
		setNamespace(s, ns)
	}
	// uts
	if c.HostConfig.UTSMode.IsHost() {
		oci.RemoveNamespace(s, specs.NamespaceType("uts"))
		s.Hostname = ""
	}

	return nil
}

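// specMapping converts idtools ID mappings into the runtime spec's IDMapping
// format.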
func specMapping(s []idtools.IDMap) []specs.IDMapping {
	var ids []specs.IDMapping
	for _, item := range s {
		ids = append(ids, specs.IDMapping{
			HostID:      uint32(item.HostID),
			ContainerID: uint32(item.ContainerID),
			Size:        uint32(item.Size),
		})
	}
	return ids
}

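// getMountInfo returns the mount info entry whose mountpoint matches dir, or
// nil if none matches.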
func getMountInfo(mountinfo []*mount.Info, dir string) *mount.Info {
	for _, m := range mountinfo {
		if m.Mountpoint == dir {
			return m
		}
	}
	return nil
}

// getSourceMount returns the source mount point of the directory passed in as
// an argument, along with the mount's optional fields.
func getSourceMount(source string) (string, string, error) {
	// Ensure any symlinks are resolved.
	sourcePath, err := filepath.EvalSymlinks(source)
	if err != nil {
		return "", "", err
	}

	mountinfos, err := mount.GetMounts()
	if err != nil {
		return "", "", err
	}

	mountinfo := getMountInfo(mountinfos, sourcePath)
	if mountinfo != nil {
		return sourcePath, mountinfo.Optional, nil
	}

	path := sourcePath
	for {
		path = filepath.Dir(path)

		mountinfo = getMountInfo(mountinfos, path)
		if mountinfo != nil {
			return path, mountinfo.Optional, nil
		}

		if path == "/" {
			break
		}
	}

	// If we are here, we did not find a parent mount. Something is wrong.
	return "", "", fmt.Errorf("Could not find source mount of %s", source)
}

// ensureShared checks that the mount point on which path is mounted is a
// shared mount.
func ensureShared(path string) error {
	sharedMount := false

	sourceMount, optionalOpts, err := getSourceMount(path)
	if err != nil {
		return err
	}
	// Make sure source mount point is shared.
	optsSplit := strings.Split(optionalOpts, " ")
	for _, opt := range optsSplit {
		if strings.HasPrefix(opt, "shared:") {
			sharedMount = true
			break
		}
	}

	if !sharedMount {
		return fmt.Errorf("Path %s is mounted on %s but it is not a shared mount.", path, sourceMount)
	}
	return nil
}

// ensureSharedOrSlave checks that the mount point on which path is mounted is
// either a shared or a slave mount.
func ensureSharedOrSlave(path string) error {
	sharedMount := false
	slaveMount := false

	sourceMount, optionalOpts, err := getSourceMount(path)
	if err != nil {
		return err
	}
	// Make sure source mount point is either shared or slave.
	optsSplit := strings.Split(optionalOpts, " ")
	for _, opt := range optsSplit {
		if strings.HasPrefix(opt, "shared:") {
			sharedMount = true
			break
		} else if strings.HasPrefix(opt, "master:") {
			slaveMount = true
			break
		}
	}

	if !sharedMount && !slaveMount {
		return fmt.Errorf("Path %s is mounted on %s but it is not a shared or slave mount.", path, sourceMount)
	}
	return nil
}

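// mountPropagationMap and mountPropagationReverseMap translate between the
// propagation mode names used in mount options and the flags defined by the
// mount package.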
var (
	mountPropagationMap = map[string]int{
		"private":  mount.PRIVATE,
		"rprivate": mount.RPRIVATE,
		"shared":   mount.SHARED,
		"rshared":  mount.RSHARED,
		"slave":    mount.SLAVE,
		"rslave":   mount.RSLAVE,
	}

	mountPropagationReverseMap = map[int]string{
		mount.PRIVATE:  "private",
		mount.RPRIVATE: "rprivate",
		mount.SHARED:   "shared",
		mount.RSHARED:  "rshared",
		mount.SLAVE:    "slave",
		mount.RSLAVE:   "rslave",
	}
)

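// setMounts merges the spec's default mounts with the user-supplied mounts,
// applying tmpfs options, bind propagation, and read-only handling.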
func setMounts(daemon *Daemon, s *specs.Spec, c *container.Container, mounts []container.Mount) error {
	userMounts := make(map[string]struct{})
	for _, m := range mounts {
		userMounts[m.Destination] = struct{}{}
	}

	// Filter out mounts that are overridden by user supplied mounts
	var defaultMounts []specs.Mount
	_, mountDev := userMounts["/dev"]
	for _, m := range s.Mounts {
		if _, ok := userMounts[m.Destination]; !ok {
			if mountDev && strings.HasPrefix(m.Destination, "/dev/") {
				continue
			}
			defaultMounts = append(defaultMounts, m)
		}
	}

	s.Mounts = defaultMounts
	for _, m := range mounts {
		for _, cm := range s.Mounts {
			if cm.Destination == m.Destination {
				return fmt.Errorf("Duplicate mount point '%s'", m.Destination)
			}
		}

		if m.Source == "tmpfs" {
			data := m.Data
			options := []string{"noexec", "nosuid", "nodev", string(volume.DefaultPropagationMode)}
			if data != "" {
				options = append(options, strings.Split(data, ",")...)
			}

			merged, err := mount.MergeTmpfsOptions(options)
			if err != nil {
				return err
			}

			s.Mounts = append(s.Mounts, specs.Mount{Destination: m.Destination, Source: m.Source, Type: "tmpfs", Options: merged})
			continue
		}

		mt := specs.Mount{Destination: m.Destination, Source: m.Source, Type: "bind"}

		// Determine property of RootPropagation based on volume
		// properties. If a volume is shared, then keep root propagation
		// shared. This should work for slave and private volumes too.
		//
		// For slave volumes, it can be either [r]shared/[r]slave.
		//
		// For private volumes any root propagation value should work.
		pFlag := mountPropagationMap[m.Propagation]
		if pFlag == mount.SHARED || pFlag == mount.RSHARED {
			if err := ensureShared(m.Source); err != nil {
				return err
			}
			rootpg := mountPropagationMap[s.Linux.RootfsPropagation]
			if rootpg != mount.SHARED && rootpg != mount.RSHARED {
				s.Linux.RootfsPropagation = mountPropagationReverseMap[mount.SHARED]
			}
		} else if pFlag == mount.SLAVE || pFlag == mount.RSLAVE {
			if err := ensureSharedOrSlave(m.Source); err != nil {
				return err
			}
			rootpg := mountPropagationMap[s.Linux.RootfsPropagation]
			if rootpg != mount.SHARED && rootpg != mount.RSHARED && rootpg != mount.SLAVE && rootpg != mount.RSLAVE {
				s.Linux.RootfsPropagation = mountPropagationReverseMap[mount.RSLAVE]
			}
		}

		opts := []string{"rbind"}
		if !m.Writable {
			opts = append(opts, "ro")
		}
		if pFlag != 0 {
			opts = append(opts, mountPropagationReverseMap[pFlag])
		}

		mt.Options = opts
		s.Mounts = append(s.Mounts, mt)
	}

	if s.Root.Readonly {
		for i, m := range s.Mounts {
			switch m.Destination {
			case "/proc", "/dev/pts", "/dev/mqueue": // /dev is remounted by runc
				continue
			}
			if _, ok := userMounts[m.Destination]; !ok {
				if !stringutils.InSlice(m.Options, "ro") {
					s.Mounts[i].Options = append(s.Mounts[i].Options, "ro")
				}
			}
		}
	}

	if c.HostConfig.Privileged {
		if !s.Root.Readonly {
			// clear readonly for /sys
			for i := range s.Mounts {
				if s.Mounts[i].Destination == "/sys" {
					clearReadOnly(&s.Mounts[i])
				}
			}
		}
		s.Linux.ReadonlyPaths = nil
		s.Linux.MaskedPaths = nil
	}

	// TODO: until a kernel/mount solution exists for handling remount in a user namespace,
	// we must clear the readonly flag for the cgroups mount (@mrunalp concurs)
	if uidMap, _ := daemon.GetUIDGIDMaps(); uidMap != nil || c.HostConfig.Privileged {
		for i, m := range s.Mounts {
			if m.Type == "cgroup" {
				clearReadOnly(&s.Mounts[i])
			}
		}
	}

	return nil
}

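// populateCommonSpec fills in the parts of the spec shared by all containers:
// root filesystem, process arguments and environment, working directory,
// hostname, and the optional /dev/init mount.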
func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c *container.Container) error {
	linkedEnv, err := daemon.setupLinkedContainers(c)
	if err != nil {
		return err
	}
	s.Root = specs.Root{
		Path:     c.BaseFS,
		Readonly: c.HostConfig.ReadonlyRootfs,
	}
	rootUID, rootGID := daemon.GetRemappedUIDGID()
	if err := c.SetupWorkingDirectory(rootUID, rootGID); err != nil {
		return err
	}
	cwd := c.Config.WorkingDir
	if len(cwd) == 0 {
		cwd = "/"
	}
	s.Process.Args = append([]string{c.Path}, c.Args...)

	// only add the custom init if it is specified and the container is running in its
	// own private pid namespace. It does not make sense to add it if it is running in the
	// host namespace or another container's pid namespace where we already have an init
	if c.HostConfig.PidMode.IsPrivate() {
		if (c.HostConfig.Init != nil && *c.HostConfig.Init) ||
			(c.HostConfig.Init == nil && daemon.configStore.Init) {
			s.Process.Args = append([]string{"/dev/init", c.Path}, c.Args...)
			var path string
			if daemon.configStore.InitPath == "" && c.HostConfig.InitPath == "" {
				path, err = exec.LookPath(DefaultInitBinary)
				if err != nil {
					return err
				}
			}
			if daemon.configStore.InitPath != "" {
				path = daemon.configStore.InitPath
			}
			if c.HostConfig.InitPath != "" {
				path = c.HostConfig.InitPath
			}
			s.Mounts = append(s.Mounts, specs.Mount{
				Destination: "/dev/init",
				Type:        "bind",
				Source:      path,
				Options:     []string{"bind", "ro"},
			})
		}
	}
	s.Process.Cwd = cwd
	s.Process.Env = c.CreateDaemonEnvironment(c.Config.Tty, linkedEnv)
	s.Process.Terminal = c.Config.Tty
	s.Hostname = c.FullHostname()

	return nil
}

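// createSpec builds the OCI runtime spec for the container from the default
// spec, the container's configuration, and the daemon settings.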
func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) {
	s := oci.DefaultSpec()
	if err := daemon.populateCommonSpec(&s, c); err != nil {
		return nil, err
	}

	var cgroupsPath string
	scopePrefix := "docker"
	parent := "/docker"
	useSystemd := UsingSystemd(daemon.configStore)
	if useSystemd {
		parent = "system.slice"
	}

	if c.HostConfig.CgroupParent != "" {
		parent = c.HostConfig.CgroupParent
	} else if daemon.configStore.CgroupParent != "" {
		parent = daemon.configStore.CgroupParent
	}

	if useSystemd {
		cgroupsPath = parent + ":" + scopePrefix + ":" + c.ID
		logrus.Debugf("createSpec: cgroupsPath: %s", cgroupsPath)
	} else {
		cgroupsPath = filepath.Join(parent, c.ID)
	}
	s.Linux.CgroupsPath = &cgroupsPath

	if err := setResources(&s, c.HostConfig.Resources); err != nil {
		return nil, fmt.Errorf("linux runtime spec resources: %v", err)
	}
	s.Linux.Resources.OOMScoreAdj = &c.HostConfig.OomScoreAdj
	s.Linux.Sysctl = c.HostConfig.Sysctls

	p := *s.Linux.CgroupsPath
	if useSystemd {
		initPath, err := cgroups.GetInitCgroupDir("cpu")
		if err != nil {
			return nil, err
		}
		p, err = cgroups.GetThisCgroupDir("cpu")
		if err != nil {
			return nil, err
		}
		p = filepath.Join(initPath, p)
	}

	// Clean path to guard against things like ../../../BAD
	parentPath := filepath.Dir(p)
	if !filepath.IsAbs(parentPath) {
		parentPath = filepath.Clean("/" + parentPath)
	}

	if err := daemon.initCgroupsPath(parentPath); err != nil {
		return nil, fmt.Errorf("linux init cgroups path: %v", err)
	}
	if err := setDevices(&s, c); err != nil {
		return nil, fmt.Errorf("linux runtime spec devices: %v", err)
	}
	if err := setRlimits(daemon, &s, c); err != nil {
		return nil, fmt.Errorf("linux runtime spec rlimits: %v", err)
	}
	if err := setUser(&s, c); err != nil {
		return nil, fmt.Errorf("linux spec user: %v", err)
	}
	if err := setNamespaces(daemon, &s, c); err != nil {
		return nil, fmt.Errorf("linux spec namespaces: %v", err)
	}
	if err := setCapabilities(&s, c); err != nil {
		return nil, fmt.Errorf("linux spec capabilities: %v", err)
	}
	if err := setSeccomp(daemon, &s, c); err != nil {
		return nil, fmt.Errorf("linux seccomp: %v", err)
	}

	if err := daemon.setupIpcDirs(c); err != nil {
		return nil, err
	}

	if err := daemon.setupSecretDir(c); err != nil {
		return nil, err
	}

	ms, err := daemon.setupMounts(c)
	if err != nil {
		return nil, err
	}

	ms = append(ms, c.IpcMounts()...)

	tmpfsMounts, err := c.TmpfsMounts()
	if err != nil {
		return nil, err
	}
	ms = append(ms, tmpfsMounts...)

	if m := c.SecretMount(); m != nil {
		ms = append(ms, *m)
	}

	sort.Sort(mounts(ms))
	if err := setMounts(daemon, &s, c, ms); err != nil {
		return nil, fmt.Errorf("linux mounts: %v", err)
	}

	for _, ns := range s.Linux.Namespaces {
		if ns.Type == "network" && ns.Path == "" && !c.Config.NetworkDisabled {
			target, err := os.Readlink(filepath.Join("/proc", strconv.Itoa(os.Getpid()), "exe"))
			if err != nil {
				return nil, err
			}

			s.Hooks = specs.Hooks{
				Prestart: []specs.Hook{{
					Path: target, // FIXME: cross-platform
					Args: []string{"libnetwork-setkey", c.ID, daemon.netController.ID()},
				}},
			}
		}
	}

	if apparmor.IsEnabled() {
		var appArmorProfile string
		if c.AppArmorProfile != "" {
			appArmorProfile = c.AppArmorProfile
		} else if c.HostConfig.Privileged {
			appArmorProfile = "unconfined"
		} else {
			appArmorProfile = "docker-default"
		}

		if appArmorProfile == "docker-default" {
			// Unattended upgrades and other fun services can unload AppArmor
			// profiles inadvertently. Since we cannot store our profile in
			// /etc/apparmor.d, nor can we practically add other ways of
			// telling the system to keep our profile loaded, in order to make
			// sure that we keep the default profile enabled we dynamically
			// reload it if necessary.
			if err := ensureDefaultAppArmorProfile(); err != nil {
				return nil, err
			}
		}

		s.Process.ApparmorProfile = appArmorProfile
	}
	s.Process.SelinuxLabel = c.GetProcessLabel()
	s.Process.NoNewPrivileges = c.NoNewPrivileges
	s.Linux.MountLabel = c.MountLabel

	return (*specs.Spec)(&s), nil
}

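// clearReadOnly removes the "ro" option from a mount.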
func clearReadOnly(m *specs.Mount) {
	var opt []string
	for _, o := range m.Options {
		if o != "ro" {
			opt = append(opt, o)
		}
	}
	m.Options = opt
}

// mergeUlimits merges the Ulimits from HostConfig with the daemon defaults, and updates HostConfig
func (daemon *Daemon) mergeUlimits(c *containertypes.HostConfig) {
	ulimits := c.Ulimits
	// Merge ulimits with daemon defaults
	ulIdx := make(map[string]struct{})
	for _, ul := range ulimits {
		ulIdx[ul.Name] = struct{}{}
	}
	for name, ul := range daemon.configStore.Ulimits {
		if _, exists := ulIdx[name]; !exists {
			ulimits = append(ulimits, ul)
		}
	}
	c.Ulimits = ulimits
}