container.go

package container // import "github.com/docker/docker/daemon/cluster/executor/container"

import (
	"errors"
	"fmt"
	"net"
	"strconv"
	"strings"

	"github.com/docker/distribution/reference"
	"github.com/docker/docker/api/types"
	enginecontainer "github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/events"
	"github.com/docker/docker/api/types/filters"
	enginemount "github.com/docker/docker/api/types/mount"
	"github.com/docker/docker/api/types/network"
	volumetypes "github.com/docker/docker/api/types/volume"
	"github.com/docker/docker/daemon/cluster/convert"
	executorpkg "github.com/docker/docker/daemon/cluster/executor"
	clustertypes "github.com/docker/docker/daemon/cluster/provider"
	netconst "github.com/docker/docker/libnetwork/datastore"
	"github.com/docker/go-connections/nat"
	"github.com/docker/go-units"
	"github.com/docker/swarmkit/agent/exec"
	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/api/genericresource"
	"github.com/docker/swarmkit/template"
	gogotypes "github.com/gogo/protobuf/types"
	"github.com/sirupsen/logrus"
)

const (
	// systemLabelPrefix represents the reserved namespace for system labels.
	systemLabelPrefix = "com.docker.swarm"
)

// containerConfig converts task properties into docker container compatible
// components.
type containerConfig struct {
	task                *api.Task
	networksAttachments map[string]*api.NetworkAttachment
}

// newContainerConfig returns a validated container config. No methods should
// return an error if this function returns without error.
func newContainerConfig(t *api.Task, node *api.NodeDescription) (*containerConfig, error) {
	var c containerConfig
	return &c, c.setTask(t, node)
}

func (c *containerConfig) setTask(t *api.Task, node *api.NodeDescription) error {
	if t.Spec.GetContainer() == nil && t.Spec.GetAttachment() == nil {
		return exec.ErrRuntimeUnsupported
	}

	container := t.Spec.GetContainer()
	if container != nil {
		if container.Image == "" {
			return ErrImageRequired
		}

		if err := validateMounts(container.Mounts); err != nil {
			return err
		}
	}

	// index the networks by name
	c.networksAttachments = make(map[string]*api.NetworkAttachment, len(t.Networks))
	for _, attachment := range t.Networks {
		c.networksAttachments[attachment.Network.Spec.Annotations.Name] = attachment
	}

	c.task = t

	if t.Spec.GetContainer() != nil {
		preparedSpec, err := template.ExpandContainerSpec(node, t)
		if err != nil {
			return err
		}
		c.task.Spec.Runtime = &api.TaskSpec_Container{
			Container: preparedSpec,
		}
	}

	return nil
}
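
// Illustrative usage sketch (not part of the original file): an executor would
// typically build a containerConfig once per task and derive the engine-side
// structures from it. The task, node and backend values below are hypothetical
// placeholders.
//
//	cnt, err := newContainerConfig(task, node)
//	if err != nil {
//		return err // e.g. ErrImageRequired or a mount validation error
//	}
//	cfg := cnt.config()         // *enginecontainer.Config
//	hostCfg := cnt.hostConfig() // *enginecontainer.HostConfig
//	netCfg := cnt.createNetworkingConfig(backend)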

func (c *containerConfig) networkAttachmentContainerID() string {
	attachment := c.task.Spec.GetAttachment()
	if attachment == nil {
		return ""
	}

	return attachment.ContainerID
}

func (c *containerConfig) taskID() string {
	return c.task.ID
}

func (c *containerConfig) spec() *api.ContainerSpec {
	return c.task.Spec.GetContainer()
}

func (c *containerConfig) nameOrID() string {
	if c.task.Spec.GetContainer() != nil {
		return c.name()
	}

	return c.networkAttachmentContainerID()
}

func (c *containerConfig) name() string {
	if c.task.Annotations.Name != "" {
		// if set, use the container Annotations.Name field, set in the orchestrator.
		return c.task.Annotations.Name
	}

	slot := fmt.Sprint(c.task.Slot)
	if slot == "" || c.task.Slot == 0 {
		slot = c.task.NodeID
	}

	// fallback to service.slot.id.
	return fmt.Sprintf("%s.%s.%s", c.task.ServiceAnnotations.Name, slot, c.task.ID)
}
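
// Illustrative example (hypothetical IDs): for a replicated service "web",
// slot 1 and task ID "91kqrhzq4l1i", the fallback name is
//
//	"web.1.91kqrhzq4l1i"
//
// For a global-service task (Slot == 0), the node ID takes the slot's place.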

func (c *containerConfig) image() string {
	raw := c.spec().Image
	ref, err := reference.ParseNormalizedNamed(raw)
	if err != nil {
		return raw
	}
	return reference.FamiliarString(reference.TagNameOnly(ref))
}

func (c *containerConfig) portBindings() nat.PortMap {
	portBindings := nat.PortMap{}
	if c.task.Endpoint == nil {
		return portBindings
	}

	for _, portConfig := range c.task.Endpoint.Ports {
		if portConfig.PublishMode != api.PublishModeHost {
			continue
		}

		port := nat.Port(fmt.Sprintf("%d/%s", portConfig.TargetPort, strings.ToLower(portConfig.Protocol.String())))
		binding := []nat.PortBinding{
			{},
		}

		if portConfig.PublishedPort != 0 {
			binding[0].HostPort = strconv.Itoa(int(portConfig.PublishedPort))
		}
		portBindings[port] = binding
	}

	return portBindings
}
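
// Illustrative example (hypothetical port values): a host-mode port config
// with TargetPort 8080, protocol TCP and PublishedPort 80 produces the map
// entry
//
//	"8080/tcp": []nat.PortBinding{{HostPort: "80"}}
//
// Ingress-mode ports are skipped here and surfaced through serviceConfig
// instead.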

func (c *containerConfig) isolation() enginecontainer.Isolation {
	return convert.IsolationFromGRPC(c.spec().Isolation)
}

func (c *containerConfig) init() *bool {
	if c.spec().Init == nil {
		return nil
	}
	init := c.spec().Init.GetValue()
	return &init
}

func (c *containerConfig) exposedPorts() map[nat.Port]struct{} {
	exposedPorts := make(map[nat.Port]struct{})
	if c.task.Endpoint == nil {
		return exposedPorts
	}

	for _, portConfig := range c.task.Endpoint.Ports {
		if portConfig.PublishMode != api.PublishModeHost {
			continue
		}

		port := nat.Port(fmt.Sprintf("%d/%s", portConfig.TargetPort, strings.ToLower(portConfig.Protocol.String())))
		exposedPorts[port] = struct{}{}
	}

	return exposedPorts
}

func (c *containerConfig) config() *enginecontainer.Config {
	genericEnvs := genericresource.EnvFormat(c.task.AssignedGenericResources, "DOCKER_RESOURCE")
	env := append(c.spec().Env, genericEnvs...)

	config := &enginecontainer.Config{
		Labels:       c.labels(),
		StopSignal:   c.spec().StopSignal,
		Tty:          c.spec().TTY,
		OpenStdin:    c.spec().OpenStdin,
		User:         c.spec().User,
		Env:          env,
		Hostname:     c.spec().Hostname,
		WorkingDir:   c.spec().Dir,
		Image:        c.image(),
		ExposedPorts: c.exposedPorts(),
		Healthcheck:  c.healthcheck(),
	}

	if len(c.spec().Command) > 0 {
		// If Command is provided, we replace the whole invocation: Command
		// becomes the Entrypoint and Args (if any) becomes Cmd, overriding
		// the image's defaults.
		config.Entrypoint = append(config.Entrypoint, c.spec().Command...)
		config.Cmd = append(config.Cmd, c.spec().Args...)
	} else if len(c.spec().Args) > 0 {
		// In this case, we assume the image has an Entrypoint and Args
		// specifies the arguments for that entrypoint.
		config.Cmd = c.spec().Args
	}

	return config
}
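
// Illustrative example (hypothetical spec values): with
//
//	Command = ["/bin/sh", "-c"]
//	Args    = ["echo hello"]
//
// the engine config gets Entrypoint ["/bin/sh", "-c"] and Cmd ["echo hello"],
// overriding whatever the image defines. With only Args set, the image's own
// Entrypoint is kept and Cmd becomes ["echo hello"].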

func (c *containerConfig) labels() map[string]string {
	var (
		system = map[string]string{
			"task":         "", // mark as cluster task
			"task.id":      c.task.ID,
			"task.name":    c.name(),
			"node.id":      c.task.NodeID,
			"service.id":   c.task.ServiceID,
			"service.name": c.task.ServiceAnnotations.Name,
		}
		labels = make(map[string]string)
	)

	// base labels are those defined in the spec.
	for k, v := range c.spec().Labels {
		labels[k] = v
	}

	// we then apply the overrides from the task, which may be set via the
	// orchestrator.
	for k, v := range c.task.Annotations.Labels {
		labels[k] = v
	}

	// finally, we apply the system labels, which override all labels.
	for k, v := range system {
		labels[strings.Join([]string{systemLabelPrefix, k}, ".")] = v
	}

	return labels
}
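
// Illustrative example (hypothetical IDs): the system entries are namespaced
// under systemLabelPrefix, so the resulting map contains keys such as
//
//	"com.docker.swarm.task.id":      "91kqrhzq4l1i"
//	"com.docker.swarm.service.name": "web"
//
// alongside any user-defined labels from the spec and the task annotations,
// with the system entries always winning on conflict.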

func (c *containerConfig) mounts() []enginemount.Mount {
	var r []enginemount.Mount
	for _, mount := range c.spec().Mounts {
		r = append(r, convertMount(mount))
	}
	return r
}

func convertMount(m api.Mount) enginemount.Mount {
	mount := enginemount.Mount{
		Source:   m.Source,
		Target:   m.Target,
		ReadOnly: m.ReadOnly,
	}

	switch m.Type {
	case api.MountTypeBind:
		mount.Type = enginemount.TypeBind
	case api.MountTypeVolume:
		mount.Type = enginemount.TypeVolume
	case api.MountTypeTmpfs:
		mount.Type = enginemount.TypeTmpfs
	case api.MountTypeNamedPipe:
		mount.Type = enginemount.TypeNamedPipe
	}

	if m.BindOptions != nil {
		mount.BindOptions = &enginemount.BindOptions{
			NonRecursive: m.BindOptions.NonRecursive,
		}
		switch m.BindOptions.Propagation {
		case api.MountPropagationRPrivate:
			mount.BindOptions.Propagation = enginemount.PropagationRPrivate
		case api.MountPropagationPrivate:
			mount.BindOptions.Propagation = enginemount.PropagationPrivate
		case api.MountPropagationRSlave:
			mount.BindOptions.Propagation = enginemount.PropagationRSlave
		case api.MountPropagationSlave:
			mount.BindOptions.Propagation = enginemount.PropagationSlave
		case api.MountPropagationRShared:
			mount.BindOptions.Propagation = enginemount.PropagationRShared
		case api.MountPropagationShared:
			mount.BindOptions.Propagation = enginemount.PropagationShared
		}
	}

	if m.VolumeOptions != nil {
		mount.VolumeOptions = &enginemount.VolumeOptions{
			NoCopy: m.VolumeOptions.NoCopy,
		}
		if m.VolumeOptions.Labels != nil {
			mount.VolumeOptions.Labels = make(map[string]string, len(m.VolumeOptions.Labels))
			for k, v := range m.VolumeOptions.Labels {
				mount.VolumeOptions.Labels[k] = v
			}
		}
		if m.VolumeOptions.DriverConfig != nil {
			mount.VolumeOptions.DriverConfig = &enginemount.Driver{
				Name: m.VolumeOptions.DriverConfig.Name,
			}
			if m.VolumeOptions.DriverConfig.Options != nil {
				mount.VolumeOptions.DriverConfig.Options = make(map[string]string, len(m.VolumeOptions.DriverConfig.Options))
				for k, v := range m.VolumeOptions.DriverConfig.Options {
					mount.VolumeOptions.DriverConfig.Options[k] = v
				}
			}
		}
	}

	if m.TmpfsOptions != nil {
		mount.TmpfsOptions = &enginemount.TmpfsOptions{
			SizeBytes: m.TmpfsOptions.SizeBytes,
			Mode:      m.TmpfsOptions.Mode,
		}
	}

	return mount
}

func (c *containerConfig) healthcheck() *enginecontainer.HealthConfig {
	hcSpec := c.spec().Healthcheck
	if hcSpec == nil {
		return nil
	}
	interval, _ := gogotypes.DurationFromProto(hcSpec.Interval)
	timeout, _ := gogotypes.DurationFromProto(hcSpec.Timeout)
	startPeriod, _ := gogotypes.DurationFromProto(hcSpec.StartPeriod)
	return &enginecontainer.HealthConfig{
		Test:        hcSpec.Test,
		Interval:    interval,
		Timeout:     timeout,
		Retries:     int(hcSpec.Retries),
		StartPeriod: startPeriod,
	}
}

func (c *containerConfig) hostConfig() *enginecontainer.HostConfig {
	hc := &enginecontainer.HostConfig{
		Resources:      c.resources(),
		GroupAdd:       c.spec().Groups,
		PortBindings:   c.portBindings(),
		Mounts:         c.mounts(),
		ReadonlyRootfs: c.spec().ReadOnly,
		Isolation:      c.isolation(),
		Init:           c.init(),
		Sysctls:        c.spec().Sysctls,
		CapAdd:         c.spec().CapabilityAdd,
		CapDrop:        c.spec().CapabilityDrop,
	}

	if c.spec().DNSConfig != nil {
		hc.DNS = c.spec().DNSConfig.Nameservers
		hc.DNSSearch = c.spec().DNSConfig.Search
		hc.DNSOptions = c.spec().DNSConfig.Options
	}

	c.applyPrivileges(hc)

	// The format of extra hosts on swarmkit is specified in:
	// http://man7.org/linux/man-pages/man5/hosts.5.html
	//    IP_address canonical_hostname [aliases...]
	// However, the format of ExtraHosts in HostConfig is
	//    <host>:<ip>
	// We need to do the conversion here
	// (Alias is ignored for now)
	for _, entry := range c.spec().Hosts {
		parts := strings.Fields(entry)
		if len(parts) > 1 {
			hc.ExtraHosts = append(hc.ExtraHosts, fmt.Sprintf("%s:%s", parts[1], parts[0]))
		}
	}

	if c.task.LogDriver != nil {
		hc.LogConfig = enginecontainer.LogConfig{
			Type:   c.task.LogDriver.Name,
			Config: c.task.LogDriver.Options,
		}
	}

	if len(c.task.Networks) > 0 {
		labels := c.task.Networks[0].Network.Spec.Annotations.Labels
		name := c.task.Networks[0].Network.Spec.Annotations.Name
		if v, ok := labels["com.docker.swarm.predefined"]; ok && v == "true" {
			hc.NetworkMode = enginecontainer.NetworkMode(name)
		}
	}

	return hc
}
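
// Illustrative example (hypothetical entry): a swarmkit hosts entry of
//
//	"10.0.0.5 db.internal"
//
// is rewritten into the engine's ExtraHosts form "db.internal:10.0.0.5";
// any additional aliases on the line are currently dropped, as noted above.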

// This handles the case of volumes that are defined inside a service Mount
func (c *containerConfig) volumeCreateRequest(mount *api.Mount) *volumetypes.VolumeCreateBody {
	var (
		driverName string
		driverOpts map[string]string
		labels     map[string]string
	)

	if mount.VolumeOptions != nil && mount.VolumeOptions.DriverConfig != nil {
		driverName = mount.VolumeOptions.DriverConfig.Name
		driverOpts = mount.VolumeOptions.DriverConfig.Options
		labels = mount.VolumeOptions.Labels
	}

	if mount.VolumeOptions != nil {
		return &volumetypes.VolumeCreateBody{
			Name:       mount.Source,
			Driver:     driverName,
			DriverOpts: driverOpts,
			Labels:     labels,
		}
	}
	return nil
}

func (c *containerConfig) resources() enginecontainer.Resources {
	resources := enginecontainer.Resources{}

	// set pids limit
	pidsLimit := c.spec().PidsLimit
	if pidsLimit > 0 {
		resources.PidsLimit = &pidsLimit
	}

	resources.Ulimits = make([]*units.Ulimit, len(c.spec().Ulimits))
	for i, ulimit := range c.spec().Ulimits {
		resources.Ulimits[i] = &units.Ulimit{
			Name: ulimit.Name,
			Soft: ulimit.Soft,
			Hard: ulimit.Hard,
		}
	}

	// If no limits are specified let the engine use its defaults.
	//
	// TODO(aluzzardi): We might want to set some limits anyway otherwise
	// "unlimited" tasks will step over the reservation of other tasks.
	r := c.task.Spec.Resources
	if r == nil || r.Limits == nil {
		return resources
	}

	if r.Limits.MemoryBytes > 0 {
		resources.Memory = r.Limits.MemoryBytes
	}

	if r.Limits.NanoCPUs > 0 {
		resources.NanoCPUs = r.Limits.NanoCPUs
	}

	return resources
}

// Docker daemon supports just 1 network during container create.
func (c *containerConfig) createNetworkingConfig(b executorpkg.Backend) *network.NetworkingConfig {
	var networks []*api.NetworkAttachment
	if c.task.Spec.GetContainer() != nil || c.task.Spec.GetAttachment() != nil {
		networks = c.task.Networks
	}

	epConfig := make(map[string]*network.EndpointSettings)
	if len(networks) > 0 {
		epConfig[networks[0].Network.Spec.Annotations.Name] = getEndpointConfig(networks[0], b)
	}

	return &network.NetworkingConfig{EndpointsConfig: epConfig}
}

// TODO: Merge this function with createNetworkingConfig after daemon supports multiple networks in container create
func (c *containerConfig) connectNetworkingConfig(b executorpkg.Backend) *network.NetworkingConfig {
	var networks []*api.NetworkAttachment
	if c.task.Spec.GetContainer() != nil {
		networks = c.task.Networks
	}

	// First network is used during container create. Other networks are used in "docker network connect"
	if len(networks) < 2 {
		return nil
	}

	epConfig := make(map[string]*network.EndpointSettings)
	for _, na := range networks[1:] {
		epConfig[na.Network.Spec.Annotations.Name] = getEndpointConfig(na, b)
	}
	return &network.NetworkingConfig{EndpointsConfig: epConfig}
}
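
// Illustrative example (hypothetical attachments): for a task attached to
// networks ["frontend", "backend", "monitoring"], createNetworkingConfig
// returns endpoint settings for "frontend" only (the one network the daemon
// accepts at container create), while connectNetworkingConfig returns settings
// for "backend" and "monitoring", which are attached afterwards via the
// equivalent of "docker network connect".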

func getEndpointConfig(na *api.NetworkAttachment, b executorpkg.Backend) *network.EndpointSettings {
	var ipv4, ipv6 string
	for _, addr := range na.Addresses {
		ip, _, err := net.ParseCIDR(addr)
		if err != nil {
			continue
		}

		if ip.To4() != nil {
			ipv4 = ip.String()
			continue
		}

		if ip.To16() != nil {
			ipv6 = ip.String()
		}
	}

	n := &network.EndpointSettings{
		NetworkID: na.Network.ID,
		IPAMConfig: &network.EndpointIPAMConfig{
			IPv4Address: ipv4,
			IPv6Address: ipv6,
		},
		DriverOpts: na.DriverAttachmentOpts,
	}
	if v, ok := na.Network.Spec.Annotations.Labels["com.docker.swarm.predefined"]; ok && v == "true" {
		if ln, err := b.FindNetwork(na.Network.Spec.Annotations.Name); err == nil {
			n.NetworkID = ln.ID()
		}
	}
	return n
}

func (c *containerConfig) virtualIP(networkID string) string {
	if c.task.Endpoint == nil {
		return ""
	}

	for _, eVip := range c.task.Endpoint.VirtualIPs {
		// We only support IPv4 VIPs for now.
		if eVip.NetworkID == networkID {
			vip, _, err := net.ParseCIDR(eVip.Addr)
			if err != nil {
				return ""
			}

			return vip.String()
		}
	}

	return ""
}

func (c *containerConfig) serviceConfig() *clustertypes.ServiceConfig {
	if len(c.task.Networks) == 0 {
		return nil
	}

	logrus.Debugf("Creating service config in agent for t = %+v", c.task)
	svcCfg := &clustertypes.ServiceConfig{
		Name:             c.task.ServiceAnnotations.Name,
		Aliases:          make(map[string][]string),
		ID:               c.task.ServiceID,
		VirtualAddresses: make(map[string]*clustertypes.VirtualAddress),
	}

	for _, na := range c.task.Networks {
		svcCfg.VirtualAddresses[na.Network.ID] = &clustertypes.VirtualAddress{
			// We support only IPv4 virtual IP for now.
			IPv4: c.virtualIP(na.Network.ID),
		}

		if len(na.Aliases) > 0 {
			svcCfg.Aliases[na.Network.ID] = na.Aliases
		}
	}

	if c.task.Endpoint != nil {
		for _, ePort := range c.task.Endpoint.Ports {
			if ePort.PublishMode != api.PublishModeIngress {
				continue
			}

			svcCfg.ExposedPorts = append(svcCfg.ExposedPorts, &clustertypes.PortConfig{
				Name:          ePort.Name,
				Protocol:      int32(ePort.Protocol),
				TargetPort:    ePort.TargetPort,
				PublishedPort: ePort.PublishedPort,
			})
		}
	}

	return svcCfg
}

func (c *containerConfig) networkCreateRequest(name string) (clustertypes.NetworkCreateRequest, error) {
	na, ok := c.networksAttachments[name]
	if !ok {
		return clustertypes.NetworkCreateRequest{}, errors.New("container: unknown network referenced")
	}

	options := types.NetworkCreate{
		// ID:     na.Network.ID,
		Labels:         na.Network.Spec.Annotations.Labels,
		Internal:       na.Network.Spec.Internal,
		Attachable:     na.Network.Spec.Attachable,
		Ingress:        convert.IsIngressNetwork(na.Network),
		EnableIPv6:     na.Network.Spec.Ipv6Enabled,
		CheckDuplicate: true,
		Scope:          netconst.SwarmScope,
	}

	if na.Network.Spec.GetNetwork() != "" {
		options.ConfigFrom = &network.ConfigReference{
			Network: na.Network.Spec.GetNetwork(),
		}
	}

	if na.Network.DriverState != nil {
		options.Driver = na.Network.DriverState.Name
		options.Options = na.Network.DriverState.Options
	}

	if na.Network.IPAM != nil {
		options.IPAM = &network.IPAM{
			Driver:  na.Network.IPAM.Driver.Name,
			Options: na.Network.IPAM.Driver.Options,
		}

		for _, ic := range na.Network.IPAM.Configs {
			c := network.IPAMConfig{
				Subnet:  ic.Subnet,
				IPRange: ic.Range,
				Gateway: ic.Gateway,
			}
			options.IPAM.Config = append(options.IPAM.Config, c)
		}
	}

	return clustertypes.NetworkCreateRequest{
		ID: na.Network.ID,
		NetworkCreateRequest: types.NetworkCreateRequest{
			Name:          name,
			NetworkCreate: options,
		},
	}, nil
}

func (c *containerConfig) applyPrivileges(hc *enginecontainer.HostConfig) {
	privileges := c.spec().Privileges
	if privileges == nil {
		return
	}

	credentials := privileges.CredentialSpec
	if credentials != nil {
		switch credentials.Source.(type) {
		case *api.Privileges_CredentialSpec_File:
			hc.SecurityOpt = append(hc.SecurityOpt, "credentialspec=file://"+credentials.GetFile())
		case *api.Privileges_CredentialSpec_Registry:
			hc.SecurityOpt = append(hc.SecurityOpt, "credentialspec=registry://"+credentials.GetRegistry())
		case *api.Privileges_CredentialSpec_Config:
			hc.SecurityOpt = append(hc.SecurityOpt, "credentialspec=config://"+credentials.GetConfig())
		}
	}

	selinux := privileges.SELinuxContext
	if selinux != nil {
		if selinux.Disable {
			hc.SecurityOpt = append(hc.SecurityOpt, "label=disable")
		}
		if selinux.User != "" {
			hc.SecurityOpt = append(hc.SecurityOpt, "label=user:"+selinux.User)
		}
		if selinux.Role != "" {
			hc.SecurityOpt = append(hc.SecurityOpt, "label=role:"+selinux.Role)
		}
		if selinux.Level != "" {
			hc.SecurityOpt = append(hc.SecurityOpt, "label=level:"+selinux.Level)
		}
		if selinux.Type != "" {
			hc.SecurityOpt = append(hc.SecurityOpt, "label=type:"+selinux.Type)
		}
	}
}
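
// Illustrative example (hypothetical values): an SELinux context with
// User "system_u" and Type "container_t", combined with a file credential
// spec named "spec.json", would append the following entries to
// hc.SecurityOpt:
//
//	"credentialspec=file://spec.json"
//	"label=user:system_u"
//	"label=type:container_t"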

func (c containerConfig) eventFilter() filters.Args {
	filter := filters.NewArgs()
	filter.Add("type", events.ContainerEventType)
	filter.Add("name", c.name())
	filter.Add("label", fmt.Sprintf("%v.task.id=%v", systemLabelPrefix, c.task.ID))
	return filter
}