API: swarm: move PidsLimit to TaskTemplate.Resources
The initial implementation followed the Swarm API, where PidsLimit is located in ContainerSpec. That is not the desired place for this property, so this change moves the field to TaskTemplate.Resources in our API. A similar change should be made in the SwarmKit API (likely keeping the old field for backward compatibility, because it was merged some releases back).

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
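For illustration, a minimal sketch of what the new shape looks like from the Go client (assuming the github.com/docker/docker/client SDK and the api/types/swarm types; the service name, image, and limit value are placeholders): the PIDs limit is now set through TaskTemplate.Resources.Limits.Pids rather than ContainerSpec.PidsLimit.

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}

	spec := swarm.ServiceSpec{
		Annotations: swarm.Annotations{Name: "pids-limited"}, // placeholder name
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: &swarm.ContainerSpec{
				Image: "nginx:alpine", // placeholder image
			},
			// The PIDs limit now lives in Resources.Limits.Pids (API v1.41 and up),
			// no longer in ContainerSpec.PidsLimit.
			Resources: &swarm.ResourceRequirements{
				Limits: &swarm.Limit{
					Pids: 100,
				},
			},
		},
	}

	resp, err := cli.ServiceCreate(context.Background(), spec, types.ServiceCreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("created service", resp.ID)
}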
parent fa38a6cd21
commit 687bdc7c71

10 changed files with 80 additions and 35 deletions
@@ -97,10 +97,13 @@ func adjustForAPIVersion(cliVersion string, service *swarm.ServiceSpec) {
 	}
 	if versions.LessThan(cliVersion, "1.41") {
 		if service.TaskTemplate.ContainerSpec != nil {
-			// Capabilities and PidsLimit for docker swarm services weren't
+			// Capabilities for docker swarm services weren't
 			// supported before API version 1.41
 			service.TaskTemplate.ContainerSpec.Capabilities = nil
-			service.TaskTemplate.ContainerSpec.PidsLimit = 0
 		}
+		if service.TaskTemplate.Resources != nil && service.TaskTemplate.Resources.Limits != nil {
+			// Limits.Pids not supported before API version 1.41
+			service.TaskTemplate.Resources.Limits.Pids = 0
+		}
 
 		// jobs were only introduced in API version 1.41. Nil out both Job

@@ -17,8 +17,7 @@ func TestAdjustForAPIVersion(t *testing.T) {
 	spec := &swarm.ServiceSpec{
 		TaskTemplate: swarm.TaskSpec{
 			ContainerSpec: &swarm.ContainerSpec{
-				Sysctls:   expectedSysctls,
-				PidsLimit: 300,
+				Sysctls: expectedSysctls,
 				Privileges: &swarm.Privileges{
 					CredentialSpec: &swarm.CredentialSpec{
 						Config: "someconfig",

@@ -44,6 +43,11 @@ func TestAdjustForAPIVersion(t *testing.T) {
 			Placement: &swarm.Placement{
 				MaxReplicas: 222,
 			},
+			Resources: &swarm.ResourceRequirements{
+				Limits: &swarm.Limit{
+					Pids: 300,
+				},
+			},
 		},
 	}
 

@@ -55,10 +59,10 @@ func TestAdjustForAPIVersion(t *testing.T) {
 		t.Error("Sysctls was stripped from spec")
 	}
 
-	if spec.TaskTemplate.ContainerSpec.PidsLimit == 0 {
+	if spec.TaskTemplate.Resources.Limits.Pids == 0 {
 		t.Error("PidsLimit was stripped from spec")
 	}
-	if spec.TaskTemplate.ContainerSpec.PidsLimit != 300 {
+	if spec.TaskTemplate.Resources.Limits.Pids != 300 {
 		t.Error("PidsLimit did not preserve the value from spec")
 	}
 

@@ -80,7 +84,7 @@ func TestAdjustForAPIVersion(t *testing.T) {
 		t.Error("Sysctls was not stripped from spec")
 	}
 
-	if spec.TaskTemplate.ContainerSpec.PidsLimit != 0 {
+	if spec.TaskTemplate.Resources.Limits.Pids != 0 {
 		t.Error("PidsLimit was not stripped from spec")
 	}
 

@@ -638,6 +638,13 @@ definitions:
         type: "integer"
         format: "int64"
         example: 8272408576
+      Pids:
+        description: |
+          Limits the maximum number of PIDs in the container. Set `0` for unlimited.
+        type: "integer"
+        format: "int64"
+        default: 0
+        example: 100
 
   ResourceObject:
     description: |

@@ -3220,13 +3227,6 @@ definitions:
               configured on the daemon) is used.
             type: "boolean"
             x-nullable: true
-          PidsLimit:
-            description: |
-              Tune a container's PIDs limit. Set `0` for unlimited.
-            type: "integer"
-            format: "int64"
-            default: 0
-            example: 100
           Sysctls:
             description: |
              Set kernel namespaced parameters (sysctls) in the container.

@@ -72,7 +72,6 @@ type ContainerSpec struct {
 	Secrets      []*SecretReference  `json:",omitempty"`
 	Configs      []*ConfigReference  `json:",omitempty"`
 	Isolation    container.Isolation `json:",omitempty"`
-	PidsLimit    int64               `json:",omitempty"`
 	Sysctls      map[string]string   `json:",omitempty"`
 	Capabilities []string            `json:",omitempty"`
 }

@@ -103,6 +103,7 @@ type Resources struct {
 type Limit struct {
 	NanoCPUs    int64 `json:",omitempty"`
 	MemoryBytes int64 `json:",omitempty"`
+	Pids        int64 `json:",omitempty"`
 }
 
 // GenericResource represents a "user defined" resource which can

@@ -36,7 +36,6 @@ func containerSpecFromGRPC(c *swarmapi.ContainerSpec) *types.ContainerSpec {
 		Configs:      configReferencesFromGRPC(c.Configs),
 		Isolation:    IsolationFromGRPC(c.Isolation),
 		Init:         initFromGRPC(c.Init),
-		PidsLimit:    c.PidsLimit,
 		Sysctls:      c.Sysctls,
 		Capabilities: c.Capabilities,
 	}

@@ -264,7 +263,6 @@ func containerToGRPC(c *types.ContainerSpec) (*swarmapi.ContainerSpec, error) {
 		Secrets:      secretReferencesToGRPC(c.Secrets),
 		Isolation:    isolationToGRPC(c.Isolation),
 		Init:         initToGRPC(c.Init),
-		PidsLimit:    c.PidsLimit,
 		Sysctls:      c.Sysctls,
 		Capabilities: c.Capabilities,
 	}

@@ -193,6 +193,10 @@ func ServiceSpecToGRPC(s types.ServiceSpec) (swarmapi.ServiceSpec, error) {
 		if err != nil {
 			return swarmapi.ServiceSpec{}, err
 		}
+		if s.TaskTemplate.Resources != nil && s.TaskTemplate.Resources.Limits != nil {
+			// TODO remove this (or keep for backward compat) once SwarmKit API moved PidsLimit into Resources
+			containerSpec.PidsLimit = s.TaskTemplate.Resources.Limits.Pids
+		}
 		spec.Task.Runtime = &swarmapi.TaskSpec_Container{Container: containerSpec}
 	} else {
 		// If the ContainerSpec is nil, we can't set the task runtime

@@ -396,15 +400,31 @@ func GenericResourcesFromGRPC(genericRes []*swarmapi.GenericResource) []types.Ge
 	return generic
 }
 
-func resourcesFromGRPC(res *swarmapi.ResourceRequirements) *types.ResourceRequirements {
+// resourcesFromGRPC creates a ResourceRequirements from the GRPC TaskSpec.
+// We currently require the whole TaskSpec to be passed, because PidsLimit
+// is returned as part of the container spec, instead of Resources
+// TODO move PidsLimit to Resources in the Swarm API
+func resourcesFromGRPC(ts *swarmapi.TaskSpec) *types.ResourceRequirements {
 	var resources *types.ResourceRequirements
-	if res != nil {
-		resources = &types.ResourceRequirements{}
+
+	if cs := ts.GetContainer(); cs != nil && cs.PidsLimit != 0 {
+		resources = &types.ResourceRequirements{
+			Limits: &types.Limit{
+				Pids: cs.PidsLimit,
+			},
+		}
+	}
+	if ts.Resources != nil {
+		if resources == nil {
+			resources = &types.ResourceRequirements{}
+		}
+		res := ts.Resources
 		if res.Limits != nil {
-			resources.Limits = &types.Limit{
-				NanoCPUs:    res.Limits.NanoCPUs,
-				MemoryBytes: res.Limits.MemoryBytes,
-			}
+			if resources.Limits == nil {
+				resources.Limits = &types.Limit{}
+			}
+			resources.Limits.NanoCPUs = res.Limits.NanoCPUs
+			resources.Limits.MemoryBytes = res.Limits.MemoryBytes
 		}
 		if res.Reservations != nil {
 			resources.Reservations = &types.Resources{

@@ -441,6 +461,7 @@ func resourcesToGRPC(res *types.ResourceRequirements) *swarmapi.ResourceRequirem
 	if res != nil {
 		reqs = &swarmapi.ResourceRequirements{}
 		if res.Limits != nil {
+			// TODO add PidsLimit once Swarm API has been updated to move it into Limits
 			reqs.Limits = &swarmapi.Resources{
 				NanoCPUs:    res.Limits.NanoCPUs,
 				MemoryBytes: res.Limits.MemoryBytes,

@@ -657,7 +678,7 @@ func taskSpecFromGRPC(taskSpec swarmapi.TaskSpec) (types.TaskSpec, error) {
 	}
 
 	t := types.TaskSpec{
-		Resources:     resourcesFromGRPC(taskSpec.Resources),
+		Resources:     resourcesFromGRPC(&taskSpec),
 		RestartPolicy: restartPolicyFromGRPC(taskSpec.Restart),
 		Placement:     placementFromGRPC(taskSpec.Placement),
 		LogDriver:     driverFromGRPC(taskSpec.LogDriver),

@@ -31,12 +31,13 @@ keywords: "API, Docker, rcli, REST, documentation"
 * `POST /services/{id}/update` now accepts `Capabilities` as part of the `ContainerSpec`.
 * `GET /tasks` now returns `Capabilities` as part of the `ContainerSpec`.
 * `GET /tasks/{id}` now returns `Capabilities` as part of the `ContainerSpec`.
-* `GET /services` now returns `PidsLimit` as part of the `ContainerSpec`.
-* `GET /services/{id}` now returns `PidsLimit` as part of the `ContainerSpec`.
-* `POST /services/create` now accepts `PidsLimit` as part of the `ContainerSpec`.
-* `POST /services/{id}/update` now accepts `PidsLimit` as part of the `ContainerSpec`.
-* `GET /tasks` now returns `PidsLimit` as part of the `ContainerSpec`.
-* `GET /tasks/{id}` now returns `PidsLimit` as part of the `ContainerSpec`.
+* `GET /services` now returns `Pids` in `TaskTemplate.Resources.Limits`.
+* `GET /services/{id}` now returns `Pids` in `TaskTemplate.Resources.Limits`.
+* `POST /services/create` now accepts `Pids` in `TaskTemplate.Resources.Limits`.
+* `POST /services/{id}/update` now accepts `Pids` in `TaskTemplate.Resources.Limits`
+  to limit the maximum number of PIDs.
+* `GET /tasks` now returns `Pids` in `TaskTemplate.Resources.Limits`.
+* `GET /tasks/{id}` now returns `Pids` in `TaskTemplate.Resources.Limits`.
 * `POST /containers/create` on Linux now accepts the `HostConfig.CgroupnsMode` property.
   Set the property to `host` to create the container in the daemon's cgroup namespace, or
   `private` to create the container in its own private cgroup namespace. The per-daemon

@@ -196,11 +196,11 @@ func ServiceWithCapabilities(Capabilities []string) ServiceSpecOpt {
 	}
 }
 
-// ServiceWithPidsLimit sets the PidsLimit option of the service's ContainerSpec.
+// ServiceWithPidsLimit sets the PidsLimit option of the service's Resources.Limits.
 func ServiceWithPidsLimit(limit int64) ServiceSpecOpt {
 	return func(spec *swarmtypes.ServiceSpec) {
-		ensureContainerSpec(spec)
-		spec.TaskTemplate.ContainerSpec.PidsLimit = limit
+		ensureResources(spec)
+		spec.TaskTemplate.Resources.Limits.Pids = limit
 	}
 }
 

@@ -235,6 +235,18 @@ func ExecTask(t *testing.T, d *daemon.Daemon, task swarmtypes.Task, config types
 	return attach
 }
 
+func ensureResources(spec *swarmtypes.ServiceSpec) {
+	if spec.TaskTemplate.Resources == nil {
+		spec.TaskTemplate.Resources = &swarmtypes.ResourceRequirements{}
+	}
+	if spec.TaskTemplate.Resources.Limits == nil {
+		spec.TaskTemplate.Resources.Limits = &swarmtypes.Limit{}
+	}
+	if spec.TaskTemplate.Resources.Reservations == nil {
+		spec.TaskTemplate.Resources.Reservations = &swarmtypes.Resources{}
+	}
+}
+
 func ensureContainerSpec(spec *swarmtypes.ServiceSpec) {
 	if spec.TaskTemplate.ContainerSpec == nil {
 		spec.TaskTemplate.ContainerSpec = &swarmtypes.ContainerSpec{}

@@ -295,7 +295,13 @@ func TestServiceUpdatePidsLimit(t *testing.T) {
 				serviceID = swarm.CreateService(t, d, swarm.ServiceWithPidsLimit(tc.pidsLimit))
 			} else {
 				service = getService(t, cli, serviceID)
-				service.Spec.TaskTemplate.ContainerSpec.PidsLimit = tc.pidsLimit
+				if service.Spec.TaskTemplate.Resources == nil {
+					service.Spec.TaskTemplate.Resources = &swarmtypes.ResourceRequirements{}
+				}
+				if service.Spec.TaskTemplate.Resources.Limits == nil {
+					service.Spec.TaskTemplate.Resources.Limits = &swarmtypes.Limit{}
+				}
+				service.Spec.TaskTemplate.Resources.Limits.Pids = tc.pidsLimit
 				_, err := cli.ServiceUpdate(ctx, serviceID, service.Version, service.Spec, types.ServiceUpdateOptions{})
 				assert.NilError(t, err)
 				poll.WaitOn(t, serviceIsUpdated(cli, serviceID), swarm.ServicePoll)

@@ -304,7 +310,7 @@ func TestServiceUpdatePidsLimit(t *testing.T) {
 			poll.WaitOn(t, swarm.RunningTasksCount(cli, serviceID, 1), swarm.ServicePoll)
 			service = getService(t, cli, serviceID)
 			container := getServiceTaskContainer(ctx, t, cli, serviceID)
-			assert.Equal(t, service.Spec.TaskTemplate.ContainerSpec.PidsLimit, tc.expected)
+			assert.Equal(t, service.Spec.TaskTemplate.Resources.Limits.Pids, tc.expected)
 			if tc.expected == 0 {
 				if container.HostConfig.Resources.PidsLimit != nil {
 					t.Fatalf("Expected container.HostConfig.Resources.PidsLimit to be nil")

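Reading the value back follows the same shape. A hedged sketch under the same Go SDK assumptions as above (pidsLimitOf is a hypothetical helper, not part of this change): note the nil checks, since Resources and Limits are omitted when unset, and clients negotiated to an API version below 1.41 will see the field zeroed by adjustForAPIVersion.

package swarmexample

import (
	"context"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

// pidsLimitOf returns the PIDs limit configured on a service's task template,
// or 0 when no limit is set (0 means unlimited).
func pidsLimitOf(ctx context.Context, cli *client.Client, serviceID string) (int64, error) {
	svc, _, err := cli.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{})
	if err != nil {
		return 0, err
	}
	res := svc.Spec.TaskTemplate.Resources
	if res == nil || res.Limits == nil {
		return 0, nil // both fields are `json:",omitempty"`, so they may be nil
	}
	return res.Limits.Pids, nil
}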