mirror of https://github.com/docker/cli.git

commit 395a6d560d (parent ba2a712ff0)

    Add support for --limit-pids on service create / update (swarm)

    Signed-off-by: Sebastiaan van Stijn <github@gone.nl>

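A minimal usage sketch of the new flag (the service name and image are hypothetical; per the flag annotations added below, the flag requires API version 1.41 and swarm mode):

    # create a service whose task containers may spawn at most 100 processes each
    docker service create --name web --limit-pids 100 nginx:alpine

    # raise the limit on the running service
    docker service update --limit-pids 200 web
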
@@ -152,6 +152,9 @@ Resources:
 {{- if .ResourceLimitMemory }}
  Memory:         {{ .ResourceLimitMemory }}
 {{- end }}{{ end }}{{ end }}
+{{- if gt .ResourceLimitPids 0 }}
+ PIDs:           {{ .ResourceLimitPids }}
+{{- end }}
 {{- if .Networks }}
  Networks:
 {{- range $network := .Networks }} {{ $network }}{{ end }} {{ end }}

@@ -484,7 +487,7 @@ func (ctx *serviceInspectContext) HasResourceLimits() bool {
     if ctx.Service.Spec.TaskTemplate.Resources == nil || ctx.Service.Spec.TaskTemplate.Resources.Limits == nil {
         return false
     }
-    return ctx.Service.Spec.TaskTemplate.Resources.Limits.NanoCPUs > 0 || ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes > 0
+    return ctx.Service.Spec.TaskTemplate.Resources.Limits.NanoCPUs > 0 || ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes > 0 || ctx.Service.Spec.TaskTemplate.Resources.Limits.Pids > 0
 }
 
 func (ctx *serviceInspectContext) ResourceLimitsNanoCPUs() float64 {

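With this change, a service that sets only a PID limit still gets a Resources section in "docker service inspect --pretty" output, and the template above prints the PIDs line only when the limit is greater than zero, since 0 means unlimited. A sketch of the expected fragment (value hypothetical):

    Resources:
     Limits:
      PIDs:           100
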
@@ -498,6 +501,10 @@ func (ctx *serviceInspectContext) ResourceLimitMemory() string {
     return units.BytesSize(float64(ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes))
 }
 
+func (ctx *serviceInspectContext) ResourceLimitPids() int64 {
+    return ctx.Service.Spec.TaskTemplate.Resources.Limits.Pids
+}
+
 func (ctx *serviceInspectContext) Networks() []string {
     var out []string
     for _, n := range ctx.Service.Spec.TaskTemplate.Networks {

@@ -13,6 +13,7 @@ import (
     "github.com/docker/docker/api/types/swarm"
     "gotest.tools/v3/assert"
     is "gotest.tools/v3/assert/cmp"
+    "gotest.tools/v3/golden"
 )
 
 func formatServiceInspect(t *testing.T, format formatter.Format, now time.Time) string {

@@ -78,6 +79,13 @@ func formatServiceInspect(t *testing.T, format formatter.Format, now time.Time)
                     Timeout: 1,
                 },
             },
+            Resources: &swarm.ResourceRequirements{
+                Limits: &swarm.Limit{
+                    NanoCPUs:    100000000000,
+                    MemoryBytes: 10490000,
+                    Pids:        20,
+                },
+            },
             Networks: []swarm.NetworkAttachmentConfig{
                 {
                     Target: "5vpyomhb6ievnk0i0o60gcnei",

@@ -136,6 +144,11 @@ func formatServiceInspect(t *testing.T, format formatter.Format, now time.Time)
     return b.String()
 }
 
+func TestPrettyPrint(t *testing.T) {
+    s := formatServiceInspect(t, NewFormat("pretty"), time.Now())
+    golden.Assert(t, s, "service-inspect-pretty.golden")
+}
+
 func TestPrettyPrintWithNoUpdateConfig(t *testing.T) {
     s := formatServiceInspect(t, NewFormat("pretty"), time.Now())
     if strings.Contains(s, "UpdateStatus") {

@@ -225,6 +225,7 @@ func (opts updateOptions) rollbackConfig(flags *pflag.FlagSet) *swarm.UpdateConf
 type resourceOptions struct {
     limitCPU            opts.NanoCPUs
     limitMemBytes       opts.MemBytes
+    limitPids           int64
     resCPU              opts.NanoCPUs
     resMemBytes         opts.MemBytes
     resGenericResources []string

@@ -240,6 +241,7 @@ func (r *resourceOptions) ToResourceRequirements() (*swarm.ResourceRequirements,
         Limits: &swarm.Limit{
             NanoCPUs:    r.limitCPU.Value(),
             MemoryBytes: r.limitMemBytes.Value(),
+            Pids:        r.limitPids,
         },
         Reservations: &swarm.Resources{
             NanoCPUs: r.resCPU.Value(),

@@ -821,6 +823,9 @@ func addServiceFlags(flags *pflag.FlagSet, opts *serviceOptions, defaultFlagValu
     flags.Var(&opts.resources.limitMemBytes, flagLimitMemory, "Limit Memory")
     flags.Var(&opts.resources.resCPU, flagReserveCPU, "Reserve CPUs")
     flags.Var(&opts.resources.resMemBytes, flagReserveMemory, "Reserve Memory")
+    flags.Int64Var(&opts.resources.limitPids, flagLimitPids, 0, "Limit maximum number of processes (default 0 = unlimited)")
+    flags.SetAnnotation(flagLimitPids, "version", []string{"1.41"})
+    flags.SetAnnotation(flagLimitPids, "swarm", nil)
 
     flags.Var(&opts.stopGrace, flagStopGracePeriod, flagDesc(flagStopGracePeriod, "Time to wait before force killing a container (ns|us|ms|s|m|h)"))
     flags.Var(&opts.replicas, flagReplicas, "Number of tasks")

@@ -934,6 +939,7 @@ const (
     flagLabelAdd    = "label-add"
     flagLimitCPU    = "limit-cpu"
     flagLimitMemory = "limit-memory"
+    flagLimitPids   = "limit-pids"
     flagMaxReplicas = "replicas-max-per-node"
     flagConcurrent  = "max-concurrent"
     flagMode        = "mode"

@@ -217,6 +217,16 @@ func TestToServiceNetwork(t *testing.T) {
     assert.Check(t, is.DeepEqual([]swarm.NetworkAttachmentConfig{{Target: "id111"}, {Target: "id555"}, {Target: "id999"}}, service.TaskTemplate.Networks))
 }
 
+func TestToServicePidsLimit(t *testing.T) {
+    flags := newCreateCommand(nil).Flags()
+    opt := newServiceOptions()
+    opt.mode = "replicated"
+    opt.resources.limitPids = 100
+    service, err := opt.ToService(context.Background(), &fakeClient{}, flags)
+    assert.NilError(t, err)
+    assert.Equal(t, service.TaskTemplate.Resources.Limits.Pids, int64(100))
+}
+
 func TestToServiceUpdateRollback(t *testing.T) {
     expected := swarm.ServiceSpec{
         UpdateConfig: &swarm.UpdateConfig{

@@ -0,0 +1,42 @@
+ID:             de179gar9d0o7ltdybungplod
+Name:           my_service
+Labels:
+ com.label=foo
+Service Mode:   Replicated
+ Replicas:      2
+Placement:
+ContainerSpec:
+ Image:         foo/bar@sha256:this_is_a_test
+Configs:
+ Target:        /configtest.conf
+  Source:       configtest.conf
+Secrets:
+ Target:        /secrettest.conf
+  Source:       secrettest.conf
+Log Driver:
+ Name:          driver
+ LogOpts:
+  max-file:     5
+
+Resources:
+ Limits:
+  CPU:          100
+  Memory:       10MiB
+  PIDs:         20
+Networks: mynetwork
+Endpoint Mode:  vip
+Ports:
+ PublishedPort = 30000
+  Protocol = tcp
+  TargetPort = 5000
+  PublishMode =
+ Healthcheck:
+  Interval = 4ns
+  Retries = 3
+  StartPeriod = 2ns
+  Timeout = 1ns
+  Tests:
+   Test = CMD-SHELL
+   Test = curl

@@ -283,6 +283,12 @@ func updateService(ctx context.Context, apiClient client.NetworkAPIClient, flags
         }
     }
 
+    updateInt64 := func(flag string, field *int64) {
+        if flags.Changed(flag) {
+            *field, _ = flags.GetInt64(flag)
+        }
+    }
+
     updateUint64 := func(flag string, field *uint64) {
         if flags.Changed(flag) {
             *field, _ = flags.GetUint64(flag)

@@ -339,10 +345,11 @@ func updateService(ctx context.Context, apiClient client.NetworkAPIClient, flags
 
     updateSysCtls(flags, &task.ContainerSpec.Sysctls)
 
-    if anyChanged(flags, flagLimitCPU, flagLimitMemory) {
+    if anyChanged(flags, flagLimitCPU, flagLimitMemory, flagLimitPids) {
         taskResources().Limits = spec.TaskTemplate.Resources.Limits
         updateInt64Value(flagLimitCPU, &task.Resources.Limits.NanoCPUs)
         updateInt64Value(flagLimitMemory, &task.Resources.Limits.MemoryBytes)
+        updateInt64(flagLimitPids, &task.Resources.Limits.Pids)
     }
 
     if anyChanged(flags, flagReserveCPU, flagReserveMemory) {

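Because updateService only copies a value when its flag was explicitly changed, an update that sets just --limit-pids leaves the CPU and memory limits untouched, and passing 0 restores the unlimited default. A hedged sketch (service name hypothetical):

    # change only the PID limit; other limits keep their current values
    docker service update --limit-pids 512 web

    # reset to the default (0 = unlimited)
    docker service update --limit-pids 0 web
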
@@ -620,45 +620,53 @@ func TestUpdateIsolationValid(t *testing.T) {
 // TestUpdateLimitsReservations tests that limits and reservations are updated,
 // and that values that are not updated are not reset to their default value
 func TestUpdateLimitsReservations(t *testing.T) {
-    spec := swarm.ServiceSpec{
-        TaskTemplate: swarm.TaskSpec{
-            ContainerSpec: &swarm.ContainerSpec{},
-        },
-    }
-
     // test that updating works if the service did not previously
     // have limits set (https://github.com/moby/moby/issues/38363)
-    flags := newUpdateCommand(nil).Flags()
-    err := flags.Set(flagLimitCPU, "2")
-    assert.NilError(t, err)
-    err = flags.Set(flagLimitMemory, "200M")
-    assert.NilError(t, err)
-    err = updateService(context.Background(), nil, flags, &spec)
-    assert.NilError(t, err)
-
-    spec = swarm.ServiceSpec{
-        TaskTemplate: swarm.TaskSpec{
-            ContainerSpec: &swarm.ContainerSpec{},
-        },
-    }
+    t.Run("update limits from scratch", func(t *testing.T) {
+        spec := swarm.ServiceSpec{
+            TaskTemplate: swarm.TaskSpec{
+                ContainerSpec: &swarm.ContainerSpec{},
+            },
+        }
+        flags := newUpdateCommand(nil).Flags()
+        err := flags.Set(flagLimitCPU, "2")
+        assert.NilError(t, err)
+        err = flags.Set(flagLimitMemory, "200M")
+        assert.NilError(t, err)
+        err = flags.Set(flagLimitPids, "100")
+        assert.NilError(t, err)
+        err = updateService(context.Background(), nil, flags, &spec)
+        assert.NilError(t, err)
+        assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.NanoCPUs, int64(2000000000)))
+        assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.MemoryBytes, int64(209715200)))
+        assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.Pids, int64(100)))
+    })
 
     // test that updating works if the service did not previously
     // have reservations set (https://github.com/moby/moby/issues/38363)
-    flags = newUpdateCommand(nil).Flags()
-    err = flags.Set(flagReserveCPU, "2")
-    assert.NilError(t, err)
-    err = flags.Set(flagReserveMemory, "200M")
-    assert.NilError(t, err)
-    err = updateService(context.Background(), nil, flags, &spec)
-    assert.NilError(t, err)
+    t.Run("update reservations from scratch", func(t *testing.T) {
+        spec := swarm.ServiceSpec{
+            TaskTemplate: swarm.TaskSpec{
+                ContainerSpec: &swarm.ContainerSpec{},
+            },
+        }
+        flags := newUpdateCommand(nil).Flags()
+        err := flags.Set(flagReserveCPU, "2")
+        assert.NilError(t, err)
+        err = flags.Set(flagReserveMemory, "200M")
+        assert.NilError(t, err)
+        err = updateService(context.Background(), nil, flags, &spec)
+        assert.NilError(t, err)
+    })
 
-    spec = swarm.ServiceSpec{
+    spec := swarm.ServiceSpec{
         TaskTemplate: swarm.TaskSpec{
             ContainerSpec: &swarm.ContainerSpec{},
             Resources: &swarm.ResourceRequirements{
                 Limits: &swarm.Limit{
                     NanoCPUs:    1000000000,
                     MemoryBytes: 104857600,
+                    Pids:        100,
                 },
                 Reservations: &swarm.Resources{
                     NanoCPUs: 1000000000,

@@ -668,29 +676,79 @@ func TestUpdateLimitsReservations(t *testing.T) {
                 },
             },
         }
 
-    flags = newUpdateCommand(nil).Flags()
-    err = flags.Set(flagLimitCPU, "2")
-    assert.NilError(t, err)
-    err = flags.Set(flagReserveCPU, "2")
-    assert.NilError(t, err)
-    err = updateService(context.Background(), nil, flags, &spec)
-    assert.NilError(t, err)
-    assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.NanoCPUs, int64(2000000000)))
-    assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.MemoryBytes, int64(104857600)))
-    assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.NanoCPUs, int64(2000000000)))
-    assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.MemoryBytes, int64(104857600)))
+    // Updating without flags set should not modify existing values
+    t.Run("update without flags set", func(t *testing.T) {
+        flags := newUpdateCommand(nil).Flags()
+        err := updateService(context.Background(), nil, flags, &spec)
+        assert.NilError(t, err)
+        assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.NanoCPUs, int64(1000000000)))
+        assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.MemoryBytes, int64(104857600)))
+        assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.Pids, int64(100)))
+        assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.NanoCPUs, int64(1000000000)))
+        assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.MemoryBytes, int64(104857600)))
+    })
 
-    flags = newUpdateCommand(nil).Flags()
-    err = flags.Set(flagLimitMemory, "200M")
-    assert.NilError(t, err)
-    err = flags.Set(flagReserveMemory, "200M")
-    assert.NilError(t, err)
-    err = updateService(context.Background(), nil, flags, &spec)
-    assert.NilError(t, err)
-    assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.NanoCPUs, int64(2000000000)))
-    assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.MemoryBytes, int64(209715200)))
-    assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.NanoCPUs, int64(2000000000)))
-    assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.MemoryBytes, int64(209715200)))
+    // Updating CPU limit/reservation should not affect memory limit/reservation
+    // and pids-limit
+    t.Run("update cpu limit and reservation", func(t *testing.T) {
+        flags := newUpdateCommand(nil).Flags()
+        err := flags.Set(flagLimitCPU, "2")
+        assert.NilError(t, err)
+        err = flags.Set(flagReserveCPU, "2")
+        assert.NilError(t, err)
+        err = updateService(context.Background(), nil, flags, &spec)
+        assert.NilError(t, err)
+        assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.NanoCPUs, int64(2000000000)))
+        assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.MemoryBytes, int64(104857600)))
+        assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.Pids, int64(100)))
+        assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.NanoCPUs, int64(2000000000)))
+        assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.MemoryBytes, int64(104857600)))
+    })
+
+    // Updating Memory limit/reservation should not affect CPU limit/reservation
+    // and pids-limit
+    t.Run("update memory limit and reservation", func(t *testing.T) {
+        flags := newUpdateCommand(nil).Flags()
+        err := flags.Set(flagLimitMemory, "200M")
+        assert.NilError(t, err)
+        err = flags.Set(flagReserveMemory, "200M")
+        assert.NilError(t, err)
+        err = updateService(context.Background(), nil, flags, &spec)
+        assert.NilError(t, err)
+        assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.NanoCPUs, int64(2000000000)))
+        assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.MemoryBytes, int64(209715200)))
+        assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.Pids, int64(100)))
+        assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.NanoCPUs, int64(2000000000)))
+        assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.MemoryBytes, int64(209715200)))
+    })
+
+    // Updating PidsLimit should only modify PidsLimit, other values unchanged
+    t.Run("update pids limit", func(t *testing.T) {
+        flags := newUpdateCommand(nil).Flags()
+        err := flags.Set(flagLimitPids, "2")
+        assert.NilError(t, err)
+        err = updateService(context.Background(), nil, flags, &spec)
+        assert.NilError(t, err)
+        assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.NanoCPUs, int64(2000000000)))
+        assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.MemoryBytes, int64(209715200)))
+        assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.Pids, int64(2)))
+        assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.NanoCPUs, int64(2000000000)))
+        assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.MemoryBytes, int64(209715200)))
+    })
+
+    t.Run("update pids limit to default", func(t *testing.T) {
+        // Updating PidsLimit to 0 should work
+        flags := newUpdateCommand(nil).Flags()
+        err := flags.Set(flagLimitPids, "0")
+        assert.NilError(t, err)
+        err = updateService(context.Background(), nil, flags, &spec)
+        assert.NilError(t, err)
+        assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.NanoCPUs, int64(2000000000)))
+        assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.MemoryBytes, int64(209715200)))
+        assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.Pids, int64(0)))
+        assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.NanoCPUs, int64(2000000000)))
+        assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.MemoryBytes, int64(209715200)))
+    })
 }
 
 func TestUpdateIsolationInvalid(t *testing.T) {

@@ -3677,6 +3677,7 @@ _docker_service_update_and_create() {
 			--isolation
 			--limit-cpu
 			--limit-memory
+			--limit-pids
 			--log-driver
 			--log-opt
 			--replicas

@@ -1970,6 +1970,7 @@ __docker_service_subcommand() {
 		"($help)*--label=[Service labels]:label: "
 		"($help)--limit-cpu=[Limit CPUs]:value: "
 		"($help)--limit-memory=[Limit Memory]:value: "
+		"($help)--limit-pids=[Limit maximum number of processes (default 0 = unlimited)]:value: "
 		"($help)--log-driver=[Logging driver for service]:logging driver:__docker_complete_log_drivers"
 		"($help)*--log-opt=[Logging driver options]:log driver options:__docker_complete_log_options"
 		"($help)*--mount=[Attach a filesystem mount to the service]:mount: "

@@ -39,6 +39,7 @@ Options:
   -l, --label list                     Service labels
       --limit-cpu decimal              Limit CPUs
      --limit-memory bytes             Limit Memory
+      --limit-pids int                 Limit maximum number of processes (default 0 = unlimited)
       --log-driver string              Logging driver for service
       --log-opt list                   Logging driver options
       --max-concurrent                 Number of job tasks to run at once (default equal to --replicas)

@@ -52,6 +52,7 @@ Options:
       --label-rm list                  Remove a label by its key
       --limit-cpu decimal              Limit CPUs
       --limit-memory bytes             Limit Memory
+      --limit-pids int                 Limit maximum number of processes (default 0 = unlimited)
       --log-driver string              Logging driver for service
       --log-opt list                   Logging driver options
       --max-concurrent                 Number of job tasks to run at once (default equal to --replicas)

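To read the limit back, the inspect template can follow the same Spec.TaskTemplate.Resources.Limits.Pids path used in the Go code above (service name hypothetical):

    # prints the configured PID limit, e.g. 100
    docker service inspect --format '{{.Spec.TaskTemplate.Resources.Limits.Pids}}' web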