diff --git a/cli/command/service/formatter.go b/cli/command/service/formatter.go
index d283d87fdc..f4d815a554 100644
--- a/cli/command/service/formatter.go
+++ b/cli/command/service/formatter.go
@@ -152,6 +152,9 @@ Resources:
 {{- if .ResourceLimitMemory }}
   Memory:	{{ .ResourceLimitMemory }}
 {{- end }}{{ end }}{{ end }}
+{{- if gt .ResourceLimitPids 0 }}
+  PIDs:	{{ .ResourceLimitPids }}
+{{- end }}
 {{- if .Networks }}
 Networks:
 {{- range $network := .Networks }} {{ $network }}{{ end }} {{ end }}
@@ -484,7 +487,7 @@ func (ctx *serviceInspectContext) HasResourceLimits() bool {
 	if ctx.Service.Spec.TaskTemplate.Resources == nil || ctx.Service.Spec.TaskTemplate.Resources.Limits == nil {
 		return false
 	}
-	return ctx.Service.Spec.TaskTemplate.Resources.Limits.NanoCPUs > 0 || ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes > 0
+	return ctx.Service.Spec.TaskTemplate.Resources.Limits.NanoCPUs > 0 || ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes > 0 || ctx.Service.Spec.TaskTemplate.Resources.Limits.Pids > 0
 }
 
 func (ctx *serviceInspectContext) ResourceLimitsNanoCPUs() float64 {
@@ -498,6 +501,10 @@ func (ctx *serviceInspectContext) ResourceLimitMemory() string {
 	return units.BytesSize(float64(ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes))
 }
 
+func (ctx *serviceInspectContext) ResourceLimitPids() int64 {
+	return ctx.Service.Spec.TaskTemplate.Resources.Limits.Pids
+}
+
 func (ctx *serviceInspectContext) Networks() []string {
 	var out []string
 	for _, n := range ctx.Service.Spec.TaskTemplate.Networks {
diff --git a/cli/command/service/inspect_test.go b/cli/command/service/inspect_test.go
index 1b9a4c05c7..15f9209278 100644
--- a/cli/command/service/inspect_test.go
+++ b/cli/command/service/inspect_test.go
@@ -13,6 +13,7 @@ import (
 	"github.com/docker/docker/api/types/swarm"
 	"gotest.tools/v3/assert"
 	is "gotest.tools/v3/assert/cmp"
+	"gotest.tools/v3/golden"
 )
 
 func formatServiceInspect(t *testing.T, format formatter.Format, now time.Time) string {
@@ -78,6 +79,13 @@ func formatServiceInspect(t *testing.T, format formatter.Format, now time.Time)
 					Timeout:  1,
 				},
 			},
+			Resources: &swarm.ResourceRequirements{
+				Limits: &swarm.Limit{
+					NanoCPUs:    100000000000,
+					MemoryBytes: 10490000,
+					Pids:        20,
+				},
+			},
 			Networks: []swarm.NetworkAttachmentConfig{
 				{
 					Target: "5vpyomhb6ievnk0i0o60gcnei",
@@ -136,6 +144,11 @@ func formatServiceInspect(t *testing.T, format formatter.Format, now time.Time)
 	return b.String()
 }
 
+func TestPrettyPrint(t *testing.T) {
+	s := formatServiceInspect(t, NewFormat("pretty"), time.Now())
+	golden.Assert(t, s, "service-inspect-pretty.golden")
+}
+
 func TestPrettyPrintWithNoUpdateConfig(t *testing.T) {
 	s := formatServiceInspect(t, NewFormat("pretty"), time.Now())
 	if strings.Contains(s, "UpdateStatus") {
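Note on the formatter change: the template guards the new row with `gt .ResourceLimitPids 0`, so a `PIDs:` line is only printed when a limit is actually set, rather than showing `PIDs: 0` for every service. A minimal, self-contained sketch of that behaviour (the struct and template below are illustrative stand-ins, not the CLI's own types):

```go
package main

import (
	"os"
	"text/template"
)

// ctx stands in for serviceInspectContext; only the field used by the
// template guard is modelled here.
type ctx struct{ ResourceLimitPids int64 }

const pretty = `Resources:
 Limits:
{{- if gt .ResourceLimitPids 0 }}
  PIDs:	{{ .ResourceLimitPids }}
{{- end }}
`

func main() {
	t := template.Must(template.New("pretty").Parse(pretty))
	_ = t.Execute(os.Stdout, ctx{ResourceLimitPids: 20}) // PIDs row is rendered
	_ = t.Execute(os.Stdout, ctx{ResourceLimitPids: 0})  // PIDs row is omitted
}
```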
diff --git a/cli/command/service/opts.go b/cli/command/service/opts.go
index 8e241499d5..d473c5fcec 100644
--- a/cli/command/service/opts.go
+++ b/cli/command/service/opts.go
@@ -225,6 +225,7 @@ func (opts updateOptions) rollbackConfig(flags *pflag.FlagSet) *swarm.UpdateConf
 type resourceOptions struct {
 	limitCPU            opts.NanoCPUs
 	limitMemBytes       opts.MemBytes
+	limitPids           int64
 	resCPU              opts.NanoCPUs
 	resMemBytes         opts.MemBytes
 	resGenericResources []string
@@ -240,6 +241,7 @@ func (r *resourceOptions) ToResourceRequirements() (*swarm.ResourceRequirements,
 		Limits: &swarm.Limit{
 			NanoCPUs:    r.limitCPU.Value(),
 			MemoryBytes: r.limitMemBytes.Value(),
+			Pids:        r.limitPids,
 		},
 		Reservations: &swarm.Resources{
 			NanoCPUs:    r.resCPU.Value(),
@@ -821,6 +823,9 @@ func addServiceFlags(flags *pflag.FlagSet, opts *serviceOptions, defaultFlagValu
 	flags.Var(&opts.resources.limitMemBytes, flagLimitMemory, "Limit Memory")
 	flags.Var(&opts.resources.resCPU, flagReserveCPU, "Reserve CPUs")
 	flags.Var(&opts.resources.resMemBytes, flagReserveMemory, "Reserve Memory")
+	flags.Int64Var(&opts.resources.limitPids, flagLimitPids, 0, "Limit maximum number of processes (default 0 = unlimited)")
+	flags.SetAnnotation(flagLimitPids, "version", []string{"1.41"})
+	flags.SetAnnotation(flagLimitPids, "swarm", nil)
 	flags.Var(&opts.stopGrace, flagStopGracePeriod, flagDesc(flagStopGracePeriod, "Time to wait before force killing a container (ns|us|ms|s|m|h)"))
 
 	flags.Var(&opts.replicas, flagReplicas, "Number of tasks")
@@ -934,6 +939,7 @@ const (
 	flagLabelAdd            = "label-add"
 	flagLimitCPU            = "limit-cpu"
 	flagLimitMemory         = "limit-memory"
+	flagLimitPids           = "limit-pids"
 	flagMaxReplicas         = "replicas-max-per-node"
 	flagConcurrent          = "max-concurrent"
 	flagMode                = "mode"
diff --git a/cli/command/service/opts_test.go b/cli/command/service/opts_test.go
index 41560db698..c89948ad39 100644
--- a/cli/command/service/opts_test.go
+++ b/cli/command/service/opts_test.go
@@ -217,6 +217,16 @@ func TestToServiceNetwork(t *testing.T) {
 	assert.Check(t, is.DeepEqual([]swarm.NetworkAttachmentConfig{{Target: "id111"}, {Target: "id555"}, {Target: "id999"}}, service.TaskTemplate.Networks))
 }
 
+func TestToServicePidsLimit(t *testing.T) {
+	flags := newCreateCommand(nil).Flags()
+	opt := newServiceOptions()
+	opt.mode = "replicated"
+	opt.resources.limitPids = 100
+	service, err := opt.ToService(context.Background(), &fakeClient{}, flags)
+	assert.NilError(t, err)
+	assert.Equal(t, service.TaskTemplate.Resources.Limits.Pids, int64(100))
+}
+
 func TestToServiceUpdateRollback(t *testing.T) {
 	expected := swarm.ServiceSpec{
 		UpdateConfig: &swarm.UpdateConfig{
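The new `limitPids` option rides along in the same `swarm.Limit` struct as the CPU and memory limits, which is what `TestToServicePidsLimit` above asserts. A minimal sketch of that mapping, assuming `github.com/docker/docker/api/types/swarm` (API 1.41+, where `Limit.Pids` exists) is on the module path:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	// limitPids would normally be populated from the --limit-pids flag.
	limitPids := int64(100)
	req := &swarm.ResourceRequirements{
		Limits: &swarm.Limit{
			NanoCPUs:    2000000000, // --limit-cpu 2
			MemoryBytes: 209715200,  // --limit-memory 200M
			Pids:        limitPids,  // --limit-pids 100
		},
	}
	fmt.Println(req.Limits.Pids) // 100
}
```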
diff --git a/cli/command/service/testdata/service-inspect-pretty.golden b/cli/command/service/testdata/service-inspect-pretty.golden
new file mode 100644
index 0000000000..f2dce2693b
--- /dev/null
+++ b/cli/command/service/testdata/service-inspect-pretty.golden
@@ -0,0 +1,42 @@
+
+ID:		de179gar9d0o7ltdybungplod
+Name:		my_service
+Labels:
+ com.label=foo
+Service Mode:	Replicated
+ Replicas:	2
+Placement:
+ContainerSpec:
+ Image:		foo/bar@sha256:this_is_a_test
+Configs:
+ Target:	/configtest.conf
+  Source:	configtest.conf
+Secrets:
+ Target:	/secrettest.conf
+  Source:	secrettest.conf
+Log Driver:
+ Name:		driver
+ LogOpts:
+  max-file:	5
+
+Resources:
+ Limits:
+  CPU:		100
+  Memory:	10MiB
+  PIDs:		20
+Networks: mynetwork
+Endpoint Mode:	vip
+Ports:
+ PublishedPort = 30000
+  Protocol = tcp
+  TargetPort = 5000
+  PublishMode =
+ Healthcheck:
+  Interval = 4ns
+  Retries = 3
+  StartPeriod = 2ns
+  Timeout = 1ns
+  Tests:
+   Test = CMD-SHELL
+   Test = curl
+
diff --git a/cli/command/service/update.go b/cli/command/service/update.go
index da668635fa..a36ea2e4f1 100644
--- a/cli/command/service/update.go
+++ b/cli/command/service/update.go
@@ -283,6 +283,12 @@ func updateService(ctx context.Context, apiClient client.NetworkAPIClient, flags
 		}
 	}
 
+	updateInt64 := func(flag string, field *int64) {
+		if flags.Changed(flag) {
+			*field, _ = flags.GetInt64(flag)
+		}
+	}
+
 	updateUint64 := func(flag string, field *uint64) {
 		if flags.Changed(flag) {
 			*field, _ = flags.GetUint64(flag)
@@ -339,10 +345,11 @@ func updateService(ctx context.Context, apiClient client.NetworkAPIClient, flags
 
 	updateSysCtls(flags, &task.ContainerSpec.Sysctls)
 
-	if anyChanged(flags, flagLimitCPU, flagLimitMemory) {
+	if anyChanged(flags, flagLimitCPU, flagLimitMemory, flagLimitPids) {
 		taskResources().Limits = spec.TaskTemplate.Resources.Limits
 		updateInt64Value(flagLimitCPU, &task.Resources.Limits.NanoCPUs)
 		updateInt64Value(flagLimitMemory, &task.Resources.Limits.MemoryBytes)
+		updateInt64(flagLimitPids, &task.Resources.Limits.Pids)
 	}
 
 	if anyChanged(flags, flagReserveCPU, flagReserveMemory) {
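The `updateInt64` helper only writes through when the flag was explicitly set on the command line, which is why an unrelated `docker service update` leaves an existing pids limit alone, while an explicit `--limit-pids 0` resets it. A standalone sketch of that gating, using `github.com/spf13/pflag` as the CLI does (flag and variable names here are illustrative):

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	flags := pflag.NewFlagSet("update", pflag.ContinueOnError)
	flags.Int64("limit-pids", 0, "Limit maximum number of processes")

	pids := int64(100) // value already present in the service spec
	_ = flags.Parse([]string{})

	// Mirrors updateInt64: only overwrite when the user touched the flag.
	if flags.Changed("limit-pids") {
		pids, _ = flags.GetInt64("limit-pids")
	}
	fmt.Println(pids) // still 100: the stored limit survives the update
}
```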
diff --git a/cli/command/service/update_test.go b/cli/command/service/update_test.go
index 1b765dde99..a785d35f3d 100644
--- a/cli/command/service/update_test.go
+++ b/cli/command/service/update_test.go
@@ -620,45 +620,53 @@ func TestUpdateIsolationValid(t *testing.T) {
 
 // TestUpdateLimitsReservations tests that limits and reservations are updated,
 // and that values are not updated are not reset to their default value
 func TestUpdateLimitsReservations(t *testing.T) {
-	spec := swarm.ServiceSpec{
-		TaskTemplate: swarm.TaskSpec{
-			ContainerSpec: &swarm.ContainerSpec{},
-		},
-	}
-
 	// test that updating works if the service did not previously
 	// have limits set (https://github.com/moby/moby/issues/38363)
-	flags := newUpdateCommand(nil).Flags()
-	err := flags.Set(flagLimitCPU, "2")
-	assert.NilError(t, err)
-	err = flags.Set(flagLimitMemory, "200M")
-	assert.NilError(t, err)
-	err = updateService(context.Background(), nil, flags, &spec)
-	assert.NilError(t, err)
-
-	spec = swarm.ServiceSpec{
-		TaskTemplate: swarm.TaskSpec{
-			ContainerSpec: &swarm.ContainerSpec{},
-		},
-	}
+	t.Run("update limits from scratch", func(t *testing.T) {
+		spec := swarm.ServiceSpec{
+			TaskTemplate: swarm.TaskSpec{
+				ContainerSpec: &swarm.ContainerSpec{},
+			},
+		}
+		flags := newUpdateCommand(nil).Flags()
+		err := flags.Set(flagLimitCPU, "2")
+		assert.NilError(t, err)
+		err = flags.Set(flagLimitMemory, "200M")
+		assert.NilError(t, err)
+		err = flags.Set(flagLimitPids, "100")
+		assert.NilError(t, err)
+		err = updateService(context.Background(), nil, flags, &spec)
+		assert.NilError(t, err)
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.NanoCPUs, int64(2000000000)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.MemoryBytes, int64(209715200)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.Pids, int64(100)))
+	})
 
 	// test that updating works if the service did not previously
 	// have reservations set (https://github.com/moby/moby/issues/38363)
-	flags = newUpdateCommand(nil).Flags()
-	err = flags.Set(flagReserveCPU, "2")
-	assert.NilError(t, err)
-	err = flags.Set(flagReserveMemory, "200M")
-	assert.NilError(t, err)
-	err = updateService(context.Background(), nil, flags, &spec)
-	assert.NilError(t, err)
+	t.Run("update reservations from scratch", func(t *testing.T) {
+		spec := swarm.ServiceSpec{
+			TaskTemplate: swarm.TaskSpec{
+				ContainerSpec: &swarm.ContainerSpec{},
+			},
+		}
+		flags := newUpdateCommand(nil).Flags()
+		err := flags.Set(flagReserveCPU, "2")
+		assert.NilError(t, err)
+		err = flags.Set(flagReserveMemory, "200M")
+		assert.NilError(t, err)
+		err = updateService(context.Background(), nil, flags, &spec)
+		assert.NilError(t, err)
+	})
 
-	spec = swarm.ServiceSpec{
+	spec := swarm.ServiceSpec{
 		TaskTemplate: swarm.TaskSpec{
 			ContainerSpec: &swarm.ContainerSpec{},
 			Resources: &swarm.ResourceRequirements{
 				Limits: &swarm.Limit{
 					NanoCPUs:    1000000000,
 					MemoryBytes: 104857600,
+					Pids:        100,
 				},
 				Reservations: &swarm.Resources{
 					NanoCPUs:    1000000000,
@@ -668,29 +676,79 @@ func TestUpdateLimitsReservations(t *testing.T) {
 		},
 	}
 
-	flags = newUpdateCommand(nil).Flags()
-	err = flags.Set(flagLimitCPU, "2")
-	assert.NilError(t, err)
-	err = flags.Set(flagReserveCPU, "2")
-	assert.NilError(t, err)
-	err = updateService(context.Background(), nil, flags, &spec)
-	assert.NilError(t, err)
-	assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.NanoCPUs, int64(2000000000)))
-	assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.MemoryBytes, int64(104857600)))
-	assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.NanoCPUs, int64(2000000000)))
-	assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.MemoryBytes, int64(104857600)))
+	// Updating without flags set should not modify existing values
+	t.Run("update without flags set", func(t *testing.T) {
+		flags := newUpdateCommand(nil).Flags()
+		err := updateService(context.Background(), nil, flags, &spec)
+		assert.NilError(t, err)
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.NanoCPUs, int64(1000000000)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.MemoryBytes, int64(104857600)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.Pids, int64(100)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.NanoCPUs, int64(1000000000)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.MemoryBytes, int64(104857600)))
+	})
 
-	flags = newUpdateCommand(nil).Flags()
-	err = flags.Set(flagLimitMemory, "200M")
-	assert.NilError(t, err)
-	err = flags.Set(flagReserveMemory, "200M")
-	assert.NilError(t, err)
-	err = updateService(context.Background(), nil, flags, &spec)
-	assert.NilError(t, err)
-	assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.NanoCPUs, int64(2000000000)))
-	assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.MemoryBytes, int64(209715200)))
-	assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.NanoCPUs, int64(2000000000)))
-	assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.MemoryBytes, int64(209715200)))
+	// Updating CPU limit/reservation should not affect memory limit/reservation
+	// and pids-limit
+	t.Run("update cpu limit and reservation", func(t *testing.T) {
+		flags := newUpdateCommand(nil).Flags()
+		err := flags.Set(flagLimitCPU, "2")
+		assert.NilError(t, err)
+		err = flags.Set(flagReserveCPU, "2")
+		assert.NilError(t, err)
+		err = updateService(context.Background(), nil, flags, &spec)
+		assert.NilError(t, err)
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.NanoCPUs, int64(2000000000)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.MemoryBytes, int64(104857600)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.Pids, int64(100)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.NanoCPUs, int64(2000000000)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.MemoryBytes, int64(104857600)))
+	})
+
+	// Updating Memory limit/reservation should not affect CPU limit/reservation
+	// and pids-limit
+	t.Run("update memory limit and reservation", func(t *testing.T) {
+		flags := newUpdateCommand(nil).Flags()
+		err := flags.Set(flagLimitMemory, "200M")
+		assert.NilError(t, err)
+		err = flags.Set(flagReserveMemory, "200M")
+		assert.NilError(t, err)
+		err = updateService(context.Background(), nil, flags, &spec)
+		assert.NilError(t, err)
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.NanoCPUs, int64(2000000000)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.MemoryBytes, int64(209715200)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.Pids, int64(100)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.NanoCPUs, int64(2000000000)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.MemoryBytes, int64(209715200)))
+	})
+
+	// Updating PidsLimit should only modify PidsLimit, other values unchanged
+	t.Run("update pids limit", func(t *testing.T) {
+		flags := newUpdateCommand(nil).Flags()
+		err := flags.Set(flagLimitPids, "2")
+		assert.NilError(t, err)
+		err = updateService(context.Background(), nil, flags, &spec)
+		assert.NilError(t, err)
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.NanoCPUs, int64(2000000000)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.MemoryBytes, int64(209715200)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.Pids, int64(2)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.NanoCPUs, int64(2000000000)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.MemoryBytes, int64(209715200)))
+	})
+
+	t.Run("update pids limit to default", func(t *testing.T) {
+		// Updating PidsLimit to 0 should work
+		flags := newUpdateCommand(nil).Flags()
+		err := flags.Set(flagLimitPids, "0")
+		assert.NilError(t, err)
+		err = updateService(context.Background(), nil, flags, &spec)
+		assert.NilError(t, err)
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.NanoCPUs, int64(2000000000)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.MemoryBytes, int64(209715200)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.Pids, int64(0)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.NanoCPUs, int64(2000000000)))
+		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.MemoryBytes, int64(209715200)))
+	})
 }
 
 func TestUpdateIsolationInvalid(t *testing.T) {
diff --git a/contrib/completion/bash/docker b/contrib/completion/bash/docker
index 94d521252f..6a52983197 100644
--- a/contrib/completion/bash/docker
+++ b/contrib/completion/bash/docker
@@ -3677,6 +3677,7 @@ _docker_service_update_and_create() {
 			--isolation
 			--limit-cpu
 			--limit-memory
+			--limit-pids
 			--log-driver
 			--log-opt
 			--replicas
diff --git a/contrib/completion/zsh/_docker b/contrib/completion/zsh/_docker
index 10c19e2339..835c8d4592 100644
--- a/contrib/completion/zsh/_docker
+++ b/contrib/completion/zsh/_docker
@@ -1970,6 +1970,7 @@ __docker_service_subcommand() {
         "($help)*--label=[Service labels]:label: "
         "($help)--limit-cpu=[Limit CPUs]:value: "
         "($help)--limit-memory=[Limit Memory]:value: "
+        "($help)--limit-pids=[Limit maximum number of processes (default 0 = unlimited)]:value: "
         "($help)--log-driver=[Logging driver for service]:logging driver:__docker_complete_log_drivers"
         "($help)*--log-opt=[Logging driver options]:log driver options:__docker_complete_log_options"
         "($help)*--mount=[Attach a filesystem mount to the service]:mount: "
diff --git a/docs/reference/commandline/service_create.md b/docs/reference/commandline/service_create.md
index 9088758b3a..bcd877f5a1 100644
--- a/docs/reference/commandline/service_create.md
+++ b/docs/reference/commandline/service_create.md
@@ -39,6 +39,7 @@ Options:
   -l, --label list                     Service labels
       --limit-cpu decimal              Limit CPUs
       --limit-memory bytes             Limit Memory
+      --limit-pids int                 Limit maximum number of processes (default 0 = unlimited)
       --log-driver string              Logging driver for service
       --log-opt list                   Logging driver options
       --max-concurrent                 Number of job tasks to run at once (default equal to --replicas)
diff --git a/docs/reference/commandline/service_update.md b/docs/reference/commandline/service_update.md
index cf93f28351..440bb2341d 100644
--- a/docs/reference/commandline/service_update.md
+++ b/docs/reference/commandline/service_update.md
@@ -52,6 +52,7 @@ Options:
       --label-rm list                      Remove a label by its key
       --limit-cpu decimal                  Limit CPUs
       --limit-memory bytes                 Limit Memory
+      --limit-pids int                     Limit maximum number of processes (default 0 = unlimited)
       --log-driver string                  Logging driver for service
       --log-opt list                       Logging driver options
       --max-concurrent                     Number of job tasks to run at once (default equal to --replicas)
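For completeness, a hedged sketch of what the new flag ultimately puts on the wire, driving the Go SDK directly instead of the CLI. This assumes a swarm manager speaking API 1.41 or later; the service name and image are placeholders:

```go
package main

import (
	"context"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	spec := swarm.ServiceSpec{
		Annotations: swarm.Annotations{Name: "pids-limited"},
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: &swarm.ContainerSpec{Image: "nginx:alpine"},
			Resources: &swarm.ResourceRequirements{
				// Same field the --limit-pids flag populates.
				Limits: &swarm.Limit{Pids: 100},
			},
		},
	}
	if _, err := cli.ServiceCreate(context.Background(), spec, types.ServiceCreateOptions{}); err != nil {
		panic(err)
	}
}
```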