package service

import (
	"context"
	"fmt"
	"reflect"
	"sort"
	"testing"
	"time"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	mounttypes "github.com/docker/docker/api/types/mount"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/go-units"
	"gotest.tools/v3/assert"
	is "gotest.tools/v3/assert/cmp"
)
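
// TestUpdateServiceArgs checks that --args replaces the existing container
// args, and that the value is split shell-style so quoted substrings such
// as "new args" stay together as a single argument.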
func TestUpdateServiceArgs(t *testing.T) {
	flags := newUpdateCommand(nil).Flags()
	flags.Set("args", "the \"new args\"")

	spec := &swarm.ServiceSpec{
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: &swarm.ContainerSpec{},
		},
	}
	cspec := spec.TaskTemplate.ContainerSpec
	cspec.Args = []string{"old", "args"}

	updateService(context.TODO(), nil, flags, spec)
	assert.Check(t, is.DeepEqual([]string{"the", "new args"}, cspec.Args))
}
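
// TestUpdateLabels checks that --label-rm is applied before --label-add:
// a label that is both removed and re-added keeps the newly added value,
// while removing an unknown label is a no-op.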
func TestUpdateLabels(t *testing.T) {
	flags := newUpdateCommand(nil).Flags()
	flags.Set("label-add", "add-beats-remove=value")
	flags.Set("label-add", "to-add=value")
	flags.Set("label-add", "to-update=new-value")
	flags.Set("label-add", "to-replace=new-value")
	flags.Set("label-rm", "add-beats-remove")
	flags.Set("label-rm", "to-remove")
	flags.Set("label-rm", "to-replace")
	flags.Set("label-rm", "no-such-label")

	labels := map[string]string{
		"to-keep":    "value",
		"to-remove":  "value",
		"to-replace": "value",
		"to-update":  "value",
	}

	updateLabels(flags, &labels)
	assert.DeepEqual(t, labels, map[string]string{
		"add-beats-remove": "value",
		"to-add":           "value",
		"to-keep":          "value",
		"to-replace":       "new-value",
		"to-update":        "new-value",
	})
}
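
// TestUpdateContainerLabels mirrors TestUpdateLabels for the
// --container-label-add and --container-label-rm flags, which operate on
// the ContainerSpec labels rather than the service labels.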
func TestUpdateContainerLabels(t *testing.T) {
	flags := newUpdateCommand(nil).Flags()
	flags.Set("container-label-add", "add-beats-remove=value")
	flags.Set("container-label-add", "to-add=value")
	flags.Set("container-label-add", "to-update=new-value")
	flags.Set("container-label-add", "to-replace=new-value")
	flags.Set("container-label-rm", "add-beats-remove")
	flags.Set("container-label-rm", "to-remove")
	flags.Set("container-label-rm", "to-replace")
	flags.Set("container-label-rm", "no-such-label")

	labels := map[string]string{
		"to-keep":    "value",
		"to-remove":  "value",
		"to-replace": "value",
		"to-update":  "value",
	}

	updateContainerLabels(flags, &labels)
	assert.DeepEqual(t, labels, map[string]string{
		"add-beats-remove": "value",
		"to-add":           "value",
		"to-keep":          "value",
		"to-replace":       "new-value",
		"to-update":        "new-value",
	})
}
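
// TestUpdatePlacementConstraints checks that --constraint-rm removes a
// matching constraint, --constraint-add appends a new one, and the result
// comes back sorted.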
func TestUpdatePlacementConstraints(t *testing.T) {
	flags := newUpdateCommand(nil).Flags()
	flags.Set("constraint-add", "node=toadd")
	flags.Set("constraint-rm", "node!=toremove")

	placement := &swarm.Placement{
		Constraints: []string{"node!=toremove", "container=tokeep"},
	}

	updatePlacementConstraints(flags, placement)
	assert.Assert(t, is.Len(placement.Constraints, 2))
	assert.Check(t, is.Equal("container=tokeep", placement.Constraints[0]))
	assert.Check(t, is.Equal("node=toadd", placement.Constraints[1]))
}
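
// TestUpdatePlacementPrefs checks that --placement-pref-rm removes the
// matching spread preference and --placement-pref-add appends the new one.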
func TestUpdatePlacementPrefs(t *testing.T) {
	flags := newUpdateCommand(nil).Flags()
	flags.Set("placement-pref-add", "spread=node.labels.dc")
	flags.Set("placement-pref-rm", "spread=node.labels.rack")

	placement := &swarm.Placement{
		Preferences: []swarm.PlacementPreference{
			{
				Spread: &swarm.SpreadOver{
					SpreadDescriptor: "node.labels.rack",
				},
			},
			{
				Spread: &swarm.SpreadOver{
					SpreadDescriptor: "node.labels.row",
				},
			},
		},
	}

	updatePlacementPreferences(flags, placement)
	assert.Assert(t, is.Len(placement.Preferences, 2))
	assert.Check(t, is.Equal("node.labels.row", placement.Preferences[0].Spread.SpreadDescriptor))
	assert.Check(t, is.Equal("node.labels.dc", placement.Preferences[1].Spread.SpreadDescriptor))
}
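
// TestUpdateEnvironment checks that --env-rm removes a variable by name
// and --env-add appends a new one.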
func TestUpdateEnvironment(t *testing.T) {
	flags := newUpdateCommand(nil).Flags()
	flags.Set("env-add", "toadd=newenv")
	flags.Set("env-rm", "toremove")

	envs := []string{"toremove=theenvtoremove", "tokeep=value"}

	updateEnvironment(flags, &envs)
	assert.Assert(t, is.Len(envs, 2))
	// updateEnvironment uses a map internally, so the original order is lost;
	// sort to make the assertions deterministic.
	sort.Strings(envs)
	assert.Check(t, is.Equal("toadd=newenv", envs[0]))
	assert.Check(t, is.Equal("tokeep=value", envs[1]))
}
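
// TestUpdateEnvironmentWithDuplicateValues checks that when the same
// variable is passed to --env-add multiple times, only the last value wins.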
func TestUpdateEnvironmentWithDuplicateValues(t *testing.T) {
	flags := newUpdateCommand(nil).Flags()
	flags.Set("env-rm", "foo")
	flags.Set("env-add", "foo=first")
	flags.Set("env-add", "foo=second")

	envs := []string{"foo=value"}

	updateEnvironment(flags, &envs)
	assert.Check(t, is.Len(envs, 1))
	assert.Equal(t, envs[0], "foo=second")
}
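
// TestUpdateEnvironmentWithDuplicateKeys checks that --env-add overwrites
// the value of a variable that is already set.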
func TestUpdateEnvironmentWithDuplicateKeys(t *testing.T) {
	// Test case for #25404
	flags := newUpdateCommand(nil).Flags()
	flags.Set("env-add", "A=b")

	envs := []string{"A=c"}

	updateEnvironment(flags, &envs)
	assert.Assert(t, is.Len(envs, 1))
	assert.Check(t, is.Equal("A=b", envs[0]))
}
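
// TestUpdateGroups checks that the group list is returned sorted and
// de-duplicated, and that a group that is both added and removed ends up
// removed.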
func TestUpdateGroups(t *testing.T) {
	flags := newUpdateCommand(nil).Flags()
	flags.Set("group-add", "wheel")
	flags.Set("group-add", "docker")
	flags.Set("group-rm", "root")
	flags.Set("group-add", "foo")
	flags.Set("group-rm", "docker")

	groups := []string{"bar", "root"}

	updateGroups(flags, &groups)
	assert.Assert(t, is.Len(groups, 3))
	assert.Check(t, is.Equal("bar", groups[0]))
	assert.Check(t, is.Equal("foo", groups[1]))
	assert.Check(t, is.Equal("wheel", groups[2]))
}
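
// TestUpdateDNSConfig checks that nameservers, search domains, and options
// are added and removed with duplicates dropped, and that invalid addresses
// or domains are rejected when the flag itself is set.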
func TestUpdateDNSConfig(t *testing.T) {
	flags := newUpdateCommand(nil).Flags()

	// IPv4, with duplicates
	flags.Set("dns-add", "1.1.1.1")
	flags.Set("dns-add", "1.1.1.1")
	flags.Set("dns-add", "2.2.2.2")
	flags.Set("dns-rm", "3.3.3.3")
	flags.Set("dns-rm", "2.2.2.2")
	// IPv6
	flags.Set("dns-add", "2001:db8:abc8::1")
	// Invalid dns record
	assert.ErrorContains(t, flags.Set("dns-add", "x.y.z.w"), "x.y.z.w is not an ip address")

	// domains with duplicates
	flags.Set("dns-search-add", "example.com")
	flags.Set("dns-search-add", "example.com")
	flags.Set("dns-search-add", "example.org")
	flags.Set("dns-search-rm", "example.org")
	// Invalid dns search domain
	assert.ErrorContains(t, flags.Set("dns-search-add", "example$com"), "example$com is not a valid domain")

	flags.Set("dns-option-add", "ndots:9")
	flags.Set("dns-option-rm", "timeout:3")

	config := &swarm.DNSConfig{
		Nameservers: []string{"3.3.3.3", "5.5.5.5"},
		Search:      []string{"localdomain"},
		Options:     []string{"timeout:3"},
	}

	updateDNSConfig(flags, &config)

	assert.Assert(t, is.Len(config.Nameservers, 3))
	assert.Check(t, is.Equal("1.1.1.1", config.Nameservers[0]))
	assert.Check(t, is.Equal("2001:db8:abc8::1", config.Nameservers[1]))
	assert.Check(t, is.Equal("5.5.5.5", config.Nameservers[2]))

	assert.Assert(t, is.Len(config.Search, 2))
	assert.Check(t, is.Equal("example.com", config.Search[0]))
	assert.Check(t, is.Equal("localdomain", config.Search[1]))

	assert.Assert(t, is.Len(config.Options, 1))
	assert.Check(t, is.Equal(config.Options[0], "ndots:9"))
}
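
// TestUpdateMounts checks that --mount-rm matches mounts by target path
// and that the resulting list is sorted by target.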
func TestUpdateMounts(t *testing.T) {
	flags := newUpdateCommand(nil).Flags()
	flags.Set("mount-add", "type=volume,source=vol2,target=/toadd")
	flags.Set("mount-rm", "/toremove")

	mounts := []mounttypes.Mount{
		{Target: "/toremove", Source: "vol1", Type: mounttypes.TypeBind},
		{Target: "/tokeep", Source: "vol3", Type: mounttypes.TypeBind},
	}

	updateMounts(flags, &mounts)
	assert.Assert(t, is.Len(mounts, 2))
	assert.Check(t, is.Equal("/toadd", mounts[0].Target))
	assert.Check(t, is.Equal("/tokeep", mounts[1].Target))
}
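
// TestUpdateMountsWithDuplicateMounts checks that adding a mount for a
// target that already exists does not produce a duplicate entry.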
func TestUpdateMountsWithDuplicateMounts(t *testing.T) {
	flags := newUpdateCommand(nil).Flags()
	flags.Set("mount-add", "type=volume,source=vol4,target=/toadd")

	mounts := []mounttypes.Mount{
		{Target: "/tokeep1", Source: "vol1", Type: mounttypes.TypeBind},
		{Target: "/toadd", Source: "vol2", Type: mounttypes.TypeBind},
		{Target: "/tokeep2", Source: "vol3", Type: mounttypes.TypeBind},
	}

	updateMounts(flags, &mounts)
	assert.Assert(t, is.Len(mounts, 3))
	assert.Check(t, is.Equal("/tokeep1", mounts[0].Target))
	assert.Check(t, is.Equal("/tokeep2", mounts[1].Target))
	assert.Check(t, is.Equal("/toadd", mounts[2].Target))
}
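
// TestUpdatePorts checks that --publish-rm removes a port by target port
// and protocol, while --publish-add appends a new mapping.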
func TestUpdatePorts(t *testing.T) {
	flags := newUpdateCommand(nil).Flags()
	flags.Set("publish-add", "1000:1000")
	flags.Set("publish-rm", "333/udp")

	portConfigs := []swarm.PortConfig{
		{TargetPort: 333, Protocol: swarm.PortConfigProtocolUDP},
		{TargetPort: 555},
	}

	err := updatePorts(flags, &portConfigs)
	assert.NilError(t, err)
	assert.Assert(t, is.Len(portConfigs, 2))
	// Sort to get a deterministic order (it may have been changed by the map)
	targetPorts := []int{int(portConfigs[0].TargetPort), int(portConfigs[1].TargetPort)}
	sort.Ints(targetPorts)
	assert.Check(t, is.Equal(555, targetPorts[0]))
	assert.Check(t, is.Equal(1000, targetPorts[1]))
}
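
// TestUpdatePortsDuplicate checks that publishing an already-published
// port does not create a duplicate entry.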
func TestUpdatePortsDuplicate(t *testing.T) {
	// Test case for #25375
	flags := newUpdateCommand(nil).Flags()
	flags.Set("publish-add", "80:80")

	portConfigs := []swarm.PortConfig{
		{
			TargetPort:    80,
			PublishedPort: 80,
			Protocol:      swarm.PortConfigProtocolTCP,
			PublishMode:   swarm.PortConfigPublishModeIngress,
		},
	}

	err := updatePorts(flags, &portConfigs)
	assert.NilError(t, err)
	assert.Assert(t, is.Len(portConfigs, 1))
	assert.Check(t, is.Equal(uint32(80), portConfigs[0].TargetPort))
}
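
// TestUpdateHealthcheckTable is a table-driven test for updateHealthcheck,
// covering the individual --health-* flags, resets via empty or zero values,
// and the conflict between --no-healthcheck and the --health-* options.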
func TestUpdateHealthcheckTable(t *testing.T) {
	type test struct {
		flags    [][2]string
		initial  *container.HealthConfig
		expected *container.HealthConfig
		err      string
	}
	testCases := []test{
		{
			flags:    [][2]string{{"no-healthcheck", "true"}},
			initial:  &container.HealthConfig{Test: []string{"CMD-SHELL", "cmd1"}, Retries: 10},
			expected: &container.HealthConfig{Test: []string{"NONE"}},
		},
		{
			flags:    [][2]string{{"health-cmd", "cmd1"}},
			initial:  &container.HealthConfig{Test: []string{"NONE"}},
			expected: &container.HealthConfig{Test: []string{"CMD-SHELL", "cmd1"}},
		},
		{
			flags:    [][2]string{{"health-retries", "10"}},
			initial:  &container.HealthConfig{Test: []string{"NONE"}},
			expected: &container.HealthConfig{Retries: 10},
		},
		{
			flags:    [][2]string{{"health-retries", "10"}},
			initial:  &container.HealthConfig{Test: []string{"CMD", "cmd1"}},
			expected: &container.HealthConfig{Test: []string{"CMD", "cmd1"}, Retries: 10},
		},
		{
			flags:    [][2]string{{"health-interval", "1m"}},
			initial:  &container.HealthConfig{Test: []string{"CMD", "cmd1"}},
			expected: &container.HealthConfig{Test: []string{"CMD", "cmd1"}, Interval: time.Minute},
		},
		{
			flags:    [][2]string{{"health-cmd", ""}},
			initial:  &container.HealthConfig{Test: []string{"CMD", "cmd1"}, Retries: 10},
			expected: &container.HealthConfig{Retries: 10},
		},
		{
			flags:    [][2]string{{"health-retries", "0"}},
			initial:  &container.HealthConfig{Test: []string{"CMD", "cmd1"}, Retries: 10},
			expected: &container.HealthConfig{Test: []string{"CMD", "cmd1"}},
		},
		{
			flags:    [][2]string{{"health-start-period", "1m"}},
			initial:  &container.HealthConfig{Test: []string{"CMD", "cmd1"}},
			expected: &container.HealthConfig{Test: []string{"CMD", "cmd1"}, StartPeriod: time.Minute},
		},
		{
			flags: [][2]string{{"health-cmd", "cmd1"}, {"no-healthcheck", "true"}},
			err:   "--no-healthcheck conflicts with --health-* options",
		},
		{
			flags: [][2]string{{"health-interval", "10m"}, {"no-healthcheck", "true"}},
			err:   "--no-healthcheck conflicts with --health-* options",
		},
		{
			flags: [][2]string{{"health-timeout", "1m"}, {"no-healthcheck", "true"}},
			err:   "--no-healthcheck conflicts with --health-* options",
		},
	}
	for i, c := range testCases {
		flags := newUpdateCommand(nil).Flags()
		for _, flag := range c.flags {
			flags.Set(flag[0], flag[1])
		}
		cspec := &swarm.ContainerSpec{
			Healthcheck: c.initial,
		}
		err := updateHealthcheck(flags, cspec)
		if c.err != "" {
			assert.Error(t, err, c.err)
		} else {
			assert.NilError(t, err)
			if !reflect.DeepEqual(cspec.Healthcheck, c.expected) {
				t.Errorf("incorrect result for test %d, expected health config:\n\t%#v\ngot:\n\t%#v", i, c.expected, cspec.Healthcheck)
			}
		}
	}
}
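
// TestUpdateHosts checks adding and removing extra hosts, including IPv6
// addresses and the special "host-gateway" target; kept entries preserve
// their original order and added entries are appended.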
func TestUpdateHosts(t *testing.T) {
	flags := newUpdateCommand(nil).Flags()
	flags.Set("host-add", "example.net:2.2.2.2")
	flags.Set("host-add", "ipv6.net:2001:db8:abc8::1")
	// adding the special "host-gateway" target should work
	flags.Set("host-add", "host.docker.internal:host-gateway")

	// remove with ipv6 should work
	flags.Set("host-rm", "example.net:2001:db8:abc8::1")
	// just hostname should work as well
	flags.Set("host-rm", "example.net")
	// removing the special "host-gateway" target should work
	flags.Set("host-rm", "gateway.docker.internal:host-gateway")

	// bad format error
	assert.ErrorContains(t, flags.Set("host-add", "$example.com$"), `bad format for add-host: "$example.com$"`)

	hosts := []string{"1.2.3.4 example.com", "4.3.2.1 example.org", "2001:db8:abc8::1 example.net", "host-gateway gateway.docker.internal"}
	expected := []string{"1.2.3.4 example.com", "4.3.2.1 example.org", "2.2.2.2 example.net", "2001:db8:abc8::1 ipv6.net", "host-gateway host.docker.internal"}

	err := updateHosts(flags, &hosts)
	assert.NilError(t, err)
	assert.Check(t, is.DeepEqual(expected, hosts))
}
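
// TestUpdateHostsPreservesOrder checks that added hosts keep the order in
// which they were passed, and that duplicate hostnames are allowed.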
func TestUpdateHostsPreservesOrder(t *testing.T) {
	flags := newUpdateCommand(nil).Flags()
	flags.Set("host-add", "foobar:127.0.0.2")
	flags.Set("host-add", "foobar:127.0.0.1")
	flags.Set("host-add", "foobar:127.0.0.3")

	hosts := []string{}
	err := updateHosts(flags, &hosts)
	assert.NilError(t, err)
	assert.Check(t, is.DeepEqual([]string{"127.0.0.2 foobar", "127.0.0.1 foobar", "127.0.0.3 foobar"}, hosts))
}
|
2016-08-18 21:09:07 -04:00
|
|
|
|
Fix service update --host-rm not being granular enough
Removing a host by `<host>:<ip>` should only remove occurences of the host with
a matching IP-address, instead of removing all entries for that host.
In addition, combining `--host-rm` and `--host-add` for the same host should
result in the new host being added.
This patch fixes the way the diff is calculated to allow combining
removing/adding, and to support entries having both a canonical, and aliases.
Aliases cannot be added by the CLI, but are supported in the Service spec, thus
should be taken into account:
Entries can be removed by either a specific `<host-name>:<ip-address>`
mapping, or by `<host>` alone:
- If both IP-address and host-name is provided, only remove the hostname
from entries that match the given IP-address.
- If only a host-name is provided, remove the hostname from any entry it
is part of (either as _canonical_ host-name, or as _alias_).
- If, after removing the host-name from an entry, no host-names remain in
the entry, the entry itself should be removed.
For example, the list of host-entries before processing could look like this:
hosts = &[]string{
"127.0.0.2 host3 host1 host2 host4",
"127.0.0.1 host1 host4",
"127.0.0.3 host1",
"127.0.0.1 host1",
}
Removing `host1` removes every occurrence:
hosts = &[]string{
"127.0.0.2 host3 host2 host4",
"127.0.0.1 host4",
}
Whereas removing `host1:127.0.0.1` only remove the host if the IP-address matches:
hosts = &[]string{
"127.0.0.2 host3 host1 host2 host4",
"127.0.0.1 host4",
"127.0.0.3 host1",
}
Before this patch:
$ docker service create --name my-service --host foo:127.0.0.1 --host foo:127.0.0.2 --host foo:127.0.0.3 nginx:alpine
$ docker service update --host-rm foo:127.0.0.1 --host-add foo:127.0.0.4 my-service
$ docker service inspect --format '{{.Spec.TaskTemplate.ContainerSpec.Hosts}}' my-service
[]
After this patch is applied:
$ docker service create --name my-service --host foo:127.0.0.1 --host foo:127.0.0.2 --host foo:127.0.0.3 nginx:alpine
$ docker service update --host-rm foo:127.0.0.1 --host-add foo:127.0.0.5 my-service
$ docker service inspect --format '{{.Spec.TaskTemplate.ContainerSpec.Hosts}}' my-service
[127.0.0.2 foo 127.0.0.3 foo 127.0.0.4 foo]
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2017-10-28 13:34:58 -04:00
|
|
|
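// TestUpdateHostsReplaceEntry verifies that "--host-rm" is granular: it only
// removes entries whose IP-address matches, and combining it with
// "--host-add" for the same host adds the new mapping in a single update.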
func TestUpdateHostsReplaceEntry(t *testing.T) {
	flags := newUpdateCommand(nil).Flags()
	flags.Set("host-add", "foobar:127.0.0.4")
	flags.Set("host-rm", "foobar:127.0.0.2")

	hosts := []string{"127.0.0.2 foobar", "127.0.0.1 foobar", "127.0.0.3 foobar"}

	err := updateHosts(flags, &hosts)
	assert.NilError(t, err)
	assert.Check(t, is.DeepEqual([]string{"127.0.0.1 foobar", "127.0.0.3 foobar", "127.0.0.4 foobar"}, hosts))
}

func TestUpdateHostsRemoveHost(t *testing.T) {
	flags := newUpdateCommand(nil).Flags()
	flags.Set("host-rm", "host1")

	hosts := []string{"127.0.0.2 host3 host1 host2 host4", "127.0.0.1 host1 host4", "127.0.0.3 host1"}

	err := updateHosts(flags, &hosts)
	assert.NilError(t, err)

	// Removing host `host1` should remove the entry from each line it appears in.
	// If there are no other hosts in the entry, the entry itself should be removed.
	assert.Check(t, is.DeepEqual([]string{"127.0.0.2 host3 host2 host4", "127.0.0.1 host4"}, hosts))
}

func TestUpdateHostsRemoveHostIP(t *testing.T) {
	flags := newUpdateCommand(nil).Flags()
	flags.Set("host-rm", "host1:127.0.0.1")

	hosts := []string{"127.0.0.2 host3 host1 host2 host4", "127.0.0.1 host1 host4", "127.0.0.3 host1", "127.0.0.1 host1"}

	err := updateHosts(flags, &hosts)
	assert.NilError(t, err)

	// Removing host `host1` should remove the entry from each line it appears in,
	// but only if the IP-address matches. If there are no other hosts in the entry,
	// the entry itself should be removed.
	assert.Check(t, is.DeepEqual([]string{"127.0.0.2 host3 host1 host2 host4", "127.0.0.1 host4", "127.0.0.3 host1"}, hosts))
}

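// TestUpdateHostsRemoveAll verifies that removing a host by name alone
// removes all of its entries, and that combining "--host-rm <host>" with
// "--host-add <host>:<ip>" results in the new mapping being added.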
func TestUpdateHostsRemoveAll(t *testing.T) {
	flags := newUpdateCommand(nil).Flags()
	flags.Set("host-add", "host-three:127.0.0.4")
	flags.Set("host-add", "host-one:127.0.0.5")
	flags.Set("host-rm", "host-one")

	hosts := []string{"127.0.0.1 host-one", "127.0.0.2 host-two", "127.0.0.3 host-one"}

	err := updateHosts(flags, &hosts)
	assert.NilError(t, err)
	assert.Check(t, is.DeepEqual([]string{"127.0.0.2 host-two", "127.0.0.4 host-three", "127.0.0.5 host-one"}, hosts))
}

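// TestUpdatePortsRmWithProtocol verifies that "--publish-rm" takes the
// protocol into account when one is given: removing "82/udp" leaves the
// "82/tcp" mapping in place, and removing "81/tcp" does not affect the
// "8081:81" mapping added in the same update.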
func TestUpdatePortsRmWithProtocol(t *testing.T) {
	flags := newUpdateCommand(nil).Flags()
	flags.Set("publish-add", "8081:81")
	flags.Set("publish-add", "8082:82")
	flags.Set("publish-rm", "80")
	flags.Set("publish-rm", "81/tcp")
	flags.Set("publish-rm", "82/udp")

	portConfigs := []swarm.PortConfig{
		{
			TargetPort:    80,
			PublishedPort: 8080,
			Protocol:      swarm.PortConfigProtocolTCP,
			PublishMode:   swarm.PortConfigPublishModeIngress,
		},
	}

	err := updatePorts(flags, &portConfigs)
	assert.NilError(t, err)
	assert.Assert(t, is.Len(portConfigs, 2))
	assert.Check(t, is.Equal(uint32(81), portConfigs[0].TargetPort))
	assert.Check(t, is.Equal(uint32(82), portConfigs[1].TargetPort))
}

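// secretAPIClientMock is a minimal mock of the secret API client used by
// getUpdatedSecrets: SecretList returns a fixed list of secrets, and all
// other methods are no-ops.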
type secretAPIClientMock struct {
	listResult []swarm.Secret
}

func (s secretAPIClientMock) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) {
	return s.listResult, nil
}

func (s secretAPIClientMock) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) {
	return types.SecretCreateResponse{}, nil
}

func (s secretAPIClientMock) SecretRemove(ctx context.Context, id string) error {
	return nil
}

func (s secretAPIClientMock) SecretInspectWithRaw(ctx context.Context, name string) (swarm.Secret, []byte, error) {
	return swarm.Secret{}, []byte{}, nil
}

func (s secretAPIClientMock) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error {
	return nil
}

// TestUpdateSecretUpdateInPlace tests the ability to update the "target" of a
// secret with "docker service update" by combining "--secret-rm" and
// "--secret-add" for the same secret.
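//
// For example: docker service update --secret-rm foo --secret-add source=foo,target=foo2 myservice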
func TestUpdateSecretUpdateInPlace(t *testing.T) {
	apiClient := secretAPIClientMock{
		listResult: []swarm.Secret{
			{
				ID:   "tn9qiblgnuuut11eufquw5dev",
				Spec: swarm.SecretSpec{Annotations: swarm.Annotations{Name: "foo"}},
			},
		},
	}

	flags := newUpdateCommand(nil).Flags()
	flags.Set("secret-add", "source=foo,target=foo2")
	flags.Set("secret-rm", "foo")

	secrets := []*swarm.SecretReference{
		{
			File: &swarm.SecretReferenceFileTarget{
				Name: "foo",
				UID:  "0",
				GID:  "0",
				Mode: 292,
			},
			SecretID:   "tn9qiblgnuuut11eufquw5dev",
			SecretName: "foo",
		},
	}

	updatedSecrets, err := getUpdatedSecrets(apiClient, flags, secrets)

	assert.NilError(t, err)
	assert.Assert(t, is.Len(updatedSecrets, 1))
	assert.Check(t, is.Equal("tn9qiblgnuuut11eufquw5dev", updatedSecrets[0].SecretID))
	assert.Check(t, is.Equal("foo", updatedSecrets[0].SecretName))
	assert.Check(t, is.Equal("foo2", updatedSecrets[0].File.Name))
}

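// TestUpdateReadOnly verifies that "--read-only" updates the read-only flag
// of the container spec, and that the current value is preserved when the
// flag is not set.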
func TestUpdateReadOnly(t *testing.T) {
	spec := &swarm.ServiceSpec{
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: &swarm.ContainerSpec{},
		},
	}
	cspec := spec.TaskTemplate.ContainerSpec

	// Update with --read-only=true, changed to true
	flags := newUpdateCommand(nil).Flags()
	flags.Set("read-only", "true")
	updateService(context.TODO(), nil, flags, spec)
	assert.Check(t, cspec.ReadOnly)

	// Update without --read-only, no change
	flags = newUpdateCommand(nil).Flags()
	updateService(context.TODO(), nil, flags, spec)
	assert.Check(t, cspec.ReadOnly)

	// Update with --read-only=false, changed to false
	flags = newUpdateCommand(nil).Flags()
	flags.Set("read-only", "false")
	updateService(context.TODO(), nil, flags, spec)
	assert.Check(t, !cspec.ReadOnly)
}

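// TestUpdateInit verifies that "--init" updates the init-setting of the
// container spec, and that the current value is preserved when the flag is
// not set.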
func TestUpdateInit(t *testing.T) {
	spec := &swarm.ServiceSpec{
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: &swarm.ContainerSpec{},
		},
	}
	cspec := spec.TaskTemplate.ContainerSpec

	// Update with --init=true
	flags := newUpdateCommand(nil).Flags()
	flags.Set("init", "true")
	updateService(context.TODO(), nil, flags, spec)
	assert.Check(t, is.Equal(true, *cspec.Init))

	// Update without --init, no change
	flags = newUpdateCommand(nil).Flags()
	updateService(context.TODO(), nil, flags, spec)
	assert.Check(t, is.Equal(true, *cspec.Init))

	// Update with --init=false
	flags = newUpdateCommand(nil).Flags()
	flags.Set("init", "false")
	updateService(context.TODO(), nil, flags, spec)
	assert.Check(t, is.Equal(false, *cspec.Init))
}

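// TestUpdateStopSignal verifies that "--stop-signal" updates the stop-signal
// of the container spec, and that the current value is preserved when the
// flag is not set.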
func TestUpdateStopSignal(t *testing.T) {
	spec := &swarm.ServiceSpec{
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: &swarm.ContainerSpec{},
		},
	}
	cspec := spec.TaskTemplate.ContainerSpec

	// Update with --stop-signal=SIGUSR1
	flags := newUpdateCommand(nil).Flags()
	flags.Set("stop-signal", "SIGUSR1")
	updateService(context.TODO(), nil, flags, spec)
	assert.Check(t, is.Equal("SIGUSR1", cspec.StopSignal))

	// Update without --stop-signal, no change
	flags = newUpdateCommand(nil).Flags()
	updateService(context.TODO(), nil, flags, spec)
	assert.Check(t, is.Equal("SIGUSR1", cspec.StopSignal))

	// Update with --stop-signal=SIGWINCH
	flags = newUpdateCommand(nil).Flags()
	flags.Set("stop-signal", "SIGWINCH")
	updateService(context.TODO(), nil, flags, spec)
	assert.Check(t, is.Equal("SIGWINCH", cspec.StopSignal))
}

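// TestUpdateIsolationValid verifies that a valid "--isolation" value is
// applied to the container spec.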
func TestUpdateIsolationValid(t *testing.T) {
	flags := newUpdateCommand(nil).Flags()
	err := flags.Set("isolation", "process")
	assert.NilError(t, err)
	spec := swarm.ServiceSpec{
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: &swarm.ContainerSpec{},
		},
	}
	err = updateService(context.Background(), nil, flags, &spec)
	assert.NilError(t, err)
	assert.Check(t, is.Equal(container.IsolationProcess, spec.TaskTemplate.ContainerSpec.Isolation))
}

// TestUpdateLimitsReservations tests that limits and reservations are updated,
// and that values that are not updated are not reset to their default value.
func TestUpdateLimitsReservations(t *testing.T) {
	// test that updating works if the service did not previously
	// have limits set (https://github.com/moby/moby/issues/38363)
	t.Run("update limits from scratch", func(t *testing.T) {
		spec := swarm.ServiceSpec{
			TaskTemplate: swarm.TaskSpec{
				ContainerSpec: &swarm.ContainerSpec{},
			},
		}
		flags := newUpdateCommand(nil).Flags()
		err := flags.Set(flagLimitCPU, "2")
		assert.NilError(t, err)
		err = flags.Set(flagLimitMemory, "200M")
		assert.NilError(t, err)
		err = flags.Set(flagLimitPids, "100")
		assert.NilError(t, err)
		err = updateService(context.Background(), nil, flags, &spec)
		assert.NilError(t, err)
		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.NanoCPUs, int64(2000000000)))
		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.MemoryBytes, int64(209715200)))
		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.Pids, int64(100)))
	})

	// test that updating works if the service did not previously
	// have reservations set (https://github.com/moby/moby/issues/38363)
	t.Run("update reservations from scratch", func(t *testing.T) {
		spec := swarm.ServiceSpec{
			TaskTemplate: swarm.TaskSpec{
				ContainerSpec: &swarm.ContainerSpec{},
			},
		}
		flags := newUpdateCommand(nil).Flags()
		err := flags.Set(flagReserveCPU, "2")
		assert.NilError(t, err)
		err = flags.Set(flagReserveMemory, "200M")
		assert.NilError(t, err)
		err = updateService(context.Background(), nil, flags, &spec)
		assert.NilError(t, err)
	})

	spec := swarm.ServiceSpec{
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: &swarm.ContainerSpec{},
			Resources: &swarm.ResourceRequirements{
				Limits: &swarm.Limit{
					NanoCPUs:    1000000000,
					MemoryBytes: 104857600,
					Pids:        100,
				},
				Reservations: &swarm.Resources{
					NanoCPUs:    1000000000,
					MemoryBytes: 104857600,
				},
			},
		},
	}

	// Updating without flags set should not modify existing values
	t.Run("update without flags set", func(t *testing.T) {
		flags := newUpdateCommand(nil).Flags()
		err := updateService(context.Background(), nil, flags, &spec)
		assert.NilError(t, err)
		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.NanoCPUs, int64(1000000000)))
		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.MemoryBytes, int64(104857600)))
		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.Pids, int64(100)))
		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.NanoCPUs, int64(1000000000)))
		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.MemoryBytes, int64(104857600)))
	})

	// Updating CPU limit/reservation should not affect memory limit/reservation
	// and pids-limit
	t.Run("update cpu limit and reservation", func(t *testing.T) {
		flags := newUpdateCommand(nil).Flags()
		err := flags.Set(flagLimitCPU, "2")
		assert.NilError(t, err)
		err = flags.Set(flagReserveCPU, "2")
		assert.NilError(t, err)
		err = updateService(context.Background(), nil, flags, &spec)
		assert.NilError(t, err)
		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.NanoCPUs, int64(2000000000)))
		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.MemoryBytes, int64(104857600)))
		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.Pids, int64(100)))
		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.NanoCPUs, int64(2000000000)))
		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.MemoryBytes, int64(104857600)))
	})

	// Updating Memory limit/reservation should not affect CPU limit/reservation
	// and pids-limit
	t.Run("update memory limit and reservation", func(t *testing.T) {
		flags := newUpdateCommand(nil).Flags()
		err := flags.Set(flagLimitMemory, "200M")
		assert.NilError(t, err)
		err = flags.Set(flagReserveMemory, "200M")
		assert.NilError(t, err)
		err = updateService(context.Background(), nil, flags, &spec)
		assert.NilError(t, err)
		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.NanoCPUs, int64(2000000000)))
		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.MemoryBytes, int64(209715200)))
		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.Pids, int64(100)))
		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.NanoCPUs, int64(2000000000)))
		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.MemoryBytes, int64(209715200)))
	})

	// Updating PidsLimit should only modify PidsLimit, other values unchanged
	t.Run("update pids limit", func(t *testing.T) {
		flags := newUpdateCommand(nil).Flags()
		err := flags.Set(flagLimitPids, "2")
		assert.NilError(t, err)
		err = updateService(context.Background(), nil, flags, &spec)
		assert.NilError(t, err)
		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.NanoCPUs, int64(2000000000)))
		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.MemoryBytes, int64(209715200)))
		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.Pids, int64(2)))
		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.NanoCPUs, int64(2000000000)))
		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.MemoryBytes, int64(209715200)))
	})

	t.Run("update pids limit to default", func(t *testing.T) {
		// Updating PidsLimit to 0 should work
		flags := newUpdateCommand(nil).Flags()
		err := flags.Set(flagLimitPids, "0")
		assert.NilError(t, err)
		err = updateService(context.Background(), nil, flags, &spec)
		assert.NilError(t, err)
		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.NanoCPUs, int64(2000000000)))
		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.MemoryBytes, int64(209715200)))
		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Limits.Pids, int64(0)))
		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.NanoCPUs, int64(2000000000)))
		assert.Check(t, is.Equal(spec.TaskTemplate.Resources.Reservations.MemoryBytes, int64(209715200)))
	})
}

func TestUpdateIsolationInvalid(t *testing.T) {
	// Validation depends on the daemon's OS and version, so it should be
	// done on the daemon side.
	flags := newUpdateCommand(nil).Flags()
	err := flags.Set("isolation", "test")
	assert.NilError(t, err)
	spec := swarm.ServiceSpec{
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: &swarm.ContainerSpec{},
		},
	}
	err = updateService(context.Background(), nil, flags, &spec)
	assert.NilError(t, err)
	assert.Check(t, is.Equal(container.Isolation("test"), spec.TaskTemplate.ContainerSpec.Isolation))
}

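// TestAddGenericResources verifies that generic resources are added to the
// task's reservations, and that a resource that is already present is not
// added a second time.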
func TestAddGenericResources(t *testing.T) {
	task := &swarm.TaskSpec{}
	flags := newUpdateCommand(nil).Flags()

	assert.Check(t, addGenericResources(flags, task))

	flags.Set(flagGenericResourcesAdd, "foo=1")
	assert.Check(t, addGenericResources(flags, task))
	assert.Check(t, is.Len(task.Resources.Reservations.GenericResources, 1))

	// Check that foo isn't added a second time
	flags = newUpdateCommand(nil).Flags()
	flags.Set(flagGenericResourcesAdd, "bar=1")
	assert.Check(t, addGenericResources(flags, task))
	assert.Check(t, is.Len(task.Resources.Reservations.GenericResources, 2))
}

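// TestRemoveGenericResources verifies that generic resources are removed
// from the task's reservations, and that removing a resource that is not
// set produces an error.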
func TestRemoveGenericResources(t *testing.T) {
	task := &swarm.TaskSpec{}
	flags := newUpdateCommand(nil).Flags()

	assert.Check(t, removeGenericResources(flags, task))

	flags.Set(flagGenericResourcesRemove, "foo")
	assert.Check(t, is.ErrorContains(removeGenericResources(flags, task), ""))

	flags = newUpdateCommand(nil).Flags()
	flags.Set(flagGenericResourcesAdd, "foo=1")
	addGenericResources(flags, task)
	flags = newUpdateCommand(nil).Flags()
	flags.Set(flagGenericResourcesAdd, "bar=1")
	addGenericResources(flags, task)

	flags = newUpdateCommand(nil).Flags()
	flags.Set(flagGenericResourcesRemove, "foo")
	assert.Check(t, removeGenericResources(flags, task))
	assert.Check(t, is.Len(task.Resources.Reservations.GenericResources, 1))
}

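// TestUpdateNetworks verifies that networks can be added and removed by name
// or by ID, that attaching a network the service is already attached to is
// rejected, and that the list of attachments is stored sorted by network ID.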
func TestUpdateNetworks(t *testing.T) {
	ctx := context.Background()
	nws := []types.NetworkResource{
		{Name: "aaa-network", ID: "id555"},
		{Name: "mmm-network", ID: "id999"},
		{Name: "zzz-network", ID: "id111"},
	}

	client := &fakeClient{
		networkInspectFunc: func(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, error) {
			// Resolve by ID or by name, mirroring the daemon's lookup behavior.
			for _, network := range nws {
				if network.ID == networkID || network.Name == networkID {
					return network, nil
				}
			}
			return types.NetworkResource{}, fmt.Errorf("network not found: %s", networkID)
		},
	}

	svc := swarm.ServiceSpec{
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: &swarm.ContainerSpec{},
			Networks: []swarm.NetworkAttachmentConfig{
				{Target: "id999"},
			},
		},
	}

	flags := newUpdateCommand(nil).Flags()
	err := flags.Set(flagNetworkAdd, "aaa-network")
	assert.NilError(t, err)
	err = updateService(ctx, client, flags, &svc)
	assert.NilError(t, err)
	assert.Check(t, is.DeepEqual([]swarm.NetworkAttachmentConfig{{Target: "id555"}, {Target: "id999"}}, svc.TaskTemplate.Networks))

	flags = newUpdateCommand(nil).Flags()
	err = flags.Set(flagNetworkAdd, "aaa-network")
	assert.NilError(t, err)
	err = updateService(ctx, client, flags, &svc)
	assert.Error(t, err, "service is already attached to network aaa-network")
	assert.Check(t, is.DeepEqual([]swarm.NetworkAttachmentConfig{{Target: "id555"}, {Target: "id999"}}, svc.TaskTemplate.Networks))

	flags = newUpdateCommand(nil).Flags()
	err = flags.Set(flagNetworkAdd, "id555")
	assert.NilError(t, err)
	err = updateService(ctx, client, flags, &svc)
	assert.Error(t, err, "service is already attached to network id555")
	assert.Check(t, is.DeepEqual([]swarm.NetworkAttachmentConfig{{Target: "id555"}, {Target: "id999"}}, svc.TaskTemplate.Networks))

	flags = newUpdateCommand(nil).Flags()
	err = flags.Set(flagNetworkRemove, "id999")
	assert.NilError(t, err)
	err = updateService(ctx, client, flags, &svc)
	assert.NilError(t, err)
	assert.Check(t, is.DeepEqual([]swarm.NetworkAttachmentConfig{{Target: "id555"}}, svc.TaskTemplate.Networks))

	flags = newUpdateCommand(nil).Flags()
	err = flags.Set(flagNetworkAdd, "mmm-network")
	assert.NilError(t, err)
	err = flags.Set(flagNetworkRemove, "aaa-network")
	assert.NilError(t, err)
	err = updateService(ctx, client, flags, &svc)
	assert.NilError(t, err)
	assert.Check(t, is.DeepEqual([]swarm.NetworkAttachmentConfig{{Target: "id999"}}, svc.TaskTemplate.Networks))
}
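
// TestUpdateMaxReplicas verifies that setting flagMaxReplicas updates the
// task placement's MaxReplicas field.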
func TestUpdateMaxReplicas(t *testing.T) {
	ctx := context.Background()

	svc := swarm.ServiceSpec{
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: &swarm.ContainerSpec{},
			Placement: &swarm.Placement{
				MaxReplicas: 1,
			},
		},
	}

	flags := newUpdateCommand(nil).Flags()
	flags.Set(flagMaxReplicas, "2")
	err := updateService(ctx, nil, flags, &svc)
	assert.NilError(t, err)

	assert.DeepEqual(t, svc.TaskTemplate.Placement, &swarm.Placement{MaxReplicas: uint64(2)})
}
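
// TestUpdateSysCtls covers --sysctl-add and --sysctl-rm. An add overwrites an
// existing value for the same key, an add wins over a remove of the same entry
// in one update, and removes match on the key alone, so the value given to
// --sysctl-rm is ignored. An illustrative invocation (not executed by this
// test) would be:
//
//	docker service update --sysctl-add net.core.somaxconn=1024 myservice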
func TestUpdateSysCtls(t *testing.T) {
	ctx := context.Background()

	tests := []struct {
		name     string
		spec     map[string]string
		add      []string
		rm       []string
		expected map[string]string
	}{
		{
			name:     "from scratch",
			add:      []string{"sysctl.zet=value-99", "sysctl.alpha=value-1"},
			expected: map[string]string{"sysctl.zet": "value-99", "sysctl.alpha": "value-1"},
		},
		{
			name:     "append new",
			spec:     map[string]string{"sysctl.one": "value-1", "sysctl.two": "value-2"},
			add:      []string{"new.sysctl=newvalue"},
			expected: map[string]string{"sysctl.one": "value-1", "sysctl.two": "value-2", "new.sysctl": "newvalue"},
		},
		{
			name:     "append duplicate is a no-op",
			spec:     map[string]string{"sysctl.one": "value-1", "sysctl.two": "value-2"},
			add:      []string{"sysctl.one=value-1"},
			expected: map[string]string{"sysctl.one": "value-1", "sysctl.two": "value-2"},
		},
		{
			name:     "remove and append existing is a no-op",
			spec:     map[string]string{"sysctl.one": "value-1", "sysctl.two": "value-2"},
			add:      []string{"sysctl.one=value-1"},
			rm:       []string{"sysctl.one=value-1"},
			expected: map[string]string{"sysctl.one": "value-1", "sysctl.two": "value-2"},
		},
		{
			name:     "remove and append new should append",
			spec:     map[string]string{"sysctl.one": "value-1", "sysctl.two": "value-2"},
			add:      []string{"new.sysctl=newvalue"},
			rm:       []string{"new.sysctl=newvalue"},
			expected: map[string]string{"sysctl.one": "value-1", "sysctl.two": "value-2", "new.sysctl": "newvalue"},
		},
		{
			name:     "update existing",
			spec:     map[string]string{"sysctl.one": "value-1", "sysctl.two": "value-2"},
			add:      []string{"sysctl.one=newvalue"},
			expected: map[string]string{"sysctl.one": "newvalue", "sysctl.two": "value-2"},
		},
		{
			name:     "update existing twice",
			spec:     map[string]string{"sysctl.one": "value-1", "sysctl.two": "value-2"},
			add:      []string{"sysctl.one=newvalue", "sysctl.one=evennewervalue"},
			expected: map[string]string{"sysctl.one": "evennewervalue", "sysctl.two": "value-2"},
		},
		{
			name:     "remove all",
			spec:     map[string]string{"sysctl.one": "value-1", "sysctl.two": "value-2"},
			rm:       []string{"sysctl.one=value-1", "sysctl.two=value-2"},
			expected: map[string]string{},
		},
		{
			name:     "remove by key",
			spec:     map[string]string{"sysctl.one": "value-1", "sysctl.two": "value-2"},
			rm:       []string{"sysctl.one"},
			expected: map[string]string{"sysctl.two": "value-2"},
		},
		{
			name:     "remove by key and different value",
			spec:     map[string]string{"sysctl.one": "value-1", "sysctl.two": "value-2"},
			rm:       []string{"sysctl.one=anyvalueyoulike"},
			expected: map[string]string{"sysctl.two": "value-2"},
		},
	}
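
	// Each case feeds its add/rm values through the real flag parser, then
	// runs updateService and compares the resulting Sysctls map.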
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			svc := swarm.ServiceSpec{
				TaskTemplate: swarm.TaskSpec{
					ContainerSpec: &swarm.ContainerSpec{Sysctls: tc.spec},
				},
			}
			flags := newUpdateCommand(nil).Flags()
			for _, v := range tc.add {
				assert.NilError(t, flags.Set(flagSysCtlAdd, v))
			}
			for _, v := range tc.rm {
				assert.NilError(t, flags.Set(flagSysCtlRemove, v))
			}
			err := updateService(ctx, &fakeClient{}, flags, &svc)
			assert.NilError(t, err)
			if !assert.Check(t, is.DeepEqual(svc.TaskTemplate.ContainerSpec.Sysctls, tc.expected)) {
				t.Logf("expected: %v", tc.expected)
				t.Logf("actual: %v", svc.TaskTemplate.ContainerSpec.Sysctls)
			}
		})
	}
}
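
// TestUpdateGetUpdatedConfigs checks how getUpdatedConfigs merges --config-add
// and --config-rm with credential-spec changes: a config:// credential spec is
// backed by a runtime-target config reference, which must be added, replaced,
// or dropped along with the credential spec itself.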
func TestUpdateGetUpdatedConfigs(t *testing.T) {
	// cannedConfigs is a set of configs that we'll use over and over in the
	// tests. It's a map of Name to Config.
	cannedConfigs := map[string]*swarm.Config{
		"bar": {
			ID: "barID",
			Spec: swarm.ConfigSpec{
				Annotations: swarm.Annotations{
					Name: "bar",
				},
			},
		},
		"cred": {
			ID: "credID",
			Spec: swarm.ConfigSpec{
				Annotations: swarm.Annotations{
					Name: "cred",
				},
			},
		},
		"newCred": {
			ID: "newCredID",
			Spec: swarm.ConfigSpec{
				Annotations: swarm.Annotations{
					Name: "newCred",
				},
			},
		},
	}
	// cannedConfigRefs is the same thing, but with config references. Its
	// keys are arbitrary strings rather than IDs, so multiple config refs
	// can use the same config.
	cannedConfigRefs := map[string]*swarm.ConfigReference{
		"fooRef": {
			ConfigID:   "fooID",
			ConfigName: "foo",
			File: &swarm.ConfigReferenceFileTarget{
				Name: "foo",
				UID:  "0",
				GID:  "0",
				Mode: 0444,
			},
		},
		"barRef": {
			ConfigID:   "barID",
			ConfigName: "bar",
			File: &swarm.ConfigReferenceFileTarget{
				Name: "bar",
				UID:  "0",
				GID:  "0",
				Mode: 0444,
			},
		},
		"bazRef": {
			ConfigID:   "bazID",
			ConfigName: "baz",
			File: &swarm.ConfigReferenceFileTarget{
				Name: "baz",
				UID:  "0",
				GID:  "0",
				Mode: 0444,
			},
		},
		"credRef": {
			ConfigID:   "credID",
			ConfigName: "cred",
			Runtime:    &swarm.ConfigReferenceRuntimeTarget{},
		},
		"newCredRef": {
			ConfigID:   "newCredID",
			ConfigName: "newCred",
			Runtime:    &swarm.ConfigReferenceRuntimeTarget{},
		},
	}

	type flagVal [2]string
	type test struct {
		// the name of the subtest
		name string
		// flags are the flags we'll be setting
		flags []flagVal
		// oldConfigs are the configs that would already be on the service.
		// It is a slice of strings corresponding to keys of cannedConfigRefs.
		oldConfigs []string
		// oldCredSpec is the credentialSpec being carried over from the old
		// object
		oldCredSpec *swarm.CredentialSpec
		// lookupConfigs are the configs we're expecting to be listed. It is
		// a slice of strings corresponding to keys of cannedConfigs.
		lookupConfigs []string
		// expected is the configs we should get as a result. It is a slice
		// of strings corresponding to keys of cannedConfigRefs.
		expected []string
	}

	testCases := []test{
		{
			name:       "no configs added or removed",
			oldConfigs: []string{"fooRef"},
			expected:   []string{"fooRef"},
		}, {
			name:          "add a config",
			flags:         []flagVal{{"config-add", "bar"}},
			oldConfigs:    []string{"fooRef"},
			lookupConfigs: []string{"bar"},
			expected:      []string{"fooRef", "barRef"},
		}, {
			name:       "remove a config",
			flags:      []flagVal{{"config-rm", "bar"}},
			oldConfigs: []string{"fooRef", "barRef"},
			expected:   []string{"fooRef"},
		}, {
			name:        "include an old credential spec",
			oldConfigs:  []string{"credRef"},
			oldCredSpec: &swarm.CredentialSpec{Config: "credID"},
			expected:    []string{"credRef"},
		}, {
			name:          "add a credential spec",
			oldConfigs:    []string{"fooRef"},
			flags:         []flagVal{{"credential-spec", "config://cred"}},
			lookupConfigs: []string{"cred"},
			expected:      []string{"fooRef", "credRef"},
		}, {
			name:          "change a credential spec",
			oldConfigs:    []string{"fooRef", "credRef"},
			oldCredSpec:   &swarm.CredentialSpec{Config: "credID"},
			flags:         []flagVal{{"credential-spec", "config://newCred"}},
			lookupConfigs: []string{"newCred"},
			expected:      []string{"fooRef", "newCredRef"},
		}, {
			name:          "credential spec no longer config",
			oldConfigs:    []string{"fooRef", "credRef"},
			oldCredSpec:   &swarm.CredentialSpec{Config: "credID"},
			flags:         []flagVal{{"credential-spec", "file://someFile"}},
			lookupConfigs: []string{},
			expected:      []string{"fooRef"},
		}, {
			name:          "credential spec becomes config",
			oldConfigs:    []string{"fooRef"},
			oldCredSpec:   &swarm.CredentialSpec{File: "someFile"},
			flags:         []flagVal{{"credential-spec", "config://cred"}},
			lookupConfigs: []string{"cred"},
			expected:      []string{"fooRef", "credRef"},
		}, {
			name:          "remove credential spec",
			oldConfigs:    []string{"fooRef", "credRef"},
			oldCredSpec:   &swarm.CredentialSpec{Config: "credID"},
			flags:         []flagVal{{"credential-spec", ""}},
			lookupConfigs: []string{},
			expected:      []string{"fooRef"},
		}, {
			name: "just frick my stuff up",
			// a more complicated test: add barRef, remove bazRef, keep
			// fooRef, and change credentialSpec from credRef to newCredRef
			oldConfigs:  []string{"fooRef", "bazRef", "credRef"},
			oldCredSpec: &swarm.CredentialSpec{Config: "cred"},
			flags: []flagVal{
				{"config-add", "bar"},
				{"config-rm", "baz"},
				{"credential-spec", "config://newCred"},
			},
			lookupConfigs: []string{"bar", "newCred"},
			expected:      []string{"fooRef", "barRef", "newCredRef"},
		},
	}
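
	// Each case runs getUpdatedConfigs against a stub config-list client that
	// also asserts the lookup filter matches tc.lookupConfigs.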
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			flags := newUpdateCommand(nil).Flags()
			for _, f := range tc.flags {
				flags.Set(f[0], f[1])
			}

			// fakeConfigAPIClientList is actually defined in create_test.go,
			// but we'll use it here as well
			var fakeClient fakeConfigAPIClientList = func(_ context.Context, opts types.ConfigListOptions) ([]swarm.Config, error) {
				names := opts.Filters.Get("name")
				assert.Equal(t, len(names), len(tc.lookupConfigs))

				configs := []swarm.Config{}
				for _, lookup := range tc.lookupConfigs {
					assert.Assert(t, is.Contains(names, lookup))
					cfg, ok := cannedConfigs[lookup]
					assert.Assert(t, ok)
					configs = append(configs, *cfg)
				}
				return configs, nil
			}

			// build the actual set of old configs and the container spec
			oldConfigs := []*swarm.ConfigReference{}
			for _, config := range tc.oldConfigs {
				cfg, ok := cannedConfigRefs[config]
				assert.Assert(t, ok)
				oldConfigs = append(oldConfigs, cfg)
			}

			containerSpec := &swarm.ContainerSpec{
				Configs: oldConfigs,
				Privileges: &swarm.Privileges{
					CredentialSpec: tc.oldCredSpec,
				},
			}

			finalConfigs, err := getUpdatedConfigs(fakeClient, flags, containerSpec)
			assert.NilError(t, err)

			// ensure that finalConfigs consists of all of the expected configs
			assert.Equal(t, len(finalConfigs), len(tc.expected),
				"%v final configs, %v expected",
				len(finalConfigs), len(tc.expected),
			)
			for _, expected := range tc.expected {
				assert.Assert(t, is.Contains(finalConfigs, cannedConfigRefs[expected]))
			}
		})
	}
}
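
// TestUpdateCredSpec verifies updateCredSpecConfig for the file:// form, the
// config:// form, and the empty value that clears the credential spec. For
// the config:// form, the config name is resolved to its ID through the
// service's runtime-target config references.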
func TestUpdateCredSpec(t *testing.T) {
	type testCase struct {
		// name is the name of the subtest
		name string
		// flagVal is the value we're setting flagCredentialSpec to
		flagVal string
		// spec is the existing ContainerSpec with its configs
		spec *swarm.ContainerSpec
		// expected is the expected value of the credential spec after the
		// function runs. It may be nil.
		expected *swarm.CredentialSpec
	}

	testCases := []testCase{
		{
			name:     "add file credential spec",
			flagVal:  "file://somefile",
			spec:     &swarm.ContainerSpec{},
			expected: &swarm.CredentialSpec{File: "somefile"},
		}, {
			name:    "remove a file credential spec",
			flagVal: "",
			spec: &swarm.ContainerSpec{
				Privileges: &swarm.Privileges{
					CredentialSpec: &swarm.CredentialSpec{
						File: "someFile",
					},
				},
			},
			expected: nil,
		}, {
			name:     "remove when no CredentialSpec exists",
			flagVal:  "",
			spec:     &swarm.ContainerSpec{},
			expected: nil,
		}, {
			name:    "add a config credential spec",
			flagVal: "config://someConfigName",
			spec: &swarm.ContainerSpec{
				Configs: []*swarm.ConfigReference{
					{
						ConfigName: "someConfigName",
						ConfigID:   "someConfigID",
						Runtime:    &swarm.ConfigReferenceRuntimeTarget{},
					},
				},
			},
			expected: &swarm.CredentialSpec{
				Config: "someConfigID",
			},
		}, {
			name:    "remove a config credential spec",
			flagVal: "",
			spec: &swarm.ContainerSpec{
				Privileges: &swarm.Privileges{
					CredentialSpec: &swarm.CredentialSpec{
						Config: "someConfigID",
					},
				},
			},
			expected: nil,
		}, {
			name:    "update a config credential spec",
			flagVal: "config://someConfigName",
			spec: &swarm.ContainerSpec{
				Configs: []*swarm.ConfigReference{
					{
						ConfigName: "someConfigName",
						ConfigID:   "someConfigID",
						Runtime:    &swarm.ConfigReferenceRuntimeTarget{},
					},
				},
				Privileges: &swarm.Privileges{
					CredentialSpec: &swarm.CredentialSpec{
						Config: "someDifferentConfigID",
					},
				},
			},
			expected: &swarm.CredentialSpec{
				Config: "someConfigID",
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			flags := newUpdateCommand(nil).Flags()
			flags.Set(flagCredentialSpec, tc.flagVal)

			updateCredSpecConfig(flags, tc.spec)
			// handle the case where tc.spec.Privileges is nil
			if tc.expected == nil {
				assert.Assert(t, tc.spec.Privileges == nil || tc.spec.Privileges.CredentialSpec == nil)
				return
			}

			assert.Assert(t, tc.spec.Privileges != nil)
			assert.DeepEqual(t, tc.spec.Privileges.CredentialSpec, tc.expected)
		})
	}
}
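
// TestUpdateCaps covers --cap-add and --cap-drop. Per the rules these flags
// follow, existing and new capabilities are normalized (upper-cased, with the
// CAP_ prefix added) and de-duplicated, the special "ALL" value subsumes any
// other capability it is combined with, adding takes precedence over dropping,
// updates behave as a tri-state (adding a previously dropped capability merely
// un-drops it, and vice versa), and the special "RESET" value clears the
// corresponding list before the remaining updates are applied. The final
// lists are sorted to reduce service churn. An illustrative invocation (not
// executed by this test) would be:
//
//	docker service update --cap-drop ALL --cap-add CAP_NET_ADMIN myservice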
func TestUpdateCaps(t *testing.T) {
	tests := []struct {
		// name is the name of the testcase
		name string
		// flagAdd is the value passed to --cap-add
		flagAdd []string
		// flagDrop is the value passed to --cap-drop
		flagDrop []string
		// spec is the original ContainerSpec, before being updated
		spec *swarm.ContainerSpec
		// expectedAdd is the set of requested caps the ContainerSpec should have once updated
		expectedAdd []string
		// expectedDrop is the set of dropped caps the ContainerSpec should have once updated
		expectedDrop []string
	}{
		{
			// Note that this won't be run as updateCapabilities is gated by anyChanged(flags, flagCapAdd, flagCapDrop)
			name: "Empty spec, no updates",
			spec: &swarm.ContainerSpec{},
		},
		{
			// Note that this won't be run as updateCapabilities is gated by anyChanged(flags, flagCapAdd, flagCapDrop)
			name: "No updates",
			spec: &swarm.ContainerSpec{
				CapabilityAdd:  []string{"CAP_MOUNT", "CAP_NET_ADMIN"},
				CapabilityDrop: []string{"CAP_CHOWN", "CAP_SYS_ADMIN"},
			},
			expectedAdd:  []string{"CAP_MOUNT", "CAP_NET_ADMIN"},
			expectedDrop: []string{"CAP_CHOWN", "CAP_SYS_ADMIN"},
		},
		{
			// Note that this won't be run as updateCapabilities is gated by anyChanged(flags, flagCapAdd, flagCapDrop)
			name:     "Empty updates",
			flagAdd:  []string{},
			flagDrop: []string{},
			spec: &swarm.ContainerSpec{
				CapabilityAdd:  []string{"CAP_MOUNT", "CAP_NET_ADMIN"},
				CapabilityDrop: []string{"CAP_CHOWN", "CAP_SYS_ADMIN"},
			},
			expectedAdd:  []string{"CAP_MOUNT", "CAP_NET_ADMIN"},
			expectedDrop: []string{"CAP_CHOWN", "CAP_SYS_ADMIN"},
		},
		{
			// Note that this won't be run as updateCapabilities is gated by anyChanged(flags, flagCapAdd, flagCapDrop)
			name:     "Normalize cap-add only",
			flagAdd:  []string{},
			flagDrop: []string{},
			spec: &swarm.ContainerSpec{
				CapabilityAdd: []string{"ALL", "CAP_MOUNT", "CAP_NET_ADMIN"},
			},
			expectedAdd:  []string{"ALL"},
			expectedDrop: nil,
		},
		{
			// Note that this won't be run as updateCapabilities is gated by anyChanged(flags, flagCapAdd, flagCapDrop)
			name: "Normalize cap-drop only",
			spec: &swarm.ContainerSpec{
				CapabilityDrop: []string{"ALL", "CAP_MOUNT", "CAP_NET_ADMIN"},
			},
			expectedDrop: []string{"ALL"},
		},
		{
			name:         "Add new caps",
			flagAdd:      []string{"CAP_NET_ADMIN"},
			flagDrop:     []string{},
			spec:         &swarm.ContainerSpec{},
			expectedAdd:  []string{"CAP_NET_ADMIN"},
			expectedDrop: nil,
		},
		{
			name:         "Drop new caps",
			flagAdd:      []string{},
			flagDrop:     []string{"CAP_NET_ADMIN"},
			spec:         &swarm.ContainerSpec{},
			expectedAdd:  nil,
			expectedDrop: []string{"CAP_NET_ADMIN"},
		},
		{
			name:     "Add a previously dropped cap",
			flagAdd:  []string{"CAP_NET_ADMIN"},
			flagDrop: []string{},
			spec: &swarm.ContainerSpec{
				CapabilityDrop: []string{"CAP_NET_ADMIN"},
			},
			expectedAdd:  nil,
			expectedDrop: nil,
		},
		{
			name:     "Drop a previously requested cap, and add a new one",
			flagAdd:  []string{"CAP_CHOWN"},
			flagDrop: []string{"CAP_NET_ADMIN"},
			spec: &swarm.ContainerSpec{
				CapabilityAdd: []string{"CAP_NET_ADMIN"},
			},
			expectedAdd:  []string{"CAP_CHOWN"},
			expectedDrop: nil,
		},
		{
			name:    "Add caps to service that has ALL caps has no effect",
			flagAdd: []string{"CAP_NET_ADMIN"},
			spec: &swarm.ContainerSpec{
				CapabilityAdd: []string{"ALL"},
			},
			expectedAdd:  []string{"ALL"},
			expectedDrop: nil,
		},
		{
			name:     "Drop ALL caps, then add new caps to service that has ALL caps",
			flagAdd:  []string{"CAP_NET_ADMIN"},
			flagDrop: []string{"ALL"},
			spec: &swarm.ContainerSpec{
				CapabilityAdd: []string{"ALL"},
			},
			expectedAdd:  []string{"CAP_NET_ADMIN"},
			expectedDrop: nil,
		},
		{
			name:         "Add takes precedence on empty spec",
			flagAdd:      []string{"CAP_NET_ADMIN"},
			flagDrop:     []string{"CAP_NET_ADMIN"},
			spec:         &swarm.ContainerSpec{},
			expectedAdd:  []string{"CAP_NET_ADMIN"},
			expectedDrop: nil,
		},
		{
			name:     "Add takes precedence on existing spec",
			flagAdd:  []string{"CAP_NET_ADMIN"},
			flagDrop: []string{"CAP_NET_ADMIN"},
			spec: &swarm.ContainerSpec{
				CapabilityAdd:  []string{"CAP_NET_ADMIN"},
				CapabilityDrop: []string{"CAP_NET_ADMIN"},
			},
			expectedAdd:  []string{"CAP_NET_ADMIN"},
			expectedDrop: nil,
		},
		{
			name:     "Drop all, and add new caps",
			flagAdd:  []string{"CAP_CHOWN"},
			flagDrop: []string{"ALL"},
			spec: &swarm.ContainerSpec{
				CapabilityAdd:  []string{"CAP_NET_ADMIN", "CAP_MOUNT"},
				CapabilityDrop: []string{"CAP_NET_ADMIN", "CAP_MOUNT"},
			},
			expectedAdd:  []string{"CAP_CHOWN", "CAP_MOUNT", "CAP_NET_ADMIN"},
			expectedDrop: []string{"ALL"},
		},
		{
			name:     "Add all caps",
			flagAdd:  []string{"ALL"},
			flagDrop: []string{"CAP_NET_ADMIN", "CAP_SYS_ADMIN"},
			spec: &swarm.ContainerSpec{
				CapabilityAdd:  []string{"CAP_NET_ADMIN"},
				CapabilityDrop: []string{"CAP_CHOWN"},
			},
			expectedAdd:  []string{"ALL"},
			expectedDrop: []string{"CAP_CHOWN", "CAP_SYS_ADMIN"},
		},
		{
			name:     "Drop all, and add all",
			flagAdd:  []string{"ALL"},
			flagDrop: []string{"ALL"},
			spec: &swarm.ContainerSpec{
				CapabilityAdd:  []string{"CAP_NET_ADMIN"},
				CapabilityDrop: []string{"CAP_CHOWN"},
			},
			expectedAdd:  []string{"ALL"},
			expectedDrop: []string{"CAP_CHOWN"},
		},
		{
			name:     "Caps are normalized and sorted",
			flagAdd:  []string{"bbb", "aaa", "cAp_bBb", "cAp_aAa"},
			flagDrop: []string{"zzz", "yyy", "cAp_yYy", "cAp_yYy"},
			spec: &swarm.ContainerSpec{
				CapabilityAdd:  []string{"ccc", "CAP_DDD"},
				CapabilityDrop: []string{"www", "CAP_XXX"},
			},
			expectedAdd:  []string{"CAP_AAA", "CAP_BBB", "CAP_CCC", "CAP_DDD"},
			expectedDrop: []string{"CAP_WWW", "CAP_XXX", "CAP_YYY", "CAP_ZZZ"},
		},
		{
			name:     "Reset capabilities",
			flagAdd:  []string{"RESET"},
			flagDrop: []string{"RESET"},
			spec: &swarm.ContainerSpec{
				CapabilityAdd:  []string{"CAP_AAA", "CAP_BBB", "CAP_CCC", "CAP_DDD"},
				CapabilityDrop: []string{"CAP_WWW", "CAP_XXX", "CAP_YYY", "CAP_ZZZ"},
			},
			expectedAdd:  nil,
			expectedDrop: nil,
		},
		{
			name:     "Reset capabilities, and update after",
			flagAdd:  []string{"RESET", "CAP_ADD_ONE", "CAP_FOO"},
			flagDrop: []string{"RESET", "CAP_DROP_ONE", "CAP_FOO"},
			spec: &swarm.ContainerSpec{
				CapabilityAdd:  []string{"CAP_AAA", "CAP_BBB", "CAP_CCC", "CAP_DDD"},
				CapabilityDrop: []string{"CAP_WWW", "CAP_XXX", "CAP_YYY", "CAP_ZZZ"},
			},
			expectedAdd:  []string{"CAP_ADD_ONE", "CAP_FOO"},
			expectedDrop: []string{"CAP_DROP_ONE"},
		},
		{
			name:     "Reset capabilities, and add ALL",
			flagAdd:  []string{"RESET", "ALL"},
			flagDrop: []string{"RESET", "ALL"},
			spec: &swarm.ContainerSpec{
				CapabilityAdd:  []string{"CAP_AAA", "CAP_BBB", "CAP_CCC", "CAP_DDD"},
				CapabilityDrop: []string{"CAP_WWW", "CAP_XXX", "CAP_YYY", "CAP_ZZZ"},
			},
			expectedAdd:  []string{"ALL"},
			expectedDrop: nil,
		},
		{
			name:     "Add ALL and RESET",
			flagAdd:  []string{"ALL", "RESET"},
			flagDrop: []string{"ALL", "RESET"},
			spec: &swarm.ContainerSpec{
				CapabilityAdd:  []string{"CAP_AAA", "CAP_BBB", "CAP_CCC", "CAP_DDD"},
				CapabilityDrop: []string{"CAP_WWW", "CAP_XXX", "CAP_YYY", "CAP_ZZZ"},
			},
			expectedAdd:  []string{"ALL"},
			expectedDrop: nil,
		},
	}
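
	// flags.Set errors are ignored below; the fixed test inputs are assumed
	// to parse cleanly.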
for _, tc := range tests {
|
|
|
|
t.Run(tc.name, func(t *testing.T) {
|
|
|
|
flags := newUpdateCommand(nil).Flags()
|
Service cap-add/cap-drop: improve handling of combinations and special "ALL" value
When creating and updating services, we need to avoid unneeded service churn.
The interaction of separate lists to "add" and "drop" capabilities, a special
("ALL") capability, as well as a "relaxed" format for accepted capabilities
(case-insensitive, `CAP_` prefix optional) make this rather involved.
This patch updates how we handle `--cap-add` / `--cap-drop` when _creating_ as
well as _updating_, with the following rules/assumptions applied:
- both existing (service spec) and new (values passed through flags or in
the compose-file) are normalized and de-duplicated before use.
- the special "ALL" capability is equivalent to "all capabilities" and taken
into account when normalizing capabilities. Combining "ALL" capabilities
and other capabilities is therefore equivalent to just specifying "ALL".
- adding capabilities takes precedence over dropping, which means that if
a capability is both set to be "dropped" and to be "added", it is removed
from the list to "drop".
- the final lists should be sorted and normalized to reduce service churn
- no validation of capabilities is handled by the client. Validation is
delegated to the daemon/server.
When deploying a service using a docker-compose file, the docker-compose file
is *mostly* handled as being "declarative". However, many of the issues outlined
above also apply to compose-files, so similar handling is applied to compose
files as well to prevent service churn.
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2020-08-25 07:03:06 -04:00
|
|
|
for _, c := range tc.flagAdd {
|
|
|
|
_ = flags.Set(flagCapAdd, c)
|
2020-07-29 08:35:51 -04:00
|
|
|
}
|
Service cap-add/cap-drop: improve handling of combinations and special "ALL" value
When creating and updating services, we need to avoid unneeded service churn.
The interaction of separate lists to "add" and "drop" capabilities, a special
("ALL") capability, as well as a "relaxed" format for accepted capabilities
(case-insensitive, `CAP_` prefix optional) make this rather involved.
This patch updates how we handle `--cap-add` / `--cap-drop` when _creating_ as
well as _updating_, with the following rules/assumptions applied:
- both existing (service spec) and new (values passed through flags or in
the compose-file) are normalized and de-duplicated before use.
- the special "ALL" capability is equivalent to "all capabilities" and taken
into account when normalizing capabilities. Combining "ALL" capabilities
and other capabilities is therefore equivalent to just specifying "ALL".
- adding capabilities takes precedence over dropping, which means that if
a capability is both set to be "dropped" and to be "added", it is removed
from the list to "drop".
- the final lists should be sorted and normalized to reduce service churn
- no validation of capabilities is handled by the client. Validation is
delegated to the daemon/server.
When deploying a service using a docker-compose file, the docker-compose file
is *mostly* handled as being "declarative". However, many of the issues outlined
above also apply to compose-files, so similar handling is applied to compose
files as well to prevent service churn.
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2020-08-25 07:03:06 -04:00
|
|
|
			for _, c := range tc.flagDrop {
				_ = flags.Set(flagCapDrop, c)
			}
			// updateCapabilities normalizes and de-duplicates both lists
			// (upper-cased, "CAP_" prefix, with the special "ALL" value
			// swallowing everything else in its list), lets --cap-add take
			// precedence over --cap-drop, and sorts the results to avoid
			// needless service churn (see the mergeCaps sketch below).
			updateCapabilities(flags, tc.spec)
			assert.DeepEqual(t, tc.spec.CapabilityAdd, tc.expectedAdd)
			assert.DeepEqual(t, tc.spec.CapabilityDrop, tc.expectedDrop)
		})
	}
}
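
// mergeCaps is a hypothetical sketch, not the CLI's actual updateCapabilities
// implementation: it only illustrates the merge rules the test above pins
// down. Both lists are normalized (upper-cased, "CAP_" prefix added,
// duplicates collapsed), the special "ALL" value swallows everything else in
// its list, a capability that is both added and dropped stays added, and the
// final lists are sorted so repeated updates do not churn the service.
// Assumes the "sort" and "strings" packages are imported.
func mergeCaps(add, drop []string) (toAdd, toDrop []string) {
	normalize := func(in []string) map[string]struct{} {
		out := make(map[string]struct{}, len(in))
		for _, c := range in {
			c = strings.ToUpper(c)
			if c == "ALL" {
				// "ALL" combined with anything else is equivalent to just "ALL".
				return map[string]struct{}{"ALL": {}}
			}
			if !strings.HasPrefix(c, "CAP_") {
				c = "CAP_" + c
			}
			out[c] = struct{}{}
		}
		return out
	}
	addSet, dropSet := normalize(add), normalize(drop)
	for c := range addSet {
		delete(dropSet, c) // adding takes precedence over dropping
		toAdd = append(toAdd, c)
	}
	for c := range dropSet {
		toDrop = append(toDrop, c)
	}
	sort.Strings(toAdd)
	sort.Strings(toDrop)
	return toAdd, toDrop
}
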
func TestUpdateUlimits(t *testing.T) {
	ctx := context.Background()

	tests := []struct {
		name     string
		spec     []*units.Ulimit
		rm       []string
		add      []string
		expected []*units.Ulimit
	}{
		{
			name: "from scratch",
			add:  []string{"nofile=512:1024", "core=1024:1024"},
			expected: []*units.Ulimit{
				{Name: "core", Hard: 1024, Soft: 1024},
				{Name: "nofile", Hard: 1024, Soft: 512},
			},
		},
		{
			name: "append new",
			spec: []*units.Ulimit{
				{Name: "nofile", Hard: 1024, Soft: 512},
			},
			add: []string{"core=1024:1024"},
			expected: []*units.Ulimit{
				{Name: "core", Hard: 1024, Soft: 1024},
				{Name: "nofile", Hard: 1024, Soft: 512},
			},
		},
		{
			name: "remove and append new should append",
			spec: []*units.Ulimit{
				{Name: "core", Hard: 1024, Soft: 1024},
				{Name: "nofile", Hard: 1024, Soft: 512},
			},
			rm:  []string{"nofile=512:1024"},
			add: []string{"nofile=512:1024"},
			expected: []*units.Ulimit{
				{Name: "core", Hard: 1024, Soft: 1024},
				{Name: "nofile", Hard: 1024, Soft: 512},
			},
		},
		{
			name: "update existing",
			spec: []*units.Ulimit{
				{Name: "nofile", Hard: 2048, Soft: 1024},
			},
			add: []string{"nofile=512:1024"},
			expected: []*units.Ulimit{
				{Name: "nofile", Hard: 1024, Soft: 512},
			},
		},
		{
			name: "update existing twice",
			spec: []*units.Ulimit{
				{Name: "nofile", Hard: 2048, Soft: 1024},
			},
			add: []string{"nofile=256:512", "nofile=512:1024"},
			expected: []*units.Ulimit{
				{Name: "nofile", Hard: 1024, Soft: 512},
			},
		},
		{
			name: "remove all",
			spec: []*units.Ulimit{
				{Name: "core", Hard: 1024, Soft: 1024},
				{Name: "nofile", Hard: 1024, Soft: 512},
			},
			rm:       []string{"nofile=512:1024", "core=1024:1024"},
			expected: nil,
		},
		{
			name: "remove by key",
			spec: []*units.Ulimit{
				{Name: "core", Hard: 1024, Soft: 1024},
				{Name: "nofile", Hard: 1024, Soft: 512},
			},
			rm: []string{"core"},
			expected: []*units.Ulimit{
				{Name: "nofile", Hard: 1024, Soft: 512},
			},
		},
		{
			name: "remove by key and different value",
			spec: []*units.Ulimit{
				{Name: "core", Hard: 1024, Soft: 1024},
				{Name: "nofile", Hard: 1024, Soft: 512},
			},
			rm: []string{"core=1234:5678"},
			expected: []*units.Ulimit{
				{Name: "nofile", Hard: 1024, Soft: 512},
			},
		},
	}

	for _, tc := range tests {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			svc := swarm.ServiceSpec{
				TaskTemplate: swarm.TaskSpec{
					ContainerSpec: &swarm.ContainerSpec{Ulimits: tc.spec},
				},
			}
			flags := newUpdateCommand(nil).Flags()
			for _, v := range tc.add {
				assert.NilError(t, flags.Set(flagUlimitAdd, v))
			}
			for _, v := range tc.rm {
				assert.NilError(t, flags.Set(flagUlimitRemove, v))
			}
			err := updateService(ctx, &fakeClient{}, flags, &svc)
			assert.NilError(t, err)
			assert.DeepEqual(t, svc.TaskTemplate.ContainerSpec.Ulimits, tc.expected)
		})
	}
}
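
// mergeUlimits is a hypothetical sketch, not the helper the CLI actually
// uses: it only illustrates the semantics TestUpdateUlimits exercises.
// --ulimit-rm removes by resource name and ignores any "=soft:hard" suffix,
// --ulimit-add replaces an existing limit with the same name (the last value
// given wins), and the merged result is sorted by name so the service spec
// stays stable across updates, e.g. for an invocation such as
// "docker service update --ulimit-rm core --ulimit-add nofile=512:1024 svc".
// Assumes the "sort" and "strings" packages are imported.
func mergeUlimits(current []*units.Ulimit, rm []string, add []*units.Ulimit) []*units.Ulimit {
	byName := make(map[string]*units.Ulimit, len(current)+len(add))
	for _, u := range current {
		byName[u.Name] = u
	}
	for _, raw := range rm {
		// "nofile=512:1024" and plain "nofile" both remove the "nofile" limit.
		if i := strings.IndexByte(raw, '='); i >= 0 {
			raw = raw[:i]
		}
		delete(byName, raw)
	}
	for _, u := range add {
		byName[u.Name] = u // a later add for the same name overrides an earlier one
	}
	if len(byName) == 0 {
		return nil
	}
	merged := make([]*units.Ulimit, 0, len(byName))
	for _, u := range byName {
		merged = append(merged, u)
	}
	sort.Slice(merged, func(i, j int) bool { return merged[i].Name < merged[j].Name })
	return merged
}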