package service
import (
	"context"
	"fmt"
	"sort"
	"strings"
	"time"

	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/opts"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	mounttypes "github.com/docker/docker/api/types/mount"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/api/types/versions"
	"github.com/docker/docker/client"
	units "github.com/docker/go-units"
	"github.com/docker/swarmkit/api/defaults"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
)
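
// newUpdateCommand returns the cobra command for "docker service update".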
func newUpdateCommand(dockerCli command.Cli) *cobra.Command {
	options := newServiceOptions()

	cmd := &cobra.Command{
		Use:   "update [OPTIONS] SERVICE",
		Short: "Update a service",
		Args:  cli.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			return runUpdate(dockerCli, cmd.Flags(), options, args[0])
		},
	}

	flags := cmd.Flags()
	flags.String("image", "", "Service image tag")
	flags.Var(&ShlexOpt{}, "args", "Service command args")
	flags.Bool(flagRollback, false, "Rollback to previous specification")
	flags.SetAnnotation(flagRollback, "version", []string{"1.25"})
	flags.Bool("force", false, "Force update even if no changes require it")
	flags.SetAnnotation("force", "version", []string{"1.25"})
	addServiceFlags(flags, options, nil)

	flags.Var(newListOptsVar(), flagEnvRemove, "Remove an environment variable")
	flags.Var(newListOptsVar(), flagGroupRemove, "Remove a previously added supplementary user group from the container")
	flags.SetAnnotation(flagGroupRemove, "version", []string{"1.25"})
	flags.Var(newListOptsVar(), flagLabelRemove, "Remove a label by its key")
	flags.Var(newListOptsVar(), flagContainerLabelRemove, "Remove a container label by its key")
	flags.Var(newListOptsVar(), flagMountRemove, "Remove a mount by its target path")
	// flags.Var(newListOptsVar().WithValidator(validatePublishRemove), flagPublishRemove, "Remove a published port by its target port")
	flags.Var(&opts.PortOpt{}, flagPublishRemove, "Remove a published port by its target port")
	flags.Var(newListOptsVar(), flagConstraintRemove, "Remove a constraint")
	flags.Var(newListOptsVar(), flagDNSRemove, "Remove a custom DNS server")
	flags.SetAnnotation(flagDNSRemove, "version", []string{"1.25"})
	flags.Var(newListOptsVar(), flagDNSOptionRemove, "Remove a DNS option")
	flags.SetAnnotation(flagDNSOptionRemove, "version", []string{"1.25"})
	flags.Var(newListOptsVar(), flagDNSSearchRemove, "Remove a DNS search domain")
	flags.SetAnnotation(flagDNSSearchRemove, "version", []string{"1.25"})
	flags.Var(newListOptsVar(), flagHostRemove, "Remove a custom host-to-IP mapping (host:ip)")
	flags.SetAnnotation(flagHostRemove, "version", []string{"1.25"})
	flags.Var(&options.labels, flagLabelAdd, "Add or update a service label")
	flags.Var(&options.containerLabels, flagContainerLabelAdd, "Add or update a container label")
	flags.Var(&options.env, flagEnvAdd, "Add or update an environment variable")
	flags.Var(newListOptsVar(), flagSecretRemove, "Remove a secret")
	flags.SetAnnotation(flagSecretRemove, "version", []string{"1.25"})
	flags.Var(&options.secrets, flagSecretAdd, "Add or update a secret on a service")
	flags.SetAnnotation(flagSecretAdd, "version", []string{"1.25"})

	flags.Var(newListOptsVar(), flagConfigRemove, "Remove a configuration file")
	flags.SetAnnotation(flagConfigRemove, "version", []string{"1.30"})
	flags.Var(&options.configs, flagConfigAdd, "Add or update a config file on a service")
	flags.SetAnnotation(flagConfigAdd, "version", []string{"1.30"})

	flags.Var(&options.mounts, flagMountAdd, "Add or update a mount on a service")
	flags.Var(&options.constraints, flagConstraintAdd, "Add or update a placement constraint")
	flags.Var(&options.placementPrefs, flagPlacementPrefAdd, "Add a placement preference")
	flags.SetAnnotation(flagPlacementPrefAdd, "version", []string{"1.28"})
	flags.Var(&placementPrefOpts{}, flagPlacementPrefRemove, "Remove a placement preference")
	flags.SetAnnotation(flagPlacementPrefRemove, "version", []string{"1.28"})
	flags.Var(&options.networks, flagNetworkAdd, "Add a network")
	flags.SetAnnotation(flagNetworkAdd, "version", []string{"1.29"})
	flags.Var(newListOptsVar(), flagNetworkRemove, "Remove a network")
	flags.SetAnnotation(flagNetworkRemove, "version", []string{"1.29"})
	flags.Var(&options.endpoint.publishPorts, flagPublishAdd, "Add or update a published port")
	flags.Var(&options.groups, flagGroupAdd, "Add an additional supplementary user group to the container")
	flags.SetAnnotation(flagGroupAdd, "version", []string{"1.25"})
	flags.Var(&options.dns, flagDNSAdd, "Add or update a custom DNS server")
	flags.SetAnnotation(flagDNSAdd, "version", []string{"1.25"})
	flags.Var(&options.dnsOption, flagDNSOptionAdd, "Add or update a DNS option")
	flags.SetAnnotation(flagDNSOptionAdd, "version", []string{"1.25"})
	flags.Var(&options.dnsSearch, flagDNSSearchAdd, "Add or update a custom DNS search domain")
	flags.SetAnnotation(flagDNSSearchAdd, "version", []string{"1.25"})
	flags.Var(&options.hosts, flagHostAdd, "Add a custom host-to-IP mapping (host:ip)")
	flags.SetAnnotation(flagHostAdd, "version", []string{"1.25"})
	flags.BoolVar(&options.init, flagInit, false, "Use an init inside each service container to forward signals and reap processes")
	flags.SetAnnotation(flagInit, "version", []string{"1.37"})
	flags.Var(&options.sysctls, flagSysCtlAdd, "Add or update a Sysctl option")
	flags.SetAnnotation(flagSysCtlAdd, "version", []string{"1.40"})
	flags.Var(newListOptsVar(), flagSysCtlRemove, "Remove a Sysctl option")
	flags.SetAnnotation(flagSysCtlRemove, "version", []string{"1.40"})
	flags.Var(&options.ulimits, flagUlimitAdd, "Add or update a ulimit option")
	flags.SetAnnotation(flagUlimitAdd, "version", []string{"1.41"})
	flags.Var(newListOptsVar(), flagUlimitRemove, "Remove a ulimit option")
	flags.SetAnnotation(flagUlimitRemove, "version", []string{"1.41"})

	// Add needs parsing, Remove only needs the key
	flags.Var(newListOptsVar(), flagGenericResourcesRemove, "Remove a Generic resource")
	flags.SetAnnotation(flagGenericResourcesRemove, "version", []string{"1.32"})
	flags.Var(newListOptsVarWithValidator(ValidateSingleGenericResource), flagGenericResourcesAdd, "Add a Generic resource")
	flags.SetAnnotation(flagGenericResourcesAdd, "version", []string{"1.32"})

	return cmd
}
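
// newListOptsVar returns a ListOpts backed by a fresh string slice, with no
// validator.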
func newListOptsVar() *opts.ListOpts {
	return opts.NewListOptsRef(&[]string{}, nil)
}
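
// newListOptsVarWithValidator returns a ListOpts backed by a fresh string
// slice that validates each value with the given validator.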
func newListOptsVarWithValidator(validator opts.ValidatorFctType) *opts.ListOpts {
	return opts.NewListOptsRef(&[]string{}, validator)
}
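
// runUpdate inspects the current service spec, applies the requested flag
// changes (or a rollback), and sends the updated spec to the daemon.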
//nolint:gocyclo
func runUpdate(dockerCli command.Cli, flags *pflag.FlagSet, options *serviceOptions, serviceID string) error {
	apiClient := dockerCli.Client()
	ctx := context.Background()

	service, _, err := apiClient.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{})
	if err != nil {
		return err
	}

	rollback, err := flags.GetBool(flagRollback)
	if err != nil {
		return err
	}

	// There are two ways to do user-requested rollback. The old way is
	// client-side, but with a sufficiently recent daemon we prefer
	// server-side, because it will honor the rollback parameters.
	var (
		clientSideRollback bool
		serverSideRollback bool
	)

	spec := &service.Spec
	if rollback {
		// Rollback can't be combined with other flags.
		otherFlagsPassed := false
		flags.VisitAll(func(f *pflag.Flag) {
			if f.Name == flagRollback || f.Name == flagDetach || f.Name == flagQuiet {
				return
			}
			if flags.Changed(f.Name) {
				otherFlagsPassed = true
			}
		})
		if otherFlagsPassed {
			return errors.New("other flags may not be combined with --rollback")
		}

		if versions.LessThan(apiClient.ClientVersion(), "1.28") {
			clientSideRollback = true
			spec = service.PreviousSpec
			if spec == nil {
				return errors.Errorf("service does not have a previous specification to roll back to")
			}
		} else {
			serverSideRollback = true
		}
	}

	updateOpts := types.ServiceUpdateOptions{}
	if serverSideRollback {
		updateOpts.Rollback = "previous"
	}

	err = updateService(ctx, apiClient, flags, spec)
	if err != nil {
		return err
	}

	if flags.Changed("image") {
		if err := resolveServiceImageDigestContentTrust(dockerCli, spec); err != nil {
			return err
		}
		if !options.noResolveImage && versions.GreaterThanOrEqualTo(apiClient.ClientVersion(), "1.30") {
			updateOpts.QueryRegistry = true
		}
	}

	updatedSecrets, err := getUpdatedSecrets(apiClient, flags, spec.TaskTemplate.ContainerSpec.Secrets)
	if err != nil {
		return err
	}

	spec.TaskTemplate.ContainerSpec.Secrets = updatedSecrets

	updatedConfigs, err := getUpdatedConfigs(apiClient, flags, spec.TaskTemplate.ContainerSpec)
	if err != nil {
		return err
	}

	spec.TaskTemplate.ContainerSpec.Configs = updatedConfigs

	// Set the credential spec value after getting the updated configs, because
	// we might need the updated configs to set the correct value of the
	// CredentialSpec.
	updateCredSpecConfig(flags, spec.TaskTemplate.ContainerSpec)

	// only send auth if flag was set
	sendAuth, err := flags.GetBool(flagRegistryAuth)
	if err != nil {
		return err
	}
	if sendAuth {
		// Retrieve encoded auth token from the image reference
		// This would be the old image if it didn't change in this update
		image := spec.TaskTemplate.ContainerSpec.Image
		encodedAuth, err := command.RetrieveAuthTokenFromImage(ctx, dockerCli, image)
		if err != nil {
			return err
		}
		updateOpts.EncodedRegistryAuth = encodedAuth
	} else if clientSideRollback {
		updateOpts.RegistryAuthFrom = types.RegistryAuthFromPreviousSpec
	} else {
		updateOpts.RegistryAuthFrom = types.RegistryAuthFromSpec
	}

	response, err := apiClient.ServiceUpdate(ctx, service.ID, service.Version, *spec, updateOpts)
	if err != nil {
		return err
	}

	for _, warning := range response.Warnings {
		fmt.Fprintln(dockerCli.Err(), warning)
	}

	fmt.Fprintf(dockerCli.Out(), "%s\n", serviceID)

if options.detach || versions.LessThan(apiClient.ClientVersion(), "1.29") {
		return nil
	}

return waitOnService(ctx, dockerCli, serviceID, options.quiet)
}
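
// updateService mutates the given service spec in place: every flag that was
// changed on the command line is applied, and flags that were not set leave
// the existing spec values untouched.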
//nolint:gocyclo
func updateService(ctx context.Context, apiClient client.NetworkAPIClient, flags *pflag.FlagSet, spec *swarm.ServiceSpec) error {
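	// Each helper below copies a flag's value into the target field only when
	// the flag was explicitly changed, so unset flags never clobber the spec.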
	updateBoolPtr := func(flag string, field **bool) {
		if flags.Changed(flag) {
			b, _ := flags.GetBool(flag)
			*field = &b
		}
	}
	updateString := func(flag string, field *string) {
		if flags.Changed(flag) {
			*field, _ = flags.GetString(flag)
		}
	}

	updateInt64Value := func(flag string, field *int64) {
		if flags.Changed(flag) {
			*field = flags.Lookup(flag).Value.(int64Value).Value()
		}
	}

	updateFloatValue := func(flag string, field *float32) {
		if flags.Changed(flag) {
			*field = flags.Lookup(flag).Value.(*floatValue).Value()
		}
	}

	updateDuration := func(flag string, field *time.Duration) {
		if flags.Changed(flag) {
			*field, _ = flags.GetDuration(flag)
		}
	}

	updateDurationOpt := func(flag string, field **time.Duration) {
		if flags.Changed(flag) {
			val := *flags.Lookup(flag).Value.(*opts.DurationOpt).Value()
			*field = &val
		}
	}

	updateInt64 := func(flag string, field *int64) {
		if flags.Changed(flag) {
			*field, _ = flags.GetInt64(flag)
		}
	}

	updateUint64 := func(flag string, field *uint64) {
		if flags.Changed(flag) {
			*field, _ = flags.GetUint64(flag)
		}
	}

	updateUint64Opt := func(flag string, field **uint64) {
		if flags.Changed(flag) {
			val := *flags.Lookup(flag).Value.(*Uint64Opt).Value()
			*field = &val
		}
	}

	updateIsolation := func(flag string, field *container.Isolation) error {
		if flags.Changed(flag) {
			val, _ := flags.GetString(flag)
			*field = container.Isolation(val)
		}
		return nil
	}

	cspec := spec.TaskTemplate.ContainerSpec
	task := &spec.TaskTemplate

	taskResources := func() *swarm.ResourceRequirements {
		if task.Resources == nil {
			task.Resources = &swarm.ResourceRequirements{}
		}
		if task.Resources.Limits == nil {
			task.Resources.Limits = &swarm.Limit{}
		}
		if task.Resources.Reservations == nil {
			task.Resources.Reservations = &swarm.Resources{}
		}
		return task.Resources
	}

	updateLabels(flags, &spec.Labels)
	updateContainerLabels(flags, &cspec.Labels)
	updateString("image", &cspec.Image)
	updateStringToSlice(flags, "args", &cspec.Args)
	updateStringToSlice(flags, flagEntrypoint, &cspec.Command)
	updateEnvironment(flags, &cspec.Env)
	updateString(flagWorkdir, &cspec.Dir)
	updateString(flagUser, &cspec.User)
	updateString(flagHostname, &cspec.Hostname)
	updateBoolPtr(flagInit, &cspec.Init)
	if err := updateIsolation(flagIsolation, &cspec.Isolation); err != nil {
		return err
	}
	if err := updateMounts(flags, &cspec.Mounts); err != nil {
		return err
	}

	updateSysCtls(flags, &task.ContainerSpec.Sysctls)
	task.ContainerSpec.Ulimits = updateUlimits(flags, task.ContainerSpec.Ulimits)

	if anyChanged(flags, flagLimitCPU, flagLimitMemory, flagLimitPids) {
		taskResources().Limits = spec.TaskTemplate.Resources.Limits
		updateInt64Value(flagLimitCPU, &task.Resources.Limits.NanoCPUs)
		updateInt64Value(flagLimitMemory, &task.Resources.Limits.MemoryBytes)
		updateInt64(flagLimitPids, &task.Resources.Limits.Pids)
	}

	if anyChanged(flags, flagReserveCPU, flagReserveMemory) {
		taskResources().Reservations = spec.TaskTemplate.Resources.Reservations
		updateInt64Value(flagReserveCPU, &task.Resources.Reservations.NanoCPUs)
		updateInt64Value(flagReserveMemory, &task.Resources.Reservations.MemoryBytes)
	}

	if err := addGenericResources(flags, task); err != nil {
		return err
	}

	if err := removeGenericResources(flags, task); err != nil {
		return err
	}

	updateDurationOpt(flagStopGracePeriod, &cspec.StopGracePeriod)

	if anyChanged(flags, flagRestartCondition, flagRestartDelay, flagRestartMaxAttempts, flagRestartWindow) {
		if task.RestartPolicy == nil {
			task.RestartPolicy = defaultRestartPolicy()
		}
		if flags.Changed(flagRestartCondition) {
			value, _ := flags.GetString(flagRestartCondition)
			task.RestartPolicy.Condition = swarm.RestartPolicyCondition(value)
		}
		updateDurationOpt(flagRestartDelay, &task.RestartPolicy.Delay)
		updateUint64Opt(flagRestartMaxAttempts, &task.RestartPolicy.MaxAttempts)
		updateDurationOpt(flagRestartWindow, &task.RestartPolicy.Window)
	}

	if anyChanged(flags, flagConstraintAdd, flagConstraintRemove) {
		if task.Placement == nil {
			task.Placement = &swarm.Placement{}
		}
		updatePlacementConstraints(flags, task.Placement)
	}

	if anyChanged(flags, flagPlacementPrefAdd, flagPlacementPrefRemove) {
		if task.Placement == nil {
			task.Placement = &swarm.Placement{}
		}
		updatePlacementPreferences(flags, task.Placement)
	}

	if anyChanged(flags, flagNetworkAdd, flagNetworkRemove) {
		if err := updateNetworks(ctx, apiClient, flags, spec); err != nil {
			return err
		}
	}

	if err := updateReplicas(flags, &spec.Mode); err != nil {
		return err
	}

if anyChanged(flags, flagMaxReplicas) {
		if task.Placement == nil {
			task.Placement = &swarm.Placement{}
		}
		updateUint64(flagMaxReplicas, &task.Placement.MaxReplicas)
}

	if anyChanged(flags, flagUpdateParallelism, flagUpdateDelay, flagUpdateMonitor, flagUpdateFailureAction, flagUpdateMaxFailureRatio, flagUpdateOrder) {
		if spec.UpdateConfig == nil {
			spec.UpdateConfig = updateConfigFromDefaults(defaults.Service.Update)
		}
		updateUint64(flagUpdateParallelism, &spec.UpdateConfig.Parallelism)
		updateDuration(flagUpdateDelay, &spec.UpdateConfig.Delay)
		updateDuration(flagUpdateMonitor, &spec.UpdateConfig.Monitor)
		updateString(flagUpdateFailureAction, &spec.UpdateConfig.FailureAction)
		updateFloatValue(flagUpdateMaxFailureRatio, &spec.UpdateConfig.MaxFailureRatio)
		updateString(flagUpdateOrder, &spec.UpdateConfig.Order)
	}

	if anyChanged(flags, flagRollbackParallelism, flagRollbackDelay, flagRollbackMonitor, flagRollbackFailureAction, flagRollbackMaxFailureRatio, flagRollbackOrder) {
		if spec.RollbackConfig == nil {
			spec.RollbackConfig = updateConfigFromDefaults(defaults.Service.Rollback)
		}
		updateUint64(flagRollbackParallelism, &spec.RollbackConfig.Parallelism)
		updateDuration(flagRollbackDelay, &spec.RollbackConfig.Delay)
		updateDuration(flagRollbackMonitor, &spec.RollbackConfig.Monitor)
		updateString(flagRollbackFailureAction, &spec.RollbackConfig.FailureAction)
		updateFloatValue(flagRollbackMaxFailureRatio, &spec.RollbackConfig.MaxFailureRatio)
		updateString(flagRollbackOrder, &spec.RollbackConfig.Order)
	}

	if flags.Changed(flagEndpointMode) {
		value, _ := flags.GetString(flagEndpointMode)
		if spec.EndpointSpec == nil {
			spec.EndpointSpec = &swarm.EndpointSpec{}
		}
		spec.EndpointSpec.Mode = swarm.ResolutionMode(value)
	}

	if anyChanged(flags, flagGroupAdd, flagGroupRemove) {
		if err := updateGroups(flags, &cspec.Groups); err != nil {
			return err
		}
	}

	if anyChanged(flags, flagPublishAdd, flagPublishRemove) {
		if spec.EndpointSpec == nil {
			spec.EndpointSpec = &swarm.EndpointSpec{}
		}
		if err := updatePorts(flags, &spec.EndpointSpec.Ports); err != nil {
			return err
		}
	}

	if anyChanged(flags, flagDNSAdd, flagDNSRemove, flagDNSOptionAdd, flagDNSOptionRemove, flagDNSSearchAdd, flagDNSSearchRemove) {
		if cspec.DNSConfig == nil {
			cspec.DNSConfig = &swarm.DNSConfig{}
		}
		if err := updateDNSConfig(flags, &cspec.DNSConfig); err != nil {
			return err
		}
	}

	if anyChanged(flags, flagHostAdd, flagHostRemove) {
		if err := updateHosts(flags, &cspec.Hosts); err != nil {
			return err
		}
	}

	if err := updateLogDriver(flags, &spec.TaskTemplate); err != nil {
		return err
	}

	force, err := flags.GetBool("force")
	if err != nil {
		return err
	}

	if force {
		spec.TaskTemplate.ForceUpdate++
	}

	if err := updateHealthcheck(flags, cspec); err != nil {
		return err
	}

	if flags.Changed(flagTTY) {
		tty, err := flags.GetBool(flagTTY)
		if err != nil {
			return err
		}
		cspec.TTY = tty
	}

	if flags.Changed(flagReadOnly) {
		readOnly, err := flags.GetBool(flagReadOnly)
		if err != nil {
			return err
		}
		cspec.ReadOnly = readOnly
	}

	updateString(flagStopSignal, &cspec.StopSignal)

	if anyChanged(flags, flagCapAdd, flagCapDrop) {
		updateCapabilities(flags, cspec)
	}

	return nil
}
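
// updateStringToSlice parses the given shlex-style flag value into the target
// string slice when the flag was changed.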
func updateStringToSlice(flags *pflag.FlagSet, flag string, field *[]string) {
	if !flags.Changed(flag) {
		return
	}

	*field = flags.Lookup(flag).Value.(*ShlexOpt).Value()
}
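
// anyChanged reports whether at least one of the given flags was set on the
// command line.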
func anyChanged(flags *pflag.FlagSet, fields ...string) bool {
	for _, flag := range fields {
		if flags.Changed(flag) {
			return true
		}
	}
	return false
}
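
// addGenericResources merges the resources requested via --generic-resource-add
// into the task's resource reservations, keyed by resource kind.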
func addGenericResources(flags *pflag.FlagSet, spec *swarm.TaskSpec) error {
	if !flags.Changed(flagGenericResourcesAdd) {
		return nil
	}

	if spec.Resources == nil {
		spec.Resources = &swarm.ResourceRequirements{}
	}

	if spec.Resources.Reservations == nil {
		spec.Resources.Reservations = &swarm.Resources{}
	}

	values := flags.Lookup(flagGenericResourcesAdd).Value.(*opts.ListOpts).GetAll()
	generic, err := ParseGenericResources(values)
	if err != nil {
		return err
	}

	m, err := buildGenericResourceMap(spec.Resources.Reservations.GenericResources)
	if err != nil {
		return err
	}

	for _, toAddRes := range generic {
		m[toAddRes.DiscreteResourceSpec.Kind] = toAddRes
	}

	spec.Resources.Reservations.GenericResources = buildGenericResourceList(m)

	return nil
}
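
// removeGenericResources deletes the named generic resources from the task's
// reservations, returning an error for any name that is not present.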
func removeGenericResources(flags *pflag.FlagSet, spec *swarm.TaskSpec) error {
	// Can only be Discrete Resources
	if !flags.Changed(flagGenericResourcesRemove) {
		return nil
	}

	if spec.Resources == nil {
		spec.Resources = &swarm.ResourceRequirements{}
	}

	if spec.Resources.Reservations == nil {
		spec.Resources.Reservations = &swarm.Resources{}
	}

	values := flags.Lookup(flagGenericResourcesRemove).Value.(*opts.ListOpts).GetAll()

	m, err := buildGenericResourceMap(spec.Resources.Reservations.GenericResources)
	if err != nil {
		return err
	}

	for _, toRemoveRes := range values {
		if _, ok := m[toRemoveRes]; !ok {
			return fmt.Errorf("could not find generic-resource `%s` to remove it", toRemoveRes)
		}

		delete(m, toRemoveRes)
	}

	spec.Resources.Reservations.GenericResources = buildGenericResourceList(m)
	return nil
}
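
// updatePlacementConstraints applies --constraint-add and --constraint-rm to
// the placement, sorting the result so the spec stays predictable.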
func updatePlacementConstraints(flags *pflag.FlagSet, placement *swarm.Placement) {
	if flags.Changed(flagConstraintAdd) {
		values := flags.Lookup(flagConstraintAdd).Value.(*opts.ListOpts).GetAll()
		placement.Constraints = append(placement.Constraints, values...)
	}
	toRemove := buildToRemoveSet(flags, flagConstraintRemove)

	newConstraints := []string{}
	for _, constraint := range placement.Constraints {
		if _, exists := toRemove[constraint]; !exists {
			newConstraints = append(newConstraints, constraint)
		}
	}
	// Sort so that result is predictable.
	sort.Strings(newConstraints)

	placement.Constraints = newConstraints
}
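
// updatePlacementPreferences drops spread preferences matching
// --placement-pref-rm and appends those passed via --placement-pref-add.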
func updatePlacementPreferences(flags *pflag.FlagSet, placement *swarm.Placement) {
	var newPrefs []swarm.PlacementPreference

	if flags.Changed(flagPlacementPrefRemove) {
		for _, existing := range placement.Preferences {
			removed := false
			for _, removal := range flags.Lookup(flagPlacementPrefRemove).Value.(*placementPrefOpts).prefs {
				if removal.Spread != nil && existing.Spread != nil && removal.Spread.SpreadDescriptor == existing.Spread.SpreadDescriptor {
					removed = true
					break
				}
			}
			if !removed {
				newPrefs = append(newPrefs, existing)
			}
		}
	} else {
		newPrefs = placement.Preferences
	}

	if flags.Changed(flagPlacementPrefAdd) {
		newPrefs = append(newPrefs,
			flags.Lookup(flagPlacementPrefAdd).Value.(*placementPrefOpts).prefs...)
	}

	placement.Preferences = newPrefs
}
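
// updateContainerLabels removes container labels before adding new ones, so a
// single update can replace an existing label's value.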
func updateContainerLabels(flags *pflag.FlagSet, field *map[string]string) {
if *field != nil && flags.Changed(flagContainerLabelRemove) {
		toRemove := flags.Lookup(flagContainerLabelRemove).Value.(*opts.ListOpts).GetAll()
		for _, label := range toRemove {
			delete(*field, label)
		}
	}
	if flags.Changed(flagContainerLabelAdd) {
		if *field == nil {
			*field = map[string]string{}
		}

		values := flags.Lookup(flagContainerLabelAdd).Value.(*opts.ListOpts).GetAll()
		for key, value := range opts.ConvertKVStringsToMap(values) {
			(*field)[key] = value
		}
	}
}
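
// updateLabels removes service labels before adding new ones, so a single
// update can replace an existing label's value.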
func updateLabels(flags *pflag.FlagSet, field *map[string]string) {
	if *field != nil && flags.Changed(flagLabelRemove) {
		toRemove := flags.Lookup(flagLabelRemove).Value.(*opts.ListOpts).GetAll()
		for _, label := range toRemove {
			delete(*field, label)
		}
	}
	if flags.Changed(flagLabelAdd) {
		if *field == nil {
			*field = map[string]string{}
		}

		values := flags.Lookup(flagLabelAdd).Value.(*opts.ListOpts).GetAll()
		for key, value := range opts.ConvertKVStringsToMap(values) {
			(*field)[key] = value
		}
	}
}
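
// updateSysCtls removes sysctl options before adding new ones, mirroring the
// label-update semantics.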
func updateSysCtls(flags *pflag.FlagSet, field *map[string]string) {
	if *field != nil && flags.Changed(flagSysCtlRemove) {
		values := flags.Lookup(flagSysCtlRemove).Value.(*opts.ListOpts).GetAll()
		for key := range opts.ConvertKVStringsToMap(values) {
			delete(*field, key)
		}
	}
	if flags.Changed(flagSysCtlAdd) {
		if *field == nil {
			*field = map[string]string{}
		}

		values := flags.Lookup(flagSysCtlAdd).Value.(*opts.ListOpts).GetAll()
		for key, value := range opts.ConvertKVStringsToMap(values) {
			(*field)[key] = value
		}
	}
}
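
// updateUlimits merges the existing ulimits with --ulimit-add and --ulimit-rm,
// returning the result sorted by name to keep the spec stable.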
func updateUlimits(flags *pflag.FlagSet, ulimits []*units.Ulimit) []*units.Ulimit {
	newUlimits := make(map[string]*units.Ulimit)

	for _, ulimit := range ulimits {
		newUlimits[ulimit.Name] = ulimit
	}
	if flags.Changed(flagUlimitRemove) {
		values := flags.Lookup(flagUlimitRemove).Value.(*opts.ListOpts).GetAll()
		for key := range opts.ConvertKVStringsToMap(values) {
			delete(newUlimits, key)
		}
	}

	if flags.Changed(flagUlimitAdd) {
		for _, ulimit := range flags.Lookup(flagUlimitAdd).Value.(*opts.UlimitOpt).GetList() {
			newUlimits[ulimit.Name] = ulimit
		}
	}

	var limits []*units.Ulimit
	for _, ulimit := range newUlimits {
		limits = append(limits, ulimit)
	}
	sort.SliceStable(limits, func(i, j int) bool {
		return limits[i].Name < limits[j].Name
	})
	return limits
}
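
// updateEnvironment removes variables listed in --env-rm, then merges
// --env-add entries over the remaining ones, keyed by variable name.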
func updateEnvironment(flags *pflag.FlagSet, field *[]string) {
	toRemove := buildToRemoveSet(flags, flagEnvRemove)
	*field = removeItems(*field, toRemove, envKey)

	if flags.Changed(flagEnvAdd) {
		envSet := map[string]string{}
		for _, v := range *field {
			envSet[envKey(v)] = v
		}

		value := flags.Lookup(flagEnvAdd).Value.(*opts.ListOpts)
		for _, v := range value.GetAll() {
			envSet[envKey(v)] = v
		}

		*field = []string{}
		for _, v := range envSet {
			*field = append(*field, v)
		}
	}
}
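The effect of the remove-first order is easiest to see on plain slices. A standalone sketch (hypothetical inputs standing in for the `--env-rm`/`--env-add` flag values):

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// envKey returns the variable name of a KEY=value entry, as in the package.
func envKey(value string) string {
	kv := strings.SplitN(value, "=", 2)
	return kv[0]
}

func main() {
	env := []string{"FOO=bar", "BAR=baz"}
	remove := map[string]struct{}{"FOO": {}} // --env-rm FOO
	add := []string{"FOO=updated-foo"}       // --env-add FOO=updated-foo

	// Remove first, so the re-added FOO survives.
	kept := []string{}
	for _, v := range env {
		if _, ok := remove[envKey(v)]; !ok {
			kept = append(kept, v)
		}
	}
	// Last writer wins per key, mirroring the envSet map in updateEnvironment.
	set := map[string]string{}
	for _, v := range append(kept, add...) {
		set[envKey(v)] = v
	}
	result := make([]string, 0, len(set))
	for _, v := range set {
		result = append(result, v)
	}
	sort.Strings(result) // map iteration order is random; sort for display
	fmt.Println(result)  // [BAR=baz FOO=updated-foo]
}
```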
Update order of '--secret-rm' and '--secret-add'

When using both `--secret-rm` and `--secret-add` on `docker service update`,
`--secret-rm` was always performed last. This made it impossible to update
a secret that was already in use on a service (for example, to change
its permissions, or mount-location inside the container).

This patch changes the order in which `rm` and `add` are performed,
allowing a secret to be updated in a single `docker service update`.

Before this change, the `rm` was always performed "last", so the secret
was always removed:

    $ echo "foo" | docker secret create foo -f -
    foo
    $ docker service create --name myservice --secret foo nginx:alpine
    62xjcr9sr0c2hvepdzqrn3ssn
    $ docker service update --secret-rm foo --secret-add source=foo,target=foo2 myservice
    myservice
    $ docker service inspect --format '{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}' myservice | jq .
    null

After this change, the `rm` is performed _first_, allowing users to
update a secret without updating the service _twice_:

    $ echo "foo" | docker secret create foo -f -
    1bllmvw3a1yaq3eixqw3f7bjl
    $ docker service create --name myservice --secret foo nginx:alpine
    lr6s3uoggli1x0hab78glpcxo
    $ docker service update --secret-rm foo --secret-add source=foo,target=foo2 myservice
    myservice
    $ docker service inspect --format '{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}' myservice | jq .
    [
      {
        "File": {
          "Name": "foo2",
          "UID": "0",
          "GID": "0",
          "Mode": 292
        },
        "SecretID": "tn9qiblgnuuut11eufquw5dev",
        "SecretName": "foo"
      }
    ]

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
func getUpdatedSecrets(apiClient client.SecretAPIClient, flags *pflag.FlagSet, secrets []*swarm.SecretReference) ([]*swarm.SecretReference, error) {
	newSecrets := []*swarm.SecretReference{}

	// Remove first, then add, so that a secret can be updated (removed and
	// re-added with a different target or permissions) in a single update.
	toRemove := buildToRemoveSet(flags, flagSecretRemove)
	for _, secret := range secrets {
		if _, exists := toRemove[secret.SecretName]; !exists {
			newSecrets = append(newSecrets, secret)
		}
	}

	if flags.Changed(flagSecretAdd) {
		values := flags.Lookup(flagSecretAdd).Value.(*opts.SecretOpt).Value()

		addSecrets, err := ParseSecrets(apiClient, values)
		if err != nil {
			return nil, err
		}
		newSecrets = append(newSecrets, addSecrets...)
	}

	return newSecrets, nil
}
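The same keep-then-append shape can be sketched without a flag set or API client; ID resolution via `ParseSecrets` is omitted here, and the reference values below are made up for illustration:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	current := []*swarm.SecretReference{
		{SecretName: "foo", SecretID: "aaa"},
		{SecretName: "bar", SecretID: "bbb"},
	}
	toRemove := map[string]struct{}{"foo": {}} // --secret-rm foo

	// Keep everything that is not being removed.
	kept := []*swarm.SecretReference{}
	for _, s := range current {
		if _, exists := toRemove[s.SecretName]; !exists {
			kept = append(kept, s)
		}
	}
	// Then append the re-added reference with its new file target
	// (--secret-add source=foo,target=foo2).
	kept = append(kept, &swarm.SecretReference{
		SecretName: "foo",
		SecretID:   "aaa",
		File:       &swarm.SecretReferenceFileTarget{Name: "foo2", UID: "0", GID: "0", Mode: 0o444},
	})

	for _, s := range kept {
		target := "(default)"
		if s.File != nil {
			target = s.File.Name
		}
		fmt.Printf("%s -> %s\n", s.SecretName, target) // bar -> (default), foo -> foo2
	}
}
```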
func getUpdatedConfigs(apiClient client.ConfigAPIClient, flags *pflag.FlagSet, spec *swarm.ContainerSpec) ([]*swarm.ConfigReference, error) {
	var (
		// credSpecConfigName stores the name of the config specified by the
		// credential-spec flag. if a Runtime target Config with this name is
		// already in the containerSpec, then this value will be set to
		// empty string in the removeConfigs stage. otherwise, a ConfigReference
		// will be created to pass to ParseConfigs to get the ConfigID.
		credSpecConfigName string
		// credSpecConfigID stores the ID of the credential spec config if that
		// config is being carried over from the old set of references
		credSpecConfigID string
	)

	if flags.Changed(flagCredentialSpec) {
		credSpec := flags.Lookup(flagCredentialSpec).Value.(*credentialSpecOpt).Value()
		credSpecConfigName = credSpec.Config
	} else {
		// if the credential spec flag has not changed, then check if there
		// already is a credentialSpec. if there is one, and it's for a Config,
		// then it's from the old object, and its value is the config ID. we
		// need this so we don't remove the config if the credential spec is
		// not being updated.
		if spec.Privileges != nil && spec.Privileges.CredentialSpec != nil {
			if config := spec.Privileges.CredentialSpec.Config; config != "" {
				credSpecConfigID = config
			}
		}
	}

	newConfigs := removeConfigs(flags, spec, credSpecConfigName, credSpecConfigID)

	// resolveConfigs is a slice of any new configs that need to have the ID
	// resolved
	resolveConfigs := []*swarm.ConfigReference{}

	if flags.Changed(flagConfigAdd) {
		resolveConfigs = append(resolveConfigs, flags.Lookup(flagConfigAdd).Value.(*opts.ConfigOpt).Value()...)
	}

	// if credSpecConfigName is non-empty at this point, it means it's a new
	// config, and we need to resolve its ID accordingly.
	if credSpecConfigName != "" {
		resolveConfigs = append(resolveConfigs, &swarm.ConfigReference{
			ConfigName: credSpecConfigName,
			Runtime:    &swarm.ConfigReferenceRuntimeTarget{},
		})
	}

	if len(resolveConfigs) > 0 {
		addConfigs, err := ParseConfigs(apiClient, resolveConfigs)
		if err != nil {
			return nil, err
		}
		newConfigs = append(newConfigs, addConfigs...)
	}

	return newConfigs, nil
}
// removeConfigs figures out which configs in the existing spec should be kept
// after the update.
func removeConfigs(flags *pflag.FlagSet, spec *swarm.ContainerSpec, credSpecName, credSpecID string) []*swarm.ConfigReference {
	keepConfigs := []*swarm.ConfigReference{}

	toRemove := buildToRemoveSet(flags, flagConfigRemove)
	// all configs in spec.Configs should have both a Name and ID, because
	// they come from an already-accepted spec.
	for _, config := range spec.Configs {
		// if the config is a Runtime target, make sure it's still in use right
		// now. the only use for Runtime target is credential specs. if, in
		// the future, more uses are added, then this check will need to be
		// made more intelligent.
		if config.Runtime != nil {
			// if we're carrying over a credential spec explicitly (because the
			// user passed --credential-spec with the same config name) then we
			// should match on credSpecName. if we're carrying over a
			// credential spec implicitly (because the user did not pass any
			// --credential-spec flag) then we should match on credSpecID. in
			// either case, we're keeping the config that already exists.
			if config.ConfigName == credSpecName || config.ConfigID == credSpecID {
				keepConfigs = append(keepConfigs, config)
			}
			// continue the loop, to skip the part where we check if the config
			// is in toRemove.
			continue
		}

		if _, exists := toRemove[config.ConfigName]; !exists {
			keepConfigs = append(keepConfigs, config)
		}
	}

	return keepConfigs
}
func envKey(value string) string {
	kv := strings.SplitN(value, "=", 2)
	return kv[0]
}
func buildToRemoveSet(flags *pflag.FlagSet, flag string) map[string]struct{} {
	var empty struct{}
	toRemove := make(map[string]struct{})

	if !flags.Changed(flag) {
		return toRemove
	}

	toRemoveSlice := flags.Lookup(flag).Value.(*opts.ListOpts).GetAll()
	for _, key := range toRemoveSlice {
		toRemove[key] = empty
	}
	return toRemove
}
func removeItems(
	seq []string,
	toRemove map[string]struct{},
	keyFunc func(string) string,
) []string {
	newSeq := []string{}
	for _, item := range seq {
		if _, exists := toRemove[keyFunc(item)]; !exists {
			newSeq = append(newSeq, item)
		}
	}
	return newSeq
}
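`buildToRemoveSet` and `removeItems` are the generic pair behind most of the `--xx-rm` flags: one turns the flag values into a set, the other filters a slice by a caller-supplied key. A small usage sketch with the same `removeItems` and an inline key function (the label values are hypothetical):

```go
package main

import (
	"fmt"
	"strings"
)

func removeItems(
	seq []string,
	toRemove map[string]struct{},
	keyFunc func(string) string,
) []string {
	newSeq := []string{}
	for _, item := range seq {
		if _, exists := toRemove[keyFunc(item)]; !exists {
			newSeq = append(newSeq, item)
		}
	}
	return newSeq
}

func main() {
	labels := []string{"env=prod", "tier=web", "team=infra"}
	toRemove := map[string]struct{}{"tier": {}} // e.g. --label-rm tier

	// Match on the key part of key=value, like envKey does for env vars.
	key := func(s string) string { return strings.SplitN(s, "=", 2)[0] }
	fmt.Println(removeItems(labels, toRemove, key)) // [env=prod team=infra]
}
```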
func updateMounts(flags *pflag.FlagSet, mounts *[]mounttypes.Mount) error {
	mountsByTarget := map[string]mounttypes.Mount{}

	if flags.Changed(flagMountAdd) {
		values := flags.Lookup(flagMountAdd).Value.(*opts.MountOpt).Value()
		for _, mount := range values {
			if _, ok := mountsByTarget[mount.Target]; ok {
				return errors.Errorf("duplicate mount target")
			}
			mountsByTarget[mount.Target] = mount
		}
	}

	// Add old list of mount points minus updated one.
	for _, mount := range *mounts {
		if _, ok := mountsByTarget[mount.Target]; !ok {
			mountsByTarget[mount.Target] = mount
		}
	}

	newMounts := []mounttypes.Mount{}

	toRemove := buildToRemoveSet(flags, flagMountRemove)

	for _, mount := range mountsByTarget {
		if _, exists := toRemove[mount.Target]; !exists {
			newMounts = append(newMounts, mount)
		}
	}
	sort.Slice(newMounts, func(i, j int) bool {
		a, b := newMounts[i], newMounts[j]

		if a.Source == b.Source {
			return a.Target < b.Target
		}

		return a.Source < b.Source
	})
	*mounts = newMounts
	return nil
}
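Because the add pass fills `mountsByTarget` before existing mounts are merged in, an added mount replaces an existing one with the same target. A standalone sketch of that precedence (mount values are made up for illustration):

```go
package main

import (
	"fmt"

	mounttypes "github.com/docker/docker/api/types/mount"
)

func main() {
	existing := []mounttypes.Mount{
		{Type: mounttypes.TypeVolume, Source: "logs", Target: "/var/log"},
		{Type: mounttypes.TypeBind, Source: "/srv/app", Target: "/app"},
	}
	added := []mounttypes.Mount{
		// Same target as an existing mount: the added one wins.
		{Type: mounttypes.TypeVolume, Source: "logs-v2", Target: "/var/log"},
	}

	byTarget := map[string]mounttypes.Mount{}
	for _, m := range added { // added mounts go in first ...
		byTarget[m.Target] = m
	}
	for _, m := range existing { // ... existing ones only fill the gaps
		if _, ok := byTarget[m.Target]; !ok {
			byTarget[m.Target] = m
		}
	}
	for target, m := range byTarget {
		fmt.Printf("%s <- %s\n", target, m.Source) // /var/log <- logs-v2, /app <- /srv/app
	}
}
```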
func updateGroups(flags *pflag.FlagSet, groups *[]string) error {
	if flags.Changed(flagGroupAdd) {
		values := flags.Lookup(flagGroupAdd).Value.(*opts.ListOpts).GetAll()
		*groups = append(*groups, values...)
	}
	toRemove := buildToRemoveSet(flags, flagGroupRemove)

	newGroups := []string{}
	for _, group := range *groups {
		if _, exists := toRemove[group]; !exists {
			newGroups = append(newGroups, group)
		}
	}
	// Sort so that result is predictable.
	sort.Strings(newGroups)

	*groups = newGroups
	return nil
}
func removeDuplicates(entries []string) []string {
	hit := map[string]bool{}
	newEntries := []string{}
	for _, v := range entries {
		if !hit[v] {
			newEntries = append(newEntries, v)
			hit[v] = true
		}
	}
	return newEntries
}
func updateDNSConfig(flags *pflag.FlagSet, config **swarm.DNSConfig) error {
	newConfig := &swarm.DNSConfig{}

	nameservers := (*config).Nameservers
	if flags.Changed(flagDNSAdd) {
		values := flags.Lookup(flagDNSAdd).Value.(*opts.ListOpts).GetAll()
		nameservers = append(nameservers, values...)
	}
	nameservers = removeDuplicates(nameservers)
	toRemove := buildToRemoveSet(flags, flagDNSRemove)
	for _, nameserver := range nameservers {
		if _, exists := toRemove[nameserver]; !exists {
			newConfig.Nameservers = append(newConfig.Nameservers, nameserver)
		}
	}
	// Sort so that result is predictable.
	sort.Strings(newConfig.Nameservers)

	search := (*config).Search
	if flags.Changed(flagDNSSearchAdd) {
		values := flags.Lookup(flagDNSSearchAdd).Value.(*opts.ListOpts).GetAll()
		search = append(search, values...)
	}
	search = removeDuplicates(search)
	toRemove = buildToRemoveSet(flags, flagDNSSearchRemove)
	for _, entry := range search {
		if _, exists := toRemove[entry]; !exists {
			newConfig.Search = append(newConfig.Search, entry)
		}
	}
	// Sort so that result is predictable.
	sort.Strings(newConfig.Search)

	options := (*config).Options
	if flags.Changed(flagDNSOptionAdd) {
		values := flags.Lookup(flagDNSOptionAdd).Value.(*opts.ListOpts).GetAll()
		options = append(options, values...)
	}
	options = removeDuplicates(options)
	toRemove = buildToRemoveSet(flags, flagDNSOptionRemove)
	for _, option := range options {
		if _, exists := toRemove[option]; !exists {
			newConfig.Options = append(newConfig.Options, option)
		}
	}
	// Sort so that result is predictable.
	sort.Strings(newConfig.Options)

	*config = newConfig
	return nil
}
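Each of the three DNS lists goes through the same add → de-duplicate → remove → sort pipeline. A standalone sketch for the nameserver list (the addresses are arbitrary example values):

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	nameservers := []string{"10.0.0.2", "10.0.0.3"}
	nameservers = append(nameservers, "10.0.0.3", "10.0.0.4") // --dns-add (the duplicate is dropped below)

	// De-duplicate while preserving first-seen order.
	seen := map[string]bool{}
	deduped := []string{}
	for _, ns := range nameservers {
		if !seen[ns] {
			deduped = append(deduped, ns)
			seen[ns] = true
		}
	}

	toRemove := map[string]struct{}{"10.0.0.2": {}} // --dns-rm
	result := []string{}
	for _, ns := range deduped {
		if _, exists := toRemove[ns]; !exists {
			result = append(result, ns)
		}
	}
	sort.Strings(result) // sorted so the resulting spec is predictable
	fmt.Println(result)  // [10.0.0.3 10.0.0.4]
}
```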
func portConfigToString(portConfig *swarm.PortConfig) string {
	protocol := portConfig.Protocol
	mode := portConfig.PublishMode
	return fmt.Sprintf("%v:%v/%s/%s", portConfig.PublishedPort, portConfig.TargetPort, protocol, mode)
}
func updatePorts(flags *pflag.FlagSet, portConfig *[]swarm.PortConfig) error {
	// The key of the map is `port/protocol`, e.g., `80/tcp`
	portSet := map[string]swarm.PortConfig{}

	// Build the current list of portConfig
	for _, entry := range *portConfig {
		entry := entry
		if _, ok := portSet[portConfigToString(&entry)]; !ok {
			portSet[portConfigToString(&entry)] = entry
		}
	}

	newPorts := []swarm.PortConfig{}

	// Clean current ports
	toRemove := flags.Lookup(flagPublishRemove).Value.(*opts.PortOpt).Value()
portLoop:
	for _, port := range portSet {
		for _, pConfig := range toRemove {
			if equalProtocol(port.Protocol, pConfig.Protocol) &&
				port.TargetPort == pConfig.TargetPort &&
				equalPublishMode(port.PublishMode, pConfig.PublishMode) {
				continue portLoop
			}
		}

		newPorts = append(newPorts, port)
	}

	// Check to see if there are any conflicts in flags.
	if flags.Changed(flagPublishAdd) {
		ports := flags.Lookup(flagPublishAdd).Value.(*opts.PortOpt).Value()

		for _, port := range ports {
			port := port
			if _, ok := portSet[portConfigToString(&port)]; ok {
				continue
			}
			// portSet[portConfigToString(&port)] = port
			newPorts = append(newPorts, port)
		}
	}

	// Sort the PortConfig to avoid unnecessary updates
	sort.Slice(newPorts, func(i, j int) bool {
		// We convert PortConfig into `port/protocol`, e.g., `80/tcp`.
		// In updatePorts we already filter out with the map, so there are no
		// duplicate entries.
		return portConfigToString(&newPorts[i]) < portConfigToString(&newPorts[j])
	})
	*portConfig = newPorts
	return nil
}
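The de-duplication key includes the published port, target port, protocol, and publish mode, so two mappings of the same target port survive as long as any component differs. A sketch using a copy of the key function (`portKey` is a hypothetical stand-in for `portConfigToString`):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

// portKey mirrors portConfigToString: published:target/protocol/mode.
func portKey(p *swarm.PortConfig) string {
	return fmt.Sprintf("%v:%v/%s/%s", p.PublishedPort, p.TargetPort, p.Protocol, p.PublishMode)
}

func main() {
	a := swarm.PortConfig{
		PublishedPort: 8080,
		TargetPort:    80,
		Protocol:      swarm.PortConfigProtocolTCP,
		PublishMode:   swarm.PortConfigPublishModeIngress,
	}
	b := a
	b.PublishedPort = 9090

	// Distinct published ports produce distinct keys, so both entries
	// survive the de-duplication map in updatePorts.
	fmt.Println(portKey(&a)) // 8080:80/tcp/ingress
	fmt.Println(portKey(&b)) // 9090:80/tcp/ingress
}
```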
func equalProtocol(prot1, prot2 swarm.PortConfigProtocol) bool {
	return prot1 == prot2 ||
		(prot1 == swarm.PortConfigProtocol("") && prot2 == swarm.PortConfigProtocolTCP) ||
		(prot2 == swarm.PortConfigProtocol("") && prot1 == swarm.PortConfigProtocolTCP)
}

func equalPublishMode(mode1, mode2 swarm.PortConfigPublishMode) bool {
	return mode1 == mode2 ||
		(mode1 == swarm.PortConfigPublishMode("") && mode2 == swarm.PortConfigPublishModeIngress) ||
		(mode2 == swarm.PortConfigPublishMode("") && mode1 == swarm.PortConfigPublishModeIngress)
}
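These helpers exist because a removal spec may leave the protocol or publish mode unset, and an unset value must match the swarm defaults (`tcp` and `ingress`). A standalone check against `equalProtocol` (copied verbatim from above):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

func equalProtocol(prot1, prot2 swarm.PortConfigProtocol) bool {
	return prot1 == prot2 ||
		(prot1 == swarm.PortConfigProtocol("") && prot2 == swarm.PortConfigProtocolTCP) ||
		(prot2 == swarm.PortConfigProtocol("") && prot1 == swarm.PortConfigProtocolTCP)
}

func main() {
	// An unset protocol is treated as the default ("tcp"), so a removal
	// spec that omits the protocol still matches a TCP port.
	fmt.Println(equalProtocol("", swarm.PortConfigProtocolTCP)) // true
	fmt.Println(equalProtocol("", swarm.PortConfigProtocolUDP)) // false
}
```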
func updateReplicas(flags *pflag.FlagSet, serviceMode *swarm.ServiceMode) error {
	if !flags.Changed(flagReplicas) {
		return nil
	}

	if serviceMode == nil || serviceMode.Replicated == nil {
		return errors.Errorf("replicas can only be used with replicated mode")
	}
	serviceMode.Replicated.Replicas = flags.Lookup(flagReplicas).Value.(*Uint64Opt).Value()
	return nil
}
Fix service update --host-rm not being granular enough

Removing a host by `<host>:<ip>` should only remove occurrences of the host with
a matching IP-address, instead of removing all entries for that host. In
addition, combining `--host-rm` and `--host-add` for the same host should
result in the new host being added.

This patch fixes the way the diff is calculated to allow combining
removing/adding, and to support entries having both a canonical host-name and
aliases. Aliases cannot be added by the CLI, but are supported in the service
spec, and thus should be taken into account:

Entries can be removed by either a specific `<host-name>:<ip-address>`
mapping, or by `<host>` alone:

- If both IP-address and host-name are provided, only remove the hostname
  from entries that match the given IP-address.
- If only a host-name is provided, remove the hostname from any entry it
  is part of (either as _canonical_ host-name, or as _alias_).
- If, after removing the host-name from an entry, no host-names remain in
  the entry, the entry itself should be removed.

For example, the list of host-entries before processing could look like this:

    hosts = &[]string{
        "127.0.0.2 host3 host1 host2 host4",
        "127.0.0.1 host1 host4",
        "127.0.0.3 host1",
        "127.0.0.1 host1",
    }

Removing `host1` removes every occurrence:

    hosts = &[]string{
        "127.0.0.2 host3 host2 host4",
        "127.0.0.1 host4",
    }

Whereas removing `host1:127.0.0.1` only removes the host if the IP-address matches:

    hosts = &[]string{
        "127.0.0.2 host3 host1 host2 host4",
        "127.0.0.1 host4",
        "127.0.0.3 host1",
    }

Before this patch:

    $ docker service create --name my-service --host foo:127.0.0.1 --host foo:127.0.0.2 --host foo:127.0.0.3 nginx:alpine
    $ docker service update --host-rm foo:127.0.0.1 --host-add foo:127.0.0.4 my-service
    $ docker service inspect --format '{{.Spec.TaskTemplate.ContainerSpec.Hosts}}' my-service
    []

After this patch is applied:

    $ docker service create --name my-service --host foo:127.0.0.1 --host foo:127.0.0.2 --host foo:127.0.0.3 nginx:alpine
    $ docker service update --host-rm foo:127.0.0.1 --host-add foo:127.0.0.5 my-service
    $ docker service inspect --format '{{.Spec.TaskTemplate.ContainerSpec.Hosts}}' my-service
    [127.0.0.2 foo 127.0.0.3 foo 127.0.0.5 foo]

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
type hostMapping struct {
	IPAddr string
	Host   string
}
Preserve sort-order of extra hosts, and allow duplicate entries

Extra hosts (`extra_hosts` in compose-file, or `--host` in services) add
custom host/ip mappings to the container's `/etc/hosts`.

The previous implementation used a `map[string]string{}` as intermediate
storage, and sorted the results alphabetically when converting to a service-spec.
As a result, duplicate hosts were removed, and the order of host/ip mappings was
not preserved (in case the compose-file used a list instead of a map).

According to the **host.conf(5)** man page (http://man7.org/linux/man-pages/man5/host.conf.5.html):

    multi   Valid values are on and off. If set to on, the resolver
            library will return all valid addresses for a host that
            appears in the /etc/hosts file, instead of only the first.
            This is off by default, as it may cause a substantial
            performance loss at sites with large hosts files.

Multiple entries for a host are allowed, and even required for some situations,
for example, to add mappings for IPv4 and IPv6 addresses for a host, as illustrated
by the example hosts file in the **hosts(5)** man page (http://man7.org/linux/man-pages/man5/hosts.5.html):

    # The following lines are desirable for IPv4 capable hosts
    127.0.0.1       localhost

    # 127.0.1.1 is often used for the FQDN of the machine
    127.0.1.1       thishost.mydomain.org   thishost
    192.168.1.10    foo.mydomain.org        foo
    192.168.1.13    bar.mydomain.org        bar
    146.82.138.7    master.debian.org       master
    209.237.226.90  www.opensource.org

    # The following lines are desirable for IPv6 capable hosts
    ::1             localhost ip6-localhost ip6-loopback
    ff02::1         ip6-allnodes
    ff02::2         ip6-allrouters

This patch changes the intermediate storage format to use a `[]string`, and only
sorts entries if the input format in the compose file is a mapping. If the input
format is a list, the original sort-order is preserved.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
// updateHosts performs a diff between existing host entries, entries to be
// removed, and entries to be added. Host entries preserve the order in which they
// were added, as the specification mentions that in case multiple entries for a
// host exist, the first entry should be used (by default).
//
// Note that, even though unsupported by the CLI, the service specs format
// allows entries with both a _canonical_ hostname, and one or more aliases
// in an entry (IP-address canonical_hostname [alias ...])
//
// Entries can be removed by either a specific `<host-name>:<ip-address>` mapping,
// or by `<host>` alone:
//
//   - If both IP-address and host-name are provided, the hostname is removed only
//     from entries that match the given IP-address.
//   - If only a host-name is provided, the hostname is removed from any entry it
//     is part of (either as canonical host-name, or as alias).
//   - If, after removing the host-name from an entry, no host-names remain in
//     the entry, the entry itself is removed.
//
// For example, the list of host-entries before processing could look like this:
//
//	hosts = &[]string{
//		"127.0.0.2 host3 host1 host2 host4",
//		"127.0.0.1 host1 host4",
//		"127.0.0.3 host1",
//		"127.0.0.1 host1",
//	}
//
// Removing `host1` removes every occurrence:
//
//	hosts = &[]string{
//		"127.0.0.2 host3 host2 host4",
//		"127.0.0.1 host4",
//	}
//
// Removing `host1:127.0.0.1` on the other hand, only removes the host if the
// IP-address matches:
//
//	hosts = &[]string{
//		"127.0.0.2 host3 host1 host2 host4",
//		"127.0.0.1 host4",
//		"127.0.0.3 host1",
//	}
func updateHosts(flags *pflag.FlagSet, hosts *[]string) error {
|
Fix service update --host-rm not being granular enough
Removing a host by `<host>:<ip>` should only remove occurences of the host with
a matching IP-address, instead of removing all entries for that host.
In addition, combining `--host-rm` and `--host-add` for the same host should
result in the new host being added.
This patch fixes the way the diff is calculated to allow combining
removing/adding, and to support entries having both a canonical, and aliases.
Aliases cannot be added by the CLI, but are supported in the Service spec, thus
should be taken into account:
Entries can be removed by either a specific `<host-name>:<ip-address>`
mapping, or by `<host>` alone:
- If both IP-address and host-name is provided, only remove the hostname
from entries that match the given IP-address.
- If only a host-name is provided, remove the hostname from any entry it
is part of (either as _canonical_ host-name, or as _alias_).
- If, after removing the host-name from an entry, no host-names remain in
the entry, the entry itself should be removed.
For example, the list of host-entries before processing could look like this:
hosts = &[]string{
"127.0.0.2 host3 host1 host2 host4",
"127.0.0.1 host1 host4",
"127.0.0.3 host1",
"127.0.0.1 host1",
}
Removing `host1` removes every occurrence:
hosts = &[]string{
"127.0.0.2 host3 host2 host4",
"127.0.0.1 host4",
}
Whereas removing `host1:127.0.0.1` only remove the host if the IP-address matches:
hosts = &[]string{
"127.0.0.2 host3 host1 host2 host4",
"127.0.0.1 host4",
"127.0.0.3 host1",
}
Before this patch:
$ docker service create --name my-service --host foo:127.0.0.1 --host foo:127.0.0.2 --host foo:127.0.0.3 nginx:alpine
$ docker service update --host-rm foo:127.0.0.1 --host-add foo:127.0.0.4 my-service
$ docker service inspect --format '{{.Spec.TaskTemplate.ContainerSpec.Hosts}}' my-service
[]
After this patch is applied:
$ docker service create --name my-service --host foo:127.0.0.1 --host foo:127.0.0.2 --host foo:127.0.0.3 nginx:alpine
$ docker service update --host-rm foo:127.0.0.1 --host-add foo:127.0.0.5 my-service
$ docker service inspect --format '{{.Spec.TaskTemplate.ContainerSpec.Hosts}}' my-service
[127.0.0.2 foo 127.0.0.3 foo 127.0.0.4 foo]
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2017-10-28 13:34:58 -04:00
|
|
|
var toRemove []hostMapping
|
2016-11-03 11:05:00 -04:00
|
|
|
if flags.Changed(flagHostRemove) {
|
|
|
|
extraHostsToRemove := flags.Lookup(flagHostRemove).Value.(*opts.ListOpts).GetAll()
|
|
|
|
for _, entry := range extraHostsToRemove {
|
Fix service update --host-rm not being granular enough
Removing a host by `<host>:<ip>` should only remove occurences of the host with
a matching IP-address, instead of removing all entries for that host.
In addition, combining `--host-rm` and `--host-add` for the same host should
result in the new host being added.
This patch fixes the way the diff is calculated to allow combining
removing/adding, and to support entries having both a canonical, and aliases.
Aliases cannot be added by the CLI, but are supported in the Service spec, thus
should be taken into account:
Entries can be removed by either a specific `<host-name>:<ip-address>`
mapping, or by `<host>` alone:
- If both IP-address and host-name is provided, only remove the hostname
from entries that match the given IP-address.
- If only a host-name is provided, remove the hostname from any entry it
is part of (either as _canonical_ host-name, or as _alias_).
- If, after removing the host-name from an entry, no host-names remain in
the entry, the entry itself should be removed.
For example, the list of host-entries before processing could look like this:
hosts = &[]string{
"127.0.0.2 host3 host1 host2 host4",
"127.0.0.1 host1 host4",
"127.0.0.3 host1",
"127.0.0.1 host1",
}
Removing `host1` removes every occurrence:
hosts = &[]string{
"127.0.0.2 host3 host2 host4",
"127.0.0.1 host4",
}
Whereas removing `host1:127.0.0.1` only remove the host if the IP-address matches:
hosts = &[]string{
"127.0.0.2 host3 host1 host2 host4",
"127.0.0.1 host4",
"127.0.0.3 host1",
}
Before this patch:
$ docker service create --name my-service --host foo:127.0.0.1 --host foo:127.0.0.2 --host foo:127.0.0.3 nginx:alpine
$ docker service update --host-rm foo:127.0.0.1 --host-add foo:127.0.0.4 my-service
$ docker service inspect --format '{{.Spec.TaskTemplate.ContainerSpec.Hosts}}' my-service
[]
After this patch is applied:
$ docker service create --name my-service --host foo:127.0.0.1 --host foo:127.0.0.2 --host foo:127.0.0.3 nginx:alpine
$ docker service update --host-rm foo:127.0.0.1 --host-add foo:127.0.0.5 my-service
$ docker service inspect --format '{{.Spec.TaskTemplate.ContainerSpec.Hosts}}' my-service
[127.0.0.2 foo 127.0.0.3 foo 127.0.0.4 foo]
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2017-10-28 13:34:58 -04:00
|
|
|
v := strings.SplitN(entry, ":", 2)
|
|
|
|
if len(v) > 1 {
|
|
|
|
toRemove = append(toRemove, hostMapping{IPAddr: v[1], Host: v[0]})
|
|
|
|
} else {
|
|
|
|
toRemove = append(toRemove, hostMapping{Host: v[0]})
|
|
|
|
}
|
2016-11-03 11:05:00 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
Fix service update --host-rm not being granular enough
Removing a host by `<host>:<ip>` should only remove occurences of the host with
a matching IP-address, instead of removing all entries for that host.
In addition, combining `--host-rm` and `--host-add` for the same host should
result in the new host being added.
This patch fixes the way the diff is calculated to allow combining
removing/adding, and to support entries having both a canonical, and aliases.
Aliases cannot be added by the CLI, but are supported in the Service spec, thus
should be taken into account:
Entries can be removed by either a specific `<host-name>:<ip-address>`
mapping, or by `<host>` alone:
- If both IP-address and host-name is provided, only remove the hostname
from entries that match the given IP-address.
- If only a host-name is provided, remove the hostname from any entry it
is part of (either as _canonical_ host-name, or as _alias_).
- If, after removing the host-name from an entry, no host-names remain in
the entry, the entry itself should be removed.
For example, the list of host-entries before processing could look like this:
hosts = &[]string{
"127.0.0.2 host3 host1 host2 host4",
"127.0.0.1 host1 host4",
"127.0.0.3 host1",
"127.0.0.1 host1",
}
Removing `host1` removes every occurrence:
hosts = &[]string{
"127.0.0.2 host3 host2 host4",
"127.0.0.1 host4",
}
Whereas removing `host1:127.0.0.1` only remove the host if the IP-address matches:
hosts = &[]string{
"127.0.0.2 host3 host1 host2 host4",
"127.0.0.1 host4",
"127.0.0.3 host1",
}
Before this patch:
$ docker service create --name my-service --host foo:127.0.0.1 --host foo:127.0.0.2 --host foo:127.0.0.3 nginx:alpine
$ docker service update --host-rm foo:127.0.0.1 --host-add foo:127.0.0.4 my-service
$ docker service inspect --format '{{.Spec.TaskTemplate.ContainerSpec.Hosts}}' my-service
[]
After this patch is applied:
$ docker service create --name my-service --host foo:127.0.0.1 --host foo:127.0.0.2 --host foo:127.0.0.3 nginx:alpine
$ docker service update --host-rm foo:127.0.0.1 --host-add foo:127.0.0.5 my-service
$ docker service inspect --format '{{.Spec.TaskTemplate.ContainerSpec.Hosts}}' my-service
[127.0.0.2 foo 127.0.0.3 foo 127.0.0.4 foo]
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2017-10-28 13:34:58 -04:00
|
|
|
var newHosts []string
|
2016-11-03 11:05:00 -04:00
|
|
|
for _, entry := range *hosts {
|
Fix service update --host-rm not being granular enough
Removing a host by `<host>:<ip>` should only remove occurences of the host with
a matching IP-address, instead of removing all entries for that host.
In addition, combining `--host-rm` and `--host-add` for the same host should
result in the new host being added.
This patch fixes the way the diff is calculated to allow combining
removing/adding, and to support entries having both a canonical, and aliases.
Aliases cannot be added by the CLI, but are supported in the Service spec, thus
should be taken into account:
Entries can be removed by either a specific `<host-name>:<ip-address>`
mapping, or by `<host>` alone:
- If both IP-address and host-name is provided, only remove the hostname
from entries that match the given IP-address.
- If only a host-name is provided, remove the hostname from any entry it
is part of (either as _canonical_ host-name, or as _alias_).
- If, after removing the host-name from an entry, no host-names remain in
the entry, the entry itself should be removed.
For example, the list of host-entries before processing could look like this:
hosts = &[]string{
"127.0.0.2 host3 host1 host2 host4",
"127.0.0.1 host1 host4",
"127.0.0.3 host1",
"127.0.0.1 host1",
}
Removing `host1` removes every occurrence:
hosts = &[]string{
"127.0.0.2 host3 host2 host4",
"127.0.0.1 host4",
}
Whereas removing `host1:127.0.0.1` only remove the host if the IP-address matches:
hosts = &[]string{
"127.0.0.2 host3 host1 host2 host4",
"127.0.0.1 host4",
"127.0.0.3 host1",
}
Before this patch:
$ docker service create --name my-service --host foo:127.0.0.1 --host foo:127.0.0.2 --host foo:127.0.0.3 nginx:alpine
$ docker service update --host-rm foo:127.0.0.1 --host-add foo:127.0.0.4 my-service
$ docker service inspect --format '{{.Spec.TaskTemplate.ContainerSpec.Hosts}}' my-service
[]
After this patch is applied:
$ docker service create --name my-service --host foo:127.0.0.1 --host foo:127.0.0.2 --host foo:127.0.0.3 nginx:alpine
$ docker service update --host-rm foo:127.0.0.1 --host-add foo:127.0.0.5 my-service
$ docker service inspect --format '{{.Spec.TaskTemplate.ContainerSpec.Hosts}}' my-service
[127.0.0.2 foo 127.0.0.3 foo 127.0.0.4 foo]
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2017-10-28 13:34:58 -04:00
|
|
|
// Since this is in SwarmKit format, we need to find the key, which is canonical_hostname of:
|
2016-11-03 11:05:00 -04:00
|
|
|
// IP_address canonical_hostname [aliases...]
|
|
|
|
parts := strings.Fields(entry)
|
Fix service update --host-rm not being granular enough
Removing a host by `<host>:<ip>` should only remove occurences of the host with
a matching IP-address, instead of removing all entries for that host.
In addition, combining `--host-rm` and `--host-add` for the same host should
result in the new host being added.
This patch fixes the way the diff is calculated to allow combining
removing/adding, and to support entries having both a canonical, and aliases.
Aliases cannot be added by the CLI, but are supported in the Service spec, thus
should be taken into account:
Entries can be removed by either a specific `<host-name>:<ip-address>`
mapping, or by `<host>` alone:
- If both IP-address and host-name is provided, only remove the hostname
from entries that match the given IP-address.
- If only a host-name is provided, remove the hostname from any entry it
is part of (either as _canonical_ host-name, or as _alias_).
- If, after removing the host-name from an entry, no host-names remain in
the entry, the entry itself should be removed.
For example, the list of host-entries before processing could look like this:
hosts = &[]string{
"127.0.0.2 host3 host1 host2 host4",
"127.0.0.1 host1 host4",
"127.0.0.3 host1",
"127.0.0.1 host1",
}
Removing `host1` removes every occurrence:
hosts = &[]string{
"127.0.0.2 host3 host2 host4",
"127.0.0.1 host4",
}
Whereas removing `host1:127.0.0.1` only remove the host if the IP-address matches:
hosts = &[]string{
"127.0.0.2 host3 host1 host2 host4",
"127.0.0.1 host4",
"127.0.0.3 host1",
}
Before this patch:
$ docker service create --name my-service --host foo:127.0.0.1 --host foo:127.0.0.2 --host foo:127.0.0.3 nginx:alpine
$ docker service update --host-rm foo:127.0.0.1 --host-add foo:127.0.0.4 my-service
$ docker service inspect --format '{{.Spec.TaskTemplate.ContainerSpec.Hosts}}' my-service
[]
After this patch is applied:
$ docker service create --name my-service --host foo:127.0.0.1 --host foo:127.0.0.2 --host foo:127.0.0.3 nginx:alpine
$ docker service update --host-rm foo:127.0.0.1 --host-add foo:127.0.0.5 my-service
$ docker service inspect --format '{{.Spec.TaskTemplate.ContainerSpec.Hosts}}' my-service
[127.0.0.2 foo 127.0.0.3 foo 127.0.0.4 foo]
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2017-10-28 13:34:58 -04:00
|
|
|
if len(parts) == 0 {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
ip := parts[0]
|
|
|
|
hostNames := parts[1:]
|
|
|
|
for _, rm := range toRemove {
|
|
|
|
if rm.IPAddr != "" && rm.IPAddr != ip {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
for i, h := range hostNames {
|
|
|
|
if h == rm.Host {
|
|
|
|
hostNames = append(hostNames[:i], hostNames[i+1:]...)
|
|
|
|
}
|
2016-11-03 11:05:00 -04:00
|
|
|
}
|
Fix service update --host-rm not being granular enough
Removing a host by `<host>:<ip>` should only remove occurences of the host with
a matching IP-address, instead of removing all entries for that host.
In addition, combining `--host-rm` and `--host-add` for the same host should
result in the new host being added.
This patch fixes the way the diff is calculated to allow combining
removing/adding, and to support entries having both a canonical, and aliases.
Aliases cannot be added by the CLI, but are supported in the Service spec, thus
should be taken into account:
Entries can be removed by either a specific `<host-name>:<ip-address>`
mapping, or by `<host>` alone:
- If both IP-address and host-name is provided, only remove the hostname
from entries that match the given IP-address.
- If only a host-name is provided, remove the hostname from any entry it
is part of (either as _canonical_ host-name, or as _alias_).
- If, after removing the host-name from an entry, no host-names remain in
the entry, the entry itself should be removed.
For example, the list of host-entries before processing could look like this:
hosts = &[]string{
"127.0.0.2 host3 host1 host2 host4",
"127.0.0.1 host1 host4",
"127.0.0.3 host1",
"127.0.0.1 host1",
}
Removing `host1` removes every occurrence:
hosts = &[]string{
"127.0.0.2 host3 host2 host4",
"127.0.0.1 host4",
}
Whereas removing `host1:127.0.0.1` only remove the host if the IP-address matches:
hosts = &[]string{
"127.0.0.2 host3 host1 host2 host4",
"127.0.0.1 host4",
"127.0.0.3 host1",
}
Before this patch:
$ docker service create --name my-service --host foo:127.0.0.1 --host foo:127.0.0.2 --host foo:127.0.0.3 nginx:alpine
$ docker service update --host-rm foo:127.0.0.1 --host-add foo:127.0.0.4 my-service
$ docker service inspect --format '{{.Spec.TaskTemplate.ContainerSpec.Hosts}}' my-service
[]
After this patch is applied:
$ docker service create --name my-service --host foo:127.0.0.1 --host foo:127.0.0.2 --host foo:127.0.0.3 nginx:alpine
$ docker service update --host-rm foo:127.0.0.1 --host-add foo:127.0.0.5 my-service
$ docker service inspect --format '{{.Spec.TaskTemplate.ContainerSpec.Hosts}}' my-service
[127.0.0.2 foo 127.0.0.3 foo 127.0.0.4 foo]
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2017-10-28 13:34:58 -04:00
|
|
|
}
|
|
|
|
if len(hostNames) > 0 {
|
|
|
|
newHosts = append(newHosts, fmt.Sprintf("%s %s", ip, strings.Join(hostNames, " ")))
|
2016-11-03 11:05:00 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
	// Append new hosts (in SwarmKit format)
	if flags.Changed(flagHostAdd) {
		values := convertExtraHostsToSwarmHosts(flags.Lookup(flagHostAdd).Value.(*opts.ListOpts).GetAll())
		newHosts = append(newHosts, values...)
	}
	*hosts = removeDuplicates(newHosts)
	return nil
}
// updateLogDriver updates the log driver only if the log driver flag is set.
// All options will be replaced with those provided on the command line.
func updateLogDriver(flags *pflag.FlagSet, taskTemplate *swarm.TaskSpec) error {
	if !flags.Changed(flagLogDriver) {
		return nil
	}

	name, err := flags.GetString(flagLogDriver)
	if err != nil {
		return err
	}

	if name == "" {
		return nil
	}

	taskTemplate.LogDriver = &swarm.Driver{
		Name:    name,
		Options: opts.ConvertKVStringsToMap(flags.Lookup(flagLogOpt).Value.(*opts.ListOpts).GetAll()),
	}

	return nil
}
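As a usage sketch: the --log-opt values are plain "key=value" strings that
become the driver's option map via opts.ConvertKVStringsToMap from docker/cli.
A minimal, hedged example:

    package main

    import (
        "fmt"

        "github.com/docker/cli/opts"
    )

    func main() {
        // Each --log-opt flag contributes one "key=value" entry.
        kv := []string{"max-size=10m", "max-file=3"}
        fmt.Println(opts.ConvertKVStringsToMap(kv))
        // map[max-file:3 max-size:10m]
    }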
func updateHealthcheck(flags *pflag.FlagSet, containerSpec *swarm.ContainerSpec) error {
	if !anyChanged(flags, flagNoHealthcheck, flagHealthCmd, flagHealthInterval, flagHealthRetries, flagHealthTimeout, flagHealthStartPeriod) {
		return nil
	}
	if containerSpec.Healthcheck == nil {
		containerSpec.Healthcheck = &container.HealthConfig{}
	}
	noHealthcheck, err := flags.GetBool(flagNoHealthcheck)
	if err != nil {
		return err
	}
	if noHealthcheck {
		if !anyChanged(flags, flagHealthCmd, flagHealthInterval, flagHealthRetries, flagHealthTimeout, flagHealthStartPeriod) {
			containerSpec.Healthcheck = &container.HealthConfig{
				Test: []string{"NONE"},
			}
			return nil
		}
		return errors.Errorf("--%s conflicts with --health-* options", flagNoHealthcheck)
	}
	if len(containerSpec.Healthcheck.Test) > 0 && containerSpec.Healthcheck.Test[0] == "NONE" {
		containerSpec.Healthcheck.Test = nil
	}
	if flags.Changed(flagHealthInterval) {
		val := *flags.Lookup(flagHealthInterval).Value.(*opts.PositiveDurationOpt).Value()
		containerSpec.Healthcheck.Interval = val
	}
	if flags.Changed(flagHealthTimeout) {
		val := *flags.Lookup(flagHealthTimeout).Value.(*opts.PositiveDurationOpt).Value()
		containerSpec.Healthcheck.Timeout = val
	}
	if flags.Changed(flagHealthStartPeriod) {
		val := *flags.Lookup(flagHealthStartPeriod).Value.(*opts.PositiveDurationOpt).Value()
		containerSpec.Healthcheck.StartPeriod = val
	}
	if flags.Changed(flagHealthRetries) {
		containerSpec.Healthcheck.Retries, _ = flags.GetInt(flagHealthRetries)
	}
	if flags.Changed(flagHealthCmd) {
		cmd, _ := flags.GetString(flagHealthCmd)
		if cmd != "" {
			containerSpec.Healthcheck.Test = []string{"CMD-SHELL", cmd}
		} else {
			containerSpec.Healthcheck.Test = nil
		}
	}
	return nil
}
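The Test-field encodings this function relies on are Docker's healthcheck
conventions: ["NONE"] disables the check entirely, while ["CMD-SHELL", cmd]
runs cmd through the container's default shell. A small illustrative snippet
(hypothetical command value):

    package main

    import (
        "fmt"

        "github.com/docker/docker/api/types/container"
    )

    func main() {
        disabled := container.HealthConfig{Test: []string{"NONE"}}
        shell := container.HealthConfig{
            Test: []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"},
        }
        fmt.Println(disabled.Test, shell.Test)
    }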
func updateNetworks(ctx context.Context, apiClient client.NetworkAPIClient, flags *pflag.FlagSet, spec *swarm.ServiceSpec) error {
	// spec.TaskTemplate.Networks takes precedence over the deprecated
	// spec.Networks field. If spec.Networks is in use, we'll migrate those
	// values to spec.TaskTemplate.Networks.
	specNetworks := spec.TaskTemplate.Networks
	if len(specNetworks) == 0 {
		specNetworks = spec.Networks
	}
	spec.Networks = nil

	toRemove := buildToRemoveSet(flags, flagNetworkRemove)
	idsToRemove := make(map[string]struct{})
	for networkIDOrName := range toRemove {
		network, err := apiClient.NetworkInspect(ctx, networkIDOrName, types.NetworkInspectOptions{Scope: "swarm"})
		if err != nil {
			return err
		}
		idsToRemove[network.ID] = struct{}{}
	}

	existingNetworks := make(map[string]struct{})
	var newNetworks []swarm.NetworkAttachmentConfig
	for _, network := range specNetworks {
		if _, exists := idsToRemove[network.Target]; exists {
			continue
		}

		newNetworks = append(newNetworks, network)
		existingNetworks[network.Target] = struct{}{}
	}

	if flags.Changed(flagNetworkAdd) {
		values := flags.Lookup(flagNetworkAdd).Value.(*opts.NetworkOpt)
		networks := convertNetworks(*values)
		for _, network := range networks {
			nwID, err := resolveNetworkID(ctx, apiClient, network.Target)
			if err != nil {
				return err
			}
			if _, exists := existingNetworks[nwID]; exists {
				return errors.Errorf("service is already attached to network %s", network.Target)
			}
			network.Target = nwID
			newNetworks = append(newNetworks, network)
			existingNetworks[network.Target] = struct{}{}
		}
	}

	sort.Slice(newNetworks, func(i, j int) bool {
		return newNetworks[i].Target < newNetworks[j].Target
	})

	spec.TaskTemplate.Networks = newNetworks
	return nil
}
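Sorting the attachments by Target is what keeps an unchanged network set from
producing a spurious diff against the current spec. A minimal sketch of that
step, with made-up network IDs:

    package main

    import (
        "fmt"
        "sort"

        "github.com/docker/docker/api/types/swarm"
    )

    func main() {
        nets := []swarm.NetworkAttachmentConfig{{Target: "net-b"}, {Target: "net-a"}}
        sort.Slice(nets, func(i, j int) bool { return nets[i].Target < nets[j].Target })
        fmt.Println(nets[0].Target, nets[1].Target) // net-a net-b
    }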
// updateCredSpecConfig updates the value of the credential spec Config field
// to the config ID if the credential spec has changed. It mutates the passed
// spec. It does not handle the case where the credential spec specifies a
// config that does not exist -- that case is handled as part of
// getUpdatedConfigs.
func updateCredSpecConfig(flags *pflag.FlagSet, containerSpec *swarm.ContainerSpec) {
	if flags.Changed(flagCredentialSpec) {
		credSpecOpt := flags.Lookup(flagCredentialSpec)
		// if the flag has changed, and the value is empty string, then we
		// should remove any credential spec that might be present
		if credSpecOpt.Value.String() == "" {
			if containerSpec.Privileges != nil {
				containerSpec.Privileges.CredentialSpec = nil
			}
			return
		}

		// otherwise, set the credential spec to be the parsed value
		credSpec := credSpecOpt.Value.(*credentialSpecOpt).Value()

		// if this is a Config credential spec, we still need to replace the
		// value of credSpec.Config with the config ID instead of the Name.
		if credSpec.Config != "" {
			for _, config := range containerSpec.Configs {
				// if the config name matches, then set the config ID. We do
				// not need to worry about whether this is a Runtime target or
				// not: even if it is not a Runtime target, getUpdatedConfigs
				// ensures that a Runtime target for this config exists, and
				// the Name is unique, so the ID is correct no matter the
				// target.
				if config.ConfigName == credSpec.Config {
					credSpec.Config = config.ConfigID
					break
				}
			}
		}

		if containerSpec.Privileges == nil {
			containerSpec.Privileges = &swarm.Privileges{}
		}

		containerSpec.Privileges.CredentialSpec = credSpec
	}
}
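The name-to-ID substitution at the heart of this function can be sketched on
its own. The config name and ID below are hypothetical values; containerSpec.Configs
holds *swarm.ConfigReference entries:

    package main

    import (
        "fmt"

        "github.com/docker/docker/api/types/swarm"
    )

    func main() {
        configs := []*swarm.ConfigReference{
            {ConfigName: "my-credspec", ConfigID: "bf81nq0zv8b0d83f6bo5m3qevq"},
        }
        ref := "my-credspec" // as passed via --credential-spec config://my-credspec
        for _, c := range configs {
            if c.ConfigName == ref {
                ref = c.ConfigID // the service spec must carry the ID, not the name
                break
            }
        }
        fmt.Println(ref)
    }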
Service cap-add/cap-drop: improve handling of combinations and special "ALL" value

When creating and updating services, we need to avoid unneeded service churn.
The interaction of separate lists to "add" and "drop" capabilities, a special
("ALL") capability, as well as a "relaxed" format for accepted capabilities
(case-insensitive, `CAP_` prefix optional) make this rather involved.

This patch updates how we handle `--cap-add` / `--cap-drop` when _creating_ as
well as _updating_ services, with the following rules/assumptions applied:

- both existing capabilities (in the service spec) and new ones (passed through
  flags or in the compose-file) are normalized and de-duplicated before use.
- the special "ALL" capability is equivalent to "all capabilities" and taken
  into account when normalizing capabilities. Combining "ALL" with other
  capabilities is therefore equivalent to just specifying "ALL".
- adding capabilities takes precedence over dropping, which means that if
  a capability is both set to be "dropped" and to be "added", it is removed
  from the list to "drop".
- the final lists should be sorted and normalized to reduce service churn.
- no validation of capabilities is handled by the client; validation is
  delegated to the daemon/server.

When deploying a service using a docker-compose file, the docker-compose file
is *mostly* handled as being "declarative". However, many of the issues outlined
above also apply to compose-files, so similar handling is applied to compose
files as well to prevent service churn.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
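The "relaxed" capability format mentioned above is handled by normalization.
A minimal sketch, assuming opts.CapabilitiesMap from docker/cli behaves as
described (upper-casing names, adding the CAP_ prefix, collapsing duplicates):

    package main

    import (
        "fmt"

        "github.com/docker/cli/opts"
    )

    func main() {
        // "chown" and "CAP_CHOWN" normalize to the same key, so the
        // resulting map contains a single entry per capability.
        caps := opts.CapabilitiesMap([]string{"chown", "CAP_CHOWN", "Net_Admin"})
        fmt.Println(caps) // map[CAP_CHOWN:true CAP_NET_ADMIN:true]
    }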
// updateCapabilities calculates the list of capabilities to "drop" and to "add"
// after applying the capabilities passed through `--cap-add` and `--cap-drop`
// to the existing list of added/dropped capabilities in the service spec.
//
// Adding capabilities takes precedence over "dropping" the same capability, so
// if both `--cap-add` and `--cap-drop` specify the same capability, the
// `--cap-drop` is ignored.
//
// Capabilities to "drop" are removed from the existing list of "added"
// capabilities, and vice-versa (capabilities to "add" are removed from the
// existing list of capabilities to "drop").
//
// Capabilities are normalized, sorted, and duplicates are removed to prevent
// service tasks from being updated if no changes are made. If a list has the
// "ALL" capability set, then any other capability is removed from that list.
//
// Adding/removing capabilities when updating a service is handled as a tri-state;
//
// - if the capability was previously "dropped", then remove it from
//   "CapabilityDrop", but do NOT add it to "CapabilityAdd". However, if the
//   capability was not yet in the service's "CapabilityDrop", then simply add
//   it to the service's "CapabilityAdd"
// - likewise, if the capability was previously "added", then remove it from
//   "CapabilityAdd", but do NOT add it to "CapabilityDrop". If the capability
//   was not yet in the service's "CapabilityAdd", then simply add it to the
//   service's "CapabilityDrop".
//
// In other words, given a service with the following:
//
// | CapDrop        | CapAdd        |
// |----------------|---------------|
// | CAP_SOME_CAP   |               |
//
// When updating the service, and applying `--cap-add CAP_SOME_CAP`, the
// previously dropped capability is removed:
//
// | CapDrop        | CapAdd        |
// |----------------|---------------|
// |                |               |
//
// When updating the service a second time, applying `--cap-add CAP_SOME_CAP`,
// the capability is now added:
//
// | CapDrop        | CapAdd        |
// |----------------|---------------|
// |                | CAP_SOME_CAP  |
func updateCapabilities(flags *pflag.FlagSet, containerSpec *swarm.ContainerSpec) {
	var (
		toAdd, toDrop map[string]bool

		capDrop = opts.CapabilitiesMap(containerSpec.CapabilityDrop)
		capAdd  = opts.CapabilitiesMap(containerSpec.CapabilityAdd)
	)
	if flags.Changed(flagCapAdd) {
		toAdd = opts.CapabilitiesMap(flags.Lookup(flagCapAdd).Value.(*opts.ListOpts).GetAll())
		if toAdd[opts.ResetCapabilities] {
			capAdd = make(map[string]bool)
			delete(toAdd, opts.ResetCapabilities)
		}
	}
	if flags.Changed(flagCapDrop) {
		toDrop = opts.CapabilitiesMap(flags.Lookup(flagCapDrop).Value.(*opts.ListOpts).GetAll())
		if toDrop[opts.ResetCapabilities] {
			capDrop = make(map[string]bool)
			delete(toDrop, opts.ResetCapabilities)
		}
	}

	// First remove the capabilities to "drop" from the service's existing
	// list of capabilities to "add". If a capability is both added and dropped
	// on update, then "adding" takes precedence.
	//
	// Dropping a capability when updating a service is considered a tri-state;
	//
	// - if the capability was previously "added", then remove it from
	//   "CapabilityAdd", and do NOT add it to "CapabilityDrop"
	// - if the capability was not yet in the service's "CapabilityAdd",
	//   then simply add it to the service's "CapabilityDrop"
	for c := range toDrop {
		if !toAdd[c] {
			if capAdd[c] {
				delete(capAdd, c)
			} else {
				capDrop[c] = true
			}
		}
	}

	// And remove the capabilities we're "adding" from the service's existing
	// list of capabilities to "drop".
	//
	// "Adding" capabilities takes precedence over "dropping" them, so if a
	// capability is set both as "add" and "drop", remove the capability from
	// the service's list of dropped capabilities (if present).
	//
	// Adding a capability when updating a service is considered a tri-state;
	//
	// - if the capability was previously "dropped", then remove it from
	//   "CapabilityDrop", and do NOT add it to "CapabilityAdd"
	// - if the capability was not yet in the service's "CapabilityDrop",
	//   then simply add it to the service's "CapabilityAdd"
	for c := range toAdd {
		if capDrop[c] {
			delete(capDrop, c)
		} else {
			capAdd[c] = true
		}
	}

	// Now that the service's existing lists are updated, apply the new
	// capabilities to add/drop to both lists. Sort the lists to prevent
	// unneeded updates to service-tasks.
	containerSpec.CapabilityDrop = capsList(capDrop)
	containerSpec.CapabilityAdd = capsList(capAdd)
}

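A hedged walkthrough of the tri-state rule above, using plain maps to stand in
for the service's existing lists (illustrative only, not the CLI code path):

    package main

    import "fmt"

    func main() {
        capDrop := map[string]bool{"CAP_SOME_CAP": true} // previously dropped
        capAdd := map[string]bool{}

        // First `--cap-add CAP_SOME_CAP`: clears the "drop", does not add.
        if capDrop["CAP_SOME_CAP"] {
            delete(capDrop, "CAP_SOME_CAP")
        } else {
            capAdd["CAP_SOME_CAP"] = true
        }
        fmt.Println(capDrop, capAdd) // map[] map[]

        // Second `--cap-add CAP_SOME_CAP`: nothing left to clear, so add it.
        if capDrop["CAP_SOME_CAP"] {
            delete(capDrop, "CAP_SOME_CAP")
        } else {
            capAdd["CAP_SOME_CAP"] = true
        }
        fmt.Println(capDrop, capAdd) // map[] map[CAP_SOME_CAP:true]
    }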
func capsList(caps map[string]bool) []string {
	if caps[opts.AllCapabilities] {
		return []string{opts.AllCapabilities}
	}
	var out []string
	for c := range caps {
		out = append(out, c)
	}
	sort.Strings(out)
	return out
}
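For instance, a list containing the special "ALL" value collapses to just
"ALL" (a sketch, assuming opts.AllCapabilities == "ALL"):

    capsList(map[string]bool{"ALL": true, "CAP_CHOWN": true})
    // returns []string{"ALL"}: with ALL set, listing CAP_CHOWN adds nothing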