package convert

import (
	"fmt"
	"os"
	"sort"
	"strings"
	"time"

	servicecli "github.com/docker/cli/cli/command/service"
	composetypes "github.com/docker/cli/cli/compose/types"
	"github.com/docker/cli/opts"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/api/types/versions"
	"github.com/docker/docker/client"
	"github.com/docker/go-units"
	"github.com/pkg/errors"
)

const (
	defaultNetwork = "default"
	// LabelImage is the label used to store image name provided in the compose file
	LabelImage = "com.docker.stack.image"
)

// Services converts services from compose-file types to engine API types.
func Services(
	namespace Namespace,
	config *composetypes.Config,
	client client.CommonAPIClient,
) (map[string]swarm.ServiceSpec, error) {
	result := make(map[string]swarm.ServiceSpec)

	services := config.Services
	volumes := config.Volumes
	networks := config.Networks

	for _, service := range services {
		secrets, err := convertServiceSecrets(client, namespace, service.Secrets, config.Secrets)
		if err != nil {
			return nil, errors.Wrapf(err, "service %s", service.Name)
		}
		configs, err := convertServiceConfigObjs(client, namespace, service, config.Configs)
		if err != nil {
			return nil, errors.Wrapf(err, "service %s", service.Name)
		}

		serviceSpec, err := Service(client.ClientVersion(), namespace, service, networks, volumes, secrets, configs)
		if err != nil {
			return nil, errors.Wrapf(err, "service %s", service.Name)
		}
		result[service.Name] = serviceSpec
	}

	return result, nil
}

// Service converts a ServiceConfig into a swarm ServiceSpec
func Service(
	apiVersion string,
	namespace Namespace,
	service composetypes.ServiceConfig,
	networkConfigs map[string]composetypes.NetworkConfig,
	volumes map[string]composetypes.VolumeConfig,
	secrets []*swarm.SecretReference,
	configs []*swarm.ConfigReference,
) (swarm.ServiceSpec, error) {
	name := namespace.Scope(service.Name)
	endpoint := convertEndpointSpec(service.Deploy.EndpointMode, service.Ports)

	mode, err := convertDeployMode(service.Deploy.Mode, service.Deploy.Replicas)
	if err != nil {
		return swarm.ServiceSpec{}, err
	}

	mounts, err := Volumes(service.Volumes, volumes, namespace)
	if err != nil {
		return swarm.ServiceSpec{}, err
	}

	resources, err := convertResources(service.Deploy.Resources)
	if err != nil {
		return swarm.ServiceSpec{}, err
	}

	restartPolicy, err := convertRestartPolicy(
		service.Restart, service.Deploy.RestartPolicy)
	if err != nil {
		return swarm.ServiceSpec{}, err
	}

	healthcheck, err := convertHealthcheck(service.HealthCheck)
	if err != nil {
		return swarm.ServiceSpec{}, err
	}

	networks, err := convertServiceNetworks(service.Networks, networkConfigs, namespace, service.Name)
	if err != nil {
		return swarm.ServiceSpec{}, err
	}

	dnsConfig := convertDNSConfig(service.DNS, service.DNSSearch)

	var privileges swarm.Privileges
	privileges.CredentialSpec, err = convertCredentialSpec(
		namespace, service.CredentialSpec, configs,
	)
	if err != nil {
		return swarm.ServiceSpec{}, err
	}

	var logDriver *swarm.Driver
	if service.Logging != nil {
		logDriver = &swarm.Driver{
			Name:    service.Logging.Driver,
			Options: service.Logging.Options,
		}
	}

	capAdd, capDrop := opts.EffectiveCapAddCapDrop(service.CapAdd, service.CapDrop)

	serviceSpec := swarm.ServiceSpec{
		Annotations: swarm.Annotations{
			Name:   name,
			Labels: AddStackLabel(namespace, service.Deploy.Labels),
		},
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: &swarm.ContainerSpec{
				Image:           service.Image,
				Command:         service.Entrypoint,
				Args:            service.Command,
				Hostname:        service.Hostname,
				Hosts:           convertExtraHosts(service.ExtraHosts),
				DNSConfig:       dnsConfig,
				Healthcheck:     healthcheck,
				Env:             sortStrings(convertEnvironment(service.Environment)),
				Labels:          AddStackLabel(namespace, service.Labels),
				Dir:             service.WorkingDir,
				User:            service.User,
				Mounts:          mounts,
				StopGracePeriod: composetypes.ConvertDurationPtr(service.StopGracePeriod),
				StopSignal:      service.StopSignal,
				TTY:             service.Tty,
				OpenStdin:       service.StdinOpen,
				Secrets:         secrets,
				Configs:         configs,
				ReadOnly:        service.ReadOnly,
				Privileges:      &privileges,
				Isolation:       container.Isolation(service.Isolation),
				Init:            service.Init,
				Sysctls:         service.Sysctls,
				CapabilityAdd:   capAdd,
				CapabilityDrop:  capDrop,
				Ulimits:         convertUlimits(service.Ulimits),
			},
			LogDriver:     logDriver,
			Resources:     resources,
			RestartPolicy: restartPolicy,
			Placement: &swarm.Placement{
				Constraints: service.Deploy.Placement.Constraints,
				Preferences: getPlacementPreference(service.Deploy.Placement.Preferences),
				MaxReplicas: service.Deploy.Placement.MaxReplicas,
			},
		},
		EndpointSpec:   endpoint,
		Mode:           mode,
		UpdateConfig:   convertUpdateConfig(service.Deploy.UpdateConfig),
		RollbackConfig: convertUpdateConfig(service.Deploy.RollbackConfig),
	}

	// add an image label to serviceSpec
	serviceSpec.Labels[LabelImage] = service.Image

	// ServiceSpec.Networks is deprecated and should not have been used by
	// this package. It is possible to update TaskTemplate.Networks, but it
	// is not possible to update ServiceSpec.Networks. Unfortunately, we
	// can't unconditionally start using TaskTemplate.Networks, because that
	// will break with older daemons that don't support migrating from
	// ServiceSpec.Networks to TaskTemplate.Networks. So which field to use
	// is conditional on daemon version.
	if versions.LessThan(apiVersion, "1.29") {
		serviceSpec.Networks = networks
	} else {
		serviceSpec.TaskTemplate.Networks = networks
	}
	return serviceSpec, nil
}
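
// getPlacementPreference converts compose placement preferences into the
// equivalent swarm spread-over placement preferences.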
func getPlacementPreference(preferences []composetypes.PlacementPreferences) []swarm.PlacementPreference {
	result := []swarm.PlacementPreference{}
	for _, preference := range preferences {
		spreadDescriptor := preference.Spread
		result = append(result, swarm.PlacementPreference{
			Spread: &swarm.SpreadOver{
				SpreadDescriptor: spreadDescriptor,
			},
		})
	}
	return result
}
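
// sortStrings sorts the given slice in place and returns the same slice for
// convenience.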
func sortStrings(strs []string) []string {
	sort.Strings(strs)
	return strs
}
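
// convertServiceNetworks converts the networks a service is attached to into
// a sorted list of swarm network attachments. A service that declares no
// networks is attached to the stack's "default" network.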
func convertServiceNetworks(
	networks map[string]*composetypes.ServiceNetworkConfig,
	networkConfigs networkMap,
	namespace Namespace,
	name string,
) ([]swarm.NetworkAttachmentConfig, error) {
	if len(networks) == 0 {
		networks = map[string]*composetypes.ServiceNetworkConfig{
			defaultNetwork: {},
		}
	}

	nets := []swarm.NetworkAttachmentConfig{}
	for networkName, network := range networks {
		networkConfig, ok := networkConfigs[networkName]
		if !ok && networkName != defaultNetwork {
			return nil, errors.Errorf("undefined network %q", networkName)
		}
		var aliases []string
		if network != nil {
			aliases = network.Aliases
		}
		target := namespace.Scope(networkName)
		if networkConfig.Name != "" {
			target = networkConfig.Name
		}
		netAttachConfig := swarm.NetworkAttachmentConfig{
			Target:  target,
			Aliases: aliases,
		}
		// Only add default aliases to user defined networks. Other networks do
		// not support aliases.
		if container.NetworkMode(target).IsUserDefined() {
			netAttachConfig.Aliases = append(netAttachConfig.Aliases, name)
		}
		nets = append(nets, netAttachConfig)
	}

	sort.Slice(nets, func(i, j int) bool {
		return nets[i].Target < nets[j].Target
	})
	return nets, nil
}
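
// convertServiceSecrets converts a service's secret references into swarm
// SecretReferences, resolving each reference against the top-level secrets
// defined in the compose file. The result is sorted by secret name so that
// redeploying an unchanged stack does not restart the service.
//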
// TODO: fix secrets API so that SecretAPIClient is not required here
func convertServiceSecrets(
	client client.SecretAPIClient,
	namespace Namespace,
	secrets []composetypes.ServiceSecretConfig,
	secretSpecs map[string]composetypes.SecretConfig,
) ([]*swarm.SecretReference, error) {
	refs := []*swarm.SecretReference{}

	lookup := func(key string) (composetypes.FileObjectConfig, error) {
		secretSpec, exists := secretSpecs[key]
		if !exists {
			return composetypes.FileObjectConfig{}, errors.Errorf("undefined secret %q", key)
		}
		return composetypes.FileObjectConfig(secretSpec), nil
	}
	for _, secret := range secrets {
		obj, err := convertFileObject(namespace, composetypes.FileReferenceConfig(secret), lookup)
		if err != nil {
			return nil, err
		}

		file := swarm.SecretReferenceFileTarget(obj.File)
		refs = append(refs, &swarm.SecretReference{
			File:       &file,
			SecretName: obj.Name,
		})
	}

	secrs, err := servicecli.ParseSecrets(client, refs)
	if err != nil {
		return nil, err
	}
	// sort to ensure idempotence (don't restart services just because the entries are in different order)
	sort.SliceStable(secrs, func(i, j int) bool { return secrs[i].SecretName < secrs[j].SecretName })
	return secrs, err
}

// convertServiceConfigObjs takes an API client, a namespace, a ServiceConfig,
// and a set of compose Config specs, and creates the swarm ConfigReferences
// required by the service. Unlike convertServiceSecrets, this takes the whole
// ServiceConfig, because some Configs may be needed as a result of other
// fields (like CredentialSpecs).
//
// TODO: fix configs API so that ConfigsAPIClient is not required here
func convertServiceConfigObjs(
	client client.ConfigAPIClient,
	namespace Namespace,
	service composetypes.ServiceConfig,
	configSpecs map[string]composetypes.ConfigObjConfig,
) ([]*swarm.ConfigReference, error) {
	refs := []*swarm.ConfigReference{}

	lookup := func(key string) (composetypes.FileObjectConfig, error) {
		configSpec, exists := configSpecs[key]
		if !exists {
			return composetypes.FileObjectConfig{}, errors.Errorf("undefined config %q", key)
		}
		return composetypes.FileObjectConfig(configSpec), nil
	}
	for _, config := range service.Configs {
		obj, err := convertFileObject(namespace, composetypes.FileReferenceConfig(config), lookup)
		if err != nil {
			return nil, err
		}

		file := swarm.ConfigReferenceFileTarget(obj.File)
		refs = append(refs, &swarm.ConfigReference{
			File:       &file,
			ConfigName: obj.Name,
		})
	}

	// Finally, after converting all of the file objects, create any
	// Runtime-type configs that are needed. These are configs that are not
	// mounted into the container, but are used in some other way by the
	// container runtime. Currently, this only means CredentialSpecs, but in
	// the future it may be used for other fields.

	// grab the CredentialSpec out of the Service
	credSpec := service.CredentialSpec
	// if the credSpec uses a config, then we should grab the config name, and
	// create a config reference for it. A File or Registry-type CredentialSpec
	// does not need this operation.
	if credSpec.Config != "" {
		// look up the config in the configSpecs.
		obj, err := lookup(credSpec.Config)
		if err != nil {
			return nil, err
		}

		// get the actual correct name.
		name := namespace.Scope(credSpec.Config)
		if obj.Name != "" {
			name = obj.Name
		}

		// now append a Runtime-type config.
		refs = append(refs, &swarm.ConfigReference{
			ConfigName: name,
			Runtime:    &swarm.ConfigReferenceRuntimeTarget{},
		})
	}

	confs, err := servicecli.ParseConfigs(client, refs)
	if err != nil {
		return nil, err
	}
	// sort to ensure idempotence (don't restart services just because the entries are in different order)
	sort.SliceStable(confs, func(i, j int) bool { return confs[i].ConfigName < confs[j].ConfigName })
	return confs, err
}
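
// swarmReferenceTarget describes how a secret or config is presented inside
// the container: the file name it is mounted as, plus its UID, GID, and mode.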
type swarmReferenceTarget struct {
	Name string
	UID  string
	GID  string
	Mode os.FileMode
}
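
// swarmReferenceObject is the intermediate representation of a secret or
// config reference before it is converted to the corresponding swarm type.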
type swarmReferenceObject struct {
	File swarmReferenceTarget
	ID   string
	Name string
}
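
// convertFileObject resolves a single secret or config reference against its
// top-level definition and returns the (possibly namespaced) object name
// together with the file target it should be mounted as. UID and GID default
// to "0" and the mode defaults to 0o444 when not set.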
func convertFileObject(
	namespace Namespace,
	config composetypes.FileReferenceConfig,
	lookup func(key string) (composetypes.FileObjectConfig, error),
) (swarmReferenceObject, error) {
	obj, err := lookup(config.Source)
	if err != nil {
		return swarmReferenceObject{}, err
	}

	source := namespace.Scope(config.Source)
	if obj.Name != "" {
		source = obj.Name
	}

	target := config.Target
	if target == "" {
		target = config.Source
	}

	uid := config.UID
	gid := config.GID
	if uid == "" {
		uid = "0"
	}
	if gid == "" {
		gid = "0"
	}
	mode := config.Mode
	if mode == nil {
		mode = uint32Ptr(0o444)
	}

	return swarmReferenceObject{
		File: swarmReferenceTarget{
			Name: target,
			UID:  uid,
			GID:  gid,
			Mode: os.FileMode(*mode),
		},
		Name: source,
	}, nil
}
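
// uint32Ptr returns a pointer to the given uint32 value.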
func uint32Ptr(value uint32) *uint32 {
	return &value
}

// convertExtraHosts converts <host>:<ip> mappings to SwarmKit notation:
// "IP-address hostname(s)". The original order of mappings is preserved.
func convertExtraHosts(extraHosts composetypes.HostsList) []string {
	hosts := make([]string, 0, len(extraHosts))
	for _, hostIP := range extraHosts {
		if hostName, ipAddr, ok := strings.Cut(hostIP, ":"); ok {
			// Convert to SwarmKit notation: IP-address hostname(s)
			hosts = append(hosts, ipAddr+" "+hostName)
		}
	}
	return hosts
}
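
// convertHealthcheck converts a compose healthcheck into the engine's
// container HealthConfig. A disabled healthcheck translates into the special
// "NONE" test and cannot be combined with a test command.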
func convertHealthcheck(healthcheck *composetypes.HealthCheckConfig) (*container.HealthConfig, error) {
	if healthcheck == nil {
		return nil, nil
	}
	var (
		timeout, interval, startPeriod time.Duration
		retries                        int
	)
	if healthcheck.Disable {
		if len(healthcheck.Test) != 0 {
			return nil, errors.Errorf("test and disable can't be set at the same time")
		}
		return &container.HealthConfig{
			Test: []string{"NONE"},
		}, nil
	}
	if healthcheck.Timeout != nil {
		timeout = time.Duration(*healthcheck.Timeout)
	}
	if healthcheck.Interval != nil {
		interval = time.Duration(*healthcheck.Interval)
	}
	if healthcheck.StartPeriod != nil {
		startPeriod = time.Duration(*healthcheck.StartPeriod)
	}
	if healthcheck.Retries != nil {
		retries = int(*healthcheck.Retries)
	}
	return &container.HealthConfig{
		Test:        healthcheck.Test,
		Timeout:     timeout,
		Interval:    interval,
		Retries:     retries,
		StartPeriod: startPeriod,
	}, nil
}
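
// convertRestartPolicy converts a restart policy, taking it from the
// service's deploy section when present and falling back to the service's
// top-level "restart" field otherwise.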
func convertRestartPolicy(restart string, source *composetypes.RestartPolicy) (*swarm.RestartPolicy, error) {
	// TODO: log if restart is being ignored
	if source == nil {
		policy, err := opts.ParseRestartPolicy(restart)
		if err != nil {
			return nil, err
		}
		switch {
		case policy.IsNone():
			return nil, nil
		case policy.IsAlways(), policy.IsUnlessStopped():
			return &swarm.RestartPolicy{
				Condition: swarm.RestartPolicyConditionAny,
			}, nil
		case policy.IsOnFailure():
			attempts := uint64(policy.MaximumRetryCount)
			return &swarm.RestartPolicy{
				Condition:   swarm.RestartPolicyConditionOnFailure,
				MaxAttempts: &attempts,
			}, nil
		default:
			return nil, errors.Errorf("unknown restart policy: %s", restart)
		}
	}

	return &swarm.RestartPolicy{
		Condition:   swarm.RestartPolicyCondition(source.Condition),
		Delay:       composetypes.ConvertDurationPtr(source.Delay),
		MaxAttempts: source.MaxAttempts,
		Window:      composetypes.ConvertDurationPtr(source.Window),
	}, nil
}
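
// convertUpdateConfig converts a compose update (or rollback) configuration
// into a swarm UpdateConfig. Parallelism defaults to 1 when not specified.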
func convertUpdateConfig(source *composetypes.UpdateConfig) *swarm.UpdateConfig {
	if source == nil {
		return nil
	}
	parallel := uint64(1)
	if source.Parallelism != nil {
		parallel = *source.Parallelism
	}
	return &swarm.UpdateConfig{
		Parallelism:     parallel,
		Delay:           time.Duration(source.Delay),
		FailureAction:   source.FailureAction,
		Monitor:         time.Duration(source.Monitor),
		MaxFailureRatio: source.MaxFailureRatio,
		Order:           source.Order,
	}
}
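
// convertResources converts compose resource limits and reservations (CPUs,
// memory, PIDs, and generic resources) into swarm resource requirements.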
func convertResources(source composetypes.Resources) (*swarm.ResourceRequirements, error) {
	resources := &swarm.ResourceRequirements{}
	var err error
	if source.Limits != nil {
		var cpus int64
		if source.Limits.NanoCPUs != "" {
			cpus, err = opts.ParseCPUs(source.Limits.NanoCPUs)
			if err != nil {
				return nil, err
			}
		}
		resources.Limits = &swarm.Limit{
			NanoCPUs:    cpus,
			MemoryBytes: int64(source.Limits.MemoryBytes),
			Pids:        source.Limits.Pids,
		}
	}
	if source.Reservations != nil {
		var cpus int64
		if source.Reservations.NanoCPUs != "" {
			cpus, err = opts.ParseCPUs(source.Reservations.NanoCPUs)
			if err != nil {
				return nil, err
			}
		}

		var generic []swarm.GenericResource
		for _, res := range source.Reservations.GenericResources {
			var r swarm.GenericResource

			if res.DiscreteResourceSpec != nil {
				r.DiscreteResourceSpec = &swarm.DiscreteGenericResource{
					Kind:  res.DiscreteResourceSpec.Kind,
					Value: res.DiscreteResourceSpec.Value,
				}
			}

			generic = append(generic, r)
		}

		resources.Reservations = &swarm.Resources{
			NanoCPUs:         cpus,
			MemoryBytes:      int64(source.Reservations.MemoryBytes),
			GenericResources: generic,
		}
	}
	return resources, nil
}
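
// convertEndpointSpec converts a service's published ports into a swarm
// EndpointSpec with the requested endpoint mode. Ports are sorted by
// published port to keep the generated spec stable.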
func convertEndpointSpec(endpointMode string, source []composetypes.ServicePortConfig) *swarm.EndpointSpec {
	portConfigs := []swarm.PortConfig{}
	for _, port := range source {
		portConfig := swarm.PortConfig{
			Protocol:      swarm.PortConfigProtocol(port.Protocol),
			TargetPort:    port.Target,
			PublishedPort: port.Published,
			PublishMode:   swarm.PortConfigPublishMode(port.Mode),
		}
		portConfigs = append(portConfigs, portConfig)
	}

	sort.Slice(portConfigs, func(i, j int) bool {
		return portConfigs[i].PublishedPort < portConfigs[j].PublishedPort
	})

	return &swarm.EndpointSpec{
		Mode:  swarm.ResolutionMode(strings.ToLower(endpointMode)),
		Ports: portConfigs,
	}
}
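
// convertEnvironment converts a service's environment into a list of
// "NAME=VALUE" strings; entries with a nil value are emitted as just the
// variable name.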
func convertEnvironment(source map[string]*string) []string {
	var output []string

	for name, value := range source {
		switch value {
		case nil:
			output = append(output, name)
		default:
			output = append(output, fmt.Sprintf("%s=%s", name, *value))
		}
	}

	return output
}
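
// convertDeployMode converts a compose deploy mode ("replicated", "global",
// "replicated-job", or "global-job") into a swarm ServiceMode. An empty mode
// defaults to replicated, and replicas may only be set for the replicated
// and replicated-job modes.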
func convertDeployMode(mode string, replicas *uint64) (swarm.ServiceMode, error) {
	serviceMode := swarm.ServiceMode{}

	switch mode {
	case "global-job":
		if replicas != nil {
			return serviceMode, errors.Errorf("replicas can only be used with replicated or replicated-job mode")
		}
		serviceMode.GlobalJob = &swarm.GlobalJob{}
	case "global":
		if replicas != nil {
			return serviceMode, errors.Errorf("replicas can only be used with replicated or replicated-job mode")
		}
		serviceMode.Global = &swarm.GlobalService{}
	case "replicated-job":
		serviceMode.ReplicatedJob = &swarm.ReplicatedJob{
			MaxConcurrent:    replicas,
			TotalCompletions: replicas,
		}
	case "replicated", "":
		serviceMode.Replicated = &swarm.ReplicatedService{Replicas: replicas}
	default:
		return serviceMode, errors.Errorf("Unknown mode: %s", mode)
	}
	return serviceMode, nil
}
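
// convertDNSConfig converts custom DNS servers and search domains into a
// swarm DNSConfig, or returns nil when neither is set.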
func convertDNSConfig(DNS []string, DNSSearch []string) *swarm.DNSConfig {
	if DNS != nil || DNSSearch != nil {
		return &swarm.DNSConfig{
			Nameservers: DNS,
			Search:      DNSSearch,
		}
	}
	return nil
}
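
// convertCredentialSpec converts a file, registry, or config based credential
// spec into a swarm CredentialSpec. At most one source may be set; a config
// based spec is resolved to the ID of the referenced config.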
func convertCredentialSpec(namespace Namespace, spec composetypes.CredentialSpecConfig, refs []*swarm.ConfigReference) (*swarm.CredentialSpec, error) {
	var o []string

	// Config was added in API v1.40
	if spec.Config != "" {
		o = append(o, `"Config"`)
	}
	if spec.File != "" {
		o = append(o, `"File"`)
	}
	if spec.Registry != "" {
		o = append(o, `"Registry"`)
	}
	l := len(o)
	switch {
	case l == 0:
		return nil, nil
	case l == 2:
		return nil, errors.Errorf("invalid credential spec: cannot specify both %s and %s", o[0], o[1])
	case l > 2:
		return nil, errors.Errorf("invalid credential spec: cannot specify both %s, and %s", strings.Join(o[:l-1], ", "), o[l-1])
	}
	swarmCredSpec := swarm.CredentialSpec(spec)
	// if we're using a swarm Config for the credential spec, overwrite it
	// here with the config ID
	if swarmCredSpec.Config != "" {
		for _, config := range refs {
			if swarmCredSpec.Config == config.ConfigName {
				swarmCredSpec.Config = config.ConfigID
				return &swarmCredSpec, nil
			}
		}
		// if none of the configs match, try namespacing
		for _, config := range refs {
			if namespace.Scope(swarmCredSpec.Config) == config.ConfigName {
				swarmCredSpec.Config = config.ConfigID
				return &swarmCredSpec, nil
			}
		}
		return nil, errors.Errorf("invalid credential spec: spec specifies config %v, but no such config can be found", swarmCredSpec.Config)
	}
	return &swarmCredSpec, nil
}
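
// convertUlimits converts compose ulimits into a list of units.Ulimit sorted
// by name. A "single" value sets both the soft and the hard limit.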
func convertUlimits(origUlimits map[string]*composetypes.UlimitsConfig) []*units.Ulimit {
	newUlimits := make(map[string]*units.Ulimit)
	for name, u := range origUlimits {
		if u.Single != 0 {
			newUlimits[name] = &units.Ulimit{
				Name: name,
				Soft: int64(u.Single),
				Hard: int64(u.Single),
			}
		} else {
			newUlimits[name] = &units.Ulimit{
				Name: name,
				Soft: int64(u.Soft),
				Hard: int64(u.Hard),
			}
		}
	}
	var ulimits []*units.Ulimit
	for _, ulimit := range newUlimits {
		ulimits = append(ulimits, ulimit)
	}
	sort.SliceStable(ulimits, func(i, j int) bool {
		return ulimits[i].Name < ulimits[j].Name
	})
	return ulimits
}