Add support for maximum replicas per node without stack

Signed-off-by: Olli Janatuinen <olli.janatuinen@gmail.com>
Olli Janatuinen 2019-01-09 19:10:35 +02:00
parent db166da03a
commit f7f4d3bbb8
7 changed files with 83 additions and 3 deletions


@@ -49,6 +49,9 @@ Placement:
 {{- if .TaskPlacementPreferences }}
  Preferences:		{{ .TaskPlacementPreferences }}
 {{- end }}
+{{- if .MaxReplicas }}
+ Max Replicas Per Node:	{{ .MaxReplicas }}
+{{- end }}
 {{- if .HasUpdateConfig }}
 UpdateConfig:
  Parallelism:	{{ .UpdateParallelism }}
@@ -284,6 +287,13 @@ func (ctx *serviceInspectContext) TaskPlacementPreferences() []string {
 	return strings
 }
 
+func (ctx *serviceInspectContext) MaxReplicas() uint64 {
+	if ctx.Service.Spec.TaskTemplate.Placement != nil {
+		return ctx.Service.Spec.TaskTemplate.Placement.MaxReplicas
+	}
+	return 0
+}
+
 func (ctx *serviceInspectContext) HasUpdateConfig() bool {
 	return ctx.Service.Spec.UpdateConfig != nil
 }
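As a quick illustration of how the new template field surfaces to users, here is a sketch of `docker service inspect --pretty` output. The service name `web`, the limit value, and the surrounding output are assumed; only the `Max Replicas Per Node` line comes from this change.

```bash
# Hypothetical service and output; only the "Max Replicas Per Node" line is new here.
$ docker service inspect --pretty web
...
Placement:
 Max Replicas Per Node:   1
...
```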


@@ -120,9 +120,16 @@ func GetServicesStatus(services []swarm.Service, nodes []swarm.Node, tasks []swa
 	for _, service := range services {
 		info[service.ID] = ListInfo{}
 		if service.Spec.Mode.Replicated != nil && service.Spec.Mode.Replicated.Replicas != nil {
-			info[service.ID] = ListInfo{
-				Mode:     "replicated",
-				Replicas: fmt.Sprintf("%d/%d", running[service.ID], *service.Spec.Mode.Replicated.Replicas),
+			if service.Spec.TaskTemplate.Placement != nil && service.Spec.TaskTemplate.Placement.MaxReplicas > 0 {
+				info[service.ID] = ListInfo{
+					Mode:     "replicated",
+					Replicas: fmt.Sprintf("%d/%d (max %d per node)", running[service.ID], *service.Spec.Mode.Replicated.Replicas, service.Spec.TaskTemplate.Placement.MaxReplicas),
+				}
+			} else {
+				info[service.ID] = ListInfo{
+					Mode:     "replicated",
+					Replicas: fmt.Sprintf("%d/%d", running[service.ID], *service.Spec.Mode.Replicated.Replicas),
+				}
 			}
 		} else if service.Spec.Mode.Global != nil {
 			info[service.ID] = ListInfo{
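For context, this changes what `docker service ls` reports in the REPLICAS column when a per-node limit is set. A rough sketch of the listing, with placeholder service details:

```bash
# Hypothetical listing; ID, name, and image are placeholders.
# The "(max 1 per node)" suffix comes from the new Sprintf branch above.
$ docker service ls
ID             NAME    MODE         REPLICAS               IMAGE          PORTS
<service-id>   nginx   replicated   2/2 (max 1 per node)   nginx:latest
```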


@@ -500,6 +500,7 @@ type serviceOptions struct {
 	restartPolicy  restartPolicyOptions
 	constraints    opts.ListOpts
 	placementPrefs placementPrefOpts
+	maxReplicas    uint64
 	update         updateOptions
 	rollback       updateOptions
 	networks       opts.NetworkOpt
@@ -541,6 +542,10 @@ func (options *serviceOptions) ToServiceMode() (swarm.ServiceMode, error) {
 			return serviceMode, errors.Errorf("replicas can only be used with replicated mode")
 		}
 
+		if options.maxReplicas > 0 {
+			return serviceMode, errors.New("replicas-max-per-node can only be used with replicated mode")
+		}
+
 		serviceMode.Global = &swarm.GlobalService{}
 	case "replicated":
 		serviceMode.Replicated = &swarm.ReplicatedService{
@@ -645,6 +650,7 @@ func (options *serviceOptions) ToService(ctx context.Context, apiClient client.N
 			Placement: &swarm.Placement{
 				Constraints: options.constraints.GetAll(),
 				Preferences: options.placementPrefs.prefs,
+				MaxReplicas: options.maxReplicas,
 			},
 			LogDriver: options.logDriver.toLogDriver(),
 		},
@@ -747,6 +753,8 @@ func addServiceFlags(flags *pflag.FlagSet, opts *serviceOptions, defaultFlagValu
 	flags.Var(&opts.stopGrace, flagStopGracePeriod, flagDesc(flagStopGracePeriod, "Time to wait before force killing a container (ns|us|ms|s|m|h)"))
 	flags.Var(&opts.replicas, flagReplicas, "Number of tasks")
+	flags.Uint64Var(&opts.maxReplicas, flagMaxReplicas, defaultFlagValues.getUint64(flagMaxReplicas), "Maximum number of tasks per node (default 0 = unlimited)")
+	flags.SetAnnotation(flagMaxReplicas, "version", []string{"1.40"})
 	flags.StringVar(&opts.restartPolicy.condition, flagRestartCondition, "", flagDesc(flagRestartCondition, `Restart when condition is met ("none"|"on-failure"|"any")`))
 	flags.Var(&opts.restartPolicy.delay, flagRestartDelay, flagDesc(flagRestartDelay, "Delay between restart attempts (ns|us|ms|s|m|h)"))
@@ -853,6 +861,7 @@ const (
 	flagLabelAdd     = "label-add"
 	flagLimitCPU     = "limit-cpu"
 	flagLimitMemory  = "limit-memory"
+	flagMaxReplicas  = "replicas-max-per-node"
 	flagMode         = "mode"
 	flagMount        = "mount"
 	flagMountRemove  = "mount-rm"


@@ -224,3 +224,12 @@ func TestToServiceUpdateRollback(t *testing.T) {
 	assert.Check(t, is.DeepEqual(service.UpdateConfig, expected.UpdateConfig))
 	assert.Check(t, is.DeepEqual(service.RollbackConfig, expected.RollbackConfig))
 }
+
+func TestToServiceMaxReplicasGlobalModeConflict(t *testing.T) {
+	opt := serviceOptions{
+		mode:        "global",
+		maxReplicas: 1,
+	}
+	_, err := opt.ToServiceMode()
+	assert.Error(t, err, "replicas-max-per-node can only be used with replicated mode")
+}
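The test above exercises the new validation in `ToServiceMode`. From the command line, the same conflict would surface roughly as follows; the invocation is hypothetical and the CLI may prefix or wrap the error when printing it:

```bash
# Combining global mode with a per-node replica limit is rejected.
$ docker service create --mode global --replicas-max-per-node 1 nginx
replicas-max-per-node can only be used with replicated mode
```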


@@ -387,6 +387,10 @@ func updateService(ctx context.Context, apiClient client.NetworkAPIClient, flags
 		return err
 	}
 
+	if anyChanged(flags, flagMaxReplicas) {
+		updateUint64(flagMaxReplicas, &task.Placement.MaxReplicas)
+	}
+
 	if anyChanged(flags, flagUpdateParallelism, flagUpdateDelay, flagUpdateMonitor, flagUpdateFailureAction, flagUpdateMaxFailureRatio, flagUpdateOrder) {
 		if spec.UpdateConfig == nil {
 			spec.UpdateConfig = updateConfigFromDefaults(defaults.Service.Update)
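Because `updateService` now honours the flag, the limit can also be changed on an existing service. A minimal sketch, assuming a replicated service named `nginx` already exists and the daemon supports API version 1.40:

```bash
# Raise the per-node limit to 2 tasks. Setting it back to 0 presumably removes
# the limit, since 0 is documented as "unlimited" in the flag description.
$ docker service update --replicas-max-per-node 2 nginx
```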


@@ -808,3 +808,23 @@ func TestUpdateNetworks(t *testing.T) {
 	assert.NilError(t, err)
 	assert.Check(t, is.DeepEqual([]swarm.NetworkAttachmentConfig{{Target: "id999"}}, svc.TaskTemplate.Networks))
 }
+
+func TestUpdateMaxReplicas(t *testing.T) {
+	ctx := context.Background()
+
+	svc := swarm.ServiceSpec{
+		TaskTemplate: swarm.TaskSpec{
+			ContainerSpec: &swarm.ContainerSpec{},
+			Placement: &swarm.Placement{
+				MaxReplicas: 1,
+			},
+		},
+	}
+	flags := newUpdateCommand(nil).Flags()
+	flags.Set(flagMaxReplicas, "2")
+
+	err := updateService(ctx, nil, flags, &svc)
+	assert.NilError(t, err)
+
+	assert.DeepEqual(t, svc.TaskTemplate.Placement, &swarm.Placement{MaxReplicas: uint64(2)})
+}


@@ -61,6 +61,7 @@ Options:
   -q, --quiet                              Suppress progress output
       --read-only                          Mount the container's root filesystem as read only
       --replicas uint                      Number of tasks
+      --replicas-max-per-node uint         Maximum number of tasks per node (default 0 = unlimited)
      --reserve-cpu decimal                 Reserve CPUs
      --reserve-memory bytes                Reserve Memory
      --restart-condition string            Restart when condition is met ("none"|"on-failure"|"any") (default "any")
@@ -757,6 +758,26 @@ appends a new placement preference after all existing placement preferences.
 `--placement-pref-rm` removes an existing placement preference that matches the
 argument.
 
+### Specify maximum replicas per node (--replicas-max-per-node)
+
+Use the `--replicas-max-per-node` flag to set the maximum number of replica tasks that can run on a node.
+The following command creates an nginx service with 2 replica tasks, but only one replica task per node.
+
+One example where this can be useful is to balance tasks over a set of datacenters together with `--placement-pref`,
+letting the `--replicas-max-per-node` setting ensure that replicas are not migrated to another datacenter during
+maintenance or a datacenter failure.
+
+The example below illustrates this:
+
+```bash
+$ docker service create \
+  --name nginx \
+  --replicas 2 \
+  --replicas-max-per-node 1 \
+  --placement-pref 'spread=node.labels.datacenter' \
+  nginx
+```
+
 ### Attach a service to an existing network (--network)
 
 You can use overlay networks to connect one or more services within the swarm.