mirror of https://github.com/docker/cli.git
Add support for maximum replicas per node without stack
Signed-off-by: Olli Janatuinen <olli.janatuinen@gmail.com>
parent db166da03a
commit f7f4d3bbb8

@@ -49,6 +49,9 @@ Placement:
 {{- if .TaskPlacementPreferences }}
  Preferences:		{{ .TaskPlacementPreferences }}
 {{- end }}
+{{- if .MaxReplicas }}
+ Max Replicas Per Node:	{{ .MaxReplicas }}
+{{- end }}
 {{- if .HasUpdateConfig }}
 UpdateConfig:
  Parallelism:	{{ .UpdateParallelism }}

@@ -284,6 +287,13 @@ func (ctx *serviceInspectContext) TaskPlacementPreferences() []string {
 	return strings
 }
 
+func (ctx *serviceInspectContext) MaxReplicas() uint64 {
+	if ctx.Service.Spec.TaskTemplate.Placement != nil {
+		return ctx.Service.Spec.TaskTemplate.Placement.MaxReplicas
+	}
+	return 0
+}
+
 func (ctx *serviceInspectContext) HasUpdateConfig() bool {
 	return ctx.Service.Spec.UpdateConfig != nil
 }

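For illustration only (not part of the commit): with the template and context method above, `docker service inspect --pretty` would render the new field under the Placement section. The service name and value below are hypothetical, assuming a service created with `--replicas-max-per-node 1`; unrelated output is elided.

```bash
$ docker service inspect --pretty nginx
...
Placement:
 Max Replicas Per Node:	1
...
```
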
@@ -120,9 +120,16 @@ func GetServicesStatus(services []swarm.Service, nodes []swarm.Node, tasks []swa
 	for _, service := range services {
 		info[service.ID] = ListInfo{}
 		if service.Spec.Mode.Replicated != nil && service.Spec.Mode.Replicated.Replicas != nil {
-			info[service.ID] = ListInfo{
-				Mode:     "replicated",
-				Replicas: fmt.Sprintf("%d/%d", running[service.ID], *service.Spec.Mode.Replicated.Replicas),
+			if service.Spec.TaskTemplate.Placement != nil && service.Spec.TaskTemplate.Placement.MaxReplicas > 0 {
+				info[service.ID] = ListInfo{
+					Mode:     "replicated",
+					Replicas: fmt.Sprintf("%d/%d (max %d per node)", running[service.ID], *service.Spec.Mode.Replicated.Replicas, service.Spec.TaskTemplate.Placement.MaxReplicas),
+				}
+			} else {
+				info[service.ID] = ListInfo{
+					Mode:     "replicated",
+					Replicas: fmt.Sprintf("%d/%d", running[service.ID], *service.Spec.Mode.Replicated.Replicas),
+				}
 			}
 		} else if service.Spec.Mode.Global != nil {
 			info[service.ID] = ListInfo{

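As a hedged illustration of the list formatting above (the service ID and image are placeholders, column spacing approximate): when a replicated service has a per-node cap, the REPLICAS column includes the `(max N per node)` suffix built by the `fmt.Sprintf` call in the diff.

```bash
$ docker service ls
ID             NAME    MODE         REPLICAS               IMAGE
<service-id>   nginx   replicated   2/2 (max 1 per node)   nginx:latest
```
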
@@ -500,6 +500,7 @@ type serviceOptions struct {
 	restartPolicy  restartPolicyOptions
 	constraints    opts.ListOpts
 	placementPrefs placementPrefOpts
+	maxReplicas    uint64
 	update         updateOptions
 	rollback       updateOptions
 	networks       opts.NetworkOpt

@@ -541,6 +542,10 @@ func (options *serviceOptions) ToServiceMode() (swarm.ServiceMode, error) {
 			return serviceMode, errors.Errorf("replicas can only be used with replicated mode")
 		}
 
+		if options.maxReplicas > 0 {
+			return serviceMode, errors.New("replicas-max-per-node can only be used with replicated mode")
+		}
+
 		serviceMode.Global = &swarm.GlobalService{}
 	case "replicated":
 		serviceMode.Replicated = &swarm.ReplicatedService{

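A sketch of how the validation above surfaces on the command line (the exact error formatting printed by the client may differ; the error string itself comes from the diff):

```bash
$ docker service create --mode global --replicas-max-per-node 1 nginx
replicas-max-per-node can only be used with replicated mode
```
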
@@ -645,6 +650,7 @@ func (options *serviceOptions) ToService(ctx context.Context, apiClient client.N
 			Placement: &swarm.Placement{
 				Constraints: options.constraints.GetAll(),
 				Preferences: options.placementPrefs.prefs,
+				MaxReplicas: options.maxReplicas,
 			},
 			LogDriver: options.logDriver.toLogDriver(),
 		},

@@ -747,6 +753,8 @@ func addServiceFlags(flags *pflag.FlagSet, opts *serviceOptions, defaultFlagValu
 
 	flags.Var(&opts.stopGrace, flagStopGracePeriod, flagDesc(flagStopGracePeriod, "Time to wait before force killing a container (ns|us|ms|s|m|h)"))
 	flags.Var(&opts.replicas, flagReplicas, "Number of tasks")
+	flags.Uint64Var(&opts.maxReplicas, flagMaxReplicas, defaultFlagValues.getUint64(flagMaxReplicas), "Maximum number of tasks per node (default 0 = unlimited)")
+	flags.SetAnnotation(flagMaxReplicas, "version", []string{"1.40"})
 
 	flags.StringVar(&opts.restartPolicy.condition, flagRestartCondition, "", flagDesc(flagRestartCondition, `Restart when condition is met ("none"|"on-failure"|"any")`))
 	flags.Var(&opts.restartPolicy.delay, flagRestartDelay, flagDesc(flagRestartDelay, "Delay between restart attempts (ns|us|ms|s|m|h)"))

@@ -853,6 +861,7 @@ const (
 	flagLabelAdd        = "label-add"
 	flagLimitCPU        = "limit-cpu"
 	flagLimitMemory     = "limit-memory"
+	flagMaxReplicas     = "replicas-max-per-node"
 	flagMode            = "mode"
 	flagMount           = "mount"
 	flagMountRemove     = "mount-rm"

@@ -224,3 +224,12 @@ func TestToServiceUpdateRollback(t *testing.T) {
 	assert.Check(t, is.DeepEqual(service.UpdateConfig, expected.UpdateConfig))
 	assert.Check(t, is.DeepEqual(service.RollbackConfig, expected.RollbackConfig))
 }
+
+func TestToServiceMaxReplicasGlobalModeConflict(t *testing.T) {
+	opt := serviceOptions{
+		mode:        "global",
+		maxReplicas: 1,
+	}
+	_, err := opt.ToServiceMode()
+	assert.Error(t, err, "replicas-max-per-node can only be used with replicated mode")
+}

@@ -387,6 +387,10 @@ func updateService(ctx context.Context, apiClient client.NetworkAPIClient, flags
 		return err
 	}
 
+	if anyChanged(flags, flagMaxReplicas) {
+		updateUint64(flagMaxReplicas, &task.Placement.MaxReplicas)
+	}
+
 	if anyChanged(flags, flagUpdateParallelism, flagUpdateDelay, flagUpdateMonitor, flagUpdateFailureAction, flagUpdateMaxFailureRatio, flagUpdateOrder) {
 		if spec.UpdateConfig == nil {
 			spec.UpdateConfig = updateConfigFromDefaults(defaults.Service.Update)

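Illustrative usage of the update path above (the service name is hypothetical). Per the flag help text in this commit, 0 means unlimited, so setting the value back to 0 lifts the per-node cap:

```bash
# Raise the per-node limit on an existing service.
$ docker service update --replicas-max-per-node 2 nginx

# Set it back to 0 to remove the limit (default 0 = unlimited).
$ docker service update --replicas-max-per-node 0 nginx
```
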
@@ -808,3 +808,23 @@ func TestUpdateNetworks(t *testing.T) {
 	assert.NilError(t, err)
 	assert.Check(t, is.DeepEqual([]swarm.NetworkAttachmentConfig{{Target: "id999"}}, svc.TaskTemplate.Networks))
 }
+
+func TestUpdateMaxReplicas(t *testing.T) {
+	ctx := context.Background()
+
+	svc := swarm.ServiceSpec{
+		TaskTemplate: swarm.TaskSpec{
+			ContainerSpec: &swarm.ContainerSpec{},
+			Placement: &swarm.Placement{
+				MaxReplicas: 1,
+			},
+		},
+	}
+
+	flags := newUpdateCommand(nil).Flags()
+	flags.Set(flagMaxReplicas, "2")
+	err := updateService(ctx, nil, flags, &svc)
+	assert.NilError(t, err)
+
+	assert.DeepEqual(t, svc.TaskTemplate.Placement, &swarm.Placement{MaxReplicas: uint64(2)})
+}

@@ -61,6 +61,7 @@ Options:
   -q, --quiet                          Suppress progress output
       --read-only                      Mount the container's root filesystem as read only
       --replicas uint                  Number of tasks
+      --replicas-max-per-node uint     Maximum number of tasks per node (default 0 = unlimited)
       --reserve-cpu decimal            Reserve CPUs
       --reserve-memory bytes           Reserve Memory
       --restart-condition string       Restart when condition is met ("none"|"on-failure"|"any") (default "any")

@@ -757,6 +758,26 @@ appends a new placement preference after all existing placement preferences.
 `--placement-pref-rm` removes an existing placement preference that matches the
 argument.
 
+### Specify maximum replicas per node (--replicas-max-per-node)
+
+Use the `--replicas-max-per-node` flag to set the maximum number of replica tasks that can run on a node.
+The following command creates an nginx service with 2 replica tasks but only one replica task per node.
+
+One example where this can be useful is to balance tasks over a set of data centers together with `--placement-pref`,
+letting the `--replicas-max-per-node` setting ensure that replicas are not migrated to another datacenter during
+maintenance or datacenter failure.
+
+The example below illustrates this:
+
+```bash
+$ docker service create \
+  --name nginx \
+  --replicas 2 \
+  --replicas-max-per-node 1 \
+  --placement-pref 'spread=node.labels.datacenter' \
+  nginx
+```
+
 ### Attach a service to an existing network (--network)
 
 You can use overlay networks to connect one or more services within the swarm.

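To complement the documentation example above, one could verify the per-node spread with `docker service ps`. This is an illustrative sketch only: task IDs and node names are placeholders, and trailing columns are omitted.

```bash
$ docker service ps nginx
ID            NAME      IMAGE          NODE     DESIRED STATE   CURRENT STATE
<task-id-1>   nginx.1   nginx:latest   node-1   Running         Running 20 seconds ago
<task-id-2>   nginx.2   nginx:latest   node-2   Running         Running 20 seconds ago
```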