package service

import (
    "fmt"
    "sort"
    "strings"
    "time"

    "github.com/docker/cli/cli/command/formatter"
    "github.com/docker/cli/cli/command/inspect"
    "github.com/docker/distribution/reference"
    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/container"
    mounttypes "github.com/docker/docker/api/types/mount"
    "github.com/docker/docker/api/types/swarm"
    "github.com/docker/docker/pkg/stringid"
    units "github.com/docker/go-units"
    "github.com/pkg/errors"
)

const serviceInspectPrettyTemplate formatter.Format = `
ID:             {{.ID}}
Name:           {{.Name}}
{{- if .Labels }}
Labels:
{{- range $k, $v := .Labels }}
 {{ $k }}{{if $v }}={{ $v }}{{ end }}
{{- end }}{{ end }}
Service Mode:
{{- if .IsModeGlobal }} Global
{{- else if .IsModeReplicated }} Replicated
{{- if .ModeReplicatedReplicas }}
 Replicas:      {{ .ModeReplicatedReplicas }}
{{- end }}{{ end }}
{{- if .HasUpdateStatus }}
UpdateStatus:
 State:         {{ .UpdateStatusState }}
{{- if .HasUpdateStatusStarted }}
 Started:       {{ .UpdateStatusStarted }}
{{- end }}
{{- if .UpdateIsCompleted }}
 Completed:     {{ .UpdateStatusCompleted }}
{{- end }}
 Message:       {{ .UpdateStatusMessage }}
{{- end }}
Placement:
{{- if .TaskPlacementConstraints }}
 Constraints:   {{ .TaskPlacementConstraints }}
{{- end }}
{{- if .TaskPlacementPreferences }}
 Preferences:   {{ .TaskPlacementPreferences }}
{{- end }}
{{- if .MaxReplicas }}
 Max Replicas Per Node: {{ .MaxReplicas }}
{{- end }}
{{- if .HasUpdateConfig }}
UpdateConfig:
 Parallelism:   {{ .UpdateParallelism }}
{{- if .HasUpdateDelay}}
 Delay:         {{ .UpdateDelay }}
{{- end }}
 On failure:    {{ .UpdateOnFailure }}
{{- if .HasUpdateMonitor}}
 Monitoring Period: {{ .UpdateMonitor }}
{{- end }}
 Max failure ratio: {{ .UpdateMaxFailureRatio }}
 Update order:      {{ .UpdateOrder }}
{{- end }}
{{- if .HasRollbackConfig }}
RollbackConfig:
 Parallelism:   {{ .RollbackParallelism }}
{{- if .HasRollbackDelay}}
 Delay:         {{ .RollbackDelay }}
{{- end }}
 On failure:    {{ .RollbackOnFailure }}
{{- if .HasRollbackMonitor}}
 Monitoring Period: {{ .RollbackMonitor }}
{{- end }}
 Max failure ratio: {{ .RollbackMaxFailureRatio }}
 Rollback order:    {{ .RollbackOrder }}
{{- end }}
ContainerSpec:
 Image:         {{ .ContainerImage }}
{{- if .ContainerArgs }}
 Args:          {{ range $arg := .ContainerArgs }}{{ $arg }} {{ end }}
{{- end -}}
{{- if .ContainerEnv }}
 Env:           {{ range $env := .ContainerEnv }}{{ $env }} {{ end }}
{{- end -}}
{{- if .ContainerWorkDir }}
 Dir:           {{ .ContainerWorkDir }}
{{- end -}}
{{- if .HasContainerInit }}
 Init:          {{ .ContainerInit }}
{{- end -}}
{{- if .ContainerUser }}
 User:          {{ .ContainerUser }}
{{- end }}
{{- if .ContainerSysCtls }}
SysCtls:
{{- range $k, $v := .ContainerSysCtls }}
 {{ $k }}{{if $v }}: {{ $v }}{{ end }}
{{- end }}{{ end }}
{{- if .ContainerMounts }}
Mounts:
{{- end }}
{{- range $mount := .ContainerMounts }}
 Target:        {{ $mount.Target }}
  Source:       {{ $mount.Source }}
  ReadOnly:     {{ $mount.ReadOnly }}
  Type:         {{ $mount.Type }}
{{- end -}}
{{- if .Configs}}
Configs:
{{- range $config := .Configs }}
 Target:        {{$config.File.Name}}
  Source:       {{$config.ConfigName}}
{{- end }}{{ end }}
{{- if .Secrets }}
Secrets:
{{- range $secret := .Secrets }}
 Target:        {{$secret.File.Name}}
  Source:       {{$secret.SecretName}}
{{- end }}{{ end }}
{{- if .HasResources }}
Resources:
{{- if .HasResourceReservations }}
 Reservations:
{{- if gt .ResourceReservationNanoCPUs 0.0 }}
  CPU:          {{ .ResourceReservationNanoCPUs }}
{{- end }}
{{- if .ResourceReservationMemory }}
  Memory:       {{ .ResourceReservationMemory }}
{{- end }}{{ end }}
{{- if .HasResourceLimits }}
 Limits:
{{- if gt .ResourceLimitsNanoCPUs 0.0 }}
  CPU:          {{ .ResourceLimitsNanoCPUs }}
{{- end }}
{{- if .ResourceLimitMemory }}
  Memory:       {{ .ResourceLimitMemory }}
{{- end }}{{ end }}{{ end }}
{{- if .Networks }}
Networks:
{{- range $network := .Networks }} {{ $network }}{{ end }} {{ end }}
Endpoint Mode:  {{ .EndpointMode }}
{{- if .Ports }}
Ports:
{{- range $port := .Ports }}
 PublishedPort = {{ $port.PublishedPort }}
  Protocol = {{ $port.Protocol }}
  TargetPort = {{ $port.TargetPort }}
  PublishMode = {{ $port.PublishMode }}
{{- end }} {{ end -}}
{{- if .Healthcheck }}
 Healthcheck:
  Interval = {{ .Healthcheck.Interval }}
  Retries = {{ .Healthcheck.Retries }}
  StartPeriod = {{ .Healthcheck.StartPeriod }}
  Timeout = {{ .Healthcheck.Timeout }}
{{- if .Healthcheck.Test }}
  Tests:
{{- range $test := .Healthcheck.Test }}
   Test = {{ $test }}
{{- end }} {{ end -}}
{{- end }}
`

// NewFormat returns a Format for rendering a service inspect Context.
func NewFormat(source string) formatter.Format {
    switch source {
    case formatter.PrettyFormatKey:
        return serviceInspectPrettyTemplate
    default:
        return formatter.Format(strings.TrimPrefix(source, formatter.RawFormatKey))
    }
}
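
// Usage sketch (illustrative; fmtCtx and the output writer are assumptions,
// only NewFormat and InspectFormatWrite come from this file):
//
//	f := NewFormat("pretty") // i.e. formatter.PrettyFormatKey
//	fmtCtx := formatter.Context{Output: os.Stdout, Format: f}
//	// fmtCtx can then be handed to InspectFormatWrite below.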

func resolveNetworks(service swarm.Service, getNetwork inspect.GetRefFunc) map[string]string {
    networkNames := make(map[string]string)
    for _, network := range service.Spec.TaskTemplate.Networks {
        if resolved, _, err := getNetwork(network.Target); err == nil {
            if resolvedNetwork, ok := resolved.(types.NetworkResource); ok {
                networkNames[resolvedNetwork.ID] = resolvedNetwork.Name
            }
        }
    }
    return networkNames
}

// InspectFormatWrite renders the context for a list of services.
func InspectFormatWrite(ctx formatter.Context, refs []string, getRef, getNetwork inspect.GetRefFunc) error {
    if ctx.Format != serviceInspectPrettyTemplate {
        return inspect.Inspect(ctx.Output, refs, string(ctx.Format), getRef)
    }
    render := func(format func(subContext formatter.SubContext) error) error {
        for _, ref := range refs {
            serviceI, _, err := getRef(ref)
            if err != nil {
                return err
            }
            service, ok := serviceI.(swarm.Service)
            if !ok {
                return errors.Errorf("got wrong object to inspect")
            }
            if err := format(&serviceInspectContext{Service: service, networkNames: resolveNetworks(service, getNetwork)}); err != nil {
                return err
            }
        }
        return nil
    }
    return ctx.Write(&serviceInspectContext{}, render)
}
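
// A minimal sketch of a getRef callback (apiClient is an assumption; the
// real caller lives in the service inspect command). inspect.GetRefFunc is
// func(ref string) (interface{}, []byte, error), so a client-backed resolver
// can forward ServiceInspectWithRaw directly:
//
//	getRef := func(ref string) (interface{}, []byte, error) {
//		return apiClient.ServiceInspectWithRaw(context.Background(), ref, types.ServiceInspectOptions{})
//	}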

type serviceInspectContext struct {
    swarm.Service
    formatter.SubContext

    // networkNames is a map from network IDs (as found in
    // Networks[x].Target) to network names.
    networkNames map[string]string
}

func (ctx *serviceInspectContext) MarshalJSON() ([]byte, error) {
    return formatter.MarshalJSON(ctx)
}

func (ctx *serviceInspectContext) ID() string {
    return ctx.Service.ID
}

func (ctx *serviceInspectContext) Name() string {
    return ctx.Service.Spec.Name
}

func (ctx *serviceInspectContext) Labels() map[string]string {
    return ctx.Service.Spec.Labels
}

func (ctx *serviceInspectContext) Configs() []*swarm.ConfigReference {
    return ctx.Service.Spec.TaskTemplate.ContainerSpec.Configs
}

func (ctx *serviceInspectContext) Secrets() []*swarm.SecretReference {
    return ctx.Service.Spec.TaskTemplate.ContainerSpec.Secrets
}

func (ctx *serviceInspectContext) Healthcheck() *container.HealthConfig {
    return ctx.Service.Spec.TaskTemplate.ContainerSpec.Healthcheck
}

func (ctx *serviceInspectContext) IsModeGlobal() bool {
    return ctx.Service.Spec.Mode.Global != nil
}

func (ctx *serviceInspectContext) IsModeReplicated() bool {
    return ctx.Service.Spec.Mode.Replicated != nil
}

func (ctx *serviceInspectContext) ModeReplicatedReplicas() *uint64 {
    return ctx.Service.Spec.Mode.Replicated.Replicas
}

func (ctx *serviceInspectContext) HasUpdateStatus() bool {
    return ctx.Service.UpdateStatus != nil && ctx.Service.UpdateStatus.State != ""
}

func (ctx *serviceInspectContext) UpdateStatusState() swarm.UpdateState {
    return ctx.Service.UpdateStatus.State
}

func (ctx *serviceInspectContext) HasUpdateStatusStarted() bool {
    return ctx.Service.UpdateStatus.StartedAt != nil
}

func (ctx *serviceInspectContext) UpdateStatusStarted() string {
    return units.HumanDuration(time.Since(*ctx.Service.UpdateStatus.StartedAt)) + " ago"
}

func (ctx *serviceInspectContext) UpdateIsCompleted() bool {
    return ctx.Service.UpdateStatus.State == swarm.UpdateStateCompleted && ctx.Service.UpdateStatus.CompletedAt != nil
}

func (ctx *serviceInspectContext) UpdateStatusCompleted() string {
    return units.HumanDuration(time.Since(*ctx.Service.UpdateStatus.CompletedAt)) + " ago"
}

func (ctx *serviceInspectContext) UpdateStatusMessage() string {
    return ctx.Service.UpdateStatus.Message
}

func (ctx *serviceInspectContext) TaskPlacementConstraints() []string {
    if ctx.Service.Spec.TaskTemplate.Placement != nil {
        return ctx.Service.Spec.TaskTemplate.Placement.Constraints
    }
    return nil
}

func (ctx *serviceInspectContext) TaskPlacementPreferences() []string {
    if ctx.Service.Spec.TaskTemplate.Placement == nil {
        return nil
    }
    var strings []string
    for _, pref := range ctx.Service.Spec.TaskTemplate.Placement.Preferences {
        if pref.Spread != nil {
            strings = append(strings, "spread="+pref.Spread.SpreadDescriptor)
        }
    }
    return strings
}

func (ctx *serviceInspectContext) MaxReplicas() uint64 {
    if ctx.Service.Spec.TaskTemplate.Placement != nil {
        return ctx.Service.Spec.TaskTemplate.Placement.MaxReplicas
    }
    return 0
}

func (ctx *serviceInspectContext) HasUpdateConfig() bool {
    return ctx.Service.Spec.UpdateConfig != nil
}

func (ctx *serviceInspectContext) UpdateParallelism() uint64 {
    return ctx.Service.Spec.UpdateConfig.Parallelism
}

func (ctx *serviceInspectContext) HasUpdateDelay() bool {
    return ctx.Service.Spec.UpdateConfig.Delay.Nanoseconds() > 0
}

func (ctx *serviceInspectContext) UpdateDelay() time.Duration {
    return ctx.Service.Spec.UpdateConfig.Delay
}

func (ctx *serviceInspectContext) UpdateOnFailure() string {
    return ctx.Service.Spec.UpdateConfig.FailureAction
}

func (ctx *serviceInspectContext) UpdateOrder() string {
    return ctx.Service.Spec.UpdateConfig.Order
}

func (ctx *serviceInspectContext) HasUpdateMonitor() bool {
    return ctx.Service.Spec.UpdateConfig.Monitor.Nanoseconds() > 0
}

func (ctx *serviceInspectContext) UpdateMonitor() time.Duration {
    return ctx.Service.Spec.UpdateConfig.Monitor
}

func (ctx *serviceInspectContext) UpdateMaxFailureRatio() float32 {
    return ctx.Service.Spec.UpdateConfig.MaxFailureRatio
}

func (ctx *serviceInspectContext) HasRollbackConfig() bool {
    return ctx.Service.Spec.RollbackConfig != nil
}

func (ctx *serviceInspectContext) RollbackParallelism() uint64 {
    return ctx.Service.Spec.RollbackConfig.Parallelism
}

func (ctx *serviceInspectContext) HasRollbackDelay() bool {
    return ctx.Service.Spec.RollbackConfig.Delay.Nanoseconds() > 0
}

func (ctx *serviceInspectContext) RollbackDelay() time.Duration {
    return ctx.Service.Spec.RollbackConfig.Delay
}

func (ctx *serviceInspectContext) RollbackOnFailure() string {
    return ctx.Service.Spec.RollbackConfig.FailureAction
}

func (ctx *serviceInspectContext) HasRollbackMonitor() bool {
    return ctx.Service.Spec.RollbackConfig.Monitor.Nanoseconds() > 0
}

func (ctx *serviceInspectContext) RollbackMonitor() time.Duration {
    return ctx.Service.Spec.RollbackConfig.Monitor
}

func (ctx *serviceInspectContext) RollbackMaxFailureRatio() float32 {
    return ctx.Service.Spec.RollbackConfig.MaxFailureRatio
}

func (ctx *serviceInspectContext) RollbackOrder() string {
    return ctx.Service.Spec.RollbackConfig.Order
}

func (ctx *serviceInspectContext) ContainerImage() string {
    return ctx.Service.Spec.TaskTemplate.ContainerSpec.Image
}

func (ctx *serviceInspectContext) ContainerArgs() []string {
    return ctx.Service.Spec.TaskTemplate.ContainerSpec.Args
}

func (ctx *serviceInspectContext) ContainerEnv() []string {
    return ctx.Service.Spec.TaskTemplate.ContainerSpec.Env
}

func (ctx *serviceInspectContext) ContainerWorkDir() string {
    return ctx.Service.Spec.TaskTemplate.ContainerSpec.Dir
}

func (ctx *serviceInspectContext) ContainerUser() string {
    return ctx.Service.Spec.TaskTemplate.ContainerSpec.User
}

func (ctx *serviceInspectContext) HasContainerInit() bool {
    return ctx.Service.Spec.TaskTemplate.ContainerSpec.Init != nil
}

func (ctx *serviceInspectContext) ContainerInit() bool {
    return *ctx.Service.Spec.TaskTemplate.ContainerSpec.Init
}

func (ctx *serviceInspectContext) ContainerMounts() []mounttypes.Mount {
    return ctx.Service.Spec.TaskTemplate.ContainerSpec.Mounts
}

func (ctx *serviceInspectContext) ContainerSysCtls() map[string]string {
    return ctx.Service.Spec.TaskTemplate.ContainerSpec.Sysctls
}

func (ctx *serviceInspectContext) HasContainerSysCtls() bool {
    return len(ctx.Service.Spec.TaskTemplate.ContainerSpec.Sysctls) > 0
}

func (ctx *serviceInspectContext) HasResources() bool {
    return ctx.Service.Spec.TaskTemplate.Resources != nil
}

func (ctx *serviceInspectContext) HasResourceReservations() bool {
    if ctx.Service.Spec.TaskTemplate.Resources == nil || ctx.Service.Spec.TaskTemplate.Resources.Reservations == nil {
        return false
    }
    return ctx.Service.Spec.TaskTemplate.Resources.Reservations.NanoCPUs > 0 || ctx.Service.Spec.TaskTemplate.Resources.Reservations.MemoryBytes > 0
}

func (ctx *serviceInspectContext) ResourceReservationNanoCPUs() float64 {
    if ctx.Service.Spec.TaskTemplate.Resources.Reservations.NanoCPUs == 0 {
        return float64(0)
    }
    return float64(ctx.Service.Spec.TaskTemplate.Resources.Reservations.NanoCPUs) / 1e9
}

func (ctx *serviceInspectContext) ResourceReservationMemory() string {
    if ctx.Service.Spec.TaskTemplate.Resources.Reservations.MemoryBytes == 0 {
        return ""
    }
    return units.BytesSize(float64(ctx.Service.Spec.TaskTemplate.Resources.Reservations.MemoryBytes))
}

func (ctx *serviceInspectContext) HasResourceLimits() bool {
    if ctx.Service.Spec.TaskTemplate.Resources == nil || ctx.Service.Spec.TaskTemplate.Resources.Limits == nil {
        return false
    }
    return ctx.Service.Spec.TaskTemplate.Resources.Limits.NanoCPUs > 0 || ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes > 0
}

func (ctx *serviceInspectContext) ResourceLimitsNanoCPUs() float64 {
    return float64(ctx.Service.Spec.TaskTemplate.Resources.Limits.NanoCPUs) / 1e9
}

func (ctx *serviceInspectContext) ResourceLimitMemory() string {
    if ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes == 0 {
        return ""
    }
    return units.BytesSize(float64(ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes))
}

func (ctx *serviceInspectContext) Networks() []string {
    var out []string
    for _, n := range ctx.Service.Spec.TaskTemplate.Networks {
        if name, ok := ctx.networkNames[n.Target]; ok {
            out = append(out, name)
        } else {
            out = append(out, n.Target)
        }
    }
    return out
}

func (ctx *serviceInspectContext) EndpointMode() string {
    if ctx.Service.Spec.EndpointSpec == nil {
        return ""
    }

    return string(ctx.Service.Spec.EndpointSpec.Mode)
}

func (ctx *serviceInspectContext) Ports() []swarm.PortConfig {
    return ctx.Service.Endpoint.Ports
}

const (
    defaultServiceTableFormat = "table {{.ID}}\t{{.Name}}\t{{.Mode}}\t{{.Replicas}}\t{{.Image}}\t{{.Ports}}"

    serviceIDHeader = "ID"
    modeHeader      = "MODE"
    replicasHeader  = "REPLICAS"
)
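
// Rendered with the headers defined below, the default table format produces
// tab-aligned output along these lines (sample row is illustrative):
//
//	ID             NAME  MODE        REPLICAS  IMAGE         PORTS
//	u1kwguv841qg   foo   replicated  1/1       nginx:alpine  *:80->80/tcp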

// NewListFormat returns a Format for rendering using a service Context.
func NewListFormat(source string, quiet bool) formatter.Format {
    switch source {
    case formatter.TableFormatKey:
        if quiet {
            return formatter.DefaultQuietFormat
        }
        return defaultServiceTableFormat
    case formatter.RawFormatKey:
        if quiet {
            return `id: {{.ID}}`
        }
        return `id: {{.ID}}\nname: {{.Name}}\nmode: {{.Mode}}\nreplicas: {{.Replicas}}\nimage: {{.Image}}\nports: {{.Ports}}\n`
    }
    return formatter.Format(source)
}
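
// Usage sketch (illustrative; only NewListFormat and ListFormatWrite are
// names from this file):
//
//	f := NewListFormat("table", false) // formatter.TableFormatKey
//	fmtCtx := formatter.Context{Output: os.Stdout, Format: f}
//	// fmtCtx, the services, and a map[string]ListInfo then go to ListFormatWrite.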

// ListInfo stores the mode and replica count of a service, for use by the
// list template.
type ListInfo struct {
    Mode     string
    Replicas string
}

// ListFormatWrite renders a list of services using the given context.
func ListFormatWrite(ctx formatter.Context, services []swarm.Service, info map[string]ListInfo) error {
    render := func(format func(subContext formatter.SubContext) error) error {
        for _, service := range services {
            serviceCtx := &serviceContext{service: service, mode: info[service.ID].Mode, replicas: info[service.ID].Replicas}
            if err := format(serviceCtx); err != nil {
                return err
            }
        }
        return nil
    }
    serviceCtx := serviceContext{}
    serviceCtx.Header = formatter.SubHeaderContext{
        "ID":       serviceIDHeader,
        "Name":     formatter.NameHeader,
        "Mode":     modeHeader,
        "Replicas": replicasHeader,
        "Image":    formatter.ImageHeader,
        "Ports":    formatter.PortsHeader,
    }
    return ctx.Write(&serviceCtx, render)
}

type serviceContext struct {
    formatter.HeaderContext
    service  swarm.Service
    mode     string
    replicas string
}

func (c *serviceContext) MarshalJSON() ([]byte, error) {
    return formatter.MarshalJSON(c)
}

func (c *serviceContext) ID() string {
    return stringid.TruncateID(c.service.ID)
}

func (c *serviceContext) Name() string {
    return c.service.Spec.Name
}

func (c *serviceContext) Mode() string {
    return c.mode
}

func (c *serviceContext) Replicas() string {
    return c.replicas
}

func (c *serviceContext) Image() string {
    var image string
    if c.service.Spec.TaskTemplate.ContainerSpec != nil {
        image = c.service.Spec.TaskTemplate.ContainerSpec.Image
    }
    if ref, err := reference.ParseNormalizedNamed(image); err == nil {
        // update the image string for display, stripping any digest
        if nt, ok := ref.(reference.NamedTagged); ok {
            if namedTagged, err := reference.WithTag(reference.TrimNamed(nt), nt.Tag()); err == nil {
                image = reference.FamiliarString(namedTagged)
            }
        }
    }

    return image
}

type portRange struct {
    pStart   uint32
    pEnd     uint32
    tStart   uint32
    tEnd     uint32
    protocol swarm.PortConfigProtocol
}

func (pr portRange) String() string {
    var (
        pub string
        tgt string
    )

    if pr.pEnd > pr.pStart {
        pub = fmt.Sprintf("%d-%d", pr.pStart, pr.pEnd)
    } else {
        pub = fmt.Sprintf("%d", pr.pStart)
    }
    if pr.tEnd > pr.tStart {
        tgt = fmt.Sprintf("%d-%d", pr.tStart, pr.tEnd)
    } else {
        tgt = fmt.Sprintf("%d", pr.tStart)
    }
    return fmt.Sprintf("*:%s->%s/%s", pub, tgt, pr.protocol)
}
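
// For example (a sketch): portRange{pStart: 80, pEnd: 81, tStart: 80,
// tEnd: 80, protocol: swarm.PortConfigProtocolTCP}.String() yields
// "*:80-81->80/tcp".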

// Ports formats published ports on the ingress network for output.
//
// Where possible, ranges are grouped to produce a compact output:
// - multiple ports mapped to a single port (80->80, 81->80); is formatted as *:80-81->80
// - multiple consecutive ports on both sides; (80->80, 81->81) are formatted as: *:80-81->80-81
//
// The above should not be grouped together, i.e.:
// - 80->80, 81->81, 82->80 should be presented as: *:80-81->80-81, *:82->80
//
// TODO improve:
// - combine non-consecutive ports mapped to a single port (80->80, 81->80, 84->80, 86->80, 87->80); to be printed as *:80-81,84,86-87->80
// - combine tcp and udp mappings if their port-mapping is exactly the same (*:80-81->80-81/tcp+udp instead of *:80-81->80-81/tcp, *:80-81->80-81/udp)
func (c *serviceContext) Ports() string {
    if c.service.Endpoint.Ports == nil {
        return ""
    }

    pr := portRange{}
    ports := []string{}

    servicePorts := c.service.Endpoint.Ports
    sort.Slice(servicePorts, func(i, j int) bool {
        if servicePorts[i].Protocol == servicePorts[j].Protocol {
            return servicePorts[i].PublishedPort < servicePorts[j].PublishedPort
        }
        return servicePorts[i].Protocol < servicePorts[j].Protocol
    })

    for _, p := range c.service.Endpoint.Ports {
        if p.PublishMode == swarm.PortConfigPublishModeIngress {
            prIsRange := pr.tEnd != pr.tStart
            tOverlaps := p.TargetPort <= pr.tEnd

            // Start a new port-range if:
            // - the protocol is different from the current port-range
            // - published or target port are not consecutive to the current port-range
            // - the current port-range is a _range_, and the target port overlaps with the current range's target-ports
            if p.Protocol != pr.protocol || p.PublishedPort-pr.pEnd > 1 || p.TargetPort-pr.tEnd > 1 || prIsRange && tOverlaps {
                // start a new port-range, and print the previous port-range (if any)
                if pr.pStart > 0 {
                    ports = append(ports, pr.String())
                }
                pr = portRange{
                    pStart:   p.PublishedPort,
                    pEnd:     p.PublishedPort,
                    tStart:   p.TargetPort,
                    tEnd:     p.TargetPort,
                    protocol: p.Protocol,
                }
                continue
            }
            pr.pEnd = p.PublishedPort
            pr.tEnd = p.TargetPort
        }
    }
    if pr.pStart > 0 {
        ports = append(ports, pr.String())
    }
    return strings.Join(ports, ", ")
}
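
// Worked example (a sketch tracing the grouping rules above): for ingress
// ports 80->80, 81->81, 82->80 (all tcp), the loop first extends the range
// to 80-81->80-81; 82->80 then overlaps the current range's target ports and
// starts a new range, so Ports() returns "*:80-81->80-81/tcp, *:82->80/tcp".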