mirror of https://github.com/docker/cli.git
Merge pull request #3770 from thaJeztah/update_golangci_lint_step2
golangci-lint: enable more linters
commit ce22ac2736
@@ -3,23 +3,41 @@ linters:
- bodyclose
- depguard
- dogsled
- dupword # Detects duplicate words.
- durationcheck
- errchkjson
- exportloopref # Detects pointers to enclosing loop variables.
- gocritic # Metalinter; detects bugs, performance, and styling issues.
- gocyclo
- gofumpt
- gofumpt # Detects whether code was gofumpt-ed.
- goimports
- gosec
- gosec # Detects security problems.
- gosimple
- govet
- ineffassign
- lll
- megacheck
- misspell
- misspell # Detects commonly misspelled English words in comments.
- nakedret
- revive
- nilerr # Detects code that returns nil even if it checks that the error is not nil.
- nolintlint # Detects ill-formed or insufficient nolint directives.
- perfsprint # Detects fmt.Sprintf uses that can be replaced with a faster alternative.
- prealloc # Detects slice declarations that could potentially be pre-allocated.
- predeclared # Detects code that shadows one of Go's predeclared identifiers
- reassign
- revive # Metalinter; drop-in replacement for golint.
- staticcheck
- stylecheck # Replacement for golint
- tenv # Detects using os.Setenv instead of t.Setenv.
- thelper # Detects test helpers without t.Helper().
- tparallel # Detects inappropriate usage of t.Parallel().
- typecheck
- unconvert
- unconvert # Detects unnecessary type conversions.
- unparam
- unused
- usestdlibvars
- vet
- wastedassign

disable:
- errcheck
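Several of the newly enabled linters check suppression comments themselves. As an illustration only (this snippet is not part of the commit), nolintlint expects machine-readable directives that name the silenced linter and carry a reason:

package example

import "os/exec"

// nolintlint flags ill-formed or unexplained suppressions; the directive
// below names the linter being silenced (gosec) and gives a reason, which
// is the accepted form.
func runTool(path string) error {
	cmd := exec.Command(path) //nolint:gosec // path is validated by the caller
	return cmd.Run()
}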
@@ -40,13 +58,35 @@ linters-settings:
gocyclo:
min-complexity: 16
govet:
check-shadowing: false
check-shadowing: true
settings:
shadow:
strict: true
lll:
line-length: 200
nakedret:
command: nakedret
pattern: ^(?P<path>.*?\\.go):(?P<line>\\d+)\\s*(?P<message>.*)$

revive:
rules:
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#import-shadowing
- name: import-shadowing
severity: warning
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-block
- name: empty-block
severity: warning
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-lines
- name: empty-lines
severity: warning
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#use-any
- name: use-any
severity: warning
disabled: false

issues:
# The default exclusion rules are a bit too permissive, so copying the relevant ones below
exclude-use-default: false
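Many of the Go changes further down follow from these settings: govet's shadow check in strict mode and revive's import-shadowing rule are what motivate renames such as client -> apiClient and container -> containerID/ctr. A hedged sketch (not taken from the repository) of the pattern the strict shadow check reports:

package example

import "strconv"

// The inner ":=" redeclares n and err inside the if-block, shadowing the
// outer variables; govet's shadow check in strict mode reports this.
func shadowExample(s string) int {
	n, err := strconv.Atoi(s)
	if err != nil {
		n, err := strconv.Atoi("0") // shadows the outer n and err
		if err != nil {
			return -1
		}
		return n
	}
	return n
}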
@@ -83,7 +123,7 @@ issues:
- gosec
# EXC0008
# TODO: evaluate these and fix where needed: G307: Deferring unsafe method "*os.File" on type "Close" (gosec)
- text: "(G104|G307)"
- text: "G307"
linters:
- gosec
# EXC0009

@@ -97,10 +137,13 @@ issues:
# G113 Potential uncontrolled memory consumption in Rat.SetString (CVE-2022-23772)
# only affects gp < 1.16.14. and go < 1.17.7
- text: "(G113)"
- text: "G113"
linters:
- gosec
# TODO: G104: Errors unhandled. (gosec)
- text: "G104"
linters:
- gosec

# Looks like the match in "EXC0007" above doesn't catch this one
# TODO: consider upstreaming this to golangci-lint's default exclusion rules
- text: "G204: Subprocess launched with a potential tainted input or cmd arguments"

@@ -125,6 +168,15 @@ issues:
linters:
- errcheck
- gosec
- text: "ST1000: at least one file in a package should have a package comment"
linters:
- stylecheck

# Allow "err" and "ok" vars to shadow existing declarations, otherwise we get too many false positives.
- text: '^shadow: declaration of "(err|ok)" shadows declaration'
linters:
- govet

# Maximum issues count per one linter. Set to 0 to disable. Default is 50.
max-issues-per-linter: 0
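The last exclusion above exists because shadowing err (or ok) in a new if-scope is idiomatic Go; without it the strict shadow check would fire on essentially every error check. An illustrative sketch, not part of the commit:

package example

import "os"

// The inner err deliberately shadows the outer one; the
// '^shadow: declaration of "(err|ok)" shadows declaration' exclusion keeps
// govet from reporting this common pattern.
func openIfBothExist(a, b string) (*os.File, error) {
	f, err := os.Open(a)
	if err != nil {
		return nil, err
	}
	if _, err := os.Stat(b); err != nil { // shadows err on purpose
		f.Close()
		return nil, err
	}
	return f, nil
}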
@@ -75,13 +75,14 @@ func TestValidateCandidate(t *testing.T) {
} {
t.Run(tc.name, func(t *testing.T) {
p, err := newPlugin(tc.c, fakeroot.Commands())
if tc.err != "" {
switch {
case tc.err != "":
assert.ErrorContains(t, err, tc.err)
} else if tc.invalid != "" {
case tc.invalid != "":
assert.NilError(t, err)
assert.Assert(t, cmp.ErrorType(p.Err, reflect.TypeOf(&pluginError{})))
assert.ErrorContains(t, p.Err, tc.invalid)
} else {
default:
assert.NilError(t, err)
assert.Equal(t, NamePrefix+p.Name, goodPluginName)
assert.Equal(t, p.SchemaVersion, "0.1.0")

@@ -43,6 +43,6 @@ func wrapAsPluginError(err error, msg string) error {
// NewPluginError creates a new pluginError, analogous to
// errors.Errorf.
func NewPluginError(msg string, args ...interface{}) error {
func NewPluginError(msg string, args ...any) error {
return &pluginError{cause: errors.Errorf(msg, args...)}
}

@@ -46,7 +46,7 @@ func TestListPluginCandidates(t *testing.T) {
)
defer dir.Remove()

var dirs []string
dirs := make([]string, 0, 6)
for _, d := range []string{"plugins1", "nonexistent", "plugins2", "plugins3", "plugins4", "plugins5"} {
dirs = append(dirs, dir.Join(d))
}
@@ -12,8 +12,7 @@ const (
// NewFormat returns a format for use with a checkpoint Context
func NewFormat(source string) formatter.Format {
switch source {
case formatter.TableFormatKey:
if source == formatter.TableFormatKey {
return defaultCheckpointFormat
}
return formatter.Format(source)
@@ -394,7 +394,7 @@ func (cli *DockerCli) CurrentContext() string {
// occur when trying to use it.
//
// Refer to [DockerCli.CurrentContext] above for further details.
func resolveContextName(opts *cliflags.ClientOptions, config *configfile.ConfigFile) string {
func resolveContextName(opts *cliflags.ClientOptions, cfg *configfile.ConfigFile) string {
if opts != nil && opts.Context != "" {
return opts.Context
}

@@ -407,9 +407,9 @@ func resolveContextName(opts *cliflags.ClientOptions, config *configfile.ConfigF
if ctxName := os.Getenv(EnvOverrideContext); ctxName != "" {
return ctxName
}
if config != nil && config.CurrentContext != "" {
if cfg != nil && cfg.CurrentContext != "" {
// We don't validate if this context exists: errors may occur when trying to use it.
return config.CurrentContext
return cfg.CurrentContext
}
return DefaultContextName
}

@@ -514,7 +514,7 @@ func UserAgent() string {
}

var defaultStoreEndpoints = []store.NamedTypeGetter{
store.EndpointTypeGetter(docker.DockerEndpoint, func() interface{} { return &docker.EndpointMeta{} }),
store.EndpointTypeGetter(docker.DockerEndpoint, func() any { return &docker.EndpointMeta{} }),
}

// RegisterDefaultStoreEndpoints registers a new named endpoint

@@ -528,7 +528,7 @@ func RegisterDefaultStoreEndpoints(ep ...store.NamedTypeGetter) {
// DefaultContextStoreConfig returns a new store.Config with the default set of endpoints configured.
func DefaultContextStoreConfig() store.Config {
return store.NewConfig(
func() interface{} { return &DockerContext{} },
func() any { return &DockerContext{} },
defaultStoreEndpoints...,
)
}
@@ -8,6 +8,7 @@ import (
)

func contentTrustEnabled(t *testing.T) bool {
t.Helper()
var cli DockerCli
assert.NilError(t, WithContentTrustFromEnv()(&cli))
return cli.contentTrust
@@ -101,7 +101,7 @@ func (c *configContext) Labels() string {
if mapLabels == nil {
return ""
}
var joinLabels []string
joinLabels := make([]string, 0, len(mapLabels))
for k, v := range mapLabels {
joinLabels = append(joinLabels, k+"="+v)
}
@@ -48,7 +48,7 @@ func RunConfigInspect(dockerCli command.Cli, opts InspectOptions) error {
opts.Format = "pretty"
}

getRef := func(id string) (interface{}, []byte, error) {
getRef := func(id string) (any, []byte, error) {
return client.ConfigInspectWithRaw(ctx, id)
}
f := opts.Format
@@ -24,8 +24,8 @@ type AttachOptions struct {
DetachKeys string
}

func inspectContainerAndCheckState(ctx context.Context, cli client.APIClient, args string) (*types.ContainerJSON, error) {
c, err := cli.ContainerInspect(ctx, args)
func inspectContainerAndCheckState(ctx context.Context, apiClient client.APIClient, args string) (*types.ContainerJSON, error) {
c, err := apiClient.ContainerInspect(ctx, args)
if err != nil {
return nil, err
}

@@ -45,21 +45,21 @@ func inspectContainerAndCheckState(ctx context.Context, cli client.APIClient, ar
// NewAttachCommand creates a new cobra.Command for `docker attach`
func NewAttachCommand(dockerCli command.Cli) *cobra.Command {
var opts AttachOptions
var container string
var ctr string

cmd := &cobra.Command{
Use: "attach [OPTIONS] CONTAINER",
Short: "Attach local standard input, output, and error streams to a running container",
Args: cli.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
container = args[0]
return RunAttach(context.Background(), dockerCli, container, &opts)
ctr = args[0]
return RunAttach(context.Background(), dockerCli, ctr, &opts)
},
Annotations: map[string]string{
"aliases": "docker container attach, docker attach",
},
ValidArgsFunction: completion.ContainerNames(dockerCli, false, func(container types.Container) bool {
return container.State != "paused"
ValidArgsFunction: completion.ContainerNames(dockerCli, false, func(ctr types.Container) bool {
return ctr.State != "paused"
}),
}

@@ -71,8 +71,8 @@ func NewAttachCommand(dockerCli command.Cli) *cobra.Command {
}

// RunAttach executes an `attach` command
func RunAttach(ctx context.Context, dockerCli command.Cli, target string, opts *AttachOptions) error {
apiClient := dockerCli.Client()
func RunAttach(ctx context.Context, dockerCLI command.Cli, target string, opts *AttachOptions) error {
apiClient := dockerCLI.Client()

// request channel to wait for client
resultC, errC := apiClient.ContainerWait(ctx, target, "")

@@ -82,11 +82,11 @@ func RunAttach(ctx context.Context, dockerCli command.Cli, target string, opts *
return err
}

if err := dockerCli.In().CheckTty(!opts.NoStdin, c.Config.Tty); err != nil {
if err := dockerCLI.In().CheckTty(!opts.NoStdin, c.Config.Tty); err != nil {
return err
}

detachKeys := dockerCli.ConfigFile().DetachKeys
detachKeys := dockerCLI.ConfigFile().DetachKeys
if opts.DetachKeys != "" {
detachKeys = opts.DetachKeys
}

@@ -101,7 +101,7 @@ func RunAttach(ctx context.Context, dockerCli command.Cli, target string, opts *
var in io.ReadCloser
if options.Stdin {
in = dockerCli.In()
in = dockerCLI.In()
}

if opts.Proxy && !c.Config.Tty {

@@ -129,15 +129,15 @@ func RunAttach(ctx context.Context, dockerCli command.Cli, target string, opts *
return err
}

if c.Config.Tty && dockerCli.Out().IsTerminal() {
resizeTTY(ctx, dockerCli, target)
if c.Config.Tty && dockerCLI.Out().IsTerminal() {
resizeTTY(ctx, dockerCLI, target)
}

streamer := hijackedIOStreamer{
streams: dockerCli,
streams: dockerCLI,
inputStream: in,
outputStream: dockerCli.Out(),
errorStream: dockerCli.Err(),
outputStream: dockerCLI.Out(),
errorStream: dockerCLI.Err(),
resp: resp,
tty: c.Config.Tty,
detachKeys: options.DetachKeys,
@@ -16,24 +16,24 @@ type fakeClient struct {
client.Client
inspectFunc func(string) (types.ContainerJSON, error)
execInspectFunc func(execID string) (types.ContainerExecInspect, error)
execCreateFunc func(container string, config types.ExecConfig) (types.IDResponse, error)
execCreateFunc func(containerID string, config types.ExecConfig) (types.IDResponse, error)
createContainerFunc func(config *container.Config,
hostConfig *container.HostConfig,
networkingConfig *network.NetworkingConfig,
platform *specs.Platform,
containerName string) (container.CreateResponse, error)
containerStartFunc func(container string, options container.StartOptions) error
containerStartFunc func(containerID string, options container.StartOptions) error
imageCreateFunc func(parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error)
infoFunc func() (system.Info, error)
containerStatPathFunc func(container, path string) (types.ContainerPathStat, error)
containerCopyFromFunc func(container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error)
containerStatPathFunc func(containerID, path string) (types.ContainerPathStat, error)
containerCopyFromFunc func(containerID, srcPath string) (io.ReadCloser, types.ContainerPathStat, error)
logFunc func(string, container.LogsOptions) (io.ReadCloser, error)
waitFunc func(string) (<-chan container.WaitResponse, <-chan error)
containerListFunc func(container.ListOptions) ([]types.Container, error)
containerExportFunc func(string) (io.ReadCloser, error)
containerExecResizeFunc func(id string, options container.ResizeOptions) error
containerRemoveFunc func(ctx context.Context, container string, options container.RemoveOptions) error
containerKillFunc func(ctx context.Context, container, signal string) error
containerRemoveFunc func(ctx context.Context, containerID string, options container.RemoveOptions) error
containerKillFunc func(ctx context.Context, containerID, signal string) error
Version string
}

@@ -51,9 +51,9 @@ func (f *fakeClient) ContainerInspect(_ context.Context, containerID string) (ty
return types.ContainerJSON{}, nil
}

func (f *fakeClient) ContainerExecCreate(_ context.Context, container string, config types.ExecConfig) (types.IDResponse, error) {
func (f *fakeClient) ContainerExecCreate(_ context.Context, containerID string, config types.ExecConfig) (types.IDResponse, error) {
if f.execCreateFunc != nil {
return f.execCreateFunc(container, config)
return f.execCreateFunc(containerID, config)
}
return types.IDResponse{}, nil
}

@@ -83,9 +83,9 @@ func (f *fakeClient) ContainerCreate(
return container.CreateResponse{}, nil
}

func (f *fakeClient) ContainerRemove(ctx context.Context, container string, options container.RemoveOptions) error {
func (f *fakeClient) ContainerRemove(ctx context.Context, containerID string, options container.RemoveOptions) error {
if f.containerRemoveFunc != nil {
return f.containerRemoveFunc(ctx, container, options)
return f.containerRemoveFunc(ctx, containerID, options)
}
return nil
}

@@ -104,23 +104,23 @@ func (f *fakeClient) Info(_ context.Context) (system.Info, error) {
return system.Info{}, nil
}

func (f *fakeClient) ContainerStatPath(_ context.Context, container, path string) (types.ContainerPathStat, error) {
func (f *fakeClient) ContainerStatPath(_ context.Context, containerID, path string) (types.ContainerPathStat, error) {
if f.containerStatPathFunc != nil {
return f.containerStatPathFunc(container, path)
return f.containerStatPathFunc(containerID, path)
}
return types.ContainerPathStat{}, nil
}

func (f *fakeClient) CopyFromContainer(_ context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) {
func (f *fakeClient) CopyFromContainer(_ context.Context, containerID, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) {
if f.containerCopyFromFunc != nil {
return f.containerCopyFromFunc(container, srcPath)
return f.containerCopyFromFunc(containerID, srcPath)
}
return nil, types.ContainerPathStat{}, nil
}

func (f *fakeClient) ContainerLogs(_ context.Context, container string, options container.LogsOptions) (io.ReadCloser, error) {
func (f *fakeClient) ContainerLogs(_ context.Context, containerID string, options container.LogsOptions) (io.ReadCloser, error) {
if f.logFunc != nil {
return f.logFunc(container, options)
return f.logFunc(containerID, options)
}
return nil, nil
}

@@ -129,23 +129,23 @@ func (f *fakeClient) ClientVersion() string {
return f.Version
}

func (f *fakeClient) ContainerWait(_ context.Context, container string, _ container.WaitCondition) (<-chan container.WaitResponse, <-chan error) {
func (f *fakeClient) ContainerWait(_ context.Context, containerID string, _ container.WaitCondition) (<-chan container.WaitResponse, <-chan error) {
if f.waitFunc != nil {
return f.waitFunc(container)
return f.waitFunc(containerID)
}
return nil, nil
}

func (f *fakeClient) ContainerStart(_ context.Context, container string, options container.StartOptions) error {
func (f *fakeClient) ContainerStart(_ context.Context, containerID string, options container.StartOptions) error {
if f.containerStartFunc != nil {
return f.containerStartFunc(container, options)
return f.containerStartFunc(containerID, options)
}
return nil
}

func (f *fakeClient) ContainerExport(_ context.Context, container string) (io.ReadCloser, error) {
func (f *fakeClient) ContainerExport(_ context.Context, containerID string) (io.ReadCloser, error) {
if f.containerExportFunc != nil {
return f.containerExportFunc(container)
return f.containerExportFunc(containerID)
}
return nil, nil
}

@@ -157,9 +157,9 @@ func (f *fakeClient) ContainerExecResize(_ context.Context, id string, options c
return nil
}

func (f *fakeClient) ContainerKill(ctx context.Context, container, signal string) error {
func (f *fakeClient) ContainerKill(ctx context.Context, containerID, signal string) error {
if f.containerKillFunc != nil {
return f.containerKillFunc(ctx, container, signal)
return f.containerKillFunc(ctx, containerID, signal)
}
return nil
}
@@ -246,7 +246,6 @@ func copyFromContainer(ctx context.Context, dockerCli command.Cli, copyConfig cp
linkTarget, rebaseName = archive.GetRebaseName(srcPath, linkTarget)
srcPath = linkTarget
}

}

ctx, cancel := signal.NotifyContext(ctx, os.Interrupt)
@@ -113,15 +113,15 @@ func runCreate(dockerCli command.Cli, flags *pflag.FlagSet, options *createOptio
}

// FIXME(thaJeztah): this is the only code-path that uses APIClient.ImageCreate. Rewrite this to use the regular "pull" code (or vice-versa).
func pullImage(ctx context.Context, dockerCli command.Cli, image string, opts *createOptions) error {
encodedAuth, err := command.RetrieveAuthTokenFromImage(dockerCli.ConfigFile(), image)
func pullImage(ctx context.Context, dockerCli command.Cli, img string, options *createOptions) error {
encodedAuth, err := command.RetrieveAuthTokenFromImage(dockerCli.ConfigFile(), img)
if err != nil {
return err
}

responseBody, err := dockerCli.Client().ImageCreate(ctx, image, types.ImageCreateOptions{
responseBody, err := dockerCli.Client().ImageCreate(ctx, img, types.ImageCreateOptions{
RegistryAuth: encodedAuth,
Platform: opts.platform,
Platform: options.platform,
})
if err != nil {
return err

@@ -129,7 +129,7 @@ func pullImage(ctx context.Context, dockerCli command.Cli, image string, opts *c
defer responseBody.Close()

out := dockerCli.Err()
if opts.quiet {
if options.quiet {
out = io.Discard
}
return jsonmessage.DisplayJSONMessagesToStream(responseBody, streams.NewOut(out), nil)

@@ -185,7 +185,7 @@ func newCIDFile(path string) (*cidFile, error) {
}

//nolint:gocyclo
func createContainer(ctx context.Context, dockerCli command.Cli, containerCfg *containerConfig, opts *createOptions) (containerID string, err error) {
func createContainer(ctx context.Context, dockerCli command.Cli, containerCfg *containerConfig, options *createOptions) (containerID string, err error) {
config := containerCfg.Config
hostConfig := containerCfg.HostConfig
networkingConfig := containerCfg.NetworkingConfig

@@ -211,7 +211,7 @@ func createContainer(ctx context.Context, dockerCli command.Cli, containerCfg *c
if named, ok := ref.(reference.Named); ok {
namedRef = reference.TagNameOnly(named)

if taggedRef, ok := namedRef.(reference.NamedTagged); ok && !opts.untrusted {
if taggedRef, ok := namedRef.(reference.NamedTagged); ok && !options.untrusted {
var err error
trustedRef, err = image.TrustedReference(ctx, dockerCli, taggedRef)
if err != nil {

@@ -222,7 +222,7 @@ func createContainer(ctx context.Context, dockerCli command.Cli, containerCfg *c
}

pullAndTagImage := func() error {
if err := pullImage(ctx, dockerCli, config.Image, opts); err != nil {
if err := pullImage(ctx, dockerCli, config.Image, options); err != nil {
return err
}
if taggedRef, ok := namedRef.(reference.NamedTagged); ok && trustedRef != nil {

@@ -236,15 +236,15 @@ func createContainer(ctx context.Context, dockerCli command.Cli, containerCfg *c
// create. It will produce an error if you try to set a platform on older API
// versions, so check the API version here to maintain backwards
// compatibility for CLI users.
if opts.platform != "" && versions.GreaterThanOrEqualTo(dockerCli.Client().ClientVersion(), "1.41") {
p, err := platforms.Parse(opts.platform)
if options.platform != "" && versions.GreaterThanOrEqualTo(dockerCli.Client().ClientVersion(), "1.41") {
p, err := platforms.Parse(options.platform)
if err != nil {
return "", errors.Wrap(err, "error parsing specified platform")
}
platform = &p
}

if opts.pull == PullImageAlways {
if options.pull == PullImageAlways {
if err := pullAndTagImage(); err != nil {
return "", err
}

@@ -252,11 +252,11 @@ func createContainer(ctx context.Context, dockerCli command.Cli, containerCfg *c

hostConfig.ConsoleSize[0], hostConfig.ConsoleSize[1] = dockerCli.Out().GetTtySize()

response, err := dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, platform, opts.name)
response, err := dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, platform, options.name)
if err != nil {
// Pull image if it does not exist locally and we have the PullImageMissing option. Default behavior.
if errdefs.IsNotFound(err) && namedRef != nil && opts.pull == PullImageMissing {
if !opts.quiet {
if errdefs.IsNotFound(err) && namedRef != nil && options.pull == PullImageMissing {
if !options.quiet {
// we don't want to write to stdout anything apart from container.ID
fmt.Fprintf(dockerCli.Err(), "Unable to find image '%s' locally\n", reference.FamiliarString(namedRef))
}

@@ -266,7 +266,7 @@ func createContainer(ctx context.Context, dockerCli command.Cli, containerCfg *c
}

var retryErr error
response, retryErr = dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, platform, opts.name)
response, retryErr = dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, platform, options.name)
if retryErr != nil {
return "", retryErr
}
@@ -223,7 +223,7 @@ func TestNewCreateCommandWithContentTrustErrors(t *testing.T) {
}
for _, tc := range testCases {
tc := tc
cli := test.NewFakeCli(&fakeClient{
fakeCLI := test.NewFakeCli(&fakeClient{
createContainerFunc: func(config *container.Config,
hostConfig *container.HostConfig,
networkingConfig *network.NetworkingConfig,

@@ -233,8 +233,8 @@ func TestNewCreateCommandWithContentTrustErrors(t *testing.T) {
return container.CreateResponse{}, fmt.Errorf("shouldn't try to pull image")
},
}, test.EnableContentTrust)
cli.SetNotaryClient(tc.notaryFunc)
cmd := NewCreateCommand(cli)
fakeCLI.SetNotaryClient(tc.notaryFunc)
cmd := NewCreateCommand(fakeCLI)
cmd.SetOut(io.Discard)
cmd.SetArgs(tc.args)
err := cmd.Execute()

@@ -323,7 +323,7 @@ func TestCreateContainerWithProxyConfig(t *testing.T) {
}
sort.Strings(expected)

cli := test.NewFakeCli(&fakeClient{
fakeCLI := test.NewFakeCli(&fakeClient{
createContainerFunc: func(config *container.Config,
hostConfig *container.HostConfig,
networkingConfig *network.NetworkingConfig,

@@ -335,7 +335,7 @@ func TestCreateContainerWithProxyConfig(t *testing.T) {
return container.CreateResponse{}, nil
},
})
cli.SetConfigFile(&configfile.ConfigFile{
fakeCLI.SetConfigFile(&configfile.ConfigFile{
Proxies: map[string]configfile.ProxyConfig{
"default": {
HTTPProxy: "httpProxy",

@@ -346,7 +346,7 @@ func TestCreateContainerWithProxyConfig(t *testing.T) {
},
},
})
cmd := NewCreateCommand(cli)
cmd := NewCreateCommand(fakeCLI)
cmd.SetOut(io.Discard)
cmd.SetArgs([]string{"image:tag"})
err := cmd.Execute()
@@ -197,11 +197,9 @@ func TestRunExec(t *testing.T) {
err := RunExec(context.Background(), cli, "thecontainer", testcase.options)
if testcase.expectedError != "" {
assert.ErrorContains(t, err, testcase.expectedError)
} else {
if !assert.Check(t, err) {
} else if !assert.Check(t, err) {
return
}
}
assert.Check(t, is.Equal(testcase.expectedOut, cli.OutBuffer().String()))
assert.Check(t, is.Equal(testcase.expectedErr, cli.ErrBuffer().String()))
})

@@ -264,8 +262,8 @@ func TestNewExecCommandErrors(t *testing.T) {
},
}
for _, tc := range testCases {
cli := test.NewFakeCli(&fakeClient{inspectFunc: tc.containerInspectFunc})
cmd := NewExecCommand(cli)
fakeCLI := test.NewFakeCli(&fakeClient{inspectFunc: tc.containerInspectFunc})
cmd := NewExecCommand(fakeCLI)
cmd.SetOut(io.Discard)
cmd.SetArgs(tc.args)
assert.ErrorContains(t, cmd.Execute(), tc.expectedError)
@@ -14,8 +14,7 @@ const (
// NewDiffFormat returns a format for use with a diff Context
func NewDiffFormat(source string) formatter.Format {
switch source {
case formatter.TableFormatKey:
if source == formatter.TableFormatKey {
return defaultDiffTableFormat
}
return formatter.Format(source)
@@ -24,7 +24,7 @@ const (
pidsHeader = "PIDS" // Used only on Linux
)

// StatsEntry represents represents the statistics data collected from a container
// StatsEntry represents the statistics data collected from a container
type StatsEntry struct {
Container string
Name string

@@ -116,9 +116,9 @@ func NewStats(container string) *Stats {
}

// statsFormatWrite renders the context for a list of containers statistics
func statsFormatWrite(ctx formatter.Context, Stats []StatsEntry, osType string, trunc bool) error {
func statsFormatWrite(ctx formatter.Context, stats []StatsEntry, osType string, trunc bool) error {
render := func(format func(subContext formatter.SubContext) error) error {
for _, cstats := range Stats {
for _, cstats := range stats {
statsCtx := &statsContext{
s: cstats,
os: osType,
@@ -43,7 +43,7 @@ func runInspect(dockerCli command.Cli, opts inspectOptions) error {
client := dockerCli.Client()
ctx := context.Background()

getRefFunc := func(ref string) (interface{}, []byte, error) {
getRefFunc := func(ref string) (any, []byte, error) {
return client.ContainerInspectWithRaw(ctx, ref, opts.size)
}
return inspect.Inspect(dockerCli.Out(), opts.refs, opts.format, getRefFunc)
@@ -29,7 +29,7 @@ type psOptions struct {
}

// NewPsCommand creates a new cobra.Command for `docker ps`
func NewPsCommand(dockerCli command.Cli) *cobra.Command {
func NewPsCommand(dockerCLI command.Cli) *cobra.Command {
options := psOptions{filter: opts.NewFilterOpt()}

cmd := &cobra.Command{

@@ -38,7 +38,7 @@ func NewPsCommand(dockerCli command.Cli) *cobra.Command {
Args: cli.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
options.sizeChanged = cmd.Flags().Changed("size")
return runPs(dockerCli, &options)
return runPs(dockerCLI, &options)
},
Annotations: map[string]string{
"category-top": "3",

@@ -61,28 +61,28 @@ func NewPsCommand(dockerCli command.Cli) *cobra.Command {
return cmd
}

func newListCommand(dockerCli command.Cli) *cobra.Command {
cmd := *NewPsCommand(dockerCli)
func newListCommand(dockerCLI command.Cli) *cobra.Command {
cmd := *NewPsCommand(dockerCLI)
cmd.Aliases = []string{"ps", "list"}
cmd.Use = "ls [OPTIONS]"
return &cmd
}

func buildContainerListOptions(opts *psOptions) (*container.ListOptions, error) {
options := &container.ListOptions{
All: opts.all,
Limit: opts.last,
Size: opts.size,
Filters: opts.filter.Value(),
func buildContainerListOptions(options *psOptions) (*container.ListOptions, error) {
listOptions := &container.ListOptions{
All: options.all,
Limit: options.last,
Size: options.size,
Filters: options.filter.Value(),
}

if opts.nLatest && opts.last == -1 {
options.Limit = 1
if options.nLatest && options.last == -1 {
listOptions.Limit = 1
}

// always validate template when `--format` is used, for consistency
if len(opts.format) > 0 {
tmpl, err := templates.NewParse("", opts.format)
if len(options.format) > 0 {
tmpl, err := templates.NewParse("", options.format)
if err != nil {
return nil, errors.Wrap(err, "failed to parse template")
}

@@ -97,7 +97,7 @@ func buildContainerListOptions(opts *psOptions) (*container.ListOptions, error)

// if `size` was not explicitly set to false (with `--size=false`)
// and `--quiet` is not set, request size if the template requires it
if !opts.quiet && !options.Size && !opts.sizeChanged {
if !options.quiet && !listOptions.Size && !options.sizeChanged {
// The --size option isn't set, but .Size may be used in the template.
// Parse and execute the given template to detect if the .Size field is
// used. If it is, then automatically enable the --size option. See #24696

@@ -106,22 +106,22 @@ func buildContainerListOptions(opts *psOptions) (*container.ListOptions, error)
// because calculating the size is a costly operation.

if _, ok := optionsProcessor.FieldsUsed["Size"]; ok {
options.Size = true
listOptions.Size = true
}
}
}

return options, nil
return listOptions, nil
}

func runPs(dockerCli command.Cli, options *psOptions) error {
func runPs(dockerCLI command.Cli, options *psOptions) error {
ctx := context.Background()

if len(options.format) == 0 {
// load custom psFormat from CLI config (if any)
options.format = dockerCli.ConfigFile().PsFormat
options.format = dockerCLI.ConfigFile().PsFormat
} else if options.quiet {
_, _ = dockerCli.Err().Write([]byte("WARNING: Ignoring custom format, because both --format and --quiet are set.\n"))
_, _ = dockerCLI.Err().Write([]byte("WARNING: Ignoring custom format, because both --format and --quiet are set.\n"))
}

listOptions, err := buildContainerListOptions(options)

@@ -129,13 +129,13 @@ func runPs(dockerCli command.Cli, options *psOptions) error {
return err
}

containers, err := dockerCli.Client().ContainerList(ctx, *listOptions)
containers, err := dockerCLI.Client().ContainerList(ctx, *listOptions)
if err != nil {
return err
}

containerCtx := formatter.Context{
Output: dockerCli.Out(),
Output: dockerCLI.Out(),
Format: formatter.NewContainerFormat(options.format, options.quiet, listOptions.Size),
Trunc: !options.noTrunc,
}
@@ -49,11 +49,9 @@ func TestRunLogs(t *testing.T) {
err := runLogs(cli, testcase.options)
if testcase.expectedError != "" {
assert.ErrorContains(t, err, testcase.expectedError)
} else {
if !assert.Check(t, err) {
} else if !assert.Check(t, err) {
return
}
}
assert.Check(t, is.Equal(testcase.expectedOut, cli.OutBuffer().String()))
assert.Check(t, is.Equal(testcase.expectedErr, cli.ErrBuffer().String()))
})
@@ -198,7 +198,7 @@ func TestParseWithVolumes(t *testing.T) {
t.Fatalf("Error parsing volume flags, %q should not mount-bind anything. Received %v", tryit, hostConfig.Binds)
} else if _, exists := config.Volumes[arr[0]]; !exists {
t.Fatalf("Error parsing volume flags, %s is missing from volumes. Received %v", arr[0], config.Volumes)
} else if _, exists := config.Volumes[arr[1]]; !exists {
} else if _, exists := config.Volumes[arr[1]]; !exists { //nolint:govet // ignore shadow-check
t.Fatalf("Error parsing volume flags, %s is missing from volumes. Received %v", arr[1], config.Volumes)
}

@@ -1011,13 +1011,11 @@ func TestValidateDevice(t *testing.T) {
for path, expectedError := range invalid {
if _, err := validateDevice(path, runtime.GOOS); err == nil {
t.Fatalf("ValidateDevice(`%q`) should have failed validation", path)
} else {
if err.Error() != expectedError {
} else if err.Error() != expectedError {
t.Fatalf("ValidateDevice(`%q`) error should contain %q, got %q", path, expectedError, err.Error())
}
}
}
}

func TestParseSystemPaths(t *testing.T) {
tests := []struct {
@@ -118,14 +118,14 @@ func runRun(dockerCli command.Cli, flags *pflag.FlagSet, ropts *runOptions, copt
}

//nolint:gocyclo
func runContainer(dockerCli command.Cli, opts *runOptions, copts *containerOptions, containerCfg *containerConfig) error {
func runContainer(dockerCli command.Cli, runOpts *runOptions, copts *containerOptions, containerCfg *containerConfig) error {
config := containerCfg.Config
stdout, stderr := dockerCli.Out(), dockerCli.Err()
apiClient := dockerCli.Client()

config.ArgsEscaped = false

if !opts.detach {
if !runOpts.detach {
if err := dockerCli.In().CheckTty(config.AttachStdin, config.Tty); err != nil {
return err
}

@@ -143,12 +143,12 @@ func runContainer(dockerCli command.Cli, opts *runOptions, copts *containerOptio
ctx, cancelFun := context.WithCancel(context.Background())
defer cancelFun()

containerID, err := createContainer(ctx, dockerCli, containerCfg, &opts.createOptions)
containerID, err := createContainer(ctx, dockerCli, containerCfg, &runOpts.createOptions)
if err != nil {
reportError(stderr, "run", err.Error(), true)
return runStartContainerErr(err)
}
if opts.sigProxy {
if runOpts.sigProxy {
sigc := notifyAllSignals()
go ForwardAllSignals(ctx, apiClient, containerID, sigc)
defer signal.StopCatch(sigc)

@@ -169,8 +169,8 @@ func runContainer(dockerCli command.Cli, opts *runOptions, copts *containerOptio
attach := config.AttachStdin || config.AttachStdout || config.AttachStderr
if attach {
detachKeys := dockerCli.ConfigFile().DetachKeys
if opts.detachKeys != "" {
detachKeys = opts.detachKeys
if runOpts.detachKeys != "" {
detachKeys = runOpts.detachKeys
}

closeFn, err := attachContainer(ctx, dockerCli, containerID, &errCh, config, container.AttachOptions{
@@ -18,7 +18,7 @@ import (
)

func TestRunLabel(t *testing.T) {
cli := test.NewFakeCli(&fakeClient{
fakeCLI := test.NewFakeCli(&fakeClient{
createContainerFunc: func(_ *container.Config, _ *container.HostConfig, _ *network.NetworkingConfig, _ *specs.Platform, _ string) (container.CreateResponse, error) {
return container.CreateResponse{
ID: "id",

@@ -26,7 +26,7 @@ func TestRunLabel(t *testing.T) {
},
Version: "1.36",
})
cmd := NewRunCommand(cli)
cmd := NewRunCommand(fakeCLI)
cmd.SetArgs([]string{"--detach=true", "--label", "foo", "busybox"})
assert.NilError(t, cmd.Execute())
}

@@ -58,7 +58,7 @@ func TestRunCommandWithContentTrustErrors(t *testing.T) {
},
}
for _, tc := range testCases {
cli := test.NewFakeCli(&fakeClient{
fakeCLI := test.NewFakeCli(&fakeClient{
createContainerFunc: func(config *container.Config,
hostConfig *container.HostConfig,
networkingConfig *network.NetworkingConfig,

@@ -68,13 +68,13 @@ func TestRunCommandWithContentTrustErrors(t *testing.T) {
return container.CreateResponse{}, fmt.Errorf("shouldn't try to pull image")
},
}, test.EnableContentTrust)
cli.SetNotaryClient(tc.notaryFunc)
cmd := NewRunCommand(cli)
fakeCLI.SetNotaryClient(tc.notaryFunc)
cmd := NewRunCommand(fakeCLI)
cmd.SetArgs(tc.args)
cmd.SetOut(io.Discard)
err := cmd.Execute()
assert.Assert(t, err != nil)
assert.Assert(t, is.Contains(cli.ErrBuffer().String(), tc.expectedError))
assert.Assert(t, is.Contains(fakeCLI.ErrBuffer().String(), tc.expectedError))
}
}
@@ -76,7 +76,8 @@ func RunStart(dockerCli command.Cli, opts *StartOptions) error {
ctx, cancelFun := context.WithCancel(context.Background())
defer cancelFun()

if opts.Attach || opts.OpenStdin {
switch {
case opts.Attach || opts.OpenStdin:
// We're going to attach to a container.
// 1. Ensure we only have one container.
if len(opts.Containers) > 1 {

@@ -180,7 +181,8 @@ func RunStart(dockerCli command.Cli, opts *StartOptions) error {
if status := <-statusChan; status != 0 {
return cli.StatusError{StatusCode: status}
}
} else if opts.Checkpoint != "" {
return nil
case opts.Checkpoint != "":
if len(opts.Containers) > 1 {
return errors.New("you cannot restore multiple containers at once")
}

@@ -189,14 +191,11 @@ func RunStart(dockerCli command.Cli, opts *StartOptions) error {
CheckpointID: opts.Checkpoint,
CheckpointDir: opts.CheckpointDir,
})

} else {
default:
// We're not going to attach to anything.
// Start as many containers as we want.
return startContainersWithoutAttachments(ctx, dockerCli, opts.Containers)
}

return nil
}

func startContainersWithoutAttachments(ctx context.Context, dockerCli command.Cli, containers []string) error {
@@ -206,9 +206,9 @@ func calculateBlockIO(blkio types.BlkioStats) (uint64, uint64) {
}
switch bioEntry.Op[0] {
case 'r', 'R':
blkRead = blkRead + bioEntry.Value
blkRead += bioEntry.Value
case 'w', 'W':
blkWrite = blkWrite + bioEntry.Value
blkWrite += bioEntry.Value
}
}
return blkRead, blkWrite
@@ -16,7 +16,7 @@ import (
)

// resizeTtyTo resizes tty to specific height and width
func resizeTtyTo(ctx context.Context, client client.ContainerAPIClient, id string, height, width uint, isExec bool) error {
func resizeTtyTo(ctx context.Context, apiClient client.ContainerAPIClient, id string, height, width uint, isExec bool) error {
if height == 0 && width == 0 {
return nil
}

@@ -28,9 +28,9 @@ func resizeTtyTo(ctx context.Context, client client.ContainerAPIClient, id strin
var err error
if isExec {
err = client.ContainerExecResize(ctx, id, options)
err = apiClient.ContainerExecResize(ctx, id, options)
} else {
err = client.ContainerResize(ctx, id, options)
err = apiClient.ContainerResize(ctx, id, options)
}

if err != nil {
@@ -81,10 +81,9 @@ func legacyWaitExitOrRemoved(ctx context.Context, apiClient client.APIClient, co
}
if !waitRemove {
stopProcessing = true
} else {
} else if versions.LessThan(apiClient.ClientVersion(), "1.25") {
// If we are talking to an older daemon, `AutoRemove` is not supported.
// We need to fall back to the old behavior, which is client-side removal
if versions.LessThan(apiClient.ClientVersion(), "1.25") {
go func() {
removeErr = apiClient.ContainerRemove(ctx, containerID, container.RemoveOptions{RemoveVolumes: true})
if removeErr != nil {

@@ -93,7 +92,6 @@ func legacyWaitExitOrRemoved(ctx context.Context, apiClient client.APIClient, co
}
}()
}
}
case "detach":
exitCode = 0
stopProcessing = true

@@ -129,7 +127,7 @@ func legacyWaitExitOrRemoved(ctx context.Context, apiClient client.APIClient, co
return statusChan
}

func parallelOperation(ctx context.Context, containers []string, op func(ctx context.Context, container string) error) chan error {
func parallelOperation(ctx context.Context, containers []string, op func(ctx context.Context, containerID string) error) chan error {
if len(containers) == 0 {
return nil
}
@@ -10,12 +10,12 @@ import (
// DockerContext is a typed representation of what we put in Context metadata
type DockerContext struct {
Description string
AdditionalFields map[string]interface{}
AdditionalFields map[string]any
}

// MarshalJSON implements custom JSON marshalling
func (dc DockerContext) MarshalJSON() ([]byte, error) {
s := map[string]interface{}{}
s := map[string]any{}
if dc.Description != "" {
s["Description"] = dc.Description
}

@@ -29,7 +29,7 @@ func (dc DockerContext) MarshalJSON() ([]byte, error) {

// UnmarshalJSON implements custom JSON marshalling
func (dc *DockerContext) UnmarshalJSON(payload []byte) error {
var data map[string]interface{}
var data map[string]any
if err := json.Unmarshal(payload, &data); err != nil {
return err
}

@@ -39,7 +39,7 @@ func (dc *DockerContext) UnmarshalJSON(payload []byte) error {
dc.Description = v.(string)
default:
if dc.AdditionalFields == nil {
dc.AdditionalFields = make(map[string]interface{})
dc.AdditionalFields = make(map[string]any)
}
dc.AdditionalFields[k] = v
}
@@ -36,7 +36,7 @@ func longCreateDescription() string {
return buf.String()
}

func newCreateCommand(dockerCli command.Cli) *cobra.Command {
func newCreateCommand(dockerCLI command.Cli) *cobra.Command {
opts := &CreateOptions{}
cmd := &cobra.Command{
Use: "create [OPTIONS] CONTEXT",

@@ -44,7 +44,7 @@ func newCreateCommand(dockerCli command.Cli) *cobra.Command {
Args: cli.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
opts.Name = args[0]
return RunCreate(dockerCli, opts)
return RunCreate(dockerCLI, opts)
},
Long: longCreateDescription(),
ValidArgsFunction: completion.NoComplete,

@@ -57,23 +57,23 @@ func newCreateCommand(dockerCli command.Cli) *cobra.Command {
}

// RunCreate creates a Docker context
func RunCreate(cli command.Cli, o *CreateOptions) error {
s := cli.ContextStore()
func RunCreate(dockerCLI command.Cli, o *CreateOptions) error {
s := dockerCLI.ContextStore()
err := checkContextNameForCreation(s, o.Name)
if err != nil {
return err
}
switch {
case o.From == "" && o.Docker == nil:
err = createFromExistingContext(s, cli.CurrentContext(), o)
err = createFromExistingContext(s, dockerCLI.CurrentContext(), o)
case o.From != "":
err = createFromExistingContext(s, o.From, o)
default:
err = createNewContext(s, o)
}
if err == nil {
fmt.Fprintln(cli.Out(), o.Name)
fmt.Fprintf(cli.Err(), "Successfully created context %q\n", o.Name)
fmt.Fprintln(dockerCLI.Out(), o.Name)
fmt.Fprintf(dockerCLI.Err(), "Successfully created context %q\n", o.Name)
}
return err
}

@@ -87,7 +87,7 @@ func createNewContext(contextStore store.ReaderWriter, o *CreateOptions) error {
return errors.Wrap(err, "unable to create docker endpoint config")
}
contextMetadata := store.Metadata{
Endpoints: map[string]interface{}{
Endpoints: map[string]any{
docker.DockerEndpoint: dockerEP,
},
Metadata: command.DockerContext{
@@ -16,15 +16,15 @@ func makeFakeCli(t *testing.T, opts ...func(*test.FakeCli)) *test.FakeCli {
t.Helper()
dir := t.TempDir()
storeConfig := store.NewConfig(
func() interface{} { return &command.DockerContext{} },
store.EndpointTypeGetter(docker.DockerEndpoint, func() interface{} { return &docker.EndpointMeta{} }),
func() any { return &command.DockerContext{} },
store.EndpointTypeGetter(docker.DockerEndpoint, func() any { return &docker.EndpointMeta{} }),
)
store := &command.ContextStoreWithDefault{
contextStore := &command.ContextStoreWithDefault{
Store: store.New(dir, storeConfig),
Resolver: func() (*command.DefaultContext, error) {
return &command.DefaultContext{
Meta: store.Metadata{
Endpoints: map[string]interface{}{
Endpoints: map[string]any{
docker.DockerEndpoint: docker.EndpointMeta{
Host: "unix:///var/run/docker.sock",
},

@@ -42,7 +42,7 @@ func makeFakeCli(t *testing.T, opts ...func(*test.FakeCli)) *test.FakeCli {
for _, o := range opts {
o(result)
}
result.SetContextStore(store)
result.SetContextStore(contextStore)
return result
}

@@ -104,6 +104,7 @@ func TestCreate(t *testing.T) {
}

func assertContextCreateLogging(t *testing.T, cli *test.FakeCli, n string) {
t.Helper()
assert.Equal(t, n+"\n", cli.OutBuffer().String())
assert.Equal(t, fmt.Sprintf("Successfully created context %q\n", n), cli.ErrBuffer().String())
}
@@ -40,7 +40,7 @@ func newInspectCommand(dockerCli command.Cli) *cobra.Command {
}

func runInspect(dockerCli command.Cli, opts inspectOptions) error {
getRefFunc := func(ref string) (interface{}, []byte, error) {
getRefFunc := func(ref string) (any, []byte, error) {
c, err := dockerCli.ContextStore().GetMetadata(ref)
if err != nil {
return nil, nil, err
@@ -17,7 +17,7 @@ func TestInspect(t *testing.T) {
}))
expected := string(golden.Get(t, "inspect.golden"))
si := cli.ContextStore().GetStorageInfo("current")
expected = strings.Replace(expected, "<METADATA_PATH>", strings.Replace(si.MetadataPath, `\`, `\\`, -1), 1)
expected = strings.Replace(expected, "<TLS_PATH>", strings.Replace(si.TLSPath, `\`, `\\`, -1), 1)
expected = strings.Replace(expected, "<METADATA_PATH>", strings.ReplaceAll(si.MetadataPath, `\`, `\\`), 1)
expected = strings.Replace(expected, "<TLS_PATH>", strings.ReplaceAll(si.TLSPath, `\`, `\\`), 1)
assert.Equal(t, cli.OutBuffer().String(), expected)
}
@@ -51,7 +51,7 @@ func runList(dockerCli command.Cli, opts *listOptions) error {
var (
curContext = dockerCli.CurrentContext()
curFound bool
contexts []*formatter.ClientContext
contexts = make([]*formatter.ClientContext, 0, len(contextMap))
)
for _, rawMeta := range contextMap {
isCurrent := rawMeta.Name == curContext
@@ -52,11 +52,11 @@ func newUpdateCommand(dockerCli command.Cli) *cobra.Command {
}

// RunUpdate updates a Docker context
func RunUpdate(cli command.Cli, o *UpdateOptions) error {
func RunUpdate(dockerCLI command.Cli, o *UpdateOptions) error {
if err := store.ValidateContextName(o.Name); err != nil {
return err
}
s := cli.ContextStore()
s := dockerCLI.ContextStore()
c, err := s.GetMetadata(o.Name)
if err != nil {
return err

@@ -93,8 +93,8 @@ func RunUpdate(cli command.Cli, o *UpdateOptions) error {
}
}

fmt.Fprintln(cli.Out(), o.Name)
fmt.Fprintf(cli.Err(), "Successfully updated context %q\n", o.Name)
fmt.Fprintln(dockerCLI.Out(), o.Name)
fmt.Fprintf(dockerCLI.Err(), "Successfully updated context %q\n", o.Name)
return nil
}
@@ -10,7 +10,7 @@ import (
func TestDockerContextMetadataKeepAdditionalFields(t *testing.T) {
c := DockerContext{
Description: "test",
AdditionalFields: map[string]interface{}{
AdditionalFields: map[string]any{
"foo": "bar",
},
}
@@ -44,7 +44,9 @@ type EndpointDefaultResolver interface {
// the lack of a default (e.g. because the config file which
// would contain it is missing). If there is no default then
// returns nil, nil, nil.
ResolveDefault() (interface{}, *store.EndpointTLSData, error)
//
//nolint:dupword // ignore "Duplicate words (nil,) found"
ResolveDefault() (any, *store.EndpointTLSData, error)
}

// ResolveDefaultContext creates a Metadata for the current CLI invocation parameters

@@ -53,7 +55,7 @@ func ResolveDefaultContext(opts *cliflags.ClientOptions, config store.Config) (*
Endpoints: make(map[string]store.EndpointTLSData),
}
contextMetadata := store.Metadata{
Endpoints: make(map[string]interface{}),
Endpoints: make(map[string]any),
Metadata: DockerContext{
Description: "",
},
@@ -23,14 +23,14 @@ type testContext struct {
Bar string `json:"another_very_recognizable_field_name"`
}

var testCfg = store.NewConfig(func() interface{} { return &testContext{} },
store.EndpointTypeGetter("ep1", func() interface{} { return &endpoint{} }),
store.EndpointTypeGetter("ep2", func() interface{} { return &endpoint{} }),
var testCfg = store.NewConfig(func() any { return &testContext{} },
store.EndpointTypeGetter("ep1", func() any { return &endpoint{} }),
store.EndpointTypeGetter("ep2", func() any { return &endpoint{} }),
)

func testDefaultMetadata() store.Metadata {
return store.Metadata{
Endpoints: map[string]interface{}{
Endpoints: map[string]any{
"ep1": endpoint{Foo: "bar"},
},
Metadata: testContext{Bar: "baz"},

@@ -149,7 +149,7 @@ func TestErrCreateDefault(t *testing.T) {
meta := testDefaultMetadata()
s := testStore(t, meta, store.ContextTLSData{})
err := s.CreateOrUpdate(store.Metadata{
Endpoints: map[string]interface{}{
Endpoints: map[string]any{
"ep1": endpoint{Foo: "bar"},
},
Metadata: testContext{Bar: "baz"},
@@ -1,8 +1,8 @@
package formatter

import (
"fmt"
"sort"
"strconv"
"strings"
"time"

@@ -171,13 +171,13 @@ func (c *buildCacheContext) LastUsedSince() string {
}

func (c *buildCacheContext) UsageCount() string {
return fmt.Sprintf("%d", c.v.UsageCount)
return strconv.Itoa(c.v.UsageCount)
}

func (c *buildCacheContext) InUse() string {
return fmt.Sprintf("%t", c.v.InUse)
return strconv.FormatBool(c.v.InUse)
}

func (c *buildCacheContext) Shared() string {
return fmt.Sprintf("%t", c.v.Shared)
return strconv.FormatBool(c.v.Shared)
}
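The fmt.Sprintf -> strconv rewrites above (and in the container formatter below) are the kind of change perfsprint suggests: when a single value is being formatted, strconv converts it directly instead of going through fmt's format-string parsing and interface boxing. A rough sketch of the equivalence, for illustration only:

package example

import (
	"fmt"
	"strconv"
)

// Both pairs produce identical strings; the strconv forms skip fmt's
// reflection-based formatting path.
func formatStats(usageCount int, inUse bool) (string, string) {
	_ = fmt.Sprintf("%d", usageCount) // what perfsprint flags
	_ = fmt.Sprintf("%t", inUse)      // likewise
	return strconv.Itoa(usageCount), strconv.FormatBool(inUse)
}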
@@ -86,7 +86,7 @@ type ContainerContext struct {
// used in the template. It's currently only used to detect use of the .Size
// field which (if used) automatically sets the '--size' option when making
// the API call.
FieldsUsed map[string]interface{}
FieldsUsed map[string]any
}

// NewContainerContext creates a new context for rendering containers

@@ -226,7 +226,7 @@ func (c *ContainerContext) Status() string {
// Size returns the container's size and virtual size (e.g. "2B (virtual 21.5MB)")
func (c *ContainerContext) Size() string {
if c.FieldsUsed == nil {
c.FieldsUsed = map[string]interface{}{}
c.FieldsUsed = map[string]any{}
}
c.FieldsUsed["Size"] = struct{}{}
srw := units.HumanSizeWithPrecision(float64(c.c.SizeRw), 3)

@@ -245,7 +245,7 @@ func (c *ContainerContext) Labels() string {
return ""
}

var joinLabels []string
joinLabels := make([]string, 0, len(c.c.Labels))
for k, v := range c.c.Labels {
joinLabels = append(joinLabels, k+"="+v)
}

@@ -265,7 +265,7 @@ func (c *ContainerContext) Label(name string) string {
// If the trunc option is set, names can be truncated (ellipsized).
func (c *ContainerContext) Mounts() string {
var name string
var mounts []string
mounts := make([]string, 0, len(c.c.Mounts))
for _, m := range c.c.Mounts {
if m.Name == "" {
name = m.Source

@@ -289,7 +289,7 @@ func (c *ContainerContext) LocalVolumes() string {
}
}

return fmt.Sprintf("%d", count)
return strconv.Itoa(count)
}

// Networks returns a comma-separated string of networks that the container is

@@ -299,7 +299,7 @@ func (c *ContainerContext) Networks() string {
return ""
}

networks := []string{}
networks := make([]string, 0, len(c.c.NetworkSettings.Networks))
for k := range c.c.NetworkSettings.Networks {
networks = append(networks, k)
}

@@ -316,7 +316,7 @@ func DisplayablePorts(ports []types.Port) string {
last uint16
}
groupMap := make(map[string]*portGroup)
var result []string
var result []string //nolint:prealloc
var hostMappings []string
var groupMapKeys []string
sort.Slice(ports, func(i, j int) bool {

@@ -331,7 +331,7 @@ func DisplayablePorts(ports []types.Port) string {
hostMappings = append(hostMappings, fmt.Sprintf("%s:%d->%d/%s", port.IP, port.PublicPort, port.PrivatePort, port.Type))
continue
}
portKey = fmt.Sprintf("%s/%s", port.IP, port.Type)
portKey = port.IP + "/" + port.Type
}
group := groupMap[portKey]

@@ -372,7 +372,7 @@ func formGroup(key string, start, last uint16) string {
if ip != "" {
group = fmt.Sprintf("%s:%s->%s", ip, group, group)
}
return fmt.Sprintf("%s/%s", group, groupType)
return group + "/" + groupType
}

func comparePorts(i, j types.Port) bool {
@ -265,7 +265,6 @@ size: 0B
|
|||
assert.Equal(t, out.String(), tc.expected)
|
||||
}
|
||||
})
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -340,7 +339,7 @@ func TestContainerContextWriteJSON(t *testing.T) {
|
|||
{ID: "containerID2", Names: []string{"/foobar_bar"}, Image: "ubuntu", Created: unix, State: "running"},
|
||||
}
|
||||
expectedCreated := time.Unix(unix, 0).String()
|
||||
expectedJSONs := []map[string]interface{}{
|
||||
expectedJSONs := []map[string]any{
|
||||
{
|
||||
"Command": "\"\"",
|
||||
"CreatedAt": expectedCreated,
|
||||
|
@ -381,7 +380,7 @@ func TestContainerContextWriteJSON(t *testing.T) {
|
|||
}
|
||||
for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") {
|
||||
msg := fmt.Sprintf("Output: line %d: %s", i, line)
|
||||
var m map[string]interface{}
|
||||
var m map[string]any
|
||||
err := json.Unmarshal([]byte(line), &m)
|
||||
assert.NilError(t, err, msg)
|
||||
assert.Check(t, is.DeepEqual(expectedJSONs[i], m), msg)
|
||||
|
|
|
@ -22,7 +22,7 @@ const (
|
|||
|
||||
// SubContext defines what Context implementation should provide
|
||||
type SubContext interface {
|
||||
FullHeader() interface{}
|
||||
FullHeader() any
|
||||
}
|
||||
|
||||
// SubHeaderContext is a map destined to formatter header (table format)
|
||||
|
@ -39,10 +39,10 @@ func (c SubHeaderContext) Label(name string) string {
|
|||
|
||||
// HeaderContext provides the subContext interface for managing headers
|
||||
type HeaderContext struct {
|
||||
Header interface{}
|
||||
Header any
|
||||
}
|
||||
|
||||
// FullHeader returns the header as an interface
|
||||
func (c *HeaderContext) FullHeader() interface{} {
|
||||
func (c *HeaderContext) FullHeader() any {
|
||||
return c.Header
|
||||
}
|
||||
|
|
|
@ -41,7 +41,7 @@ func Ellipsis(s string, maxDisplayWidth int) string {
|
|||
}
|
||||
|
||||
var (
|
||||
display []int
|
||||
display = make([]int, 0, len(rs))
|
||||
displayWidth int
|
||||
)
|
||||
for _, r := range rs {
|
||||
|
|
|
@ -51,7 +51,7 @@ type Context struct {
|
|||
|
||||
// internal element
|
||||
finalFormat string
|
||||
header interface{}
|
||||
header any
|
||||
buffer *bytes.Buffer
|
||||
}
|
||||
|
||||
|
|
|
@ -25,7 +25,7 @@ type fakeSubContext struct {
|
|||
Name string
|
||||
}
|
||||
|
||||
func (f fakeSubContext) FullHeader() interface{} {
|
||||
func (f fakeSubContext) FullHeader() any {
|
||||
return map[string]string{"Name": "NAME"}
|
||||
}
|
||||
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
package formatter
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/distribution/reference"
|
||||
|
@ -26,11 +26,11 @@ type ImageContext struct {
|
|||
Digest bool
|
||||
}
|
||||
|
||||
func isDangling(image image.Summary) bool {
|
||||
if len(image.RepoTags) == 0 && len(image.RepoDigests) == 0 {
|
||||
func isDangling(img image.Summary) bool {
|
||||
if len(img.RepoTags) == 0 && len(img.RepoDigests) == 0 {
|
||||
return true
|
||||
}
|
||||
return len(image.RepoTags) == 1 && image.RepoTags[0] == "<none>:<none>" && len(image.RepoDigests) == 1 && image.RepoDigests[0] == "<none>@<none>"
|
||||
return len(img.RepoTags) == 1 && img.RepoTags[0] == "<none>:<none>" && len(img.RepoDigests) == 1 && img.RepoDigests[0] == "<none>@<none>"
|
||||
}
|
||||
|
||||
// NewImageFormat returns a format for rendering an ImageContext
|
||||
|
@ -88,18 +88,18 @@ func needDigest(ctx ImageContext) bool {
|
|||
}
|
||||
|
||||
func imageFormat(ctx ImageContext, images []image.Summary, format func(subContext SubContext) error) error {
|
||||
for _, image := range images {
|
||||
for _, img := range images {
|
||||
formatted := []*imageContext{}
|
||||
if isDangling(image) {
|
||||
if isDangling(img) {
|
||||
formatted = append(formatted, &imageContext{
|
||||
trunc: ctx.Trunc,
|
||||
i: image,
|
||||
i: img,
|
||||
repo: "<none>",
|
||||
tag: "<none>",
|
||||
digest: "<none>",
|
||||
})
|
||||
} else {
|
||||
formatted = imageFormatTaggedAndDigest(ctx, image)
|
||||
formatted = imageFormatTaggedAndDigest(ctx, img)
|
||||
}
|
||||
for _, imageCtx := range formatted {
|
||||
if err := format(imageCtx); err != nil {
|
||||
|
@ -110,12 +110,12 @@ func imageFormat(ctx ImageContext, images []image.Summary, format func(subContex
|
|||
return nil
|
||||
}
|
||||
|
||||
func imageFormatTaggedAndDigest(ctx ImageContext, image image.Summary) []*imageContext {
|
||||
func imageFormatTaggedAndDigest(ctx ImageContext, img image.Summary) []*imageContext {
|
||||
repoTags := map[string][]string{}
|
||||
repoDigests := map[string][]string{}
|
||||
images := []*imageContext{}
|
||||
|
||||
for _, refString := range image.RepoTags {
|
||||
for _, refString := range img.RepoTags {
|
||||
ref, err := reference.ParseNormalizedNamed(refString)
|
||||
if err != nil {
|
||||
continue
|
||||
|
@ -125,7 +125,7 @@ func imageFormatTaggedAndDigest(ctx ImageContext, image image.Summary) []*imageC
|
|||
repoTags[familiarRef] = append(repoTags[familiarRef], nt.Tag())
|
||||
}
|
||||
}
|
||||
for _, refString := range image.RepoDigests {
|
||||
for _, refString := range img.RepoDigests {
|
||||
ref, err := reference.ParseNormalizedNamed(refString)
|
||||
if err != nil {
|
||||
continue
|
||||
|
@ -139,7 +139,7 @@ func imageFormatTaggedAndDigest(ctx ImageContext, image image.Summary) []*imageC
|
|||
addImage := func(repo, tag, digest string) {
|
||||
images = append(images, &imageContext{
|
||||
trunc: ctx.Trunc,
|
||||
i: image,
|
||||
i: img,
|
||||
repo: repo,
|
||||
tag: tag,
|
||||
digest: digest,
|
||||
|
@ -166,7 +166,6 @@ func imageFormatTaggedAndDigest(ctx ImageContext, image image.Summary) []*imageC
|
|||
for _, dgst := range digests {
|
||||
addImage(repo, tag, dgst)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -256,7 +255,7 @@ func (c *imageContext) Containers() string {
|
|||
if c.i.Containers == -1 {
|
||||
return "N/A"
|
||||
}
|
||||
return fmt.Sprintf("%d", c.i.Containers)
|
||||
return strconv.FormatInt(c.i.Containers, 10)
|
||||
}
|
||||
|
||||
// VirtualSize shows the virtual size of the image and all of its parent
|
||||
|
|
|
@ -72,7 +72,7 @@ func TestImageContext(t *testing.T) {
|
|||
{
|
||||
imageCtx: imageContext{i: image.Summary{Size: 10000}},
|
||||
expValue: "10kB",
|
||||
call: ctx.VirtualSize, //nolint:staticcheck // ignore SA1019: field is deprecated, but still set on API < v1.44.
|
||||
call: ctx.VirtualSize, //nolint:nolintlint,staticcheck // ignore SA1019: field is deprecated, but still set on API < v1.44.
|
||||
},
|
||||
{
|
||||
imageCtx: imageContext{i: image.Summary{SharedSize: 10000}},
|
||||
|
@ -148,7 +148,7 @@ image tag2 imageID2 N/A 0B
|
|||
Format: NewImageFormat("table {{.Repository}}", false, false),
|
||||
},
|
||||
},
|
||||
"REPOSITORY\nimage\nimage\n<none>\n",
|
||||
"REPOSITORY\nimage\nimage\n<none>\n", //nolint:dupword // ignore "Duplicate words (image) found"
|
||||
},
|
||||
{
|
||||
ImageContext{
|
||||
|
@ -169,7 +169,7 @@ image <none>
|
|||
Format: NewImageFormat("table {{.Repository}}", true, false),
|
||||
},
|
||||
},
|
||||
"REPOSITORY\nimage\nimage\n<none>\n",
|
||||
"REPOSITORY\nimage\nimage\n<none>\n", //nolint:dupword // ignore "Duplicate words (image) found"
|
||||
},
|
||||
{
|
||||
ImageContext{
|
||||
|
@ -284,7 +284,7 @@ image_id: imageID3
|
|||
Format: NewImageFormat("{{.Repository}}", false, false),
|
||||
},
|
||||
},
|
||||
"image\nimage\n<none>\n",
|
||||
"image\nimage\n<none>\n", //nolint:dupword // ignore "Duplicate words (image) found"
|
||||
},
|
||||
{
|
||||
ImageContext{
|
||||
|
@ -293,7 +293,7 @@ image_id: imageID3
|
|||
},
|
||||
Digest: true,
|
||||
},
|
||||
"image\nimage\n<none>\n",
|
||||
"image\nimage\n<none>\n", //nolint:dupword // ignore "Duplicate words (image) found"
|
||||
},
|
||||
}
|
||||
|
||||
|
|
|
@ -10,7 +10,7 @@ import (
|
|||
|
||||
// MarshalJSON marshals x into json
|
||||
// It differs a bit from encoding/json MarshalJSON function for formatter
|
||||
func MarshalJSON(x interface{}) ([]byte, error) {
|
||||
func MarshalJSON(x any) ([]byte, error) {
|
||||
m, err := marshalMap(x)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -18,8 +18,8 @@ func MarshalJSON(x interface{}) ([]byte, error) {
|
|||
return json.Marshal(m)
|
||||
}
|
||||
|
||||
// marshalMap marshals x to map[string]interface{}
|
||||
func marshalMap(x interface{}) (map[string]interface{}, error) {
|
||||
// marshalMap marshals x to map[string]any
|
||||
func marshalMap(x any) (map[string]any, error) {
|
||||
val := reflect.ValueOf(x)
|
||||
if val.Kind() != reflect.Ptr {
|
||||
return nil, errors.Errorf("expected a pointer to a struct, got %v", val.Kind())
|
||||
|
@ -32,7 +32,7 @@ func marshalMap(x interface{}) (map[string]interface{}, error) {
|
|||
return nil, errors.Errorf("expected a pointer to a struct, got a pointer to %v", valElem.Kind())
|
||||
}
|
||||
typ := val.Type()
|
||||
m := make(map[string]interface{})
|
||||
m := make(map[string]any)
|
||||
for i := 0; i < val.NumMethod(); i++ {
|
||||
k, v, err := marshalForMethod(typ.Method(i), val.Method(i))
|
||||
if err != nil {
|
||||
|
@ -49,7 +49,7 @@ var unmarshallableNames = map[string]struct{}{"FullHeader": {}}
|
|||
|
||||
// marshalForMethod returns the map key and the map value for marshalling the method.
|
||||
// It returns ("", nil, nil) for valid but non-marshallable parameter. (e.g. "unexportedFunc()")
|
||||
func marshalForMethod(typ reflect.Method, val reflect.Value) (string, interface{}, error) {
|
||||
func marshalForMethod(typ reflect.Method, val reflect.Value) (string, any, error) {
|
||||
if val.Kind() != reflect.Func {
|
||||
return "", nil, errors.Errorf("expected func, got %v", val.Kind())
|
||||
}
|
||||
|
|
|
@ -33,7 +33,7 @@ func (d *dummy) FullHeader() string {
|
|||
return "FullHeader(should not be marshalled)"
|
||||
}
|
||||
|
||||
var dummyExpected = map[string]interface{}{
|
||||
var dummyExpected = map[string]any{
|
||||
"Func1": "Func1",
|
||||
"Func4": 4,
|
||||
"Func5": dummyType("Func5"),
|
||||
|
|
|
@ -12,7 +12,7 @@
|
|||
|
||||
// based on https://github.com/golang/go/blob/master/src/text/tabwriter/tabwriter.go Last modified 690ac40 on 31 Jan
|
||||
|
||||
//nolint:gocyclo,nakedret,revive,stylecheck,unused // ignore linting errors, so that we can stick close to upstream
|
||||
//nolint:gocyclo,nakedret,stylecheck,unused // ignore linting errors, so that we can stick close to upstream
|
||||
package tabwriter
|
||||
|
||||
import (
|
||||
|
@ -202,7 +202,7 @@ const (
|
|||
//
|
||||
// minwidth minimal cell width including any padding
|
||||
// tabwidth width of tab characters (equivalent number of spaces)
|
||||
// padding padding added to a cell before computing its width
|
||||
// padding the padding added to a cell before computing its width
|
||||
// padchar ASCII char used for padding
|
||||
// if padchar == '\t', the Writer will assume that the
|
||||
// width of a '\t' in the formatted output is tabwidth,
|
||||
|
@ -576,9 +576,8 @@ func (b *Writer) Write(buf []byte) (n int, err error) {
|
|||
b.startEscape(ch)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
} else if ch == b.endChar {
|
||||
// inside escape
|
||||
if ch == b.endChar {
|
||||
// end of tag/entity
|
||||
j := i + 1
|
||||
if ch == Escape && b.flags&StripEscape != 0 {
|
||||
|
@ -589,7 +588,6 @@ func (b *Writer) Write(buf []byte) (n int, err error) {
|
|||
b.endEscape()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// append leftover text
|
||||
b.append(buf[n:])
|
||||
|
|
|
@ -8,6 +8,7 @@ import (
|
|||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"testing"
|
||||
)
|
||||
|
||||
|
@ -36,6 +37,7 @@ func (b *buffer) Write(buf []byte) (written int, err error) {
|
|||
func (b *buffer) String() string { return string(b.a) }
|
||||
|
||||
func write(t *testing.T, testname string, w *Writer, src string) {
|
||||
t.Helper()
|
||||
written, err := io.WriteString(w, src)
|
||||
if err != nil {
|
||||
t.Errorf("--- test: %s\n--- src:\n%q\n--- write error: %v\n", testname, src, err)
|
||||
|
@ -46,6 +48,7 @@ func write(t *testing.T, testname string, w *Writer, src string) {
|
|||
}
|
||||
|
||||
func verify(t *testing.T, testname string, w *Writer, b *buffer, src, expected string) {
|
||||
t.Helper()
|
||||
err := w.Flush()
|
||||
if err != nil {
|
||||
t.Errorf("--- test: %s\n--- src:\n%q\n--- flush error: %v\n", testname, src, err)
|
||||
|
@ -58,6 +61,7 @@ func verify(t *testing.T, testname string, w *Writer, b *buffer, src, expected s
|
|||
}
|
||||
|
||||
func check(t *testing.T, testname string, minwidth, tabwidth, padding int, padchar byte, flags uint, src, expected string) {
|
||||
t.Helper()
|
||||
var b buffer
|
||||
b.init(1000)
|
||||
|
||||
|
@ -622,6 +626,7 @@ func (panicWriter) Write([]byte) (int, error) {
|
|||
}
|
||||
|
||||
func wantPanicString(t *testing.T, want string) {
|
||||
t.Helper()
|
||||
if e := recover(); e != nil {
|
||||
got, ok := e.(string)
|
||||
switch {
|
||||
|
@ -691,7 +696,7 @@ func BenchmarkPyramid(b *testing.B) {
|
|||
for _, x := range [...]int{10, 100, 1000} {
|
||||
// Build a line with x cells.
|
||||
line := bytes.Repeat([]byte("a\t"), x)
|
||||
b.Run(fmt.Sprintf("%d", x), func(b *testing.B) {
|
||||
b.Run(strconv.Itoa(x), func(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
w := NewWriter(io.Discard, 4, 4, 1, ' ', 0) // no particular reason for these settings
|
||||
|
@ -713,7 +718,7 @@ func BenchmarkRagged(b *testing.B) {
|
|||
lines[i] = bytes.Repeat([]byte("a\t"), w)
|
||||
}
|
||||
for _, h := range [...]int{10, 100, 1000} {
|
||||
b.Run(fmt.Sprintf("%d", h), func(b *testing.B) {
|
||||
b.Run(strconv.Itoa(h), func(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
w := NewWriter(io.Discard, 4, 4, 1, ' ', 0) // no particular reason for these settings
|
||||
|
|
|
@ -100,7 +100,7 @@ func (c *volumeContext) Labels() string {
|
|||
return ""
|
||||
}
|
||||
|
||||
var joinLabels []string
|
||||
joinLabels := make([]string, 0, len(c.v.Labels))
|
||||
for k, v := range c.v.Labels {
|
||||
joinLabels = append(joinLabels, k+"="+v)
|
||||
}
|
||||
|
|
|
@ -147,7 +147,7 @@ func TestVolumeContextWriteJSON(t *testing.T) {
|
|||
{Driver: "foo", Name: "foobar_baz"},
|
||||
{Driver: "bar", Name: "foobar_bar"},
|
||||
}
|
||||
expectedJSONs := []map[string]interface{}{
|
||||
expectedJSONs := []map[string]any{
|
||||
{"Availability": "N/A", "Driver": "foo", "Group": "N/A", "Labels": "", "Links": "N/A", "Mountpoint": "", "Name": "foobar_baz", "Scope": "", "Size": "N/A", "Status": "N/A"},
|
||||
{"Availability": "N/A", "Driver": "bar", "Group": "N/A", "Labels": "", "Links": "N/A", "Mountpoint": "", "Name": "foobar_bar", "Scope": "", "Size": "N/A", "Status": "N/A"},
|
||||
}
|
||||
|
@ -158,7 +158,7 @@ func TestVolumeContextWriteJSON(t *testing.T) {
|
|||
}
|
||||
for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") {
|
||||
msg := fmt.Sprintf("Output: line %d: %s", i, line)
|
||||
var m map[string]interface{}
|
||||
var m map[string]any
|
||||
err := json.Unmarshal([]byte(line), &m)
|
||||
assert.NilError(t, err, msg)
|
||||
assert.Check(t, is.DeepEqual(expectedJSONs[i], m), msg)
|
||||
|
|
|
@ -17,20 +17,21 @@ type IDResolver struct {
|
|||
}
|
||||
|
||||
// New creates a new IDResolver.
|
||||
func New(client client.APIClient, noResolve bool) *IDResolver {
|
||||
func New(apiClient client.APIClient, noResolve bool) *IDResolver {
|
||||
return &IDResolver{
|
||||
client: client,
|
||||
client: apiClient,
|
||||
noResolve: noResolve,
|
||||
cache: make(map[string]string),
|
||||
}
|
||||
}
|
||||
|
||||
func (r *IDResolver) get(ctx context.Context, t interface{}, id string) (string, error) {
|
||||
func (r *IDResolver) get(ctx context.Context, t any, id string) (string, error) {
|
||||
switch t.(type) {
|
||||
case swarm.Node:
|
||||
node, _, err := r.client.NodeInspectWithRaw(ctx, id)
|
||||
if err != nil {
|
||||
return id, nil
|
||||
// TODO(thaJeztah): should error-handling be more specific, or is it ok to ignore any error?
|
||||
return id, nil //nolint:nilerr // ignore nil-error being returned, as this is a best-effort.
|
||||
}
|
||||
if node.Spec.Annotations.Name != "" {
|
||||
return node.Spec.Annotations.Name, nil
|
||||
|
@ -42,7 +43,8 @@ func (r *IDResolver) get(ctx context.Context, t interface{}, id string) (string,
|
|||
case swarm.Service:
|
||||
service, _, err := r.client.ServiceInspectWithRaw(ctx, id, types.ServiceInspectOptions{})
|
||||
if err != nil {
|
||||
return id, nil
|
||||
// TODO(thaJeztah): should error-handling be more specific, or is it ok to ignore any error?
|
||||
return id, nil //nolint:nilerr // ignore nil-error being returned, as this is a best-effort.
|
||||
}
|
||||
return service.Spec.Annotations.Name, nil
|
||||
default:
|
||||
|
@ -53,7 +55,7 @@ func (r *IDResolver) get(ctx context.Context, t interface{}, id string) (string,
|
|||
// Resolve will attempt to resolve an ID to a Name by querying the manager.
|
||||
// Results are stored into a cache.
|
||||
// If the `-n` flag is used in the command-line, resolution is disabled.
|
||||
func (r *IDResolver) Resolve(ctx context.Context, t interface{}, id string) (string, error) {
|
||||
func (r *IDResolver) Resolve(ctx context.Context, t any, id string) (string, error) {
|
||||
if r.noResolve {
|
||||
return id, nil
|
||||
}
|
||||
|
|
|
@ -23,16 +23,19 @@ func prepareEmpty(_ *testing.T) string {
|
|||
}
|
||||
|
||||
func prepareNoFiles(t *testing.T) string {
|
||||
t.Helper()
|
||||
return createTestTempDir(t)
|
||||
}
|
||||
|
||||
func prepareOneFile(t *testing.T) string {
|
||||
t.Helper()
|
||||
contextDir := createTestTempDir(t)
|
||||
createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents)
|
||||
return contextDir
|
||||
}
|
||||
|
||||
func testValidateContextDirectory(t *testing.T, prepare func(t *testing.T) string, excludes []string) {
|
||||
t.Helper()
|
||||
contextDir := prepare(t)
|
||||
err := ValidateContextDirectory(contextDir, excludes)
|
||||
assert.NilError(t, err)
|
||||
|
@ -250,6 +253,7 @@ func createTestTempFile(t *testing.T, dir, filename, contents string) string {
|
|||
// This function is meant to be executed as a deferred call.
|
||||
// When an error occurs, it terminates the test.
|
||||
func chdir(t *testing.T, dir string) {
|
||||
t.Helper()
|
||||
workingDirectory, err := os.Getwd()
|
||||
assert.NilError(t, err)
|
||||
assert.NilError(t, os.Chdir(dir))
|
||||
|
|
|
@ -25,8 +25,8 @@ func TestRunBuildDockerfileFromStdinWithCompress(t *testing.T) {
|
|||
t.Setenv("DOCKER_BUILDKIT", "0")
|
||||
buffer := new(bytes.Buffer)
|
||||
fakeBuild := newFakeBuild()
|
||||
fakeImageBuild := func(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) {
|
||||
tee := io.TeeReader(context, buffer)
|
||||
fakeImageBuild := func(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) {
|
||||
tee := io.TeeReader(buildContext, buffer)
|
||||
gzipReader, err := gzip.NewReader(tee)
|
||||
assert.NilError(t, err)
|
||||
return fakeBuild.build(ctx, gzipReader, options)
|
||||
|
@ -184,8 +184,8 @@ func newFakeBuild() *fakeBuild {
|
|||
return &fakeBuild{}
|
||||
}
|
||||
|
||||
func (f *fakeBuild) build(_ context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) {
|
||||
f.context = tar.NewReader(context)
|
||||
func (f *fakeBuild) build(_ context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) {
|
||||
f.context = tar.NewReader(buildContext)
|
||||
f.options = options
|
||||
body := new(bytes.Buffer)
|
||||
return types.ImageBuildResponse{Body: io.NopCloser(body)}, nil
|
||||
|
|
|
@ -30,9 +30,9 @@ type fakeClient struct {
|
|||
imageBuildFunc func(context.Context, io.Reader, types.ImageBuildOptions) (types.ImageBuildResponse, error)
|
||||
}
|
||||
|
||||
func (cli *fakeClient) ImageTag(_ context.Context, image, ref string) error {
|
||||
func (cli *fakeClient) ImageTag(_ context.Context, img, ref string) error {
|
||||
if cli.imageTagFunc != nil {
|
||||
return cli.imageTagFunc(image, ref)
|
||||
return cli.imageTagFunc(img, ref)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -95,9 +95,9 @@ func (cli *fakeClient) ImageList(_ context.Context, options types.ImageListOptio
|
|||
return []image.Summary{}, nil
|
||||
}
|
||||
|
||||
func (cli *fakeClient) ImageInspectWithRaw(_ context.Context, image string) (types.ImageInspect, []byte, error) {
|
||||
func (cli *fakeClient) ImageInspectWithRaw(_ context.Context, img string) (types.ImageInspect, []byte, error) {
|
||||
if cli.imageInspectFunc != nil {
|
||||
return cli.imageInspectFunc(image)
|
||||
return cli.imageInspectFunc(img)
|
||||
}
|
||||
return types.ImageInspect{}, nil, nil
|
||||
}
|
||||
|
@ -118,9 +118,9 @@ func (cli *fakeClient) ImageHistory(_ context.Context, img string) ([]image.Hist
|
|||
return []image.HistoryResponseItem{{ID: img, Created: time.Now().Unix()}}, nil
|
||||
}
|
||||
|
||||
func (cli *fakeClient) ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) {
|
||||
func (cli *fakeClient) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) {
|
||||
if cli.imageBuildFunc != nil {
|
||||
return cli.imageBuildFunc(ctx, context, options)
|
||||
return cli.imageBuildFunc(ctx, buildContext, options)
|
||||
}
|
||||
return types.ImageBuildResponse{Body: io.NopCloser(strings.NewReader(""))}, nil
|
||||
}
|
||||
|
|
|
@ -22,8 +22,7 @@ const (
|
|||
|
||||
// NewHistoryFormat returns a format for rendering an HistoryContext
|
||||
func NewHistoryFormat(source string, quiet bool, human bool) formatter.Format {
|
||||
switch source {
|
||||
case formatter.TableFormatKey:
|
||||
if source == formatter.TableFormatKey {
|
||||
switch {
|
||||
case quiet:
|
||||
return formatter.DefaultQuietFormat
|
||||
|
@ -97,7 +96,7 @@ func (c *historyContext) CreatedSince() string {
|
|||
}
|
||||
|
||||
func (c *historyContext) CreatedBy() string {
|
||||
createdBy := strings.Replace(c.h.CreatedBy, "\t", " ", -1)
|
||||
createdBy := strings.ReplaceAll(c.h.CreatedBy, "\t", " ")
|
||||
if c.trunc {
|
||||
return formatter.Ellipsis(createdBy, 45)
|
||||
}
|
||||
|
|
|
@ -213,6 +213,7 @@ func TestHistoryContext_Table(t *testing.T) {
|
|||
{ID: "imageID6", Created: oldDate, CreatedBy: "/bin/bash echo", Size: int64(182964289), Comment: "Hi", Tags: []string{"image:tag2"}},
|
||||
}
|
||||
|
||||
//nolint:dupword // ignore "Duplicate words (CREATED) found"
|
||||
const expectedNoTrunc = `IMAGE CREATED CREATED BY SIZE COMMENT
|
||||
imageID1 24 hours ago /bin/bash ls && npm i && npm run test && karma -c karma.conf.js start && npm start && more commands here && the list goes on 183MB Hi
|
||||
imageID2 24 hours ago /bin/bash echo 183MB Hi
|
||||
|
@ -221,6 +222,7 @@ imageID4 24 hours ago /bin/bash grep
|
|||
imageID5 N/A /bin/bash echo 183MB Hi
|
||||
imageID6 17 years ago /bin/bash echo 183MB Hi
|
||||
`
|
||||
//nolint:dupword // ignore "Duplicate words (CREATED) found"
|
||||
const expectedTrunc = `IMAGE CREATED CREATED BY SIZE COMMENT
|
||||
imageID1 24 hours ago /bin/bash ls && npm i && npm run test && kar… 183MB Hi
|
||||
imageID2 24 hours ago /bin/bash echo 183MB Hi
|
||||
|
|
|
@ -38,7 +38,7 @@ func runInspect(dockerCli command.Cli, opts inspectOptions) error {
|
|||
client := dockerCli.Client()
|
||||
ctx := context.Background()
|
||||
|
||||
getRefFunc := func(ref string) (interface{}, []byte, error) {
|
||||
getRefFunc := func(ref string) (any, []byte, error) {
|
||||
return client.ImageInspectWithRaw(ctx, ref)
|
||||
}
|
||||
return inspect.Inspect(dockerCli.Out(), opts.refs, opts.format, getRefFunc)
|
||||
|
|
|
@ -3,6 +3,7 @@ package image
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/cli/cli"
|
||||
|
@ -59,7 +60,7 @@ Are you sure you want to continue?`
|
|||
|
||||
func runPrune(dockerCli command.Cli, options pruneOptions) (spaceReclaimed uint64, output string, err error) {
|
||||
pruneFilters := options.filter.Value().Clone()
|
||||
pruneFilters.Add("dangling", fmt.Sprintf("%v", !options.all))
|
||||
pruneFilters.Add("dangling", strconv.FormatBool(!options.all))
|
||||
pruneFilters = command.PruneFilters(dockerCli, pruneFilters)
|
||||
|
||||
warning := danglingWarning
|
||||
|
|
|
@ -54,7 +54,7 @@ func NewPullCommand(dockerCli command.Cli) *cobra.Command {
|
|||
}
|
||||
|
||||
// RunPull performs a pull against the engine based on the specified options
|
||||
func RunPull(cli command.Cli, opts PullOptions) error {
|
||||
func RunPull(dockerCLI command.Cli, opts PullOptions) error {
|
||||
distributionRef, err := reference.ParseNormalizedNamed(opts.remote)
|
||||
switch {
|
||||
case err != nil:
|
||||
|
@ -64,12 +64,12 @@ func RunPull(cli command.Cli, opts PullOptions) error {
|
|||
case !opts.all && reference.IsNameOnly(distributionRef):
|
||||
distributionRef = reference.TagNameOnly(distributionRef)
|
||||
if tagged, ok := distributionRef.(reference.Tagged); ok && !opts.quiet {
|
||||
fmt.Fprintf(cli.Out(), "Using default tag: %s\n", tagged.Tag())
|
||||
fmt.Fprintf(dockerCLI.Out(), "Using default tag: %s\n", tagged.Tag())
|
||||
}
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
imgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, AuthResolver(cli), distributionRef.String())
|
||||
imgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, AuthResolver(dockerCLI), distributionRef.String())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -77,9 +77,9 @@ func RunPull(cli command.Cli, opts PullOptions) error {
|
|||
// Check if reference has a digest
|
||||
_, isCanonical := distributionRef.(reference.Canonical)
|
||||
if !opts.untrusted && !isCanonical {
|
||||
err = trustedPull(ctx, cli, imgRefAndAuth, opts)
|
||||
err = trustedPull(ctx, dockerCLI, imgRefAndAuth, opts)
|
||||
} else {
|
||||
err = imagePullPrivileged(ctx, cli, imgRefAndAuth, opts)
|
||||
err = imagePullPrivileged(ctx, dockerCLI, imgRefAndAuth, opts)
|
||||
}
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "when fetching 'plugin'") {
|
||||
|
@ -87,6 +87,6 @@ func RunPull(cli command.Cli, opts PullOptions) error {
|
|||
}
|
||||
return err
|
||||
}
|
||||
fmt.Fprintln(cli.Out(), imgRefAndAuth.Reference().String())
|
||||
fmt.Fprintln(dockerCLI.Out(), imgRefAndAuth.Reference().String())
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -44,7 +44,7 @@ func TrustedPush(ctx context.Context, cli command.Cli, repoInfo *registry.Reposi
|
|||
// PushTrustedReference pushes a canonical reference to the trust server.
|
||||
//
|
||||
//nolint:gocyclo
|
||||
func PushTrustedReference(streams command.Streams, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig registrytypes.AuthConfig, in io.Reader) error {
|
||||
func PushTrustedReference(ioStreams command.Streams, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig registrytypes.AuthConfig, in io.Reader) error {
|
||||
// If it is a trusted push we would like to find the target entry which match the
|
||||
// tag provided in the function and then do an AddTarget later.
|
||||
target := &client.Target{}
|
||||
|
@ -83,14 +83,14 @@ func PushTrustedReference(streams command.Streams, repoInfo *registry.Repository
|
|||
default:
|
||||
// We want trust signatures to always take an explicit tag,
|
||||
// otherwise it will act as an untrusted push.
|
||||
if err := jsonmessage.DisplayJSONMessagesToStream(in, streams.Out(), nil); err != nil {
|
||||
if err := jsonmessage.DisplayJSONMessagesToStream(in, ioStreams.Out(), nil); err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintln(streams.Err(), "No tag specified, skipping trust metadata push")
|
||||
fmt.Fprintln(ioStreams.Err(), "No tag specified, skipping trust metadata push")
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := jsonmessage.DisplayJSONMessagesToStream(in, streams.Out(), handleTarget); err != nil {
|
||||
if err := jsonmessage.DisplayJSONMessagesToStream(in, ioStreams.Out(), handleTarget); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -102,9 +102,9 @@ func PushTrustedReference(streams command.Streams, repoInfo *registry.Repository
|
|||
return errors.Errorf("no targets found, please provide a specific tag in order to sign it")
|
||||
}
|
||||
|
||||
fmt.Fprintln(streams.Out(), "Signing and pushing trust metadata")
|
||||
fmt.Fprintln(ioStreams.Out(), "Signing and pushing trust metadata")
|
||||
|
||||
repo, err := trust.GetNotaryRepository(streams.In(), streams.Out(), command.UserAgent(), repoInfo, &authConfig, "push", "pull")
|
||||
repo, err := trust.GetNotaryRepository(ioStreams.In(), ioStreams.Out(), command.UserAgent(), repoInfo, &authConfig, "push", "pull")
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error establishing connection to trust repository")
|
||||
}
|
||||
|
@ -132,7 +132,7 @@ func PushTrustedReference(streams command.Streams, repoInfo *registry.Repository
|
|||
if err := repo.Initialize([]string{rootKeyID}, data.CanonicalSnapshotRole); err != nil {
|
||||
return trust.NotaryError(repoInfo.Name.Name(), err)
|
||||
}
|
||||
fmt.Fprintf(streams.Out(), "Finished initializing %q\n", repoInfo.Name.Name())
|
||||
fmt.Fprintf(ioStreams.Out(), "Finished initializing %q\n", repoInfo.Name.Name())
|
||||
err = repo.AddTarget(target, data.CanonicalTargetsRole)
|
||||
case nil:
|
||||
// already initialized and we have successfully downloaded the latest metadata
|
||||
|
@ -150,7 +150,7 @@ func PushTrustedReference(streams command.Streams, repoInfo *registry.Repository
|
|||
return trust.NotaryError(repoInfo.Name.Name(), err)
|
||||
}
|
||||
|
||||
fmt.Fprintf(streams.Out(), "Successfully signed %s:%s\n", repoInfo.Name.Name(), tag)
|
||||
fmt.Fprintf(ioStreams.Out(), "Successfully signed %s:%s\n", repoInfo.Name.Name(), tag)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
@ -16,7 +16,7 @@ import (
|
|||
// Inspector defines an interface to implement to process elements
|
||||
type Inspector interface {
|
||||
// Inspect writes the raw element in JSON format.
|
||||
Inspect(typedElement interface{}, rawElement []byte) error
|
||||
Inspect(typedElement any, rawElement []byte) error
|
||||
// Flush writes the result of inspecting all elements into the output stream.
|
||||
Flush() error
|
||||
}
|
||||
|
@ -57,7 +57,7 @@ func NewTemplateInspectorFromString(out io.Writer, tmplStr string) (Inspector, e
|
|||
|
||||
// GetRefFunc is a function which used by Inspect to fetch an object from a
|
||||
// reference
|
||||
type GetRefFunc func(ref string) (interface{}, []byte, error)
|
||||
type GetRefFunc func(ref string) (any, []byte, error)
|
||||
|
||||
// Inspect fetches objects by reference using GetRefFunc and writes the json
|
||||
// representation to the output writer.
|
||||
|
@ -96,7 +96,7 @@ func Inspect(out io.Writer, references []string, tmplStr string, getRef GetRefFu
|
|||
// Inspect executes the inspect template.
|
||||
// It decodes the raw element into a map if the initial execution fails.
|
||||
// This allows docker cli to parse inspect structs injected with Swarm fields.
|
||||
func (i *TemplateInspector) Inspect(typedElement interface{}, rawElement []byte) error {
|
||||
func (i *TemplateInspector) Inspect(typedElement any, rawElement []byte) error {
|
||||
buffer := new(bytes.Buffer)
|
||||
if err := i.tmpl.Execute(buffer, typedElement); err != nil {
|
||||
if rawElement == nil {
|
||||
|
@ -112,7 +112,7 @@ func (i *TemplateInspector) Inspect(typedElement interface{}, rawElement []byte)
|
|||
// tryRawInspectFallback executes the inspect template with a raw interface.
|
||||
// This allows docker cli to parse inspect structs injected with Swarm fields.
|
||||
func (i *TemplateInspector) tryRawInspectFallback(rawElement []byte) error {
|
||||
var raw interface{}
|
||||
var raw any
|
||||
buffer := new(bytes.Buffer)
|
||||
rdr := bytes.NewReader(rawElement)
|
||||
dec := json.NewDecoder(rdr)
|
||||
|
@ -150,7 +150,7 @@ func NewIndentedInspector(outputStream io.Writer) Inspector {
|
|||
raw: func(dst *bytes.Buffer, src []byte) error {
|
||||
return json.Indent(dst, src, "", " ")
|
||||
},
|
||||
el: func(v interface{}) ([]byte, error) {
|
||||
el: func(v any) ([]byte, error) {
|
||||
return json.MarshalIndent(v, "", " ")
|
||||
},
|
||||
}
|
||||
|
@ -168,13 +168,13 @@ func NewJSONInspector(outputStream io.Writer) Inspector {
|
|||
|
||||
type elementsInspector struct {
|
||||
outputStream io.Writer
|
||||
elements []interface{}
|
||||
elements []any
|
||||
rawElements [][]byte
|
||||
raw func(dst *bytes.Buffer, src []byte) error
|
||||
el func(v interface{}) ([]byte, error)
|
||||
el func(v any) ([]byte, error)
|
||||
}
|
||||
|
||||
func (e *elementsInspector) Inspect(typedElement interface{}, rawElement []byte) error {
|
||||
func (e *elementsInspector) Inspect(typedElement any, rawElement []byte) error {
|
||||
if rawElement != nil {
|
||||
e.rawElements = append(e.rawElements, rawElement)
|
||||
} else {
|
||||
|
|
|
@ -40,13 +40,13 @@ func TestManifestAnnotateError(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestManifestAnnotate(t *testing.T) {
|
||||
store := store.NewStore(t.TempDir())
|
||||
manifestStore := store.NewStore(t.TempDir())
|
||||
|
||||
cli := test.NewFakeCli(nil)
|
||||
cli.SetManifestStore(store)
|
||||
cli.SetManifestStore(manifestStore)
|
||||
namedRef := ref(t, "alpine:3.0")
|
||||
imageManifest := fullImageManifest(t, namedRef)
|
||||
err := store.Save(ref(t, "list:v1"), namedRef, imageManifest)
|
||||
err := manifestStore.Save(ref(t, "list:v1"), namedRef, imageManifest)
|
||||
assert.NilError(t, err)
|
||||
|
||||
cmd := newAnnotateCommand(cli)
|
||||
|
|
|
@ -41,18 +41,18 @@ func TestManifestCreateErrors(t *testing.T) {
|
|||
|
||||
// create a manifest list, then overwrite it, and inspect to see if the old one is still there
|
||||
func TestManifestCreateAmend(t *testing.T) {
|
||||
store := store.NewStore(t.TempDir())
|
||||
manifestStore := store.NewStore(t.TempDir())
|
||||
|
||||
cli := test.NewFakeCli(nil)
|
||||
cli.SetManifestStore(store)
|
||||
cli.SetManifestStore(manifestStore)
|
||||
|
||||
namedRef := ref(t, "alpine:3.0")
|
||||
imageManifest := fullImageManifest(t, namedRef)
|
||||
err := store.Save(ref(t, "list:v1"), namedRef, imageManifest)
|
||||
err := manifestStore.Save(ref(t, "list:v1"), namedRef, imageManifest)
|
||||
assert.NilError(t, err)
|
||||
namedRef = ref(t, "alpine:3.1")
|
||||
imageManifest = fullImageManifest(t, namedRef)
|
||||
err = store.Save(ref(t, "list:v1"), namedRef, imageManifest)
|
||||
err = manifestStore.Save(ref(t, "list:v1"), namedRef, imageManifest)
|
||||
assert.NilError(t, err)
|
||||
|
||||
cmd := newCreateListCommand(cli)
|
||||
|
@ -64,7 +64,7 @@ func TestManifestCreateAmend(t *testing.T) {
|
|||
|
||||
// make a new cli to clear the buffers
|
||||
cli = test.NewFakeCli(nil)
|
||||
cli.SetManifestStore(store)
|
||||
cli.SetManifestStore(manifestStore)
|
||||
inspectCmd := newInspectCommand(cli)
|
||||
inspectCmd.SetArgs([]string{"example.com/list:v1"})
|
||||
assert.NilError(t, inspectCmd.Execute())
|
||||
|
@ -75,13 +75,13 @@ func TestManifestCreateAmend(t *testing.T) {
|
|||
|
||||
// attempt to overwrite a saved manifest and get refused
|
||||
func TestManifestCreateRefuseAmend(t *testing.T) {
|
||||
store := store.NewStore(t.TempDir())
|
||||
manifestStore := store.NewStore(t.TempDir())
|
||||
|
||||
cli := test.NewFakeCli(nil)
|
||||
cli.SetManifestStore(store)
|
||||
cli.SetManifestStore(manifestStore)
|
||||
namedRef := ref(t, "alpine:3.0")
|
||||
imageManifest := fullImageManifest(t, namedRef)
|
||||
err := store.Save(ref(t, "list:v1"), namedRef, imageManifest)
|
||||
err := manifestStore.Save(ref(t, "list:v1"), namedRef, imageManifest)
|
||||
assert.NilError(t, err)
|
||||
|
||||
cmd := newCreateListCommand(cli)
|
||||
|
@ -93,10 +93,10 @@ func TestManifestCreateRefuseAmend(t *testing.T) {
|
|||
|
||||
// attempt to make a manifest list without valid images
|
||||
func TestManifestCreateNoManifest(t *testing.T) {
|
||||
store := store.NewStore(t.TempDir())
|
||||
manifestStore := store.NewStore(t.TempDir())
|
||||
|
||||
cli := test.NewFakeCli(nil)
|
||||
cli.SetManifestStore(store)
|
||||
cli.SetManifestStore(manifestStore)
|
||||
cli.SetRegistryClient(&fakeRegistryClient{
|
||||
getManifestFunc: func(_ context.Context, ref reference.Named) (manifesttypes.ImageManifest, error) {
|
||||
return manifesttypes.ImageManifest{}, errors.Errorf("No such image: %v", ref)
|
||||
|
|
|
@ -20,12 +20,14 @@ import (
|
|||
)
|
||||
|
||||
func ref(t *testing.T, name string) reference.Named {
|
||||
t.Helper()
|
||||
named, err := reference.ParseNamed("example.com/" + name)
|
||||
assert.NilError(t, err)
|
||||
return named
|
||||
}
|
||||
|
||||
func fullImageManifest(t *testing.T, ref reference.Named) types.ImageManifest {
|
||||
t.Helper()
|
||||
man, err := schema2.FromStruct(schema2.Manifest{
|
||||
Versioned: schema2.SchemaVersion,
|
||||
Config: distribution.Descriptor{
|
||||
|
@ -61,10 +63,10 @@ func fullImageManifest(t *testing.T, ref reference.Named) types.ImageManifest {
|
|||
}
|
||||
|
||||
func TestInspectCommandLocalManifestNotFound(t *testing.T) {
|
||||
store := store.NewStore(t.TempDir())
|
||||
refStore := store.NewStore(t.TempDir())
|
||||
|
||||
cli := test.NewFakeCli(nil)
|
||||
cli.SetManifestStore(store)
|
||||
cli.SetManifestStore(refStore)
|
||||
|
||||
cmd := newInspectCommand(cli)
|
||||
cmd.SetOut(io.Discard)
|
||||
|
@ -74,10 +76,10 @@ func TestInspectCommandLocalManifestNotFound(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestInspectCommandNotFound(t *testing.T) {
|
||||
store := store.NewStore(t.TempDir())
|
||||
refStore := store.NewStore(t.TempDir())
|
||||
|
||||
cli := test.NewFakeCli(nil)
|
||||
cli.SetManifestStore(store)
|
||||
cli.SetManifestStore(refStore)
|
||||
cli.SetRegistryClient(&fakeRegistryClient{
|
||||
getManifestFunc: func(_ context.Context, _ reference.Named) (types.ImageManifest, error) {
|
||||
return types.ImageManifest{}, errors.New("missing")
|
||||
|
@ -95,13 +97,13 @@ func TestInspectCommandNotFound(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestInspectCommandLocalManifest(t *testing.T) {
|
||||
store := store.NewStore(t.TempDir())
|
||||
refStore := store.NewStore(t.TempDir())
|
||||
|
||||
cli := test.NewFakeCli(nil)
|
||||
cli.SetManifestStore(store)
|
||||
cli.SetManifestStore(refStore)
|
||||
namedRef := ref(t, "alpine:3.0")
|
||||
imageManifest := fullImageManifest(t, namedRef)
|
||||
err := store.Save(ref(t, "list:v1"), namedRef, imageManifest)
|
||||
err := refStore.Save(ref(t, "list:v1"), namedRef, imageManifest)
|
||||
assert.NilError(t, err)
|
||||
|
||||
cmd := newInspectCommand(cli)
|
||||
|
@ -113,10 +115,10 @@ func TestInspectCommandLocalManifest(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestInspectcommandRemoteManifest(t *testing.T) {
|
||||
store := store.NewStore(t.TempDir())
|
||||
refStore := store.NewStore(t.TempDir())
|
||||
|
||||
cli := test.NewFakeCli(nil)
|
||||
cli.SetManifestStore(store)
|
||||
cli.SetManifestStore(refStore)
|
||||
cli.SetRegistryClient(&fakeRegistryClient{
|
||||
getManifestFunc: func(_ context.Context, ref reference.Named) (types.ImageManifest, error) {
|
||||
return fullImageManifest(t, ref), nil
|
||||
|
|
|
@ -77,13 +77,13 @@ func runPush(dockerCli command.Cli, opts pushOpts) error {
|
|||
return errors.Errorf("%s not found", targetRef)
|
||||
}
|
||||
|
||||
pushRequest, err := buildPushRequest(manifests, targetRef, opts.insecure)
|
||||
req, err := buildPushRequest(manifests, targetRef, opts.insecure)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
if err := pushList(ctx, dockerCli, pushRequest); err != nil {
|
||||
if err := pushList(ctx, dockerCli, req); err != nil {
|
||||
return err
|
||||
}
|
||||
if opts.purge {
|
||||
|
@ -192,9 +192,9 @@ func buildManifestDescriptor(targetRepo *registry.RepositoryInfo, imageManifest
|
|||
}
|
||||
|
||||
func buildBlobRequestList(imageManifest types.ImageManifest, repoName reference.Named) ([]manifestBlob, error) {
|
||||
var blobReqs []manifestBlob
|
||||
|
||||
for _, blobDigest := range imageManifest.Blobs() {
|
||||
blobs := imageManifest.Blobs()
|
||||
blobReqs := make([]manifestBlob, 0, len(blobs))
|
||||
for _, blobDigest := range blobs {
|
||||
canonical, err := reference.WithDigest(repoName, blobDigest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
|
@ -49,17 +49,17 @@ func TestManifestPushErrors(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestManifestPush(t *testing.T) {
|
||||
store := store.NewStore(t.TempDir())
|
||||
manifestStore := store.NewStore(t.TempDir())
|
||||
|
||||
registry := newFakeRegistryClient()
|
||||
|
||||
cli := test.NewFakeCli(nil)
|
||||
cli.SetManifestStore(store)
|
||||
cli.SetManifestStore(manifestStore)
|
||||
cli.SetRegistryClient(registry)
|
||||
|
||||
namedRef := ref(t, "alpine:3.0")
|
||||
imageManifest := fullImageManifest(t, namedRef)
|
||||
err := store.Save(ref(t, "list:v1"), namedRef, imageManifest)
|
||||
err := manifestStore.Save(ref(t, "list:v1"), namedRef, imageManifest)
|
||||
assert.NilError(t, err)
|
||||
|
||||
cmd := newPushListCommand(cli)
|
||||
|
|
|
@ -11,22 +11,22 @@ import (
|
|||
|
||||
// create two manifest lists and remove them both
|
||||
func TestRmSeveralManifests(t *testing.T) {
|
||||
store := store.NewStore(t.TempDir())
|
||||
manifestStore := store.NewStore(t.TempDir())
|
||||
|
||||
cli := test.NewFakeCli(nil)
|
||||
cli.SetManifestStore(store)
|
||||
cli.SetManifestStore(manifestStore)
|
||||
|
||||
list1 := ref(t, "first:1")
|
||||
namedRef := ref(t, "alpine:3.0")
|
||||
err := store.Save(list1, namedRef, fullImageManifest(t, namedRef))
|
||||
err := manifestStore.Save(list1, namedRef, fullImageManifest(t, namedRef))
|
||||
assert.NilError(t, err)
|
||||
namedRef = ref(t, "alpine:3.1")
|
||||
err = store.Save(list1, namedRef, fullImageManifest(t, namedRef))
|
||||
err = manifestStore.Save(list1, namedRef, fullImageManifest(t, namedRef))
|
||||
assert.NilError(t, err)
|
||||
|
||||
list2 := ref(t, "second:2")
|
||||
namedRef = ref(t, "alpine:3.2")
|
||||
err = store.Save(list2, namedRef, fullImageManifest(t, namedRef))
|
||||
err = manifestStore.Save(list2, namedRef, fullImageManifest(t, namedRef))
|
||||
assert.NilError(t, err)
|
||||
|
||||
cmd := newRmManifestListCommand(cli)
|
||||
|
@ -43,14 +43,14 @@ func TestRmSeveralManifests(t *testing.T) {
|
|||
|
||||
// attempt to remove a manifest list which was never created
|
||||
func TestRmManifestNotCreated(t *testing.T) {
|
||||
store := store.NewStore(t.TempDir())
|
||||
manifestStore := store.NewStore(t.TempDir())
|
||||
|
||||
cli := test.NewFakeCli(nil)
|
||||
cli.SetManifestStore(store)
|
||||
cli.SetManifestStore(manifestStore)
|
||||
|
||||
list2 := ref(t, "second:2")
|
||||
namedRef := ref(t, "alpine:3.2")
|
||||
err := store.Save(list2, namedRef, fullImageManifest(t, namedRef))
|
||||
err := manifestStore.Save(list2, namedRef, fullImageManifest(t, namedRef))
|
||||
assert.NilError(t, err)
|
||||
|
||||
cmd := newRmManifestListCommand(cli)
|
||||
|
|
|
@ -78,9 +78,9 @@ func runConnect(dockerCli command.Cli, options connectOptions) error {
|
|||
return client.NetworkConnect(context.Background(), options.network, options.container, epConfig)
|
||||
}
|
||||
|
||||
func convertDriverOpt(opts []string) (map[string]string, error) {
|
||||
func convertDriverOpt(options []string) (map[string]string, error) {
|
||||
driverOpt := make(map[string]string)
|
||||
for _, opt := range opts {
|
||||
for _, opt := range options {
|
||||
k, v, ok := strings.Cut(opt, "=")
|
||||
// TODO(thaJeztah): we should probably not accept whitespace here (both for key and value).
|
||||
k = strings.TrimSpace(k)
|
||||
|
|
|
@ -39,7 +39,6 @@ func TestNetworkConnectErrors(t *testing.T) {
|
|||
cmd.SetArgs(tc.args)
|
||||
cmd.SetOut(io.Discard)
|
||||
assert.ErrorContains(t, cmd.Execute(), tc.expectedError)
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -139,7 +139,6 @@ func TestNetworkCreateErrors(t *testing.T) {
|
|||
}
|
||||
cmd.SetOut(io.Discard)
|
||||
assert.ErrorContains(t, cmd.Execute(), tc.expectedError)
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
package network
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/cli/cli/command/formatter"
|
||||
|
@ -89,11 +89,11 @@ func (c *networkContext) Scope() string {
|
|||
}
|
||||
|
||||
func (c *networkContext) IPv6() string {
|
||||
return fmt.Sprintf("%v", c.n.EnableIPv6)
|
||||
return strconv.FormatBool(c.n.EnableIPv6)
|
||||
}
|
||||
|
||||
func (c *networkContext) Internal() string {
|
||||
return fmt.Sprintf("%v", c.n.Internal)
|
||||
return strconv.FormatBool(c.n.Internal)
|
||||
}
|
||||
|
||||
func (c *networkContext) Labels() string {
|
||||
|
@ -101,7 +101,7 @@ func (c *networkContext) Labels() string {
|
|||
return ""
|
||||
}
|
||||
|
||||
var joinLabels []string
|
||||
joinLabels := make([]string, 0, len(c.n.Labels))
|
||||
for k, v := range c.n.Labels {
|
||||
joinLabels = append(joinLabels, k+"="+v)
|
||||
}
|
||||
|
|
|
@ -177,7 +177,7 @@ func TestNetworkContextWriteJSON(t *testing.T) {
|
|||
{ID: "networkID1", Name: "foobar_baz"},
|
||||
{ID: "networkID2", Name: "foobar_bar"},
|
||||
}
|
||||
expectedJSONs := []map[string]interface{}{
|
||||
expectedJSONs := []map[string]any{
|
||||
{"Driver": "", "ID": "networkID1", "IPv6": "false", "Internal": "false", "Labels": "", "Name": "foobar_baz", "Scope": "", "CreatedAt": "0001-01-01 00:00:00 +0000 UTC"},
|
||||
{"Driver": "", "ID": "networkID2", "IPv6": "false", "Internal": "false", "Labels": "", "Name": "foobar_bar", "Scope": "", "CreatedAt": "0001-01-01 00:00:00 +0000 UTC"},
|
||||
}
|
||||
|
@ -189,7 +189,7 @@ func TestNetworkContextWriteJSON(t *testing.T) {
|
|||
}
|
||||
for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") {
|
||||
msg := fmt.Sprintf("Output: line %d: %s", i, line)
|
||||
var m map[string]interface{}
|
||||
var m map[string]any
|
||||
err := json.Unmarshal([]byte(line), &m)
|
||||
assert.NilError(t, err, msg)
|
||||
assert.Check(t, is.DeepEqual(expectedJSONs[i], m), msg)
|
||||
|
|
|
@ -43,7 +43,7 @@ func runInspect(dockerCli command.Cli, opts inspectOptions) error {
|
|||
|
||||
ctx := context.Background()
|
||||
|
||||
getNetFunc := func(name string) (interface{}, []byte, error) {
|
||||
getNetFunc := func(name string) (any, []byte, error) {
|
||||
return client.NetworkInspectWithRaw(ctx, name, types.NetworkInspectOptions{Verbose: opts.verbose})
|
||||
}
|
||||
|
||||
|
|
|
@ -90,7 +90,6 @@ func TestNetworkRemoveForce(t *testing.T) {
|
|||
} else {
|
||||
assert.Check(t, is.Contains(fakeCli.ErrBuffer().String(), tc.expectedErr))
|
||||
assert.ErrorContains(t, err, "Code: 1")
|
||||
|
||||
}
|
||||
})
|
||||
}
|
||||
|
|
|
@ -216,11 +216,11 @@ foobar_boo Unknown
|
|||
|
||||
func TestNodeContextWriteJSON(t *testing.T) {
|
||||
cases := []struct {
|
||||
expected []map[string]interface{}
|
||||
expected []map[string]any
|
||||
info system.Info
|
||||
}{
|
||||
{
|
||||
expected: []map[string]interface{}{
|
||||
expected: []map[string]any{
|
||||
{"Availability": "", "Hostname": "foobar_baz", "ID": "nodeID1", "ManagerStatus": "", "Status": "", "Self": false, "TLSStatus": "Unknown", "EngineVersion": "1.2.3"},
|
||||
{"Availability": "", "Hostname": "foobar_bar", "ID": "nodeID2", "ManagerStatus": "", "Status": "", "Self": false, "TLSStatus": "Unknown", "EngineVersion": ""},
|
||||
{"Availability": "", "Hostname": "foobar_boo", "ID": "nodeID3", "ManagerStatus": "", "Status": "", "Self": false, "TLSStatus": "Unknown", "EngineVersion": "18.03.0-ce"},
|
||||
|
@ -228,7 +228,7 @@ func TestNodeContextWriteJSON(t *testing.T) {
|
|||
info: system.Info{},
|
||||
},
|
||||
{
|
||||
expected: []map[string]interface{}{
|
||||
expected: []map[string]any{
|
||||
{"Availability": "", "Hostname": "foobar_baz", "ID": "nodeID1", "ManagerStatus": "", "Status": "", "Self": false, "TLSStatus": "Ready", "EngineVersion": "1.2.3"},
|
||||
{"Availability": "", "Hostname": "foobar_bar", "ID": "nodeID2", "ManagerStatus": "", "Status": "", "Self": false, "TLSStatus": "Needs Rotation", "EngineVersion": ""},
|
||||
{"Availability": "", "Hostname": "foobar_boo", "ID": "nodeID3", "ManagerStatus": "", "Status": "", "Self": false, "TLSStatus": "Unknown", "EngineVersion": "18.03.0-ce"},
|
||||
|
@ -257,7 +257,7 @@ func TestNodeContextWriteJSON(t *testing.T) {
|
|||
}
|
||||
for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") {
|
||||
msg := fmt.Sprintf("Output: line %d: %s", i, line)
|
||||
var m map[string]interface{}
|
||||
var m map[string]any
|
||||
err := json.Unmarshal([]byte(line), &m)
|
||||
assert.NilError(t, err, msg)
|
||||
assert.Check(t, is.DeepEqual(testcase.expected[i], m), msg)
|
||||
|
@ -319,7 +319,7 @@ func TestNodeInspectWriteContext(t *testing.T) {
|
|||
Format: NewFormat("pretty", false),
|
||||
Output: out,
|
||||
}
|
||||
err := InspectFormatWrite(context, []string{"nodeID1"}, func(string) (interface{}, []byte, error) {
|
||||
err := InspectFormatWrite(context, []string{"nodeID1"}, func(string) (any, []byte, error) {
|
||||
return node, nil, nil
|
||||
})
|
||||
if err != nil {
|
||||
|
|
|
@ -45,7 +45,7 @@ func runInspect(dockerCli command.Cli, opts inspectOptions) error {
|
|||
opts.format = "pretty"
|
||||
}
|
||||
|
||||
getRef := func(ref string) (interface{}, []byte, error) {
|
||||
getRef := func(ref string) (any, []byte, error) {
|
||||
nodeRef, err := Reference(ctx, client, ref)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
|
|
|
@ -76,8 +76,8 @@ func (c *pluginContext) Name() string {
|
|||
}
|
||||
|
||||
func (c *pluginContext) Description() string {
|
||||
desc := strings.Replace(c.p.Config.Description, "\n", "", -1)
|
||||
desc = strings.Replace(desc, "\r", "", -1)
|
||||
desc := strings.ReplaceAll(c.p.Config.Description, "\n", "")
|
||||
desc = strings.ReplaceAll(desc, "\r", "")
|
||||
if c.trunc {
|
||||
desc = formatter.Ellipsis(desc, 45)
|
||||
}
|
||||
|
|
|
@ -148,7 +148,7 @@ func TestPluginContextWriteJSON(t *testing.T) {
|
|||
{ID: "pluginID1", Name: "foobar_baz"},
|
||||
{ID: "pluginID2", Name: "foobar_bar"},
|
||||
}
|
||||
expectedJSONs := []map[string]interface{}{
|
||||
expectedJSONs := []map[string]any{
|
||||
{"Description": "", "Enabled": false, "ID": "pluginID1", "Name": "foobar_baz", "PluginReference": ""},
|
||||
{"Description": "", "Enabled": false, "ID": "pluginID2", "Name": "foobar_bar", "PluginReference": ""},
|
||||
}
|
||||
|
@ -159,7 +159,7 @@ func TestPluginContextWriteJSON(t *testing.T) {
|
|||
t.Fatal(err)
|
||||
}
|
||||
for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") {
|
||||
var m map[string]interface{}
|
||||
var m map[string]any
|
||||
if err := json.Unmarshal([]byte(line), &m); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
|
|
@ -36,7 +36,7 @@ func newInspectCommand(dockerCli command.Cli) *cobra.Command {
|
|||
func runInspect(dockerCli command.Cli, opts inspectOptions) error {
|
||||
client := dockerCli.Client()
|
||||
ctx := context.Background()
|
||||
getRef := func(ref string) (interface{}, []byte, error) {
|
||||
getRef := func(ref string) (any, []byte, error) {
|
||||
return client.PluginInspectWithRaw(ctx, ref)
|
||||
}
|
||||
|
||||
|
|
|
@ -64,8 +64,8 @@ func (c *searchContext) Name() string {
|
|||
}
|
||||
|
||||
func (c *searchContext) Description() string {
|
||||
desc := strings.Replace(c.s.Description, "\n", " ", -1)
|
||||
desc = strings.Replace(desc, "\r", " ", -1)
|
||||
desc := strings.ReplaceAll(c.s.Description, "\n", " ")
|
||||
desc = strings.ReplaceAll(desc, "\r", " ")
|
||||
if c.trunc {
|
||||
desc = formatter.Ellipsis(desc, 45)
|
||||
}
|
||||
|
@ -97,5 +97,5 @@ func (c *searchContext) IsOfficial() string {
|
|||
//
|
||||
// Deprecated: the "is_automated" field is deprecated and will always be "false" in the future.
|
||||
func (c *searchContext) IsAutomated() string {
|
||||
return c.formatBool(c.s.IsAutomated) //nolint:staticcheck // ignore SA1019 (IsAutomated is deprecated).
|
||||
return c.formatBool(c.s.IsAutomated) //nolint:nolintlint,staticcheck // ignore SA1019 (IsAutomated is deprecated).
|
||||
}
|
||||
|
|
|
@ -47,16 +47,16 @@ func TestSearchContext(t *testing.T) {
|
|||
},
|
||||
{
|
||||
searchCtx: searchContext{
|
||||
s: registrytypes.SearchResult{IsAutomated: true}, //nolint:staticcheck // ignore SA1019 (IsAutomated is deprecated).
|
||||
s: registrytypes.SearchResult{IsAutomated: true}, //nolint:nolintlint,staticcheck // ignore SA1019 (IsAutomated is deprecated).
|
||||
},
|
||||
expValue: "[OK]",
|
||||
call: ctx.IsAutomated, //nolint:staticcheck // ignore SA1019 (IsAutomated is deprecated).
|
||||
call: ctx.IsAutomated, //nolint:nolintlint,staticcheck // ignore SA1019 (IsAutomated is deprecated).
|
||||
},
|
||||
{
|
||||
searchCtx: searchContext{
|
||||
s: registrytypes.SearchResult{},
|
||||
},
|
||||
call: ctx.IsAutomated, //nolint:staticcheck // ignore SA1019 (IsAutomated is deprecated).
|
||||
call: ctx.IsAutomated, //nolint:nolintlint,staticcheck // ignore SA1019 (IsAutomated is deprecated).
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -199,7 +199,7 @@ result2 5
|
|||
|
||||
results := []registrytypes.SearchResult{
|
||||
{Name: "result1", Description: "Official build", StarCount: 5000, IsOfficial: true},
|
||||
{Name: "result2", Description: "Not official", StarCount: 5, IsAutomated: true}, //nolint:staticcheck // ignore SA1019 (IsAutomated is deprecated).
|
||||
{Name: "result2", Description: "Not official", StarCount: 5, IsAutomated: true},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
|
|
|
@ -108,7 +108,7 @@ func (c *secretContext) Labels() string {
|
|||
if mapLabels == nil {
|
||||
return ""
|
||||
}
|
||||
var joinLabels []string
|
||||
joinLabels := make([]string, 0, len(mapLabels))
|
||||
for k, v := range mapLabels {
|
||||
joinLabels = append(joinLabels, k+"="+v)
|
||||
}
|
||||
|
|
|
@ -46,7 +46,7 @@ func runSecretInspect(dockerCli command.Cli, opts inspectOptions) error {
|
|||
opts.format = "pretty"
|
||||
}
|
||||
|
||||
getRef := func(id string) (interface{}, []byte, error) {
|
||||
getRef := func(id string) (any, []byte, error) {
|
||||
return client.SecretInspectWithRaw(ctx, id)
|
||||
}
|
||||
f := opts.format
|
||||
|
|
|
@ -3,6 +3,7 @@ package service
import (
"fmt"
"sort"
+ "strconv"
"strings"
"time"

@ -345,13 +346,13 @@ func (ctx *serviceInspectContext) TaskPlacementPreferences() []string {
if ctx.Service.Spec.TaskTemplate.Placement == nil {
return nil
}
- var strings []string
+ var out []string
for _, pref := range ctx.Service.Spec.TaskTemplate.Placement.Preferences {
if pref.Spread != nil {
- strings = append(strings, "spread="+pref.Spread.SpreadDescriptor)
+ out = append(out, "spread="+pref.Spread.SpreadDescriptor)
}
}
- return strings
+ return out
}

func (ctx *serviceInspectContext) MaxReplicas() uint64 {

@ -742,12 +743,12 @@ func (pr portRange) String() string {
if pr.pEnd > pr.pStart {
pub = fmt.Sprintf("%d-%d", pr.pStart, pr.pEnd)
} else {
- pub = fmt.Sprintf("%d", pr.pStart)
+ pub = strconv.FormatUint(uint64(pr.pStart), 10)
}
if pr.tEnd > pr.tStart {
tgt = fmt.Sprintf("%d-%d", pr.tStart, pr.tEnd)
} else {
- tgt = fmt.Sprintf("%d", pr.tStart)
+ tgt = strconv.FormatUint(uint64(pr.tStart), 10)
}
return fmt.Sprintf("*:%s->%s/%s", pub, tgt, pr.protocol)
}
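The portRange hunk swaps a single-verb fmt.Sprintf for strconv.FormatUint, the substitution the perfsprint linter recommends: formatting one integer does not need fmt's reflection-based machinery. A minimal sketch of the same trade-off, assuming a uint16-style port value and hypothetical names:

package main

import (
	"fmt"
	"strconv"
)

// formatPort prints either "start-end" or just "start".
func formatPort(start, end uint16) string {
	if end > start {
		// Two values in one layout string still warrant fmt.
		return fmt.Sprintf("%d-%d", start, end)
	}
	// A lone integer can be converted directly, avoiding fmt altogether.
	return strconv.FormatUint(uint64(start), 10)
}

func main() {
	fmt.Println(formatPort(8080, 0), formatPort(30000, 32767))
}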
@ -283,7 +283,7 @@ func TestServiceContextWriteJSON(t *testing.T) {
},
},
}
- expectedJSONs := []map[string]interface{}{
+ expectedJSONs := []map[string]any{
{"ID": "02_bar", "Name": "bar", "Mode": "replicated", "Replicas": "2/4", "Image": "", "Ports": "*:80->8080/tcp"},
{"ID": "01_baz", "Name": "baz", "Mode": "global", "Replicas": "1/3", "Image": "", "Ports": "*:80->8080/tcp"},
}

@ -295,7 +295,7 @@ func TestServiceContextWriteJSON(t *testing.T) {
}
for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") {
msg := fmt.Sprintf("Output: line %d: %s", i, line)
- var m map[string]interface{}
+ var m map[string]any
err := json.Unmarshal([]byte(line), &m)
assert.NilError(t, err, msg)
assert.Check(t, is.DeepEqual(expectedJSONs[i], m), msg)
@ -52,7 +52,7 @@ func ParseGenericResources(value []string) ([]swarm.GenericResource, error) {

// genericResourcesFromGRPC converts a GRPC GenericResource to a GenericResource
func genericResourcesFromGRPC(genericRes []*swarmapi.GenericResource) []swarm.GenericResource {
- var generic []swarm.GenericResource
+ generic := make([]swarm.GenericResource, 0, len(genericRes))
for _, res := range genericRes {
var current swarm.GenericResource

@ -95,7 +95,7 @@ func buildGenericResourceMap(genericRes []swarm.GenericResource) (map[string]swa
}

func buildGenericResourceList(genericRes map[string]swarm.GenericResource) []swarm.GenericResource {
- var l []swarm.GenericResource
+ l := make([]swarm.GenericResource, 0, len(genericRes))

for _, res := range genericRes {
l = append(l, res)
@ -54,7 +54,7 @@ func runInspect(dockerCli command.Cli, opts inspectOptions) error {
opts.format = "pretty"
}

- getRef := func(ref string) (interface{}, []byte, error) {
+ getRef := func(ref string) (any, []byte, error) {
// Service inspect shows defaults values in empty fields.
service, _, err := client.ServiceInspectWithRaw(ctx, ref, types.ServiceInspectOptions{InsertDefaults: true})
if err == nil || !errdefs.IsNotFound(err) {

@ -63,7 +63,7 @@ func runInspect(dockerCli command.Cli, opts inspectOptions) error {
return nil, nil, errors.Errorf("Error: no such service: %s", ref)
}

- getNetwork := func(ref string) (interface{}, []byte, error) {
+ getNetwork := func(ref string) (any, []byte, error) {
network, _, err := client.NetworkInspectWithRaw(ctx, ref, types.NetworkInspectOptions{Scope: "swarm"})
if err == nil || !errdefs.IsNotFound(err) {
return network, nil, err
@ -17,6 +17,7 @@ import (
)

func formatServiceInspect(t *testing.T, format formatter.Format, now time.Time) string {
+ t.Helper()
b := new(bytes.Buffer)

endpointSpec := &swarm.EndpointSpec{

@ -128,10 +129,10 @@ func formatServiceInspect(t *testing.T, format formatter.Format, now time.Time)
}

err := InspectFormatWrite(ctx, []string{"de179gar9d0o7ltdybungplod"},
- func(ref string) (interface{}, []byte, error) {
+ func(ref string) (any, []byte, error) {
return s, nil, nil
},
- func(ref string) (interface{}, []byte, error) {
+ func(ref string) (any, []byte, error) {
return types.NetworkResource{
ID: "5vpyomhb6ievnk0i0o60gcnei",
Name: "mynetwork",

@ -165,7 +166,7 @@ func TestJSONFormatWithNoUpdateConfig(t *testing.T) {
// s2: {"ID":..}
s1 := formatServiceInspect(t, NewFormat(""), now)
s2 := formatServiceInspect(t, NewFormat("{{json .}}"), now)
- var m1Wrap []map[string]interface{}
+ var m1Wrap []map[string]any
if err := json.Unmarshal([]byte(s1), &m1Wrap); err != nil {
t.Fatal(err)
}

@ -173,7 +174,7 @@ func TestJSONFormatWithNoUpdateConfig(t *testing.T) {
t.Fatalf("strange s1=%s", s1)
}
m1 := m1Wrap[0]
- var m2 map[string]interface{}
+ var m2 map[string]any
if err := json.Unmarshal([]byte(s2), &m2); err != nil {
t.Fatal(err)
}
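The t.Helper() call added to formatServiceInspect is what the thelper linter enforces: a helper that calls t.Helper() makes test failures point at the caller's line rather than at the helper's own body. A minimal sketch of the idea, with a hypothetical assertion helper:

package example

import "testing"

// mustEqual is a hypothetical helper; t.Helper() tells the testing package to
// attribute any failure below to the test that called mustEqual.
func mustEqual(t *testing.T, got, want string) {
	t.Helper()
	if got != want {
		t.Fatalf("got %q, want %q", got, want)
	}
}

func TestGreeting(t *testing.T) {
	mustEqual(t, "hello", "hello")
}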
@ -22,7 +22,7 @@ type listOptions struct {
filter opts.FilterOpt
}

- func newListCommand(dockerCli command.Cli) *cobra.Command {
+ func newListCommand(dockerCLI command.Cli) *cobra.Command {
options := listOptions{filter: opts.NewFilterOpt()}

cmd := &cobra.Command{

@ -31,7 +31,7 @@ func newListCommand(dockerCli command.Cli) *cobra.Command {
Short: "List services",
Args: cli.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
- return runList(dockerCli, options)
+ return runList(dockerCLI, options)
},
ValidArgsFunction: completion.NoComplete,
}

@ -44,20 +44,20 @@ func newListCommand(dockerCli command.Cli) *cobra.Command {
return cmd
}

- func runList(dockerCli command.Cli, opts listOptions) error {
+ func runList(dockerCLI command.Cli, options listOptions) error {
var (
- apiClient = dockerCli.Client()
+ apiClient = dockerCLI.Client()
ctx = context.Background()
err error
)

listOpts := types.ServiceListOptions{
- Filters: opts.filter.Value(),
+ Filters: options.filter.Value(),
// When not running "quiet", also get service status (number of running
// and desired tasks). Note that this is only supported on API v1.41 and
// up; older API versions ignore this option, and we will have to collect
// the information manually below.
- Status: !opts.quiet,
+ Status: !options.quiet,
}

services, err := apiClient.ServiceList(ctx, listOpts)

@ -84,18 +84,18 @@ func runList(dockerCli command.Cli, opts listOptions) error {
}
}

- format := opts.format
+ format := options.format
if len(format) == 0 {
- if len(dockerCli.ConfigFile().ServicesFormat) > 0 && !opts.quiet {
- format = dockerCli.ConfigFile().ServicesFormat
+ if len(dockerCLI.ConfigFile().ServicesFormat) > 0 && !options.quiet {
+ format = dockerCLI.ConfigFile().ServicesFormat
} else {
format = formatter.TableFormatKey
}
}

servicesCtx := formatter.Context{
- Output: dockerCli.Out(),
- Format: NewListFormat(format, opts.quiet),
+ Output: dockerCLI.Out(),
+ Format: NewListFormat(format, options.quiet),
}
return ListFormatWrite(servicesCtx, services)
}
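The renames in list.go (opts to options, dockerCli to dockerCLI) and the client to apiClient renames elsewhere in this diff follow the import-shadowing idea: a parameter or local named after an imported package hides that package for the rest of the function. A minimal sketch of the problem and the fix, using the strings package and hypothetical names:

package main

import (
	"fmt"
	"strings"
)

// Shadowed version (does not compile): naming the parameter `strings` hides
// the strings package, so strings.Join would resolve to the []string value.
// func join(strings []string) string { return strings.Join(strings, ",") }

// Renaming the parameter keeps the imported package visible.
func join(parts []string) string {
	return strings.Join(parts, ",")
}

func main() {
	fmt.Println(join([]string{"a", "b", "c"}))
}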
@ -161,7 +161,7 @@ func TestServiceListServiceStatus(t *testing.T) {
for _, tc := range tests {
if quiet {
tc.withQuiet = quiet
- tc.doc = tc.doc + " with quiet"
+ tc.doc += " with quiet"
}
matrix = append(matrix, tc)
}
@ -181,12 +181,12 @@ type taskFormatter struct {
cache map[logContext]string
}

- func newTaskFormatter(client client.APIClient, opts *logsOptions, padding int) *taskFormatter {
+ func newTaskFormatter(apiClient client.APIClient, opts *logsOptions, padding int) *taskFormatter {
return &taskFormatter{
- client: client,
+ client: apiClient,
opts: opts,
padding: padding,
- r: idresolver.New(client, opts.noResolve),
+ r: idresolver.New(apiClient, opts.noResolve),
cache: make(map[logContext]string),
}
}
@ -46,7 +46,7 @@ func (i *Uint64Opt) Type() string {
// String returns a string repr of this option
func (i *Uint64Opt) String() string {
if i.value != nil {
- return fmt.Sprintf("%v", *i.value)
+ return strconv.FormatUint(*i.value, 10)
}
return ""
}
@ -82,17 +82,17 @@ type placementPrefOpts struct {
strings []string
}

- func (opts *placementPrefOpts) String() string {
- if len(opts.strings) == 0 {
+ func (o *placementPrefOpts) String() string {
+ if len(o.strings) == 0 {
return ""
}
- return fmt.Sprintf("%v", opts.strings)
+ return fmt.Sprintf("%v", o.strings)
}

// Set validates the input value and adds it to the internal slices.
// Note: in the future strategies other than "spread", may be supported,
// as well as additional comma-separated options.
- func (opts *placementPrefOpts) Set(value string) error {
+ func (o *placementPrefOpts) Set(value string) error {
strategy, arg, ok := strings.Cut(value, "=")
if !ok || strategy == "" {
return errors.New(`placement preference must be of the format "<strategy>=<arg>"`)

@ -101,17 +101,17 @@ func (opts *placementPrefOpts) Set(value string) error {
return errors.Errorf("unsupported placement preference %s (only spread is supported)", strategy)
}

- opts.prefs = append(opts.prefs, swarm.PlacementPreference{
+ o.prefs = append(o.prefs, swarm.PlacementPreference{
Spread: &swarm.SpreadOver{
SpreadDescriptor: arg,
},
})
- opts.strings = append(opts.strings, value)
+ o.strings = append(o.strings, value)
return nil
}

// Type returns a string name for this Option type
- func (opts *placementPrefOpts) Type() string {
+ func (o *placementPrefOpts) Type() string {
return "pref"
}
@ -167,7 +167,7 @@ func updateConfigFromDefaults(defaultUpdateConfig *api.UpdateConfig) *swarm.Upda
}
}

- func (opts updateOptions) updateConfig(flags *pflag.FlagSet) *swarm.UpdateConfig {
+ func (o updateOptions) updateConfig(flags *pflag.FlagSet) *swarm.UpdateConfig {
if !anyChanged(flags, flagUpdateParallelism, flagUpdateDelay, flagUpdateMonitor, flagUpdateFailureAction, flagUpdateMaxFailureRatio, flagUpdateOrder) {
return nil
}

@ -175,28 +175,28 @@ func (opts updateOptions) updateConfig(flags *pflag.FlagSet) *swarm.UpdateConfig
updateConfig := updateConfigFromDefaults(defaults.Service.Update)

if flags.Changed(flagUpdateParallelism) {
- updateConfig.Parallelism = opts.parallelism
+ updateConfig.Parallelism = o.parallelism
}
if flags.Changed(flagUpdateDelay) {
- updateConfig.Delay = opts.delay
+ updateConfig.Delay = o.delay
}
if flags.Changed(flagUpdateMonitor) {
- updateConfig.Monitor = opts.monitor
+ updateConfig.Monitor = o.monitor
}
if flags.Changed(flagUpdateFailureAction) {
- updateConfig.FailureAction = opts.onFailure
+ updateConfig.FailureAction = o.onFailure
}
if flags.Changed(flagUpdateMaxFailureRatio) {
- updateConfig.MaxFailureRatio = opts.maxFailureRatio.Value()
+ updateConfig.MaxFailureRatio = o.maxFailureRatio.Value()
}
if flags.Changed(flagUpdateOrder) {
- updateConfig.Order = opts.order
+ updateConfig.Order = o.order
}

return updateConfig
}

- func (opts updateOptions) rollbackConfig(flags *pflag.FlagSet) *swarm.UpdateConfig {
+ func (o updateOptions) rollbackConfig(flags *pflag.FlagSet) *swarm.UpdateConfig {
if !anyChanged(flags, flagRollbackParallelism, flagRollbackDelay, flagRollbackMonitor, flagRollbackFailureAction, flagRollbackMaxFailureRatio, flagRollbackOrder) {
return nil
}

@ -204,22 +204,22 @@ func (opts updateOptions) rollbackConfig(flags *pflag.FlagSet) *swarm.UpdateConf
updateConfig := updateConfigFromDefaults(defaults.Service.Rollback)

if flags.Changed(flagRollbackParallelism) {
- updateConfig.Parallelism = opts.parallelism
+ updateConfig.Parallelism = o.parallelism
}
if flags.Changed(flagRollbackDelay) {
- updateConfig.Delay = opts.delay
+ updateConfig.Delay = o.delay
}
if flags.Changed(flagRollbackMonitor) {
- updateConfig.Monitor = opts.monitor
+ updateConfig.Monitor = o.monitor
}
if flags.Changed(flagRollbackFailureAction) {
- updateConfig.FailureAction = opts.onFailure
+ updateConfig.FailureAction = o.onFailure
}
if flags.Changed(flagRollbackMaxFailureRatio) {
- updateConfig.MaxFailureRatio = opts.maxFailureRatio.Value()
+ updateConfig.MaxFailureRatio = o.maxFailureRatio.Value()
}
if flags.Changed(flagRollbackOrder) {
- updateConfig.Order = opts.order
+ updateConfig.Order = o.order
}

return updateConfig
@ -378,8 +378,9 @@ func resolveNetworkID(ctx context.Context, apiClient client.NetworkAPIClient, ne
}

func convertNetworks(networks opts.NetworkOpt) []swarm.NetworkAttachmentConfig {
- var netAttach []swarm.NetworkAttachmentConfig
- for _, net := range networks.Value() {
+ nws := networks.Value()
+ netAttach := make([]swarm.NetworkAttachmentConfig, 0, len(nws))
+ for _, net := range nws {
netAttach = append(netAttach, swarm.NetworkAttachmentConfig{
Target: net.Target,
Aliases: net.Aliases,
@ -432,42 +433,42 @@ type healthCheckOptions struct {
noHealthcheck bool
}

- func (opts *healthCheckOptions) toHealthConfig() (*container.HealthConfig, error) {
+ func (o *healthCheckOptions) toHealthConfig() (*container.HealthConfig, error) {
var healthConfig *container.HealthConfig
- haveHealthSettings := opts.cmd != "" ||
- opts.interval.Value() != nil ||
- opts.timeout.Value() != nil ||
- opts.startPeriod.Value() != nil ||
- opts.startInterval.Value() != nil ||
- opts.retries != 0
- if opts.noHealthcheck {
+ haveHealthSettings := o.cmd != "" ||
+ o.interval.Value() != nil ||
+ o.timeout.Value() != nil ||
+ o.startPeriod.Value() != nil ||
+ o.startInterval.Value() != nil ||
+ o.retries != 0
+ if o.noHealthcheck {
if haveHealthSettings {
return nil, errors.Errorf("--%s conflicts with --health-* options", flagNoHealthcheck)
}
healthConfig = &container.HealthConfig{Test: []string{"NONE"}}
} else if haveHealthSettings {
var test []string
- if opts.cmd != "" {
- test = []string{"CMD-SHELL", opts.cmd}
+ if o.cmd != "" {
+ test = []string{"CMD-SHELL", o.cmd}
}
var interval, timeout, startPeriod, startInterval time.Duration
- if ptr := opts.interval.Value(); ptr != nil {
+ if ptr := o.interval.Value(); ptr != nil {
interval = *ptr
}
- if ptr := opts.timeout.Value(); ptr != nil {
+ if ptr := o.timeout.Value(); ptr != nil {
timeout = *ptr
}
- if ptr := opts.startPeriod.Value(); ptr != nil {
+ if ptr := o.startPeriod.Value(); ptr != nil {
startPeriod = *ptr
}
- if ptr := opts.startInterval.Value(); ptr != nil {
+ if ptr := o.startInterval.Value(); ptr != nil {
startInterval = *ptr
}
healthConfig = &container.HealthConfig{
Test: test,
Interval: interval,
Timeout: timeout,
- Retries: opts.retries,
+ Retries: o.retries,
StartPeriod: startPeriod,
StartInterval: startInterval,
}
@ -769,7 +770,7 @@ func (options *serviceOptions) ToService(ctx context.Context, apiClient client.N
return service, nil
}

- type flagDefaults map[string]interface{}
+ type flagDefaults map[string]any

func (fd flagDefaults) getUint64(flagName string) uint64 {
if val, ok := fd[flagName].(uint64); ok {

@ -786,7 +787,7 @@ func (fd flagDefaults) getString(flagName string) string {
}

func buildServiceDefaultFlagMapping() flagDefaults {
- defaultFlagValues := make(map[string]interface{})
+ defaultFlagValues := make(map[string]any)

defaultFlagValues[flagStopGracePeriod], _ = gogotypes.DurationFromProto(defaults.Service.Task.GetContainer().StopGracePeriod)
defaultFlagValues[flagRestartCondition] = `"` + defaultRestartCondition() + `"`
@ -827,7 +828,7 @@ func addDetachFlag(flags *pflag.FlagSet, detach *bool) {

// addServiceFlags adds all flags that are common to both `create` and `update`.
// Any flags that are not common are added separately in the individual command
- func addServiceFlags(flags *pflag.FlagSet, opts *serviceOptions, defaultFlagValues flagDefaults) {
+ func addServiceFlags(flags *pflag.FlagSet, options *serviceOptions, defaultFlagValues flagDefaults) {
flagDesc := func(flagName string, desc string) string {
if defaultValue, ok := defaultFlagValues[flagName]; ok {
return fmt.Sprintf("%s (default %v)", desc, defaultValue)

@ -835,98 +836,98 @@ func addServiceFlags(flags *pflag.FlagSet, opts *serviceOptions, defaultFlagValu
return desc
}

- addDetachFlag(flags, &opts.detach)
- flags.BoolVarP(&opts.quiet, flagQuiet, "q", false, "Suppress progress output")
+ addDetachFlag(flags, &options.detach)
+ flags.BoolVarP(&options.quiet, flagQuiet, "q", false, "Suppress progress output")

- flags.StringVarP(&opts.workdir, flagWorkdir, "w", "", "Working directory inside the container")
- flags.StringVarP(&opts.user, flagUser, "u", "", "Username or UID (format: <name|uid>[:<group|gid>])")
- flags.Var(&opts.credentialSpec, flagCredentialSpec, "Credential spec for managed service account (Windows only)")
+ flags.StringVarP(&options.workdir, flagWorkdir, "w", "", "Working directory inside the container")
+ flags.StringVarP(&options.user, flagUser, "u", "", "Username or UID (format: <name|uid>[:<group|gid>])")
+ flags.Var(&options.credentialSpec, flagCredentialSpec, "Credential spec for managed service account (Windows only)")
flags.SetAnnotation(flagCredentialSpec, "version", []string{"1.29"})
- flags.StringVar(&opts.hostname, flagHostname, "", "Container hostname")
+ flags.StringVar(&options.hostname, flagHostname, "", "Container hostname")
flags.SetAnnotation(flagHostname, "version", []string{"1.25"})
- flags.Var(&opts.entrypoint, flagEntrypoint, "Overwrite the default ENTRYPOINT of the image")
- flags.Var(&opts.capAdd, flagCapAdd, "Add Linux capabilities")
+ flags.Var(&options.entrypoint, flagEntrypoint, "Overwrite the default ENTRYPOINT of the image")
+ flags.Var(&options.capAdd, flagCapAdd, "Add Linux capabilities")
flags.SetAnnotation(flagCapAdd, "version", []string{"1.41"})
- flags.Var(&opts.capDrop, flagCapDrop, "Drop Linux capabilities")
+ flags.Var(&options.capDrop, flagCapDrop, "Drop Linux capabilities")
flags.SetAnnotation(flagCapDrop, "version", []string{"1.41"})

- flags.Var(&opts.resources.limitCPU, flagLimitCPU, "Limit CPUs")
- flags.Var(&opts.resources.limitMemBytes, flagLimitMemory, "Limit Memory")
- flags.Var(&opts.resources.resCPU, flagReserveCPU, "Reserve CPUs")
- flags.Var(&opts.resources.resMemBytes, flagReserveMemory, "Reserve Memory")
- flags.Int64Var(&opts.resources.limitPids, flagLimitPids, 0, "Limit maximum number of processes (default 0 = unlimited)")
+ flags.Var(&options.resources.limitCPU, flagLimitCPU, "Limit CPUs")
+ flags.Var(&options.resources.limitMemBytes, flagLimitMemory, "Limit Memory")
+ flags.Var(&options.resources.resCPU, flagReserveCPU, "Reserve CPUs")
+ flags.Var(&options.resources.resMemBytes, flagReserveMemory, "Reserve Memory")
+ flags.Int64Var(&options.resources.limitPids, flagLimitPids, 0, "Limit maximum number of processes (default 0 = unlimited)")
flags.SetAnnotation(flagLimitPids, "version", []string{"1.41"})

- flags.Var(&opts.stopGrace, flagStopGracePeriod, flagDesc(flagStopGracePeriod, "Time to wait before force killing a container (ns|us|ms|s|m|h)"))
- flags.Var(&opts.replicas, flagReplicas, "Number of tasks")
- flags.Var(&opts.maxConcurrent, flagConcurrent, "Number of job tasks to run concurrently (default equal to --replicas)")
+ flags.Var(&options.stopGrace, flagStopGracePeriod, flagDesc(flagStopGracePeriod, "Time to wait before force killing a container (ns|us|ms|s|m|h)"))
+ flags.Var(&options.replicas, flagReplicas, "Number of tasks")
+ flags.Var(&options.maxConcurrent, flagConcurrent, "Number of job tasks to run concurrently (default equal to --replicas)")
flags.SetAnnotation(flagConcurrent, "version", []string{"1.41"})
- flags.Uint64Var(&opts.maxReplicas, flagMaxReplicas, defaultFlagValues.getUint64(flagMaxReplicas), "Maximum number of tasks per node (default 0 = unlimited)")
+ flags.Uint64Var(&options.maxReplicas, flagMaxReplicas, defaultFlagValues.getUint64(flagMaxReplicas), "Maximum number of tasks per node (default 0 = unlimited)")
flags.SetAnnotation(flagMaxReplicas, "version", []string{"1.40"})

- flags.StringVar(&opts.restartPolicy.condition, flagRestartCondition, "", flagDesc(flagRestartCondition, `Restart when condition is met ("none", "on-failure", "any")`))
- flags.Var(&opts.restartPolicy.delay, flagRestartDelay, flagDesc(flagRestartDelay, "Delay between restart attempts (ns|us|ms|s|m|h)"))
- flags.Var(&opts.restartPolicy.maxAttempts, flagRestartMaxAttempts, flagDesc(flagRestartMaxAttempts, "Maximum number of restarts before giving up"))
+ flags.StringVar(&options.restartPolicy.condition, flagRestartCondition, "", flagDesc(flagRestartCondition, `Restart when condition is met ("none", "on-failure", "any")`))
+ flags.Var(&options.restartPolicy.delay, flagRestartDelay, flagDesc(flagRestartDelay, "Delay between restart attempts (ns|us|ms|s|m|h)"))
+ flags.Var(&options.restartPolicy.maxAttempts, flagRestartMaxAttempts, flagDesc(flagRestartMaxAttempts, "Maximum number of restarts before giving up"))

- flags.Var(&opts.restartPolicy.window, flagRestartWindow, flagDesc(flagRestartWindow, "Window used to evaluate the restart policy (ns|us|ms|s|m|h)"))
+ flags.Var(&options.restartPolicy.window, flagRestartWindow, flagDesc(flagRestartWindow, "Window used to evaluate the restart policy (ns|us|ms|s|m|h)"))

- flags.Uint64Var(&opts.update.parallelism, flagUpdateParallelism, defaultFlagValues.getUint64(flagUpdateParallelism), "Maximum number of tasks updated simultaneously (0 to update all at once)")
- flags.DurationVar(&opts.update.delay, flagUpdateDelay, 0, flagDesc(flagUpdateDelay, "Delay between updates (ns|us|ms|s|m|h)"))
- flags.DurationVar(&opts.update.monitor, flagUpdateMonitor, 0, flagDesc(flagUpdateMonitor, "Duration after each task update to monitor for failure (ns|us|ms|s|m|h)"))
+ flags.Uint64Var(&options.update.parallelism, flagUpdateParallelism, defaultFlagValues.getUint64(flagUpdateParallelism), "Maximum number of tasks updated simultaneously (0 to update all at once)")
+ flags.DurationVar(&options.update.delay, flagUpdateDelay, 0, flagDesc(flagUpdateDelay, "Delay between updates (ns|us|ms|s|m|h)"))
+ flags.DurationVar(&options.update.monitor, flagUpdateMonitor, 0, flagDesc(flagUpdateMonitor, "Duration after each task update to monitor for failure (ns|us|ms|s|m|h)"))
flags.SetAnnotation(flagUpdateMonitor, "version", []string{"1.25"})
- flags.StringVar(&opts.update.onFailure, flagUpdateFailureAction, "", flagDesc(flagUpdateFailureAction, `Action on update failure ("pause", "continue", "rollback")`))
- flags.Var(&opts.update.maxFailureRatio, flagUpdateMaxFailureRatio, flagDesc(flagUpdateMaxFailureRatio, "Failure rate to tolerate during an update"))
+ flags.StringVar(&options.update.onFailure, flagUpdateFailureAction, "", flagDesc(flagUpdateFailureAction, `Action on update failure ("pause", "continue", "rollback")`))
+ flags.Var(&options.update.maxFailureRatio, flagUpdateMaxFailureRatio, flagDesc(flagUpdateMaxFailureRatio, "Failure rate to tolerate during an update"))
flags.SetAnnotation(flagUpdateMaxFailureRatio, "version", []string{"1.25"})
- flags.StringVar(&opts.update.order, flagUpdateOrder, "", flagDesc(flagUpdateOrder, `Update order ("start-first", "stop-first")`))
+ flags.StringVar(&options.update.order, flagUpdateOrder, "", flagDesc(flagUpdateOrder, `Update order ("start-first", "stop-first")`))
flags.SetAnnotation(flagUpdateOrder, "version", []string{"1.29"})

- flags.Uint64Var(&opts.rollback.parallelism, flagRollbackParallelism, defaultFlagValues.getUint64(flagRollbackParallelism),
+ flags.Uint64Var(&options.rollback.parallelism, flagRollbackParallelism, defaultFlagValues.getUint64(flagRollbackParallelism),
"Maximum number of tasks rolled back simultaneously (0 to roll back all at once)")
flags.SetAnnotation(flagRollbackParallelism, "version", []string{"1.28"})
- flags.DurationVar(&opts.rollback.delay, flagRollbackDelay, 0, flagDesc(flagRollbackDelay, "Delay between task rollbacks (ns|us|ms|s|m|h)"))
+ flags.DurationVar(&options.rollback.delay, flagRollbackDelay, 0, flagDesc(flagRollbackDelay, "Delay between task rollbacks (ns|us|ms|s|m|h)"))
flags.SetAnnotation(flagRollbackDelay, "version", []string{"1.28"})
- flags.DurationVar(&opts.rollback.monitor, flagRollbackMonitor, 0, flagDesc(flagRollbackMonitor, "Duration after each task rollback to monitor for failure (ns|us|ms|s|m|h)"))
+ flags.DurationVar(&options.rollback.monitor, flagRollbackMonitor, 0, flagDesc(flagRollbackMonitor, "Duration after each task rollback to monitor for failure (ns|us|ms|s|m|h)"))
flags.SetAnnotation(flagRollbackMonitor, "version", []string{"1.28"})
- flags.StringVar(&opts.rollback.onFailure, flagRollbackFailureAction, "", flagDesc(flagRollbackFailureAction, `Action on rollback failure ("pause", "continue")`))
+ flags.StringVar(&options.rollback.onFailure, flagRollbackFailureAction, "", flagDesc(flagRollbackFailureAction, `Action on rollback failure ("pause", "continue")`))
flags.SetAnnotation(flagRollbackFailureAction, "version", []string{"1.28"})
- flags.Var(&opts.rollback.maxFailureRatio, flagRollbackMaxFailureRatio, flagDesc(flagRollbackMaxFailureRatio, "Failure rate to tolerate during a rollback"))
+ flags.Var(&options.rollback.maxFailureRatio, flagRollbackMaxFailureRatio, flagDesc(flagRollbackMaxFailureRatio, "Failure rate to tolerate during a rollback"))
flags.SetAnnotation(flagRollbackMaxFailureRatio, "version", []string{"1.28"})
- flags.StringVar(&opts.rollback.order, flagRollbackOrder, "", flagDesc(flagRollbackOrder, `Rollback order ("start-first", "stop-first")`))
+ flags.StringVar(&options.rollback.order, flagRollbackOrder, "", flagDesc(flagRollbackOrder, `Rollback order ("start-first", "stop-first")`))
flags.SetAnnotation(flagRollbackOrder, "version", []string{"1.29"})

- flags.StringVar(&opts.endpoint.mode, flagEndpointMode, defaultFlagValues.getString(flagEndpointMode), "Endpoint mode (vip or dnsrr)")
+ flags.StringVar(&options.endpoint.mode, flagEndpointMode, defaultFlagValues.getString(flagEndpointMode), "Endpoint mode (vip or dnsrr)")

- flags.BoolVar(&opts.registryAuth, flagRegistryAuth, false, "Send registry authentication details to swarm agents")
- flags.BoolVar(&opts.noResolveImage, flagNoResolveImage, false, "Do not query the registry to resolve image digest and supported platforms")
+ flags.BoolVar(&options.registryAuth, flagRegistryAuth, false, "Send registry authentication details to swarm agents")
+ flags.BoolVar(&options.noResolveImage, flagNoResolveImage, false, "Do not query the registry to resolve image digest and supported platforms")
flags.SetAnnotation(flagNoResolveImage, "version", []string{"1.30"})

- flags.StringVar(&opts.logDriver.name, flagLogDriver, "", "Logging driver for service")
- flags.Var(&opts.logDriver.opts, flagLogOpt, "Logging driver options")
+ flags.StringVar(&options.logDriver.name, flagLogDriver, "", "Logging driver for service")
+ flags.Var(&options.logDriver.opts, flagLogOpt, "Logging driver options")

- flags.StringVar(&opts.healthcheck.cmd, flagHealthCmd, "", "Command to run to check health")
+ flags.StringVar(&options.healthcheck.cmd, flagHealthCmd, "", "Command to run to check health")
flags.SetAnnotation(flagHealthCmd, "version", []string{"1.25"})
- flags.Var(&opts.healthcheck.interval, flagHealthInterval, "Time between running the check (ms|s|m|h)")
+ flags.Var(&options.healthcheck.interval, flagHealthInterval, "Time between running the check (ms|s|m|h)")
flags.SetAnnotation(flagHealthInterval, "version", []string{"1.25"})
- flags.Var(&opts.healthcheck.timeout, flagHealthTimeout, "Maximum time to allow one check to run (ms|s|m|h)")
+ flags.Var(&options.healthcheck.timeout, flagHealthTimeout, "Maximum time to allow one check to run (ms|s|m|h)")
flags.SetAnnotation(flagHealthTimeout, "version", []string{"1.25"})
- flags.IntVar(&opts.healthcheck.retries, flagHealthRetries, 0, "Consecutive failures needed to report unhealthy")
+ flags.IntVar(&options.healthcheck.retries, flagHealthRetries, 0, "Consecutive failures needed to report unhealthy")
flags.SetAnnotation(flagHealthRetries, "version", []string{"1.25"})
- flags.Var(&opts.healthcheck.startPeriod, flagHealthStartPeriod, "Start period for the container to initialize before counting retries towards unstable (ms|s|m|h)")
+ flags.Var(&options.healthcheck.startPeriod, flagHealthStartPeriod, "Start period for the container to initialize before counting retries towards unstable (ms|s|m|h)")
flags.SetAnnotation(flagHealthStartPeriod, "version", []string{"1.29"})
- flags.Var(&opts.healthcheck.startInterval, flagHealthStartInterval, "Time between running the check during the start period (ms|s|m|h)")
+ flags.Var(&options.healthcheck.startInterval, flagHealthStartInterval, "Time between running the check during the start period (ms|s|m|h)")
flags.SetAnnotation(flagHealthStartInterval, "version", []string{"1.44"})
- flags.BoolVar(&opts.healthcheck.noHealthcheck, flagNoHealthcheck, false, "Disable any container-specified HEALTHCHECK")
+ flags.BoolVar(&options.healthcheck.noHealthcheck, flagNoHealthcheck, false, "Disable any container-specified HEALTHCHECK")
flags.SetAnnotation(flagNoHealthcheck, "version", []string{"1.25"})

- flags.BoolVarP(&opts.tty, flagTTY, "t", false, "Allocate a pseudo-TTY")
+ flags.BoolVarP(&options.tty, flagTTY, "t", false, "Allocate a pseudo-TTY")
flags.SetAnnotation(flagTTY, "version", []string{"1.25"})

- flags.BoolVar(&opts.readOnly, flagReadOnly, false, "Mount the container's root filesystem as read only")
+ flags.BoolVar(&options.readOnly, flagReadOnly, false, "Mount the container's root filesystem as read only")
flags.SetAnnotation(flagReadOnly, "version", []string{"1.28"})

- flags.StringVar(&opts.stopSignal, flagStopSignal, "", "Signal to stop the container")
+ flags.StringVar(&options.stopSignal, flagStopSignal, "", "Signal to stop the container")
flags.SetAnnotation(flagStopSignal, "version", []string{"1.28"})
- flags.StringVar(&opts.isolation, flagIsolation, "", "Service container isolation mode")
+ flags.StringVar(&options.isolation, flagIsolation, "", "Service container isolation mode")
flags.SetAnnotation(flagIsolation, "version", []string{"1.35"})
}
@ -12,7 +12,7 @@ import (

// ParseSecrets retrieves the secrets with the requested names and fills
// secret IDs into the secret references.
- func ParseSecrets(client client.SecretAPIClient, requestedSecrets []*swarmtypes.SecretReference) ([]*swarmtypes.SecretReference, error) {
+ func ParseSecrets(apiClient client.SecretAPIClient, requestedSecrets []*swarmtypes.SecretReference) ([]*swarmtypes.SecretReference, error) {
if len(requestedSecrets) == 0 {
return []*swarmtypes.SecretReference{}, nil
}

@ -34,7 +34,7 @@ func ParseSecrets(client client.SecretAPIClient, requestedSecrets []*swarmtypes.
args.Add("name", s.SecretName)
}

- secrets, err := client.SecretList(ctx, types.SecretListOptions{
+ secrets, err := apiClient.SecretList(ctx, types.SecretListOptions{
Filters: args,
})
if err != nil {

@ -65,13 +65,13 @@ func ParseSecrets(client client.SecretAPIClient, requestedSecrets []*swarmtypes.

// ParseConfigs retrieves the configs from the requested names and converts
// them to config references to use with the spec
- func ParseConfigs(client client.ConfigAPIClient, requestedConfigs []*swarmtypes.ConfigReference) ([]*swarmtypes.ConfigReference, error) {
+ func ParseConfigs(apiClient client.ConfigAPIClient, requestedConfigs []*swarmtypes.ConfigReference) ([]*swarmtypes.ConfigReference, error) {
if len(requestedConfigs) == 0 {
return []*swarmtypes.ConfigReference{}, nil
}

// the configRefs map has two purposes: it prevents duplication of config
- // target filenames, and it it used to get all configs so we can resolve
+ // target filenames. It is used to get all configs, so we can resolve
// their IDs. unfortunately, there are other targets for ConfigReferences,
// besides just a File; specifically, the Runtime target, which is used for
// CredentialSpecs. Therefore, we need to have a list of ConfigReferences

@ -115,7 +115,7 @@ func ParseConfigs(client client.ConfigAPIClient, requestedConfigs []*swarmtypes.
args.Add("name", s.ConfigName)
}

- configs, err := client.ConfigList(ctx, types.ConfigListOptions{
+ configs, err := apiClient.ConfigList(ctx, types.ConfigListOptions{
Filters: args,
})
if err != nil {
@ -70,7 +70,7 @@ func terminalState(state swarm.TaskState) bool {
// ServiceProgress outputs progress information for convergence of a service.
//
//nolint:gocyclo
- func ServiceProgress(ctx context.Context, client client.APIClient, serviceID string, progressWriter io.WriteCloser) error {
+ func ServiceProgress(ctx context.Context, apiClient client.APIClient, serviceID string, progressWriter io.WriteCloser) error {
defer progressWriter.Close()

progressOut := streamformatter.NewJSONProgressOutput(progressWriter, false)

@ -84,7 +84,7 @@ func ServiceProgress(ctx context.Context, client client.APIClient, serviceID str
taskFilter.Add("_up-to-date", "true")

getUpToDateTasks := func() ([]swarm.Task, error) {
- return client.TaskList(ctx, types.TaskListOptions{Filters: taskFilter})
+ return apiClient.TaskList(ctx, types.TaskListOptions{Filters: taskFilter})
}

var (

@ -97,7 +97,7 @@ func ServiceProgress(ctx context.Context, client client.APIClient, serviceID str
)

for {
- service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{})
+ service, _, err := apiClient.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{})
if err != nil {
return err
}

@ -156,7 +156,7 @@ func ServiceProgress(ctx context.Context, client client.APIClient, serviceID str
return err
}

- activeNodes, err := getActiveNodes(ctx, client)
+ activeNodes, err := getActiveNodes(ctx, apiClient)
if err != nil {
return err
}

@ -218,8 +218,8 @@ func ServiceProgress(ctx context.Context, client client.APIClient, serviceID str
}
}

- func getActiveNodes(ctx context.Context, client client.APIClient) (map[string]struct{}, error) {
- nodes, err := client.NodeList(ctx, types.NodeListOptions{})
+ func getActiveNodes(ctx context.Context, apiClient client.APIClient) (map[string]struct{}, error) {
+ nodes, err := apiClient.NodeList(ctx, types.NodeListOptions{})
if err != nil {
return nil, err
}

@ -271,7 +271,7 @@ func writeOverallProgress(progressOut progress.Output, numerator, denominator in

func truncError(errMsg string) string {
// Remove newlines from the error, which corrupt the output.
- errMsg = strings.Replace(errMsg, "\n", " ", -1)
+ errMsg = strings.ReplaceAll(errMsg, "\n", " ")

// Limit the length to 75 characters, so that even on narrow terminals
// this will not overflow to the next line.

@ -493,7 +493,6 @@ func (u *globalProgressUpdater) tasksByNode(tasks []swarm.Task) map[string]swarm
numberedStates[existingTask.Status.State] <= numberedStates[task.Status.State] {
continue
}
-
}
tasksByNode[task.NodeID] = task
}
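strings.Replace with a count of -1 means "replace every occurrence", which is exactly what strings.ReplaceAll expresses; that is the substitution made in truncError above. A tiny sketch of the equivalence, on a hypothetical error message:

package main

import (
	"fmt"
	"strings"
)

func main() {
	msg := "line one\nline two\nline three"
	// Both calls produce the same string; ReplaceAll simply states the intent.
	a := strings.Replace(msg, "\n", " ", -1)
	b := strings.ReplaceAll(msg, "\n", " ")
	fmt.Println(a == b)
	fmt.Println(b)
}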
@ -70,7 +70,7 @@ func TestReplicatedProgressUpdaterOneReplica(t *testing.T) {
}

p := &mockProgress{}
- updaterTester := updaterTester{
+ ut := updaterTester{
t: t,
updater: &replicatedProgressUpdater{
progressOut: p,

@ -82,7 +82,7 @@ func TestReplicatedProgressUpdaterOneReplica(t *testing.T) {

tasks := []swarm.Task{}

- updaterTester.testUpdater(tasks, false,
+ ut.testUpdater(tasks, false,
[]progress.Progress{
{ID: "overall progress", Action: "0 out of 1 tasks"},
{ID: "1/1", Action: " "},

@ -97,14 +97,14 @@ func TestReplicatedProgressUpdaterOneReplica(t *testing.T) {
DesiredState: swarm.TaskStateShutdown,
Status: swarm.TaskStatus{State: swarm.TaskStateNew},
})
- updaterTester.testUpdater(tasks, false,
+ ut.testUpdater(tasks, false,
[]progress.Progress{
{ID: "overall progress", Action: "0 out of 1 tasks"},
})

// Task with valid DesiredState and State updates progress bar
tasks[0].DesiredState = swarm.TaskStateRunning
- updaterTester.testUpdater(tasks, false,
+ ut.testUpdater(tasks, false,
[]progress.Progress{
{ID: "1/1", Action: "new ", Current: 1, Total: 9, HideCounts: true},
{ID: "overall progress", Action: "0 out of 1 tasks"},

@ -113,7 +113,7 @@ func TestReplicatedProgressUpdaterOneReplica(t *testing.T) {
// If the task exposes an error, we should show that instead of the
// progress bar.
tasks[0].Status.Err = "something is wrong"
- updaterTester.testUpdater(tasks, false,
+ ut.testUpdater(tasks, false,
[]progress.Progress{
{ID: "1/1", Action: "something is wrong"},
{ID: "overall progress", Action: "0 out of 1 tasks"},

@ -122,7 +122,7 @@ func TestReplicatedProgressUpdaterOneReplica(t *testing.T) {
// When the task reaches running, update should return true
tasks[0].Status.Err = ""
tasks[0].Status.State = swarm.TaskStateRunning
- updaterTester.testUpdater(tasks, true,
+ ut.testUpdater(tasks, true,
[]progress.Progress{
{ID: "1/1", Action: "running ", Current: 9, Total: 9, HideCounts: true},
{ID: "overall progress", Action: "1 out of 1 tasks"},

@ -131,7 +131,7 @@ func TestReplicatedProgressUpdaterOneReplica(t *testing.T) {
// If the task fails, update should return false again
tasks[0].Status.Err = "task failed"
tasks[0].Status.State = swarm.TaskStateFailed
- updaterTester.testUpdater(tasks, false,
+ ut.testUpdater(tasks, false,
[]progress.Progress{
{ID: "1/1", Action: "task failed"},
{ID: "overall progress", Action: "0 out of 1 tasks"},

@ -147,7 +147,7 @@ func TestReplicatedProgressUpdaterOneReplica(t *testing.T) {
DesiredState: swarm.TaskStateRunning,
Status: swarm.TaskStatus{State: swarm.TaskStateRunning},
})
- updaterTester.testUpdater(tasks, true,
+ ut.testUpdater(tasks, true,
[]progress.Progress{
{ID: "1/1", Action: "running ", Current: 9, Total: 9, HideCounts: true},
{ID: "overall progress", Action: "1 out of 1 tasks"},

@ -162,7 +162,7 @@ func TestReplicatedProgressUpdaterOneReplica(t *testing.T) {
DesiredState: swarm.TaskStateRunning,
Status: swarm.TaskStatus{State: swarm.TaskStatePreparing},
})
- updaterTester.testUpdater(tasks, false,
+ ut.testUpdater(tasks, false,
[]progress.Progress{
{ID: "1/1", Action: "preparing", Current: 6, Total: 9, HideCounts: true},
{ID: "overall progress", Action: "0 out of 1 tasks"},

@ -183,7 +183,7 @@ func TestReplicatedProgressUpdaterManyReplicas(t *testing.T) {
}

p := &mockProgress{}
- updaterTester := updaterTester{
+ ut := updaterTester{
t: t,
updater: &replicatedProgressUpdater{
progressOut: p,

@ -196,7 +196,7 @@ func TestReplicatedProgressUpdaterManyReplicas(t *testing.T) {
tasks := []swarm.Task{}

// No per-task progress bars because there are too many replicas
- updaterTester.testUpdater(tasks, false,
+ ut.testUpdater(tasks, false,
[]progress.Progress{
{ID: "overall progress", Action: fmt.Sprintf("0 out of %d tasks", replicas)},
{ID: "overall progress", Action: fmt.Sprintf("0 out of %d tasks", replicas)},

@ -215,13 +215,13 @@ func TestReplicatedProgressUpdaterManyReplicas(t *testing.T) {
if i%2 == 1 {
tasks[i].NodeID = "b"
}
- updaterTester.testUpdater(tasks, false,
+ ut.testUpdater(tasks, false,
[]progress.Progress{
{ID: "overall progress", Action: fmt.Sprintf("%d out of %d tasks", i, replicas)},
})

tasks[i].Status.State = swarm.TaskStateRunning
- updaterTester.testUpdater(tasks, uint64(i) == replicas-1,
+ ut.testUpdater(tasks, uint64(i) == replicas-1,
[]progress.Progress{
{ID: "overall progress", Action: fmt.Sprintf("%d out of %d tasks", i+1, replicas)},
})

@ -238,7 +238,7 @@ func TestGlobalProgressUpdaterOneNode(t *testing.T) {
}

p := &mockProgress{}
- updaterTester := updaterTester{
+ ut := updaterTester{
t: t,
updater: &globalProgressUpdater{
progressOut: p,

@ -250,7 +250,7 @@ func TestGlobalProgressUpdaterOneNode(t *testing.T) {

tasks := []swarm.Task{}

- updaterTester.testUpdater(tasks, false,
+ ut.testUpdater(tasks, false,
[]progress.Progress{
{ID: "overall progress", Action: "waiting for new tasks"},
})

@ -263,7 +263,7 @@ func TestGlobalProgressUpdaterOneNode(t *testing.T) {
DesiredState: swarm.TaskStateShutdown,
Status: swarm.TaskStatus{State: swarm.TaskStateNew},
})
- updaterTester.testUpdater(tasks, false,
+ ut.testUpdater(tasks, false,
[]progress.Progress{
{ID: "overall progress", Action: "0 out of 1 tasks"},
{ID: "overall progress", Action: "0 out of 1 tasks"},

@ -271,7 +271,7 @@ func TestGlobalProgressUpdaterOneNode(t *testing.T) {

// Task with valid DesiredState and State updates progress bar
tasks[0].DesiredState = swarm.TaskStateRunning
- updaterTester.testUpdater(tasks, false,
+ ut.testUpdater(tasks, false,
[]progress.Progress{
{ID: "a", Action: "new ", Current: 1, Total: 9, HideCounts: true},
{ID: "overall progress", Action: "0 out of 1 tasks"},

@ -280,7 +280,7 @@ func TestGlobalProgressUpdaterOneNode(t *testing.T) {
// If the task exposes an error, we should show that instead of the
// progress bar.
tasks[0].Status.Err = "something is wrong"
- updaterTester.testUpdater(tasks, false,
+ ut.testUpdater(tasks, false,
[]progress.Progress{
{ID: "a", Action: "something is wrong"},
{ID: "overall progress", Action: "0 out of 1 tasks"},

@ -289,7 +289,7 @@ func TestGlobalProgressUpdaterOneNode(t *testing.T) {
// When the task reaches running, update should return true
tasks[0].Status.Err = ""
tasks[0].Status.State = swarm.TaskStateRunning
- updaterTester.testUpdater(tasks, true,
+ ut.testUpdater(tasks, true,
[]progress.Progress{
{ID: "a", Action: "running ", Current: 9, Total: 9, HideCounts: true},
{ID: "overall progress", Action: "1 out of 1 tasks"},

@ -298,7 +298,7 @@ func TestGlobalProgressUpdaterOneNode(t *testing.T) {
// If the task fails, update should return false again
tasks[0].Status.Err = "task failed"
tasks[0].Status.State = swarm.TaskStateFailed
- updaterTester.testUpdater(tasks, false,
+ ut.testUpdater(tasks, false,
[]progress.Progress{
{ID: "a", Action: "task failed"},
{ID: "overall progress", Action: "0 out of 1 tasks"},

@ -314,7 +314,7 @@ func TestGlobalProgressUpdaterOneNode(t *testing.T) {
DesiredState: swarm.TaskStateRunning,
Status: swarm.TaskStatus{State: swarm.TaskStateRunning},
})
- updaterTester.testUpdater(tasks, true,
+ ut.testUpdater(tasks, true,
[]progress.Progress{
{ID: "a", Action: "running ", Current: 9, Total: 9, HideCounts: true},
{ID: "overall progress", Action: "1 out of 1 tasks"},

@ -329,7 +329,7 @@ func TestGlobalProgressUpdaterOneNode(t *testing.T) {
DesiredState: swarm.TaskStateRunning,
Status: swarm.TaskStatus{State: swarm.TaskStatePreparing},
})
- updaterTester.testUpdater(tasks, false,
+ ut.testUpdater(tasks, false,
[]progress.Progress{
{ID: "a", Action: "preparing", Current: 6, Total: 9, HideCounts: true},
{ID: "overall progress", Action: "0 out of 1 tasks"},

@ -348,7 +348,7 @@ func TestGlobalProgressUpdaterManyNodes(t *testing.T) {
}

p := &mockProgress{}
- updaterTester := updaterTester{
+ ut := updaterTester{
t: t,
updater: &globalProgressUpdater{
progressOut: p,

@ -359,12 +359,12 @@ func TestGlobalProgressUpdaterManyNodes(t *testing.T) {
}

for i := 0; i != nodes; i++ {
- updaterTester.activeNodes[strconv.Itoa(i)] = struct{}{}
+ ut.activeNodes[strconv.Itoa(i)] = struct{}{}
}

tasks := []swarm.Task{}

- updaterTester.testUpdater(tasks, false,
+ ut.testUpdater(tasks, false,
[]progress.Progress{
{ID: "overall progress", Action: "waiting for new tasks"},
})

@ -379,7 +379,7 @@ func TestGlobalProgressUpdaterManyNodes(t *testing.T) {
})
}

- updaterTester.testUpdater(tasks, false,
+ ut.testUpdater(tasks, false,
[]progress.Progress{
{ID: "overall progress", Action: fmt.Sprintf("0 out of %d tasks", nodes)},
{ID: "overall progress", Action: fmt.Sprintf("0 out of %d tasks", nodes)},

@ -387,7 +387,7 @@ func TestGlobalProgressUpdaterManyNodes(t *testing.T) {

for i := 0; i != nodes; i++ {
tasks[i].Status.State = swarm.TaskStateRunning
- updaterTester.testUpdater(tasks, i == nodes-1,
+ ut.testUpdater(tasks, i == nodes-1,
[]progress.Progress{
{ID: "overall progress", Action: fmt.Sprintf("%d out of %d tasks", i+1, nodes)},
})
@ -52,18 +52,18 @@ func newPsCommand(dockerCli command.Cli) *cobra.Command {
}

func runPS(dockerCli command.Cli, options psOptions) error {
- client := dockerCli.Client()
+ apiClient := dockerCli.Client()
ctx := context.Background()

- filter, notfound, err := createFilter(ctx, client, options)
+ filter, notfound, err := createFilter(ctx, apiClient, options)
if err != nil {
return err
}
- if err := updateNodeFilter(ctx, client, filter); err != nil {
+ if err := updateNodeFilter(ctx, apiClient, filter); err != nil {
return err
}

- tasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: filter})
+ tasks, err := apiClient.TaskList(ctx, types.TaskListOptions{Filters: filter})
if err != nil {
return err
}

@ -75,7 +75,7 @@ func runPS(dockerCli command.Cli, options psOptions) error {
if options.quiet {
options.noTrunc = true
}
- if err := task.Print(ctx, dockerCli, tasks, idresolver.New(client, options.noResolve), !options.noTrunc, options.quiet, format); err != nil {
+ if err := task.Print(ctx, dockerCli, tasks, idresolver.New(apiClient, options.noResolve), !options.noTrunc, options.quiet, format); err != nil {
return err
}
if len(notfound) != 0 {

@ -84,7 +84,7 @@ func runPS(dockerCli command.Cli, options psOptions) error {
return nil
}

- func createFilter(ctx context.Context, client client.APIClient, options psOptions) (filters.Args, []string, error) {
+ func createFilter(ctx context.Context, apiClient client.APIClient, options psOptions) (filters.Args, []string, error) {
filter := options.filter.Value()

serviceIDFilter := filters.NewArgs()

@ -93,11 +93,11 @@ func createFilter(ctx context.Context, client client.APIClient, options psOption
serviceIDFilter.Add("id", service)
serviceNameFilter.Add("name", service)
}
- serviceByIDList, err := client.ServiceList(ctx, types.ServiceListOptions{Filters: serviceIDFilter})
+ serviceByIDList, err := apiClient.ServiceList(ctx, types.ServiceListOptions{Filters: serviceIDFilter})
if err != nil {
return filter, nil, err
}
- serviceByNameList, err := client.ServiceList(ctx, types.ServiceListOptions{Filters: serviceNameFilter})
+ serviceByNameList, err := apiClient.ServiceList(ctx, types.ServiceListOptions{Filters: serviceNameFilter})
if err != nil {
return filter, nil, err
}

@ -142,11 +142,11 @@ loop:
return filter, notfound, err
}

- func updateNodeFilter(ctx context.Context, client client.APIClient, filter filters.Args) error {
+ func updateNodeFilter(ctx context.Context, apiClient client.APIClient, filter filters.Args) error {
if filter.Contains("node") {
nodeFilters := filter.Get("node")
for _, nodeFilter := range nodeFilters {
- nodeReference, err := node.Reference(ctx, client, nodeFilter)
+ nodeReference, err := node.Reference(ctx, apiClient, nodeFilter)
if err != nil {
return err
}
@ -76,7 +76,6 @@ func runScale(dockerCli command.Cli, options *scaleOptions, args []string) error
} else {
serviceIDs = append(serviceIDs, serviceID)
}
-
}

if len(serviceIDs) > 0 {

@ -104,11 +103,12 @@ func runServiceScale(ctx context.Context, dockerCli command.Cli, serviceID strin
}

serviceMode := &service.Spec.Mode
- if serviceMode.Replicated != nil {
+ switch {
+ case serviceMode.Replicated != nil:
serviceMode.Replicated.Replicas = &scale
- } else if serviceMode.ReplicatedJob != nil {
+ case serviceMode.ReplicatedJob != nil:
serviceMode.ReplicatedJob.TotalCompletions = &scale
- } else {
+ default:
return errors.Errorf("scale can only be used with replicated or replicated-job mode")
}
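Rewriting an if / else-if / else chain as a tagless switch is gocritic's ifElseChain suggestion, applied in the scale hunk above and again in update.go below; cases are evaluated top to bottom, so the control flow is unchanged. A minimal sketch with hypothetical service-mode flags:

package main

import "fmt"

type mode struct {
	replicated    bool
	replicatedJob bool
}

// describeMode uses a tagless switch; each case is tried in order, exactly
// like the if / else-if / else chain it replaces.
func describeMode(m mode) string {
	switch {
	case m.replicated:
		return "replicated"
	case m.replicatedJob:
		return "replicated-job"
	default:
		return "unsupported"
	}
}

func main() {
	fmt.Println(describeMode(mode{replicated: true}), describeMode(mode{}))
}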
@ -219,7 +219,8 @@ func runUpdate(dockerCli command.Cli, flags *pflag.FlagSet, options *serviceOpti
if err != nil {
return err
}
- if sendAuth {
+ switch {
+ case sendAuth:
// Retrieve encoded auth token from the image reference
// This would be the old image if it didn't change in this update
image := spec.TaskTemplate.ContainerSpec.Image

@ -228,9 +229,9 @@ func runUpdate(dockerCli command.Cli, flags *pflag.FlagSet, options *serviceOpti
return err
}
updateOpts.EncodedRegistryAuth = encodedAuth
- } else if clientSideRollback {
+ case clientSideRollback:
updateOpts.RegistryAuthFrom = types.RegistryAuthFromPreviousSpec
- } else {
+ default:
updateOpts.RegistryAuthFrom = types.RegistryAuthFromSpec
}

@ -727,8 +728,10 @@ func updateUlimits(flags *pflag.FlagSet, ulimits []*units.Ulimit) []*units.Ulimi
newUlimits[ulimit.Name] = ulimit
}
}
-
- var limits []*units.Ulimit
+ if len(newUlimits) == 0 {
+ return nil
+ }
+ limits := make([]*units.Ulimit, 0, len(newUlimits))
for _, ulimit := range newUlimits {
limits = append(limits, ulimit)
}

@ -799,7 +802,7 @@ func getUpdatedConfigs(apiClient client.ConfigAPIClient, flags *pflag.FlagSet, s
if flags.Changed(flagCredentialSpec) {
credSpec := flags.Lookup(flagCredentialSpec).Value.(*credentialSpecOpt).Value()
credSpecConfigName = credSpec.Config
- } else {
+ } else { //nolint:gocritic // ignore elseif: can replace 'else {if cond {}}' with 'else if cond {}'
// if the credential spec flag has not changed, then check if there
// already is a credentialSpec. if there is one, and it's for a Config,
// then it's from the old object, and its value is the config ID. we

@ -1307,7 +1310,7 @@ func updateNetworks(ctx context.Context, apiClient client.NetworkAPIClient, flag
}

existingNetworks := make(map[string]struct{})
- var newNetworks []swarm.NetworkAttachmentConfig
+ var newNetworks []swarm.NetworkAttachmentConfig //nolint:prealloc
for _, network := range specNetworks {
if _, exists := idsToRemove[network.Target]; exists {
continue

@ -1362,7 +1365,7 @@ func updateCredSpecConfig(flags *pflag.FlagSet, containerSpec *swarm.ContainerSp
// otherwise, set the credential spec to be the parsed value
credSpec := credSpecOpt.Value.(*credentialSpecOpt).Value()

- // if this is a Config credential spec, we we still need to replace the
+ // if this is a Config credential spec, we still need to replace the
// value of credSpec.Config with the config ID instead of Name.
if credSpec.Config != "" {
for _, config := range containerSpec.Configs {

@ -1503,10 +1506,13 @@ func updateCapabilities(flags *pflag.FlagSet, containerSpec *swarm.ContainerSpec
}

func capsList(caps map[string]bool) []string {
+ if len(caps) == 0 {
+ return nil
+ }
if caps[opts.AllCapabilities] {
return []string{opts.AllCapabilities}
}
- var out []string
+ out := make([]string, 0, len(caps))
for c := range caps {
out = append(out, c)
}
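The //nolint:prealloc added in updateNetworks marks a loop where entries can be skipped, so the final slice length is not simply the input length and pre-sizing would be misleading; suppressing the hint there is deliberate. A small sketch of such a case, with a hypothetical filter:

package main

import "fmt"

// filterEven keeps only even values. Because the output length depends on the
// data rather than on len(input), growing with append is the natural choice;
// this is the kind of loop a //nolint:prealloc annotation is used to mark.
func filterEven(input []int) []int {
	var out []int
	for _, n := range input {
		if n%2 != 0 {
			continue
		}
		out = append(out, n)
	}
	return out
}

func main() {
	fmt.Println(filterEven([]int{1, 2, 3, 4, 5, 6}))
}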
Some files were not shown because too many files have changed in this diff.