mirror of https://github.com/docker/cli.git

golangci-lint: revive: enable import-shadowing

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>

parent 391668f57a
commit 8e9aec6904

@@ -55,6 +55,13 @@ linters-settings:
    command: nakedret
    pattern: ^(?P<path>.*?\\.go):(?P<line>\\d+)\\s*(?P<message>.*)$

+  revive:
+    rules:
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#import-shadowing
+      - name: import-shadowing
+        severity: warning
+        disabled: false
+
issues:
  # The default exclusion rules are a bit too permissive, so copying the relevant ones below
  exclude-use-default: false

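For context, a minimal hypothetical sketch (not part of this commit) of what revive's import-shadowing rule reports, and why the hunks below rename local identifiers (container to ctr, client to apiClient, image to img, and so on); the names in this example are made up for illustration.

package main

import (
	"fmt"
	"strings"
)

// A parameter named "strings" would shadow the imported "strings" package
// inside the function, so the package is no longer reachable there; revive's
// import-shadowing rule flags declarations like this:
//
//	func describe(strings []string) string { // shadows package "strings"
//		return strings.Join(strings, ", ")   // "strings" is now a []string
//	}

// Renaming the parameter removes the shadowing.
func describe(items []string) string {
	return strings.Join(items, ", ")
}

func main() {
	fmt.Println(describe([]string{"a", "b", "c"}))
}
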
@@ -394,7 +394,7 @@ func (cli *DockerCli) CurrentContext() string {
// occur when trying to use it.
//
// Refer to [DockerCli.CurrentContext] above for further details.
-func resolveContextName(opts *cliflags.ClientOptions, config *configfile.ConfigFile) string {
+func resolveContextName(opts *cliflags.ClientOptions, cfg *configfile.ConfigFile) string {
if opts != nil && opts.Context != "" {
return opts.Context
}

@@ -407,9 +407,9 @@ func resolveContextName(opts *cliflags.ClientOptions, config *configfile.ConfigF
if ctxName := os.Getenv(EnvOverrideContext); ctxName != "" {
return ctxName
}
-if config != nil && config.CurrentContext != "" {
+if cfg != nil && cfg.CurrentContext != "" {
// We don't validate if this context exists: errors may occur when trying to use it.
-return config.CurrentContext
+return cfg.CurrentContext
}
return DefaultContextName
}

@@ -24,8 +24,8 @@ type AttachOptions struct {
DetachKeys string
}

-func inspectContainerAndCheckState(ctx context.Context, cli client.APIClient, args string) (*types.ContainerJSON, error) {
-c, err := cli.ContainerInspect(ctx, args)
+func inspectContainerAndCheckState(ctx context.Context, apiClient client.APIClient, args string) (*types.ContainerJSON, error) {
+c, err := apiClient.ContainerInspect(ctx, args)
if err != nil {
return nil, err
}

@@ -45,21 +45,21 @@ func inspectContainerAndCheckState(ctx context.Context, cli client.APIClient, ar
// NewAttachCommand creates a new cobra.Command for `docker attach`
func NewAttachCommand(dockerCli command.Cli) *cobra.Command {
var opts AttachOptions
-var container string
+var ctr string

cmd := &cobra.Command{
Use: "attach [OPTIONS] CONTAINER",
Short: "Attach local standard input, output, and error streams to a running container",
Args: cli.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
-container = args[0]
-return RunAttach(context.Background(), dockerCli, container, &opts)
+ctr = args[0]
+return RunAttach(context.Background(), dockerCli, ctr, &opts)
},
Annotations: map[string]string{
"aliases": "docker container attach, docker attach",
},
-ValidArgsFunction: completion.ContainerNames(dockerCli, false, func(container types.Container) bool {
-return container.State != "paused"
+ValidArgsFunction: completion.ContainerNames(dockerCli, false, func(ctr types.Container) bool {
+return ctr.State != "paused"
}),
}

@@ -71,8 +71,8 @@ func NewAttachCommand(dockerCli command.Cli) *cobra.Command {
}

// RunAttach executes an `attach` command
-func RunAttach(ctx context.Context, dockerCli command.Cli, target string, opts *AttachOptions) error {
-apiClient := dockerCli.Client()
+func RunAttach(ctx context.Context, dockerCLI command.Cli, target string, opts *AttachOptions) error {
+apiClient := dockerCLI.Client()

// request channel to wait for client
resultC, errC := apiClient.ContainerWait(ctx, target, "")

@@ -82,11 +82,11 @@ func RunAttach(ctx context.Context, dockerCli command.Cli, target string, opts *
return err
}

-if err := dockerCli.In().CheckTty(!opts.NoStdin, c.Config.Tty); err != nil {
+if err := dockerCLI.In().CheckTty(!opts.NoStdin, c.Config.Tty); err != nil {
return err
}

-detachKeys := dockerCli.ConfigFile().DetachKeys
+detachKeys := dockerCLI.ConfigFile().DetachKeys
if opts.DetachKeys != "" {
detachKeys = opts.DetachKeys
}

@@ -101,7 +101,7 @@ func RunAttach(ctx context.Context, dockerCli command.Cli, target string, opts *
var in io.ReadCloser
if options.Stdin {
-in = dockerCli.In()
+in = dockerCLI.In()
}

if opts.Proxy && !c.Config.Tty {

@@ -129,15 +129,15 @@ func RunAttach(ctx context.Context, dockerCli command.Cli, target string, opts *
return err
}

-if c.Config.Tty && dockerCli.Out().IsTerminal() {
-resizeTTY(ctx, dockerCli, target)
+if c.Config.Tty && dockerCLI.Out().IsTerminal() {
+resizeTTY(ctx, dockerCLI, target)
}

streamer := hijackedIOStreamer{
-streams: dockerCli,
+streams: dockerCLI,
inputStream: in,
-outputStream: dockerCli.Out(),
-errorStream: dockerCli.Err(),
+outputStream: dockerCLI.Out(),
+errorStream: dockerCLI.Err(),
resp: resp,
tty: c.Config.Tty,
detachKeys: options.DetachKeys,

@@ -16,24 +16,24 @@ type fakeClient struct {
client.Client
inspectFunc func(string) (types.ContainerJSON, error)
execInspectFunc func(execID string) (types.ContainerExecInspect, error)
-execCreateFunc func(container string, config types.ExecConfig) (types.IDResponse, error)
+execCreateFunc func(containerID string, config types.ExecConfig) (types.IDResponse, error)
createContainerFunc func(config *container.Config,
hostConfig *container.HostConfig,
networkingConfig *network.NetworkingConfig,
platform *specs.Platform,
containerName string) (container.CreateResponse, error)
-containerStartFunc func(container string, options container.StartOptions) error
+containerStartFunc func(containerID string, options container.StartOptions) error
imageCreateFunc func(parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error)
infoFunc func() (system.Info, error)
-containerStatPathFunc func(container, path string) (types.ContainerPathStat, error)
-containerCopyFromFunc func(container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error)
+containerStatPathFunc func(containerID, path string) (types.ContainerPathStat, error)
+containerCopyFromFunc func(containerID, srcPath string) (io.ReadCloser, types.ContainerPathStat, error)
logFunc func(string, container.LogsOptions) (io.ReadCloser, error)
waitFunc func(string) (<-chan container.WaitResponse, <-chan error)
containerListFunc func(container.ListOptions) ([]types.Container, error)
containerExportFunc func(string) (io.ReadCloser, error)
containerExecResizeFunc func(id string, options container.ResizeOptions) error
-containerRemoveFunc func(ctx context.Context, container string, options container.RemoveOptions) error
-containerKillFunc func(ctx context.Context, container, signal string) error
+containerRemoveFunc func(ctx context.Context, containerID string, options container.RemoveOptions) error
+containerKillFunc func(ctx context.Context, containerID, signal string) error
Version string
}

@@ -51,9 +51,9 @@ func (f *fakeClient) ContainerInspect(_ context.Context, containerID string) (ty
return types.ContainerJSON{}, nil
}

-func (f *fakeClient) ContainerExecCreate(_ context.Context, container string, config types.ExecConfig) (types.IDResponse, error) {
+func (f *fakeClient) ContainerExecCreate(_ context.Context, containerID string, config types.ExecConfig) (types.IDResponse, error) {
if f.execCreateFunc != nil {
-return f.execCreateFunc(container, config)
+return f.execCreateFunc(containerID, config)
}
return types.IDResponse{}, nil
}

@@ -83,9 +83,9 @@ func (f *fakeClient) ContainerCreate(
return container.CreateResponse{}, nil
}

-func (f *fakeClient) ContainerRemove(ctx context.Context, container string, options container.RemoveOptions) error {
+func (f *fakeClient) ContainerRemove(ctx context.Context, containerID string, options container.RemoveOptions) error {
if f.containerRemoveFunc != nil {
-return f.containerRemoveFunc(ctx, container, options)
+return f.containerRemoveFunc(ctx, containerID, options)
}
return nil
}

@@ -104,23 +104,23 @@ func (f *fakeClient) Info(_ context.Context) (system.Info, error) {
return system.Info{}, nil
}

-func (f *fakeClient) ContainerStatPath(_ context.Context, container, path string) (types.ContainerPathStat, error) {
+func (f *fakeClient) ContainerStatPath(_ context.Context, containerID, path string) (types.ContainerPathStat, error) {
if f.containerStatPathFunc != nil {
-return f.containerStatPathFunc(container, path)
+return f.containerStatPathFunc(containerID, path)
}
return types.ContainerPathStat{}, nil
}

-func (f *fakeClient) CopyFromContainer(_ context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) {
+func (f *fakeClient) CopyFromContainer(_ context.Context, containerID, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) {
if f.containerCopyFromFunc != nil {
-return f.containerCopyFromFunc(container, srcPath)
+return f.containerCopyFromFunc(containerID, srcPath)
}
return nil, types.ContainerPathStat{}, nil
}

-func (f *fakeClient) ContainerLogs(_ context.Context, container string, options container.LogsOptions) (io.ReadCloser, error) {
+func (f *fakeClient) ContainerLogs(_ context.Context, containerID string, options container.LogsOptions) (io.ReadCloser, error) {
if f.logFunc != nil {
-return f.logFunc(container, options)
+return f.logFunc(containerID, options)
}
return nil, nil
}

@@ -129,23 +129,23 @@ func (f *fakeClient) ClientVersion() string {
return f.Version
}

-func (f *fakeClient) ContainerWait(_ context.Context, container string, _ container.WaitCondition) (<-chan container.WaitResponse, <-chan error) {
+func (f *fakeClient) ContainerWait(_ context.Context, containerID string, _ container.WaitCondition) (<-chan container.WaitResponse, <-chan error) {
if f.waitFunc != nil {
-return f.waitFunc(container)
+return f.waitFunc(containerID)
}
return nil, nil
}

-func (f *fakeClient) ContainerStart(_ context.Context, container string, options container.StartOptions) error {
+func (f *fakeClient) ContainerStart(_ context.Context, containerID string, options container.StartOptions) error {
if f.containerStartFunc != nil {
-return f.containerStartFunc(container, options)
+return f.containerStartFunc(containerID, options)
}
return nil
}

-func (f *fakeClient) ContainerExport(_ context.Context, container string) (io.ReadCloser, error) {
+func (f *fakeClient) ContainerExport(_ context.Context, containerID string) (io.ReadCloser, error) {
if f.containerExportFunc != nil {
-return f.containerExportFunc(container)
+return f.containerExportFunc(containerID)
}
return nil, nil
}

@@ -157,9 +157,9 @@ func (f *fakeClient) ContainerExecResize(_ context.Context, id string, options c
return nil
}

-func (f *fakeClient) ContainerKill(ctx context.Context, container, signal string) error {
+func (f *fakeClient) ContainerKill(ctx context.Context, containerID, signal string) error {
if f.containerKillFunc != nil {
-return f.containerKillFunc(ctx, container, signal)
+return f.containerKillFunc(ctx, containerID, signal)
}
return nil
}

@@ -113,15 +113,15 @@ func runCreate(dockerCli command.Cli, flags *pflag.FlagSet, options *createOptio
}

// FIXME(thaJeztah): this is the only code-path that uses APIClient.ImageCreate. Rewrite this to use the regular "pull" code (or vice-versa).
-func pullImage(ctx context.Context, dockerCli command.Cli, image string, opts *createOptions) error {
-encodedAuth, err := command.RetrieveAuthTokenFromImage(dockerCli.ConfigFile(), image)
+func pullImage(ctx context.Context, dockerCli command.Cli, img string, options *createOptions) error {
+encodedAuth, err := command.RetrieveAuthTokenFromImage(dockerCli.ConfigFile(), img)
if err != nil {
return err
}

-responseBody, err := dockerCli.Client().ImageCreate(ctx, image, types.ImageCreateOptions{
+responseBody, err := dockerCli.Client().ImageCreate(ctx, img, types.ImageCreateOptions{
RegistryAuth: encodedAuth,
-Platform: opts.platform,
+Platform: options.platform,
})
if err != nil {
return err

@@ -129,7 +129,7 @@ func pullImage(ctx context.Context, dockerCli command.Cli, image string, opts *c
defer responseBody.Close()

out := dockerCli.Err()
-if opts.quiet {
+if options.quiet {
out = io.Discard
}
return jsonmessage.DisplayJSONMessagesToStream(responseBody, streams.NewOut(out), nil)

@@ -185,7 +185,7 @@ func newCIDFile(path string) (*cidFile, error) {
}

//nolint:gocyclo
-func createContainer(ctx context.Context, dockerCli command.Cli, containerCfg *containerConfig, opts *createOptions) (containerID string, err error) {
+func createContainer(ctx context.Context, dockerCli command.Cli, containerCfg *containerConfig, options *createOptions) (containerID string, err error) {
config := containerCfg.Config
hostConfig := containerCfg.HostConfig
networkingConfig := containerCfg.NetworkingConfig

@@ -211,7 +211,7 @@ func createContainer(ctx context.Context, dockerCli command.Cli, containerCfg *c
if named, ok := ref.(reference.Named); ok {
namedRef = reference.TagNameOnly(named)

-if taggedRef, ok := namedRef.(reference.NamedTagged); ok && !opts.untrusted {
+if taggedRef, ok := namedRef.(reference.NamedTagged); ok && !options.untrusted {
var err error
trustedRef, err = image.TrustedReference(ctx, dockerCli, taggedRef)
if err != nil {

@@ -222,7 +222,7 @@ func createContainer(ctx context.Context, dockerCli command.Cli, containerCfg *c
}

pullAndTagImage := func() error {
-if err := pullImage(ctx, dockerCli, config.Image, opts); err != nil {
+if err := pullImage(ctx, dockerCli, config.Image, options); err != nil {
return err
}
if taggedRef, ok := namedRef.(reference.NamedTagged); ok && trustedRef != nil {

@@ -236,15 +236,15 @@ func createContainer(ctx context.Context, dockerCli command.Cli, containerCfg *c
// create. It will produce an error if you try to set a platform on older API
// versions, so check the API version here to maintain backwards
// compatibility for CLI users.
-if opts.platform != "" && versions.GreaterThanOrEqualTo(dockerCli.Client().ClientVersion(), "1.41") {
-p, err := platforms.Parse(opts.platform)
+if options.platform != "" && versions.GreaterThanOrEqualTo(dockerCli.Client().ClientVersion(), "1.41") {
+p, err := platforms.Parse(options.platform)
if err != nil {
return "", errors.Wrap(err, "error parsing specified platform")
}
platform = &p
}

-if opts.pull == PullImageAlways {
+if options.pull == PullImageAlways {
if err := pullAndTagImage(); err != nil {
return "", err
}

@@ -252,11 +252,11 @@ func createContainer(ctx context.Context, dockerCli command.Cli, containerCfg *c
hostConfig.ConsoleSize[0], hostConfig.ConsoleSize[1] = dockerCli.Out().GetTtySize()

-response, err := dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, platform, opts.name)
+response, err := dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, platform, options.name)
if err != nil {
// Pull image if it does not exist locally and we have the PullImageMissing option. Default behavior.
-if errdefs.IsNotFound(err) && namedRef != nil && opts.pull == PullImageMissing {
-if !opts.quiet {
+if errdefs.IsNotFound(err) && namedRef != nil && options.pull == PullImageMissing {
+if !options.quiet {
// we don't want to write to stdout anything apart from container.ID
fmt.Fprintf(dockerCli.Err(), "Unable to find image '%s' locally\n", reference.FamiliarString(namedRef))
}

@@ -266,7 +266,7 @@ func createContainer(ctx context.Context, dockerCli command.Cli, containerCfg *c
}

var retryErr error
-response, retryErr = dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, platform, opts.name)
+response, retryErr = dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, platform, options.name)
if retryErr != nil {
return "", retryErr
}

@@ -223,7 +223,7 @@ func TestNewCreateCommandWithContentTrustErrors(t *testing.T) {
}
for _, tc := range testCases {
tc := tc
-cli := test.NewFakeCli(&fakeClient{
+fakeCLI := test.NewFakeCli(&fakeClient{
createContainerFunc: func(config *container.Config,
hostConfig *container.HostConfig,
networkingConfig *network.NetworkingConfig,

@@ -233,8 +233,8 @@ func TestNewCreateCommandWithContentTrustErrors(t *testing.T) {
return container.CreateResponse{}, fmt.Errorf("shouldn't try to pull image")
},
}, test.EnableContentTrust)
-cli.SetNotaryClient(tc.notaryFunc)
-cmd := NewCreateCommand(cli)
+fakeCLI.SetNotaryClient(tc.notaryFunc)
+cmd := NewCreateCommand(fakeCLI)
cmd.SetOut(io.Discard)
cmd.SetArgs(tc.args)
err := cmd.Execute()

@@ -323,7 +323,7 @@ func TestCreateContainerWithProxyConfig(t *testing.T) {
}
sort.Strings(expected)

-cli := test.NewFakeCli(&fakeClient{
+fakeCLI := test.NewFakeCli(&fakeClient{
createContainerFunc: func(config *container.Config,
hostConfig *container.HostConfig,
networkingConfig *network.NetworkingConfig,

@@ -335,7 +335,7 @@ func TestCreateContainerWithProxyConfig(t *testing.T) {
return container.CreateResponse{}, nil
},
})
-cli.SetConfigFile(&configfile.ConfigFile{
+fakeCLI.SetConfigFile(&configfile.ConfigFile{
Proxies: map[string]configfile.ProxyConfig{
"default": {
HTTPProxy: "httpProxy",

@@ -346,7 +346,7 @@ func TestCreateContainerWithProxyConfig(t *testing.T) {
},
},
})
-cmd := NewCreateCommand(cli)
+cmd := NewCreateCommand(fakeCLI)
cmd.SetOut(io.Discard)
cmd.SetArgs([]string{"image:tag"})
err := cmd.Execute()

@@ -262,8 +262,8 @@ func TestNewExecCommandErrors(t *testing.T) {
},
}
for _, tc := range testCases {
-cli := test.NewFakeCli(&fakeClient{inspectFunc: tc.containerInspectFunc})
-cmd := NewExecCommand(cli)
+fakeCLI := test.NewFakeCli(&fakeClient{inspectFunc: tc.containerInspectFunc})
+cmd := NewExecCommand(fakeCLI)
cmd.SetOut(io.Discard)
cmd.SetArgs(tc.args)
assert.ErrorContains(t, cmd.Execute(), tc.expectedError)

@@ -29,7 +29,7 @@ type psOptions struct {
}

// NewPsCommand creates a new cobra.Command for `docker ps`
-func NewPsCommand(dockerCli command.Cli) *cobra.Command {
+func NewPsCommand(dockerCLI command.Cli) *cobra.Command {
options := psOptions{filter: opts.NewFilterOpt()}

cmd := &cobra.Command{

@@ -38,7 +38,7 @@ func NewPsCommand(dockerCli command.Cli) *cobra.Command {
Args: cli.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
options.sizeChanged = cmd.Flags().Changed("size")
-return runPs(dockerCli, &options)
+return runPs(dockerCLI, &options)
},
Annotations: map[string]string{
"category-top": "3",

@@ -61,28 +61,28 @@ func NewPsCommand(dockerCli command.Cli) *cobra.Command {
return cmd
}

-func newListCommand(dockerCli command.Cli) *cobra.Command {
-cmd := *NewPsCommand(dockerCli)
+func newListCommand(dockerCLI command.Cli) *cobra.Command {
+cmd := *NewPsCommand(dockerCLI)
cmd.Aliases = []string{"ps", "list"}
cmd.Use = "ls [OPTIONS]"
return &cmd
}

-func buildContainerListOptions(opts *psOptions) (*container.ListOptions, error) {
-options := &container.ListOptions{
-All: opts.all,
-Limit: opts.last,
-Size: opts.size,
-Filters: opts.filter.Value(),
+func buildContainerListOptions(options *psOptions) (*container.ListOptions, error) {
+listOptions := &container.ListOptions{
+All: options.all,
+Limit: options.last,
+Size: options.size,
+Filters: options.filter.Value(),
}

-if opts.nLatest && opts.last == -1 {
-options.Limit = 1
+if options.nLatest && options.last == -1 {
+listOptions.Limit = 1
}

// always validate template when `--format` is used, for consistency
-if len(opts.format) > 0 {
-tmpl, err := templates.NewParse("", opts.format)
+if len(options.format) > 0 {
+tmpl, err := templates.NewParse("", options.format)
if err != nil {
return nil, errors.Wrap(err, "failed to parse template")
}

@@ -97,7 +97,7 @@ func buildContainerListOptions(opts *psOptions) (*container.ListOptions, error)
// if `size` was not explicitly set to false (with `--size=false`)
// and `--quiet` is not set, request size if the template requires it
-if !opts.quiet && !options.Size && !opts.sizeChanged {
+if !options.quiet && !listOptions.Size && !options.sizeChanged {
// The --size option isn't set, but .Size may be used in the template.
// Parse and execute the given template to detect if the .Size field is
// used. If it is, then automatically enable the --size option. See #24696

@@ -106,22 +106,22 @@ func buildContainerListOptions(opts *psOptions) (*container.ListOptions, error)
// because calculating the size is a costly operation.

if _, ok := optionsProcessor.FieldsUsed["Size"]; ok {
-options.Size = true
+listOptions.Size = true
}
}
}

-return options, nil
+return listOptions, nil
}

-func runPs(dockerCli command.Cli, options *psOptions) error {
+func runPs(dockerCLI command.Cli, options *psOptions) error {
ctx := context.Background()

if len(options.format) == 0 {
// load custom psFormat from CLI config (if any)
-options.format = dockerCli.ConfigFile().PsFormat
+options.format = dockerCLI.ConfigFile().PsFormat
} else if options.quiet {
-_, _ = dockerCli.Err().Write([]byte("WARNING: Ignoring custom format, because both --format and --quiet are set.\n"))
+_, _ = dockerCLI.Err().Write([]byte("WARNING: Ignoring custom format, because both --format and --quiet are set.\n"))
}

listOptions, err := buildContainerListOptions(options)

@@ -129,13 +129,13 @@ func runPs(dockerCli command.Cli, options *psOptions) error {
return err
}

-containers, err := dockerCli.Client().ContainerList(ctx, *listOptions)
+containers, err := dockerCLI.Client().ContainerList(ctx, *listOptions)
if err != nil {
return err
}

containerCtx := formatter.Context{
-Output: dockerCli.Out(),
+Output: dockerCLI.Out(),
Format: formatter.NewContainerFormat(options.format, options.quiet, listOptions.Size),
Trunc: !options.noTrunc,
}

@@ -118,14 +118,14 @@ func runRun(dockerCli command.Cli, flags *pflag.FlagSet, ropts *runOptions, copt
}

//nolint:gocyclo
-func runContainer(dockerCli command.Cli, opts *runOptions, copts *containerOptions, containerCfg *containerConfig) error {
+func runContainer(dockerCli command.Cli, runOpts *runOptions, copts *containerOptions, containerCfg *containerConfig) error {
config := containerCfg.Config
stdout, stderr := dockerCli.Out(), dockerCli.Err()
apiClient := dockerCli.Client()

config.ArgsEscaped = false

-if !opts.detach {
+if !runOpts.detach {
if err := dockerCli.In().CheckTty(config.AttachStdin, config.Tty); err != nil {
return err
}

@@ -143,12 +143,12 @@ func runContainer(dockerCli command.Cli, opts *runOptions, copts *containerOptio
ctx, cancelFun := context.WithCancel(context.Background())
defer cancelFun()

-containerID, err := createContainer(ctx, dockerCli, containerCfg, &opts.createOptions)
+containerID, err := createContainer(ctx, dockerCli, containerCfg, &runOpts.createOptions)
if err != nil {
reportError(stderr, "run", err.Error(), true)
return runStartContainerErr(err)
}
-if opts.sigProxy {
+if runOpts.sigProxy {
sigc := notifyAllSignals()
go ForwardAllSignals(ctx, apiClient, containerID, sigc)
defer signal.StopCatch(sigc)

@@ -169,8 +169,8 @@ func runContainer(dockerCli command.Cli, opts *runOptions, copts *containerOptio
attach := config.AttachStdin || config.AttachStdout || config.AttachStderr
if attach {
detachKeys := dockerCli.ConfigFile().DetachKeys
-if opts.detachKeys != "" {
-detachKeys = opts.detachKeys
+if runOpts.detachKeys != "" {
+detachKeys = runOpts.detachKeys
}

closeFn, err := attachContainer(ctx, dockerCli, containerID, &errCh, config, container.AttachOptions{

@@ -18,7 +18,7 @@ import (
)

func TestRunLabel(t *testing.T) {
-cli := test.NewFakeCli(&fakeClient{
+fakeCLI := test.NewFakeCli(&fakeClient{
createContainerFunc: func(_ *container.Config, _ *container.HostConfig, _ *network.NetworkingConfig, _ *specs.Platform, _ string) (container.CreateResponse, error) {
return container.CreateResponse{
ID: "id",

@@ -26,7 +26,7 @@ func TestRunLabel(t *testing.T) {
},
Version: "1.36",
})
-cmd := NewRunCommand(cli)
+cmd := NewRunCommand(fakeCLI)
cmd.SetArgs([]string{"--detach=true", "--label", "foo", "busybox"})
assert.NilError(t, cmd.Execute())
}

@@ -58,7 +58,7 @@ func TestRunCommandWithContentTrustErrors(t *testing.T) {
},
}
for _, tc := range testCases {
-cli := test.NewFakeCli(&fakeClient{
+fakeCLI := test.NewFakeCli(&fakeClient{
createContainerFunc: func(config *container.Config,
hostConfig *container.HostConfig,
networkingConfig *network.NetworkingConfig,

@@ -68,13 +68,13 @@ func TestRunCommandWithContentTrustErrors(t *testing.T) {
return container.CreateResponse{}, fmt.Errorf("shouldn't try to pull image")
},
}, test.EnableContentTrust)
-cli.SetNotaryClient(tc.notaryFunc)
-cmd := NewRunCommand(cli)
+fakeCLI.SetNotaryClient(tc.notaryFunc)
+cmd := NewRunCommand(fakeCLI)
cmd.SetArgs(tc.args)
cmd.SetOut(io.Discard)
err := cmd.Execute()
assert.Assert(t, err != nil)
-assert.Assert(t, is.Contains(cli.ErrBuffer().String(), tc.expectedError))
+assert.Assert(t, is.Contains(fakeCLI.ErrBuffer().String(), tc.expectedError))
}
}

@@ -16,7 +16,7 @@ import (
)

// resizeTtyTo resizes tty to specific height and width
-func resizeTtyTo(ctx context.Context, client client.ContainerAPIClient, id string, height, width uint, isExec bool) error {
+func resizeTtyTo(ctx context.Context, apiClient client.ContainerAPIClient, id string, height, width uint, isExec bool) error {
if height == 0 && width == 0 {
return nil
}

@@ -28,9 +28,9 @@ func resizeTtyTo(ctx context.Context, client client.ContainerAPIClient, id strin
var err error
if isExec {
-err = client.ContainerExecResize(ctx, id, options)
+err = apiClient.ContainerExecResize(ctx, id, options)
} else {
-err = client.ContainerResize(ctx, id, options)
+err = apiClient.ContainerResize(ctx, id, options)
}

if err != nil {

@@ -127,7 +127,7 @@ func legacyWaitExitOrRemoved(ctx context.Context, apiClient client.APIClient, co
return statusChan
}

-func parallelOperation(ctx context.Context, containers []string, op func(ctx context.Context, container string) error) chan error {
+func parallelOperation(ctx context.Context, containers []string, op func(ctx context.Context, containerID string) error) chan error {
if len(containers) == 0 {
return nil
}

@@ -36,7 +36,7 @@ func longCreateDescription() string {
return buf.String()
}

-func newCreateCommand(dockerCli command.Cli) *cobra.Command {
+func newCreateCommand(dockerCLI command.Cli) *cobra.Command {
opts := &CreateOptions{}
cmd := &cobra.Command{
Use: "create [OPTIONS] CONTEXT",

@@ -44,7 +44,7 @@ func newCreateCommand(dockerCli command.Cli) *cobra.Command {
Args: cli.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
opts.Name = args[0]
-return RunCreate(dockerCli, opts)
+return RunCreate(dockerCLI, opts)
},
Long: longCreateDescription(),
ValidArgsFunction: completion.NoComplete,

@@ -57,23 +57,23 @@ func newCreateCommand(dockerCli command.Cli) *cobra.Command {
}

// RunCreate creates a Docker context
-func RunCreate(cli command.Cli, o *CreateOptions) error {
-s := cli.ContextStore()
+func RunCreate(dockerCLI command.Cli, o *CreateOptions) error {
+s := dockerCLI.ContextStore()
err := checkContextNameForCreation(s, o.Name)
if err != nil {
return err
}
switch {
case o.From == "" && o.Docker == nil:
-err = createFromExistingContext(s, cli.CurrentContext(), o)
+err = createFromExistingContext(s, dockerCLI.CurrentContext(), o)
case o.From != "":
err = createFromExistingContext(s, o.From, o)
default:
err = createNewContext(s, o)
}
if err == nil {
-fmt.Fprintln(cli.Out(), o.Name)
-fmt.Fprintf(cli.Err(), "Successfully created context %q\n", o.Name)
+fmt.Fprintln(dockerCLI.Out(), o.Name)
+fmt.Fprintf(dockerCLI.Err(), "Successfully created context %q\n", o.Name)
}
return err
}

@@ -19,7 +19,7 @@ func makeFakeCli(t *testing.T, opts ...func(*test.FakeCli)) *test.FakeCli {
func() interface{} { return &command.DockerContext{} },
store.EndpointTypeGetter(docker.DockerEndpoint, func() interface{} { return &docker.EndpointMeta{} }),
)
-store := &command.ContextStoreWithDefault{
+contextStore := &command.ContextStoreWithDefault{
Store: store.New(dir, storeConfig),
Resolver: func() (*command.DefaultContext, error) {
return &command.DefaultContext{

@@ -42,7 +42,7 @@ func makeFakeCli(t *testing.T, opts ...func(*test.FakeCli)) *test.FakeCli {
for _, o := range opts {
o(result)
}
-result.SetContextStore(store)
+result.SetContextStore(contextStore)
return result
}

@@ -52,11 +52,11 @@ func newUpdateCommand(dockerCli command.Cli) *cobra.Command {
}

// RunUpdate updates a Docker context
-func RunUpdate(cli command.Cli, o *UpdateOptions) error {
+func RunUpdate(dockerCLI command.Cli, o *UpdateOptions) error {
if err := store.ValidateContextName(o.Name); err != nil {
return err
}
-s := cli.ContextStore()
+s := dockerCLI.ContextStore()
c, err := s.GetMetadata(o.Name)
if err != nil {
return err

@@ -93,8 +93,8 @@ func RunUpdate(cli command.Cli, o *UpdateOptions) error {
}
}

-fmt.Fprintln(cli.Out(), o.Name)
-fmt.Fprintf(cli.Err(), "Successfully updated context %q\n", o.Name)
+fmt.Fprintln(dockerCLI.Out(), o.Name)
+fmt.Fprintf(dockerCLI.Err(), "Successfully updated context %q\n", o.Name)
return nil
}

@@ -26,11 +26,11 @@ type ImageContext struct {
Digest bool
}

-func isDangling(image image.Summary) bool {
-if len(image.RepoTags) == 0 && len(image.RepoDigests) == 0 {
+func isDangling(img image.Summary) bool {
+if len(img.RepoTags) == 0 && len(img.RepoDigests) == 0 {
return true
}
-return len(image.RepoTags) == 1 && image.RepoTags[0] == "<none>:<none>" && len(image.RepoDigests) == 1 && image.RepoDigests[0] == "<none>@<none>"
+return len(img.RepoTags) == 1 && img.RepoTags[0] == "<none>:<none>" && len(img.RepoDigests) == 1 && img.RepoDigests[0] == "<none>@<none>"
}

// NewImageFormat returns a format for rendering an ImageContext

@@ -88,18 +88,18 @@ func needDigest(ctx ImageContext) bool {
}

func imageFormat(ctx ImageContext, images []image.Summary, format func(subContext SubContext) error) error {
-for _, image := range images {
+for _, img := range images {
formatted := []*imageContext{}
-if isDangling(image) {
+if isDangling(img) {
formatted = append(formatted, &imageContext{
trunc: ctx.Trunc,
-i: image,
+i: img,
repo: "<none>",
tag: "<none>",
digest: "<none>",
})
} else {
-formatted = imageFormatTaggedAndDigest(ctx, image)
+formatted = imageFormatTaggedAndDigest(ctx, img)
}
for _, imageCtx := range formatted {
if err := format(imageCtx); err != nil {

@@ -110,12 +110,12 @@ func imageFormat(ctx ImageContext, images []image.Summary, format func(subContex
return nil
}

-func imageFormatTaggedAndDigest(ctx ImageContext, image image.Summary) []*imageContext {
+func imageFormatTaggedAndDigest(ctx ImageContext, img image.Summary) []*imageContext {
repoTags := map[string][]string{}
repoDigests := map[string][]string{}
images := []*imageContext{}

-for _, refString := range image.RepoTags {
+for _, refString := range img.RepoTags {
ref, err := reference.ParseNormalizedNamed(refString)
if err != nil {
continue

@@ -125,7 +125,7 @@ func imageFormatTaggedAndDigest(ctx ImageContext, image image.Summary) []*imageC
repoTags[familiarRef] = append(repoTags[familiarRef], nt.Tag())
}
}
-for _, refString := range image.RepoDigests {
+for _, refString := range img.RepoDigests {
ref, err := reference.ParseNormalizedNamed(refString)
if err != nil {
continue

@@ -139,7 +139,7 @@ func imageFormatTaggedAndDigest(ctx ImageContext, image image.Summary) []*imageC
addImage := func(repo, tag, digest string) {
images = append(images, &imageContext{
trunc: ctx.Trunc,
-i: image,
+i: img,
repo: repo,
tag: tag,
digest: digest,

@@ -17,9 +17,9 @@ type IDResolver struct {
}

// New creates a new IDResolver.
-func New(client client.APIClient, noResolve bool) *IDResolver {
+func New(apiClient client.APIClient, noResolve bool) *IDResolver {
return &IDResolver{
-client: client,
+client: apiClient,
noResolve: noResolve,
cache: make(map[string]string),
}

@@ -25,8 +25,8 @@ func TestRunBuildDockerfileFromStdinWithCompress(t *testing.T) {
t.Setenv("DOCKER_BUILDKIT", "0")
buffer := new(bytes.Buffer)
fakeBuild := newFakeBuild()
-fakeImageBuild := func(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) {
-tee := io.TeeReader(context, buffer)
+fakeImageBuild := func(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) {
+tee := io.TeeReader(buildContext, buffer)
gzipReader, err := gzip.NewReader(tee)
assert.NilError(t, err)
return fakeBuild.build(ctx, gzipReader, options)

@@ -184,8 +184,8 @@ func newFakeBuild() *fakeBuild {
return &fakeBuild{}
}

-func (f *fakeBuild) build(_ context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) {
-f.context = tar.NewReader(context)
+func (f *fakeBuild) build(_ context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) {
+f.context = tar.NewReader(buildContext)
f.options = options
body := new(bytes.Buffer)
return types.ImageBuildResponse{Body: io.NopCloser(body)}, nil

@@ -30,9 +30,9 @@ type fakeClient struct {
imageBuildFunc func(context.Context, io.Reader, types.ImageBuildOptions) (types.ImageBuildResponse, error)
}

-func (cli *fakeClient) ImageTag(_ context.Context, image, ref string) error {
+func (cli *fakeClient) ImageTag(_ context.Context, img, ref string) error {
if cli.imageTagFunc != nil {
-return cli.imageTagFunc(image, ref)
+return cli.imageTagFunc(img, ref)
}
return nil
}

@@ -95,9 +95,9 @@ func (cli *fakeClient) ImageList(_ context.Context, options types.ImageListOptio
return []image.Summary{}, nil
}

-func (cli *fakeClient) ImageInspectWithRaw(_ context.Context, image string) (types.ImageInspect, []byte, error) {
+func (cli *fakeClient) ImageInspectWithRaw(_ context.Context, img string) (types.ImageInspect, []byte, error) {
if cli.imageInspectFunc != nil {
-return cli.imageInspectFunc(image)
+return cli.imageInspectFunc(img)
}
return types.ImageInspect{}, nil, nil
}

@@ -118,9 +118,9 @@ func (cli *fakeClient) ImageHistory(_ context.Context, img string) ([]image.Hist
return []image.HistoryResponseItem{{ID: img, Created: time.Now().Unix()}}, nil
}

-func (cli *fakeClient) ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) {
+func (cli *fakeClient) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) {
if cli.imageBuildFunc != nil {
-return cli.imageBuildFunc(ctx, context, options)
+return cli.imageBuildFunc(ctx, buildContext, options)
}
return types.ImageBuildResponse{Body: io.NopCloser(strings.NewReader(""))}, nil
}

@@ -54,7 +54,7 @@ func NewPullCommand(dockerCli command.Cli) *cobra.Command {
}

// RunPull performs a pull against the engine based on the specified options
-func RunPull(cli command.Cli, opts PullOptions) error {
+func RunPull(dockerCLI command.Cli, opts PullOptions) error {
distributionRef, err := reference.ParseNormalizedNamed(opts.remote)
switch {
case err != nil:

@@ -64,12 +64,12 @@ func RunPull(cli command.Cli, opts PullOptions) error {
case !opts.all && reference.IsNameOnly(distributionRef):
distributionRef = reference.TagNameOnly(distributionRef)
if tagged, ok := distributionRef.(reference.Tagged); ok && !opts.quiet {
-fmt.Fprintf(cli.Out(), "Using default tag: %s\n", tagged.Tag())
+fmt.Fprintf(dockerCLI.Out(), "Using default tag: %s\n", tagged.Tag())
}
}

ctx := context.Background()
-imgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, AuthResolver(cli), distributionRef.String())
+imgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, AuthResolver(dockerCLI), distributionRef.String())
if err != nil {
return err
}

@@ -77,9 +77,9 @@ func RunPull(cli command.Cli, opts PullOptions) error {
// Check if reference has a digest
_, isCanonical := distributionRef.(reference.Canonical)
if !opts.untrusted && !isCanonical {
-err = trustedPull(ctx, cli, imgRefAndAuth, opts)
+err = trustedPull(ctx, dockerCLI, imgRefAndAuth, opts)
} else {
-err = imagePullPrivileged(ctx, cli, imgRefAndAuth, opts)
+err = imagePullPrivileged(ctx, dockerCLI, imgRefAndAuth, opts)
}
if err != nil {
if strings.Contains(err.Error(), "when fetching 'plugin'") {

@@ -87,6 +87,6 @@ func RunPull(cli command.Cli, opts PullOptions) error {
}
return err
}
-fmt.Fprintln(cli.Out(), imgRefAndAuth.Reference().String())
+fmt.Fprintln(dockerCLI.Out(), imgRefAndAuth.Reference().String())
return nil
}

@@ -44,7 +44,7 @@ func TrustedPush(ctx context.Context, cli command.Cli, repoInfo *registry.Reposi
// PushTrustedReference pushes a canonical reference to the trust server.
//
//nolint:gocyclo
-func PushTrustedReference(streams command.Streams, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig registrytypes.AuthConfig, in io.Reader) error {
+func PushTrustedReference(ioStreams command.Streams, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig registrytypes.AuthConfig, in io.Reader) error {
// If it is a trusted push we would like to find the target entry which match the
// tag provided in the function and then do an AddTarget later.
target := &client.Target{}

@@ -83,14 +83,14 @@ func PushTrustedReference(streams command.Streams, repoInfo *registry.Repository
default:
// We want trust signatures to always take an explicit tag,
// otherwise it will act as an untrusted push.
-if err := jsonmessage.DisplayJSONMessagesToStream(in, streams.Out(), nil); err != nil {
+if err := jsonmessage.DisplayJSONMessagesToStream(in, ioStreams.Out(), nil); err != nil {
return err
}
-fmt.Fprintln(streams.Err(), "No tag specified, skipping trust metadata push")
+fmt.Fprintln(ioStreams.Err(), "No tag specified, skipping trust metadata push")
return nil
}

-if err := jsonmessage.DisplayJSONMessagesToStream(in, streams.Out(), handleTarget); err != nil {
+if err := jsonmessage.DisplayJSONMessagesToStream(in, ioStreams.Out(), handleTarget); err != nil {
return err
}

@@ -102,9 +102,9 @@ func PushTrustedReference(streams command.Streams, repoInfo *registry.Repository
return errors.Errorf("no targets found, please provide a specific tag in order to sign it")
}

-fmt.Fprintln(streams.Out(), "Signing and pushing trust metadata")
+fmt.Fprintln(ioStreams.Out(), "Signing and pushing trust metadata")

-repo, err := trust.GetNotaryRepository(streams.In(), streams.Out(), command.UserAgent(), repoInfo, &authConfig, "push", "pull")
+repo, err := trust.GetNotaryRepository(ioStreams.In(), ioStreams.Out(), command.UserAgent(), repoInfo, &authConfig, "push", "pull")
if err != nil {
return errors.Wrap(err, "error establishing connection to trust repository")
}

@@ -132,7 +132,7 @@ func PushTrustedReference(streams command.Streams, repoInfo *registry.Repository
if err := repo.Initialize([]string{rootKeyID}, data.CanonicalSnapshotRole); err != nil {
return trust.NotaryError(repoInfo.Name.Name(), err)
}
-fmt.Fprintf(streams.Out(), "Finished initializing %q\n", repoInfo.Name.Name())
+fmt.Fprintf(ioStreams.Out(), "Finished initializing %q\n", repoInfo.Name.Name())
err = repo.AddTarget(target, data.CanonicalTargetsRole)
case nil:
// already initialized and we have successfully downloaded the latest metadata

@@ -150,7 +150,7 @@ func PushTrustedReference(streams command.Streams, repoInfo *registry.Repository
return trust.NotaryError(repoInfo.Name.Name(), err)
}

-fmt.Fprintf(streams.Out(), "Successfully signed %s:%s\n", repoInfo.Name.Name(), tag)
+fmt.Fprintf(ioStreams.Out(), "Successfully signed %s:%s\n", repoInfo.Name.Name(), tag)
return nil
}

@@ -40,13 +40,13 @@ func TestManifestAnnotateError(t *testing.T) {
}

func TestManifestAnnotate(t *testing.T) {
-store := store.NewStore(t.TempDir())
+manifestStore := store.NewStore(t.TempDir())

cli := test.NewFakeCli(nil)
-cli.SetManifestStore(store)
+cli.SetManifestStore(manifestStore)
namedRef := ref(t, "alpine:3.0")
imageManifest := fullImageManifest(t, namedRef)
-err := store.Save(ref(t, "list:v1"), namedRef, imageManifest)
+err := manifestStore.Save(ref(t, "list:v1"), namedRef, imageManifest)
assert.NilError(t, err)

cmd := newAnnotateCommand(cli)

@@ -41,18 +41,18 @@ func TestManifestCreateErrors(t *testing.T) {
// create a manifest list, then overwrite it, and inspect to see if the old one is still there
func TestManifestCreateAmend(t *testing.T) {
-store := store.NewStore(t.TempDir())
+manifestStore := store.NewStore(t.TempDir())

cli := test.NewFakeCli(nil)
-cli.SetManifestStore(store)
+cli.SetManifestStore(manifestStore)

namedRef := ref(t, "alpine:3.0")
imageManifest := fullImageManifest(t, namedRef)
-err := store.Save(ref(t, "list:v1"), namedRef, imageManifest)
+err := manifestStore.Save(ref(t, "list:v1"), namedRef, imageManifest)
assert.NilError(t, err)
namedRef = ref(t, "alpine:3.1")
imageManifest = fullImageManifest(t, namedRef)
-err = store.Save(ref(t, "list:v1"), namedRef, imageManifest)
+err = manifestStore.Save(ref(t, "list:v1"), namedRef, imageManifest)
assert.NilError(t, err)

cmd := newCreateListCommand(cli)

@@ -64,7 +64,7 @@ func TestManifestCreateAmend(t *testing.T) {
// make a new cli to clear the buffers
cli = test.NewFakeCli(nil)
-cli.SetManifestStore(store)
+cli.SetManifestStore(manifestStore)
inspectCmd := newInspectCommand(cli)
inspectCmd.SetArgs([]string{"example.com/list:v1"})
assert.NilError(t, inspectCmd.Execute())

@@ -75,13 +75,13 @@ func TestManifestCreateAmend(t *testing.T) {
// attempt to overwrite a saved manifest and get refused
func TestManifestCreateRefuseAmend(t *testing.T) {
-store := store.NewStore(t.TempDir())
+manifestStore := store.NewStore(t.TempDir())

cli := test.NewFakeCli(nil)
-cli.SetManifestStore(store)
+cli.SetManifestStore(manifestStore)
namedRef := ref(t, "alpine:3.0")
imageManifest := fullImageManifest(t, namedRef)
-err := store.Save(ref(t, "list:v1"), namedRef, imageManifest)
+err := manifestStore.Save(ref(t, "list:v1"), namedRef, imageManifest)
assert.NilError(t, err)

cmd := newCreateListCommand(cli)

@@ -93,10 +93,10 @@ func TestManifestCreateRefuseAmend(t *testing.T) {
// attempt to make a manifest list without valid images
func TestManifestCreateNoManifest(t *testing.T) {
-store := store.NewStore(t.TempDir())
+manifestStore := store.NewStore(t.TempDir())

cli := test.NewFakeCli(nil)
-cli.SetManifestStore(store)
+cli.SetManifestStore(manifestStore)
cli.SetRegistryClient(&fakeRegistryClient{
getManifestFunc: func(_ context.Context, ref reference.Named) (manifesttypes.ImageManifest, error) {
return manifesttypes.ImageManifest{}, errors.Errorf("No such image: %v", ref)

@@ -11,22 +11,22 @@ import (
// create two manifest lists and remove them both
func TestRmSeveralManifests(t *testing.T) {
-store := store.NewStore(t.TempDir())
+manifestStore := store.NewStore(t.TempDir())

cli := test.NewFakeCli(nil)
-cli.SetManifestStore(store)
+cli.SetManifestStore(manifestStore)

list1 := ref(t, "first:1")
namedRef := ref(t, "alpine:3.0")
-err := store.Save(list1, namedRef, fullImageManifest(t, namedRef))
+err := manifestStore.Save(list1, namedRef, fullImageManifest(t, namedRef))
assert.NilError(t, err)
namedRef = ref(t, "alpine:3.1")
-err = store.Save(list1, namedRef, fullImageManifest(t, namedRef))
+err = manifestStore.Save(list1, namedRef, fullImageManifest(t, namedRef))
assert.NilError(t, err)

list2 := ref(t, "second:2")
namedRef = ref(t, "alpine:3.2")
-err = store.Save(list2, namedRef, fullImageManifest(t, namedRef))
+err = manifestStore.Save(list2, namedRef, fullImageManifest(t, namedRef))
assert.NilError(t, err)

cmd := newRmManifestListCommand(cli)

@@ -43,14 +43,14 @@ func TestRmSeveralManifests(t *testing.T) {
// attempt to remove a manifest list which was never created
func TestRmManifestNotCreated(t *testing.T) {
-store := store.NewStore(t.TempDir())
+manifestStore := store.NewStore(t.TempDir())

cli := test.NewFakeCli(nil)
-cli.SetManifestStore(store)
+cli.SetManifestStore(manifestStore)

list2 := ref(t, "second:2")
namedRef := ref(t, "alpine:3.2")
-err := store.Save(list2, namedRef, fullImageManifest(t, namedRef))
+err := manifestStore.Save(list2, namedRef, fullImageManifest(t, namedRef))
assert.NilError(t, err)

cmd := newRmManifestListCommand(cli)

@@ -78,9 +78,9 @@ func runConnect(dockerCli command.Cli, options connectOptions) error {
return client.NetworkConnect(context.Background(), options.network, options.container, epConfig)
}

-func convertDriverOpt(opts []string) (map[string]string, error) {
+func convertDriverOpt(options []string) (map[string]string, error) {
driverOpt := make(map[string]string)
-for _, opt := range opts {
+for _, opt := range options {
k, v, ok := strings.Cut(opt, "=")
// TODO(thaJeztah): we should probably not accept whitespace here (both for key and value).
k = strings.TrimSpace(k)

@@ -346,13 +346,13 @@ func (ctx *serviceInspectContext) TaskPlacementPreferences() []string {
if ctx.Service.Spec.TaskTemplate.Placement == nil {
return nil
}
-var strings []string
+var out []string
for _, pref := range ctx.Service.Spec.TaskTemplate.Placement.Preferences {
if pref.Spread != nil {
-strings = append(strings, "spread="+pref.Spread.SpreadDescriptor)
+out = append(out, "spread="+pref.Spread.SpreadDescriptor)
}
}
-return strings
+return out
}

func (ctx *serviceInspectContext) MaxReplicas() uint64 {

@@ -22,7 +22,7 @@ type listOptions struct {
filter opts.FilterOpt
}

-func newListCommand(dockerCli command.Cli) *cobra.Command {
+func newListCommand(dockerCLI command.Cli) *cobra.Command {
options := listOptions{filter: opts.NewFilterOpt()}

cmd := &cobra.Command{

@@ -31,7 +31,7 @@ func newListCommand(dockerCli command.Cli) *cobra.Command {
Short: "List services",
Args: cli.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
-return runList(dockerCli, options)
+return runList(dockerCLI, options)
},
ValidArgsFunction: completion.NoComplete,
}

@@ -44,20 +44,20 @@ func newListCommand(dockerCli command.Cli) *cobra.Command {
return cmd
}

-func runList(dockerCli command.Cli, opts listOptions) error {
+func runList(dockerCLI command.Cli, options listOptions) error {
var (
-apiClient = dockerCli.Client()
+apiClient = dockerCLI.Client()
ctx       = context.Background()
err       error
)

listOpts := types.ServiceListOptions{
-Filters: opts.filter.Value(),
+Filters: options.filter.Value(),
// When not running "quiet", also get service status (number of running
// and desired tasks). Note that this is only supported on API v1.41 and
// up; older API versions ignore this option, and we will have to collect
// the information manually below.
-Status: !opts.quiet,
+Status: !options.quiet,
}

services, err := apiClient.ServiceList(ctx, listOpts)

@@ -84,18 +84,18 @@ func runList(dockerCli command.Cli, opts listOptions) error {
}
}

-format := opts.format
+format := options.format
if len(format) == 0 {
-if len(dockerCli.ConfigFile().ServicesFormat) > 0 && !opts.quiet {
-format = dockerCli.ConfigFile().ServicesFormat
+if len(dockerCLI.ConfigFile().ServicesFormat) > 0 && !options.quiet {
+format = dockerCLI.ConfigFile().ServicesFormat
} else {
format = formatter.TableFormatKey
}
}

servicesCtx := formatter.Context{
-Output: dockerCli.Out(),
-Format: NewListFormat(format, opts.quiet),
+Output: dockerCLI.Out(),
+Format: NewListFormat(format, options.quiet),
}
return ListFormatWrite(servicesCtx, services)
}

@@ -181,12 +181,12 @@ type taskFormatter struct {
cache map[logContext]string
}

-func newTaskFormatter(client client.APIClient, opts *logsOptions, padding int) *taskFormatter {
+func newTaskFormatter(apiClient client.APIClient, opts *logsOptions, padding int) *taskFormatter {
return &taskFormatter{
-client: client,
+client: apiClient,
opts: opts,
padding: padding,
-r: idresolver.New(client, opts.noResolve),
+r: idresolver.New(apiClient, opts.noResolve),
cache: make(map[logContext]string),
}
}

@@ -82,17 +82,17 @@ type placementPrefOpts struct {
strings []string
}

func (opts *placementPrefOpts) String() string {
if len(opts.strings) == 0 {
func (o *placementPrefOpts) String() string {
if len(o.strings) == 0 {
return ""
}
return fmt.Sprintf("%v", opts.strings)
return fmt.Sprintf("%v", o.strings)
}

// Set validates the input value and adds it to the internal slices.
// Note: in the future strategies other than "spread", may be supported,
// as well as additional comma-separated options.
func (opts *placementPrefOpts) Set(value string) error {
func (o *placementPrefOpts) Set(value string) error {
strategy, arg, ok := strings.Cut(value, "=")
if !ok || strategy == "" {
return errors.New(`placement preference must be of the format "<strategy>=<arg>"`)
@@ -101,17 +101,17 @@ func (opts *placementPrefOpts) Set(value string) error {
return errors.Errorf("unsupported placement preference %s (only spread is supported)", strategy)
}

opts.prefs = append(opts.prefs, swarm.PlacementPreference{
o.prefs = append(o.prefs, swarm.PlacementPreference{
Spread: &swarm.SpreadOver{
SpreadDescriptor: arg,
},
})
opts.strings = append(opts.strings, value)
o.strings = append(o.strings, value)
return nil
}

// Type returns a string name for this Option type
func (opts *placementPrefOpts) Type() string {
func (o *placementPrefOpts) Type() string {
return "pref"
}

@@ -167,7 +167,7 @@ func updateConfigFromDefaults(defaultUpdateConfig *api.UpdateConfig) *swarm.Upda
}
}

func (opts updateOptions) updateConfig(flags *pflag.FlagSet) *swarm.UpdateConfig {
func (o updateOptions) updateConfig(flags *pflag.FlagSet) *swarm.UpdateConfig {
if !anyChanged(flags, flagUpdateParallelism, flagUpdateDelay, flagUpdateMonitor, flagUpdateFailureAction, flagUpdateMaxFailureRatio, flagUpdateOrder) {
return nil
}
@@ -175,28 +175,28 @@ func (opts updateOptions) updateConfig(flags *pflag.FlagSet) *swarm.UpdateConfig
updateConfig := updateConfigFromDefaults(defaults.Service.Update)

if flags.Changed(flagUpdateParallelism) {
updateConfig.Parallelism = opts.parallelism
updateConfig.Parallelism = o.parallelism
}
if flags.Changed(flagUpdateDelay) {
updateConfig.Delay = opts.delay
updateConfig.Delay = o.delay
}
if flags.Changed(flagUpdateMonitor) {
updateConfig.Monitor = opts.monitor
updateConfig.Monitor = o.monitor
}
if flags.Changed(flagUpdateFailureAction) {
updateConfig.FailureAction = opts.onFailure
updateConfig.FailureAction = o.onFailure
}
if flags.Changed(flagUpdateMaxFailureRatio) {
updateConfig.MaxFailureRatio = opts.maxFailureRatio.Value()
updateConfig.MaxFailureRatio = o.maxFailureRatio.Value()
}
if flags.Changed(flagUpdateOrder) {
updateConfig.Order = opts.order
updateConfig.Order = o.order
}

return updateConfig
}

func (opts updateOptions) rollbackConfig(flags *pflag.FlagSet) *swarm.UpdateConfig {
func (o updateOptions) rollbackConfig(flags *pflag.FlagSet) *swarm.UpdateConfig {
if !anyChanged(flags, flagRollbackParallelism, flagRollbackDelay, flagRollbackMonitor, flagRollbackFailureAction, flagRollbackMaxFailureRatio, flagRollbackOrder) {
return nil
}
@@ -204,22 +204,22 @@ func (opts updateOptions) rollbackConfig(flags *pflag.FlagSet) *swarm.UpdateConf
updateConfig := updateConfigFromDefaults(defaults.Service.Rollback)

if flags.Changed(flagRollbackParallelism) {
updateConfig.Parallelism = opts.parallelism
updateConfig.Parallelism = o.parallelism
}
if flags.Changed(flagRollbackDelay) {
updateConfig.Delay = opts.delay
updateConfig.Delay = o.delay
}
if flags.Changed(flagRollbackMonitor) {
updateConfig.Monitor = opts.monitor
updateConfig.Monitor = o.monitor
}
if flags.Changed(flagRollbackFailureAction) {
updateConfig.FailureAction = opts.onFailure
updateConfig.FailureAction = o.onFailure
}
if flags.Changed(flagRollbackMaxFailureRatio) {
updateConfig.MaxFailureRatio = opts.maxFailureRatio.Value()
updateConfig.MaxFailureRatio = o.maxFailureRatio.Value()
}
if flags.Changed(flagRollbackOrder) {
updateConfig.Order = opts.order
updateConfig.Order = o.order
}

return updateConfig
@@ -433,42 +433,42 @@ type healthCheckOptions struct {
noHealthcheck bool
}

func (opts *healthCheckOptions) toHealthConfig() (*container.HealthConfig, error) {
func (o *healthCheckOptions) toHealthConfig() (*container.HealthConfig, error) {
var healthConfig *container.HealthConfig
haveHealthSettings := opts.cmd != "" ||
opts.interval.Value() != nil ||
opts.timeout.Value() != nil ||
opts.startPeriod.Value() != nil ||
opts.startInterval.Value() != nil ||
opts.retries != 0
if opts.noHealthcheck {
haveHealthSettings := o.cmd != "" ||
o.interval.Value() != nil ||
o.timeout.Value() != nil ||
o.startPeriod.Value() != nil ||
o.startInterval.Value() != nil ||
o.retries != 0
if o.noHealthcheck {
if haveHealthSettings {
return nil, errors.Errorf("--%s conflicts with --health-* options", flagNoHealthcheck)
}
healthConfig = &container.HealthConfig{Test: []string{"NONE"}}
} else if haveHealthSettings {
var test []string
if opts.cmd != "" {
test = []string{"CMD-SHELL", opts.cmd}
if o.cmd != "" {
test = []string{"CMD-SHELL", o.cmd}
}
var interval, timeout, startPeriod, startInterval time.Duration
if ptr := opts.interval.Value(); ptr != nil {
if ptr := o.interval.Value(); ptr != nil {
interval = *ptr
}
if ptr := opts.timeout.Value(); ptr != nil {
if ptr := o.timeout.Value(); ptr != nil {
timeout = *ptr
}
if ptr := opts.startPeriod.Value(); ptr != nil {
if ptr := o.startPeriod.Value(); ptr != nil {
startPeriod = *ptr
}
if ptr := opts.startInterval.Value(); ptr != nil {
if ptr := o.startInterval.Value(); ptr != nil {
startInterval = *ptr
}
healthConfig = &container.HealthConfig{
Test: test,
Interval: interval,
Timeout: timeout,
Retries: opts.retries,
Retries: o.retries,
StartPeriod: startPeriod,
StartInterval: startInterval,
}
@@ -828,7 +828,7 @@ func addDetachFlag(flags *pflag.FlagSet, detach *bool) {

// addServiceFlags adds all flags that are common to both `create` and `update`.
// Any flags that are not common are added separately in the individual command
func addServiceFlags(flags *pflag.FlagSet, opts *serviceOptions, defaultFlagValues flagDefaults) {
func addServiceFlags(flags *pflag.FlagSet, options *serviceOptions, defaultFlagValues flagDefaults) {
flagDesc := func(flagName string, desc string) string {
if defaultValue, ok := defaultFlagValues[flagName]; ok {
return fmt.Sprintf("%s (default %v)", desc, defaultValue)
@@ -836,98 +836,98 @@ func addServiceFlags(flags *pflag.FlagSet, opts *serviceOptions, defaultFlagValu
return desc
}

addDetachFlag(flags, &opts.detach)
flags.BoolVarP(&opts.quiet, flagQuiet, "q", false, "Suppress progress output")
addDetachFlag(flags, &options.detach)
flags.BoolVarP(&options.quiet, flagQuiet, "q", false, "Suppress progress output")

flags.StringVarP(&opts.workdir, flagWorkdir, "w", "", "Working directory inside the container")
flags.StringVarP(&opts.user, flagUser, "u", "", "Username or UID (format: <name|uid>[:<group|gid>])")
flags.Var(&opts.credentialSpec, flagCredentialSpec, "Credential spec for managed service account (Windows only)")
flags.StringVarP(&options.workdir, flagWorkdir, "w", "", "Working directory inside the container")
flags.StringVarP(&options.user, flagUser, "u", "", "Username or UID (format: <name|uid>[:<group|gid>])")
flags.Var(&options.credentialSpec, flagCredentialSpec, "Credential spec for managed service account (Windows only)")
flags.SetAnnotation(flagCredentialSpec, "version", []string{"1.29"})
flags.StringVar(&opts.hostname, flagHostname, "", "Container hostname")
flags.StringVar(&options.hostname, flagHostname, "", "Container hostname")
flags.SetAnnotation(flagHostname, "version", []string{"1.25"})
flags.Var(&opts.entrypoint, flagEntrypoint, "Overwrite the default ENTRYPOINT of the image")
flags.Var(&opts.capAdd, flagCapAdd, "Add Linux capabilities")
flags.Var(&options.entrypoint, flagEntrypoint, "Overwrite the default ENTRYPOINT of the image")
flags.Var(&options.capAdd, flagCapAdd, "Add Linux capabilities")
flags.SetAnnotation(flagCapAdd, "version", []string{"1.41"})
flags.Var(&opts.capDrop, flagCapDrop, "Drop Linux capabilities")
flags.Var(&options.capDrop, flagCapDrop, "Drop Linux capabilities")
flags.SetAnnotation(flagCapDrop, "version", []string{"1.41"})

flags.Var(&opts.resources.limitCPU, flagLimitCPU, "Limit CPUs")
flags.Var(&opts.resources.limitMemBytes, flagLimitMemory, "Limit Memory")
flags.Var(&opts.resources.resCPU, flagReserveCPU, "Reserve CPUs")
flags.Var(&opts.resources.resMemBytes, flagReserveMemory, "Reserve Memory")
flags.Int64Var(&opts.resources.limitPids, flagLimitPids, 0, "Limit maximum number of processes (default 0 = unlimited)")
flags.Var(&options.resources.limitCPU, flagLimitCPU, "Limit CPUs")
flags.Var(&options.resources.limitMemBytes, flagLimitMemory, "Limit Memory")
flags.Var(&options.resources.resCPU, flagReserveCPU, "Reserve CPUs")
flags.Var(&options.resources.resMemBytes, flagReserveMemory, "Reserve Memory")
flags.Int64Var(&options.resources.limitPids, flagLimitPids, 0, "Limit maximum number of processes (default 0 = unlimited)")
flags.SetAnnotation(flagLimitPids, "version", []string{"1.41"})

flags.Var(&opts.stopGrace, flagStopGracePeriod, flagDesc(flagStopGracePeriod, "Time to wait before force killing a container (ns|us|ms|s|m|h)"))
flags.Var(&opts.replicas, flagReplicas, "Number of tasks")
flags.Var(&opts.maxConcurrent, flagConcurrent, "Number of job tasks to run concurrently (default equal to --replicas)")
flags.Var(&options.stopGrace, flagStopGracePeriod, flagDesc(flagStopGracePeriod, "Time to wait before force killing a container (ns|us|ms|s|m|h)"))
flags.Var(&options.replicas, flagReplicas, "Number of tasks")
flags.Var(&options.maxConcurrent, flagConcurrent, "Number of job tasks to run concurrently (default equal to --replicas)")
flags.SetAnnotation(flagConcurrent, "version", []string{"1.41"})
flags.Uint64Var(&opts.maxReplicas, flagMaxReplicas, defaultFlagValues.getUint64(flagMaxReplicas), "Maximum number of tasks per node (default 0 = unlimited)")
flags.Uint64Var(&options.maxReplicas, flagMaxReplicas, defaultFlagValues.getUint64(flagMaxReplicas), "Maximum number of tasks per node (default 0 = unlimited)")
flags.SetAnnotation(flagMaxReplicas, "version", []string{"1.40"})

flags.StringVar(&opts.restartPolicy.condition, flagRestartCondition, "", flagDesc(flagRestartCondition, `Restart when condition is met ("none", "on-failure", "any")`))
flags.Var(&opts.restartPolicy.delay, flagRestartDelay, flagDesc(flagRestartDelay, "Delay between restart attempts (ns|us|ms|s|m|h)"))
flags.Var(&opts.restartPolicy.maxAttempts, flagRestartMaxAttempts, flagDesc(flagRestartMaxAttempts, "Maximum number of restarts before giving up"))
flags.StringVar(&options.restartPolicy.condition, flagRestartCondition, "", flagDesc(flagRestartCondition, `Restart when condition is met ("none", "on-failure", "any")`))
flags.Var(&options.restartPolicy.delay, flagRestartDelay, flagDesc(flagRestartDelay, "Delay between restart attempts (ns|us|ms|s|m|h)"))
flags.Var(&options.restartPolicy.maxAttempts, flagRestartMaxAttempts, flagDesc(flagRestartMaxAttempts, "Maximum number of restarts before giving up"))

flags.Var(&opts.restartPolicy.window, flagRestartWindow, flagDesc(flagRestartWindow, "Window used to evaluate the restart policy (ns|us|ms|s|m|h)"))
flags.Var(&options.restartPolicy.window, flagRestartWindow, flagDesc(flagRestartWindow, "Window used to evaluate the restart policy (ns|us|ms|s|m|h)"))

flags.Uint64Var(&opts.update.parallelism, flagUpdateParallelism, defaultFlagValues.getUint64(flagUpdateParallelism), "Maximum number of tasks updated simultaneously (0 to update all at once)")
flags.DurationVar(&opts.update.delay, flagUpdateDelay, 0, flagDesc(flagUpdateDelay, "Delay between updates (ns|us|ms|s|m|h)"))
flags.DurationVar(&opts.update.monitor, flagUpdateMonitor, 0, flagDesc(flagUpdateMonitor, "Duration after each task update to monitor for failure (ns|us|ms|s|m|h)"))
flags.Uint64Var(&options.update.parallelism, flagUpdateParallelism, defaultFlagValues.getUint64(flagUpdateParallelism), "Maximum number of tasks updated simultaneously (0 to update all at once)")
flags.DurationVar(&options.update.delay, flagUpdateDelay, 0, flagDesc(flagUpdateDelay, "Delay between updates (ns|us|ms|s|m|h)"))
flags.DurationVar(&options.update.monitor, flagUpdateMonitor, 0, flagDesc(flagUpdateMonitor, "Duration after each task update to monitor for failure (ns|us|ms|s|m|h)"))
flags.SetAnnotation(flagUpdateMonitor, "version", []string{"1.25"})
flags.StringVar(&opts.update.onFailure, flagUpdateFailureAction, "", flagDesc(flagUpdateFailureAction, `Action on update failure ("pause", "continue", "rollback")`))
flags.Var(&opts.update.maxFailureRatio, flagUpdateMaxFailureRatio, flagDesc(flagUpdateMaxFailureRatio, "Failure rate to tolerate during an update"))
flags.StringVar(&options.update.onFailure, flagUpdateFailureAction, "", flagDesc(flagUpdateFailureAction, `Action on update failure ("pause", "continue", "rollback")`))
flags.Var(&options.update.maxFailureRatio, flagUpdateMaxFailureRatio, flagDesc(flagUpdateMaxFailureRatio, "Failure rate to tolerate during an update"))
flags.SetAnnotation(flagUpdateMaxFailureRatio, "version", []string{"1.25"})
flags.StringVar(&opts.update.order, flagUpdateOrder, "", flagDesc(flagUpdateOrder, `Update order ("start-first", "stop-first")`))
flags.StringVar(&options.update.order, flagUpdateOrder, "", flagDesc(flagUpdateOrder, `Update order ("start-first", "stop-first")`))
flags.SetAnnotation(flagUpdateOrder, "version", []string{"1.29"})

flags.Uint64Var(&opts.rollback.parallelism, flagRollbackParallelism, defaultFlagValues.getUint64(flagRollbackParallelism),
flags.Uint64Var(&options.rollback.parallelism, flagRollbackParallelism, defaultFlagValues.getUint64(flagRollbackParallelism),
"Maximum number of tasks rolled back simultaneously (0 to roll back all at once)")
flags.SetAnnotation(flagRollbackParallelism, "version", []string{"1.28"})
flags.DurationVar(&opts.rollback.delay, flagRollbackDelay, 0, flagDesc(flagRollbackDelay, "Delay between task rollbacks (ns|us|ms|s|m|h)"))
flags.DurationVar(&options.rollback.delay, flagRollbackDelay, 0, flagDesc(flagRollbackDelay, "Delay between task rollbacks (ns|us|ms|s|m|h)"))
flags.SetAnnotation(flagRollbackDelay, "version", []string{"1.28"})
flags.DurationVar(&opts.rollback.monitor, flagRollbackMonitor, 0, flagDesc(flagRollbackMonitor, "Duration after each task rollback to monitor for failure (ns|us|ms|s|m|h)"))
flags.DurationVar(&options.rollback.monitor, flagRollbackMonitor, 0, flagDesc(flagRollbackMonitor, "Duration after each task rollback to monitor for failure (ns|us|ms|s|m|h)"))
flags.SetAnnotation(flagRollbackMonitor, "version", []string{"1.28"})
flags.StringVar(&opts.rollback.onFailure, flagRollbackFailureAction, "", flagDesc(flagRollbackFailureAction, `Action on rollback failure ("pause", "continue")`))
flags.StringVar(&options.rollback.onFailure, flagRollbackFailureAction, "", flagDesc(flagRollbackFailureAction, `Action on rollback failure ("pause", "continue")`))
flags.SetAnnotation(flagRollbackFailureAction, "version", []string{"1.28"})
flags.Var(&opts.rollback.maxFailureRatio, flagRollbackMaxFailureRatio, flagDesc(flagRollbackMaxFailureRatio, "Failure rate to tolerate during a rollback"))
flags.Var(&options.rollback.maxFailureRatio, flagRollbackMaxFailureRatio, flagDesc(flagRollbackMaxFailureRatio, "Failure rate to tolerate during a rollback"))
flags.SetAnnotation(flagRollbackMaxFailureRatio, "version", []string{"1.28"})
flags.StringVar(&opts.rollback.order, flagRollbackOrder, "", flagDesc(flagRollbackOrder, `Rollback order ("start-first", "stop-first")`))
flags.StringVar(&options.rollback.order, flagRollbackOrder, "", flagDesc(flagRollbackOrder, `Rollback order ("start-first", "stop-first")`))
flags.SetAnnotation(flagRollbackOrder, "version", []string{"1.29"})

flags.StringVar(&opts.endpoint.mode, flagEndpointMode, defaultFlagValues.getString(flagEndpointMode), "Endpoint mode (vip or dnsrr)")
flags.StringVar(&options.endpoint.mode, flagEndpointMode, defaultFlagValues.getString(flagEndpointMode), "Endpoint mode (vip or dnsrr)")

flags.BoolVar(&opts.registryAuth, flagRegistryAuth, false, "Send registry authentication details to swarm agents")
flags.BoolVar(&opts.noResolveImage, flagNoResolveImage, false, "Do not query the registry to resolve image digest and supported platforms")
flags.BoolVar(&options.registryAuth, flagRegistryAuth, false, "Send registry authentication details to swarm agents")
flags.BoolVar(&options.noResolveImage, flagNoResolveImage, false, "Do not query the registry to resolve image digest and supported platforms")
flags.SetAnnotation(flagNoResolveImage, "version", []string{"1.30"})

flags.StringVar(&opts.logDriver.name, flagLogDriver, "", "Logging driver for service")
flags.Var(&opts.logDriver.opts, flagLogOpt, "Logging driver options")
flags.StringVar(&options.logDriver.name, flagLogDriver, "", "Logging driver for service")
flags.Var(&options.logDriver.opts, flagLogOpt, "Logging driver options")

flags.StringVar(&opts.healthcheck.cmd, flagHealthCmd, "", "Command to run to check health")
flags.StringVar(&options.healthcheck.cmd, flagHealthCmd, "", "Command to run to check health")
flags.SetAnnotation(flagHealthCmd, "version", []string{"1.25"})
flags.Var(&opts.healthcheck.interval, flagHealthInterval, "Time between running the check (ms|s|m|h)")
flags.Var(&options.healthcheck.interval, flagHealthInterval, "Time between running the check (ms|s|m|h)")
flags.SetAnnotation(flagHealthInterval, "version", []string{"1.25"})
flags.Var(&opts.healthcheck.timeout, flagHealthTimeout, "Maximum time to allow one check to run (ms|s|m|h)")
flags.Var(&options.healthcheck.timeout, flagHealthTimeout, "Maximum time to allow one check to run (ms|s|m|h)")
flags.SetAnnotation(flagHealthTimeout, "version", []string{"1.25"})
flags.IntVar(&opts.healthcheck.retries, flagHealthRetries, 0, "Consecutive failures needed to report unhealthy")
flags.IntVar(&options.healthcheck.retries, flagHealthRetries, 0, "Consecutive failures needed to report unhealthy")
flags.SetAnnotation(flagHealthRetries, "version", []string{"1.25"})
flags.Var(&opts.healthcheck.startPeriod, flagHealthStartPeriod, "Start period for the container to initialize before counting retries towards unstable (ms|s|m|h)")
flags.Var(&options.healthcheck.startPeriod, flagHealthStartPeriod, "Start period for the container to initialize before counting retries towards unstable (ms|s|m|h)")
flags.SetAnnotation(flagHealthStartPeriod, "version", []string{"1.29"})
flags.Var(&opts.healthcheck.startInterval, flagHealthStartInterval, "Time between running the check during the start period (ms|s|m|h)")
flags.Var(&options.healthcheck.startInterval, flagHealthStartInterval, "Time between running the check during the start period (ms|s|m|h)")
flags.SetAnnotation(flagHealthStartInterval, "version", []string{"1.44"})
flags.BoolVar(&opts.healthcheck.noHealthcheck, flagNoHealthcheck, false, "Disable any container-specified HEALTHCHECK")
flags.BoolVar(&options.healthcheck.noHealthcheck, flagNoHealthcheck, false, "Disable any container-specified HEALTHCHECK")
flags.SetAnnotation(flagNoHealthcheck, "version", []string{"1.25"})

flags.BoolVarP(&opts.tty, flagTTY, "t", false, "Allocate a pseudo-TTY")
flags.BoolVarP(&options.tty, flagTTY, "t", false, "Allocate a pseudo-TTY")
flags.SetAnnotation(flagTTY, "version", []string{"1.25"})

flags.BoolVar(&opts.readOnly, flagReadOnly, false, "Mount the container's root filesystem as read only")
flags.BoolVar(&options.readOnly, flagReadOnly, false, "Mount the container's root filesystem as read only")
flags.SetAnnotation(flagReadOnly, "version", []string{"1.28"})

flags.StringVar(&opts.stopSignal, flagStopSignal, "", "Signal to stop the container")
flags.StringVar(&options.stopSignal, flagStopSignal, "", "Signal to stop the container")
flags.SetAnnotation(flagStopSignal, "version", []string{"1.28"})
flags.StringVar(&opts.isolation, flagIsolation, "", "Service container isolation mode")
flags.StringVar(&options.isolation, flagIsolation, "", "Service container isolation mode")
flags.SetAnnotation(flagIsolation, "version", []string{"1.35"})
}

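For orientation, a hedged sketch (not part of this commit) of the container.HealthConfig that a set of --health-* flags roughly maps to via toHealthConfig above; the flag values below are invented examples.

package example

import (
	"time"

	"github.com/docker/docker/api/types/container"
)

// exampleHealthConfig mirrors roughly what
//   --health-cmd "curl -f http://localhost/ || exit 1" --health-interval 30s
//   --health-timeout 10s --health-retries 3
// would produce: the command is wrapped as "CMD-SHELL" and each duration flag
// fills the matching field.
func exampleHealthConfig() *container.HealthConfig {
	return &container.HealthConfig{
		Test:     []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"},
		Interval: 30 * time.Second,
		Timeout:  10 * time.Second,
		Retries:  3,
	}
}
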
@@ -12,7 +12,7 @@ import (

// ParseSecrets retrieves the secrets with the requested names and fills
// secret IDs into the secret references.
func ParseSecrets(client client.SecretAPIClient, requestedSecrets []*swarmtypes.SecretReference) ([]*swarmtypes.SecretReference, error) {
func ParseSecrets(apiClient client.SecretAPIClient, requestedSecrets []*swarmtypes.SecretReference) ([]*swarmtypes.SecretReference, error) {
if len(requestedSecrets) == 0 {
return []*swarmtypes.SecretReference{}, nil
}
@@ -34,7 +34,7 @@ func ParseSecrets(client client.SecretAPIClient, requestedSecrets []*swarmtypes.
args.Add("name", s.SecretName)
}

secrets, err := client.SecretList(ctx, types.SecretListOptions{
secrets, err := apiClient.SecretList(ctx, types.SecretListOptions{
Filters: args,
})
if err != nil {
@@ -65,7 +65,7 @@ func ParseSecrets(client client.SecretAPIClient, requestedSecrets []*swarmtypes.

// ParseConfigs retrieves the configs from the requested names and converts
// them to config references to use with the spec
func ParseConfigs(client client.ConfigAPIClient, requestedConfigs []*swarmtypes.ConfigReference) ([]*swarmtypes.ConfigReference, error) {
func ParseConfigs(apiClient client.ConfigAPIClient, requestedConfigs []*swarmtypes.ConfigReference) ([]*swarmtypes.ConfigReference, error) {
if len(requestedConfigs) == 0 {
return []*swarmtypes.ConfigReference{}, nil
}
@@ -115,7 +115,7 @@ func ParseConfigs(client client.ConfigAPIClient, requestedConfigs []*swarmtypes.
args.Add("name", s.ConfigName)
}

configs, err := client.ConfigList(ctx, types.ConfigListOptions{
configs, err := apiClient.ConfigList(ctx, types.ConfigListOptions{
Filters: args,
})
if err != nil {

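A short usage sketch (not part of this commit) for ParseSecrets as documented above: references are created by name and the function fills in the matching secret IDs. The surrounding package and wiring are hypothetical.

package example

import (
	servicecli "github.com/docker/cli/cli/command/service"
	swarmtypes "github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

// resolveSecretRefs looks up "db_password" by name; ParseSecrets fills in the
// SecretID of each reference so it can be embedded in a service spec.
func resolveSecretRefs(apiClient client.APIClient) ([]*swarmtypes.SecretReference, error) {
	refs := []*swarmtypes.SecretReference{{
		SecretName: "db_password",
		File: &swarmtypes.SecretReferenceFileTarget{
			Name: "db_password",
			UID:  "0",
			GID:  "0",
			Mode: 0o444,
		},
	}}
	return servicecli.ParseSecrets(apiClient, refs)
}
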
@@ -70,7 +70,7 @@ func terminalState(state swarm.TaskState) bool {
// ServiceProgress outputs progress information for convergence of a service.
//
//nolint:gocyclo
func ServiceProgress(ctx context.Context, client client.APIClient, serviceID string, progressWriter io.WriteCloser) error {
func ServiceProgress(ctx context.Context, apiClient client.APIClient, serviceID string, progressWriter io.WriteCloser) error {
defer progressWriter.Close()

progressOut := streamformatter.NewJSONProgressOutput(progressWriter, false)
@@ -84,7 +84,7 @@ func ServiceProgress(ctx context.Context, client client.APIClient, serviceID str
taskFilter.Add("_up-to-date", "true")

getUpToDateTasks := func() ([]swarm.Task, error) {
return client.TaskList(ctx, types.TaskListOptions{Filters: taskFilter})
return apiClient.TaskList(ctx, types.TaskListOptions{Filters: taskFilter})
}

var (
@@ -97,7 +97,7 @@ func ServiceProgress(ctx context.Context, client client.APIClient, serviceID str
)

for {
service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{})
service, _, err := apiClient.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{})
if err != nil {
return err
}
@@ -156,7 +156,7 @@ func ServiceProgress(ctx context.Context, client client.APIClient, serviceID str
return err
}

activeNodes, err := getActiveNodes(ctx, client)
activeNodes, err := getActiveNodes(ctx, apiClient)
if err != nil {
return err
}
@@ -218,8 +218,8 @@ func ServiceProgress(ctx context.Context, client client.APIClient, serviceID str
}
}

func getActiveNodes(ctx context.Context, client client.APIClient) (map[string]struct{}, error) {
nodes, err := client.NodeList(ctx, types.NodeListOptions{})
func getActiveNodes(ctx context.Context, apiClient client.APIClient) (map[string]struct{}, error) {
nodes, err := apiClient.NodeList(ctx, types.NodeListOptions{})
if err != nil {
return nil, err
}

@@ -52,18 +52,18 @@ func newPsCommand(dockerCli command.Cli) *cobra.Command {
}

func runPS(dockerCli command.Cli, options psOptions) error {
client := dockerCli.Client()
apiClient := dockerCli.Client()
ctx := context.Background()

filter, notfound, err := createFilter(ctx, client, options)
filter, notfound, err := createFilter(ctx, apiClient, options)
if err != nil {
return err
}
if err := updateNodeFilter(ctx, client, filter); err != nil {
if err := updateNodeFilter(ctx, apiClient, filter); err != nil {
return err
}

tasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: filter})
tasks, err := apiClient.TaskList(ctx, types.TaskListOptions{Filters: filter})
if err != nil {
return err
}
@@ -75,7 +75,7 @@ func runPS(dockerCli command.Cli, options psOptions) error {
if options.quiet {
options.noTrunc = true
}
if err := task.Print(ctx, dockerCli, tasks, idresolver.New(client, options.noResolve), !options.noTrunc, options.quiet, format); err != nil {
if err := task.Print(ctx, dockerCli, tasks, idresolver.New(apiClient, options.noResolve), !options.noTrunc, options.quiet, format); err != nil {
return err
}
if len(notfound) != 0 {
@@ -84,7 +84,7 @@ func runPS(dockerCli command.Cli, options psOptions) error {
return nil
}

func createFilter(ctx context.Context, client client.APIClient, options psOptions) (filters.Args, []string, error) {
func createFilter(ctx context.Context, apiClient client.APIClient, options psOptions) (filters.Args, []string, error) {
filter := options.filter.Value()

serviceIDFilter := filters.NewArgs()
@@ -93,11 +93,11 @@ func createFilter(ctx context.Context, client client.APIClient, options psOption
serviceIDFilter.Add("id", service)
serviceNameFilter.Add("name", service)
}
serviceByIDList, err := client.ServiceList(ctx, types.ServiceListOptions{Filters: serviceIDFilter})
serviceByIDList, err := apiClient.ServiceList(ctx, types.ServiceListOptions{Filters: serviceIDFilter})
if err != nil {
return filter, nil, err
}
serviceByNameList, err := client.ServiceList(ctx, types.ServiceListOptions{Filters: serviceNameFilter})
serviceByNameList, err := apiClient.ServiceList(ctx, types.ServiceListOptions{Filters: serviceNameFilter})
if err != nil {
return filter, nil, err
}
@@ -142,11 +142,11 @@ loop:
return filter, notfound, err
}

func updateNodeFilter(ctx context.Context, client client.APIClient, filter filters.Args) error {
func updateNodeFilter(ctx context.Context, apiClient client.APIClient, filter filters.Args) error {
if filter.Contains("node") {
nodeFilters := filter.Get("node")
for _, nodeFilter := range nodeFilters {
nodeReference, err := node.Reference(ctx, client, nodeFilter)
nodeReference, err := node.Reference(ctx, apiClient, nodeFilter)
if err != nil {
return err
}

@@ -224,8 +224,8 @@ func configFromName(name string) swarm.Config {
}
}

func namespaceFromFilters(filters filters.Args) string {
label := filters.Get("label")[0]
func namespaceFromFilters(fltrs filters.Args) string {
label := fltrs.Get("label")[0]
return strings.TrimPrefix(label, convert.LabelNamespace+"=")
}

@@ -46,8 +46,8 @@ func newConfigCommand(dockerCli command.Cli) *cobra.Command {

// outputConfig returns the merged and interpolated config file
func outputConfig(configFiles composetypes.ConfigDetails, skipInterpolation bool) (string, error) {
optsFunc := func(options *composeLoader.Options) {
options.SkipInterpolation = skipInterpolation
optsFunc := func(opts *composeLoader.Options) {
opts.SkipInterpolation = skipInterpolation
}
config, err := composeLoader.Load(configFiles, optsFunc)
if err != nil {

@@ -213,8 +213,8 @@ func configFromName(name string) swarm.Config {
}
}

func namespaceFromFilters(filters filters.Args) string {
label := filters.Get("label")[0]
func namespaceFromFilters(fltrs filters.Args) string {
label := fltrs.Get("label")[0]
return strings.TrimPrefix(label, convert.LabelNamespace+"=")
}

@@ -71,9 +71,9 @@ func (cli *fakeClient) SwarmLeave(context.Context, bool) error {
return nil
}

func (cli *fakeClient) SwarmUpdate(_ context.Context, _ swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error {
func (cli *fakeClient) SwarmUpdate(_ context.Context, _ swarm.Version, swarmSpec swarm.Spec, flags swarm.UpdateFlags) error {
if cli.swarmUpdateFunc != nil {
return cli.swarmUpdateFunc(swarm, flags)
return cli.swarmUpdateFunc(swarmSpec, flags)
}
return nil
}

@@ -216,38 +216,38 @@ func parseExternalCA(caSpec string) (*swarm.ExternalCA, error) {
return &externalCA, nil
}

func addSwarmCAFlags(flags *pflag.FlagSet, opts *swarmCAOptions) {
flags.DurationVar(&opts.nodeCertExpiry, flagCertExpiry, 90*24*time.Hour, "Validity period for node certificates (ns|us|ms|s|m|h)")
flags.Var(&opts.externalCA, flagExternalCA, "Specifications of one or more certificate signing endpoints")
func addSwarmCAFlags(flags *pflag.FlagSet, options *swarmCAOptions) {
flags.DurationVar(&options.nodeCertExpiry, flagCertExpiry, 90*24*time.Hour, "Validity period for node certificates (ns|us|ms|s|m|h)")
flags.Var(&options.externalCA, flagExternalCA, "Specifications of one or more certificate signing endpoints")
}

func addSwarmFlags(flags *pflag.FlagSet, opts *swarmOptions) {
flags.Int64Var(&opts.taskHistoryLimit, flagTaskHistoryLimit, 5, "Task history retention limit")
flags.DurationVar(&opts.dispatcherHeartbeat, flagDispatcherHeartbeat, 5*time.Second, "Dispatcher heartbeat period (ns|us|ms|s|m|h)")
flags.Uint64Var(&opts.maxSnapshots, flagMaxSnapshots, 0, "Number of additional Raft snapshots to retain")
func addSwarmFlags(flags *pflag.FlagSet, options *swarmOptions) {
flags.Int64Var(&options.taskHistoryLimit, flagTaskHistoryLimit, 5, "Task history retention limit")
flags.DurationVar(&options.dispatcherHeartbeat, flagDispatcherHeartbeat, 5*time.Second, "Dispatcher heartbeat period (ns|us|ms|s|m|h)")
flags.Uint64Var(&options.maxSnapshots, flagMaxSnapshots, 0, "Number of additional Raft snapshots to retain")
flags.SetAnnotation(flagMaxSnapshots, "version", []string{"1.25"})
flags.Uint64Var(&opts.snapshotInterval, flagSnapshotInterval, 10000, "Number of log entries between Raft snapshots")
flags.Uint64Var(&options.snapshotInterval, flagSnapshotInterval, 10000, "Number of log entries between Raft snapshots")
flags.SetAnnotation(flagSnapshotInterval, "version", []string{"1.25"})
addSwarmCAFlags(flags, &opts.swarmCAOptions)
addSwarmCAFlags(flags, &options.swarmCAOptions)
}

func (opts *swarmOptions) mergeSwarmSpec(spec *swarm.Spec, flags *pflag.FlagSet, caCert string) {
func (o *swarmOptions) mergeSwarmSpec(spec *swarm.Spec, flags *pflag.FlagSet, caCert string) {
if flags.Changed(flagTaskHistoryLimit) {
spec.Orchestration.TaskHistoryRetentionLimit = &opts.taskHistoryLimit
spec.Orchestration.TaskHistoryRetentionLimit = &o.taskHistoryLimit
}
if flags.Changed(flagDispatcherHeartbeat) {
spec.Dispatcher.HeartbeatPeriod = opts.dispatcherHeartbeat
spec.Dispatcher.HeartbeatPeriod = o.dispatcherHeartbeat
}
if flags.Changed(flagMaxSnapshots) {
spec.Raft.KeepOldSnapshots = &opts.maxSnapshots
spec.Raft.KeepOldSnapshots = &o.maxSnapshots
}
if flags.Changed(flagSnapshotInterval) {
spec.Raft.SnapshotInterval = opts.snapshotInterval
spec.Raft.SnapshotInterval = o.snapshotInterval
}
if flags.Changed(flagAutolock) {
spec.EncryptionConfig.AutoLockManagers = opts.autolock
spec.EncryptionConfig.AutoLockManagers = o.autolock
}
opts.mergeSwarmSpecCAFlags(spec, flags, caCert)
o.mergeSwarmSpecCAFlags(spec, flags, caCert)
}

type swarmCAOptions struct {
@@ -255,20 +255,20 @@ type swarmCAOptions struct {
externalCA ExternalCAOption
}

func (opts *swarmCAOptions) mergeSwarmSpecCAFlags(spec *swarm.Spec, flags *pflag.FlagSet, caCert string) {
func (o *swarmCAOptions) mergeSwarmSpecCAFlags(spec *swarm.Spec, flags *pflag.FlagSet, caCert string) {
if flags.Changed(flagCertExpiry) {
spec.CAConfig.NodeCertExpiry = opts.nodeCertExpiry
spec.CAConfig.NodeCertExpiry = o.nodeCertExpiry
}
if flags.Changed(flagExternalCA) {
spec.CAConfig.ExternalCAs = opts.externalCA.Value()
spec.CAConfig.ExternalCAs = o.externalCA.Value()
for _, ca := range spec.CAConfig.ExternalCAs {
ca.CACert = caCert
}
}
}

func (opts *swarmOptions) ToSpec(flags *pflag.FlagSet) swarm.Spec {
func (o *swarmOptions) ToSpec(flags *pflag.FlagSet) swarm.Spec {
var spec swarm.Spec
opts.mergeSwarmSpec(&spec, flags, "")
o.mergeSwarmSpec(&spec, flags, "")
return spec
}

@@ -464,7 +464,7 @@ func TestFormatInfo(t *testing.T) {
{
doc: "syntax",
template: "{{.badString}}",
expectedError: `template: :1:2: executing "" at <.badString>: can't evaluate field badString in type system.info`,
expectedError: `template: :1:2: executing "" at <.badString>: can't evaluate field badString in type system.dockerInfo`,
},
} {
tc := tc

@@ -38,18 +38,18 @@ func newInspectCommand(dockerCli command.Cli) *cobra.Command {
return cmd
}

func runInspect(dockerCli command.Cli, opts inspectOptions) error {
func runInspect(dockerCLI command.Cli, opts inspectOptions) error {
if opts.prettyPrint {
var err error

for index, remote := range opts.remotes {
if err = prettyPrintTrustInfo(dockerCli, remote); err != nil {
if err = prettyPrintTrustInfo(dockerCLI, remote); err != nil {
return err
}

// Additional separator between the inspection output of each image
if index < len(opts.remotes)-1 {
fmt.Fprint(dockerCli.Out(), "\n\n")
fmt.Fprint(dockerCLI.Out(), "\n\n")
}
}

@@ -57,14 +57,14 @@ func runInspect(dockerCli command.Cli, opts inspectOptions) error {
}

getRefFunc := func(ref string) (interface{}, []byte, error) {
i, err := getRepoTrustInfo(dockerCli, ref)
i, err := getRepoTrustInfo(dockerCLI, ref)
return nil, i, err
}
return inspect.Inspect(dockerCli.Out(), opts.remotes, "", getRefFunc)
return inspect.Inspect(dockerCLI.Out(), opts.remotes, "", getRefFunc)
}

func getRepoTrustInfo(cli command.Cli, remote string) ([]byte, error) {
signatureRows, adminRolesWithSigs, delegationRoles, err := lookupTrustInfo(cli, remote)
func getRepoTrustInfo(dockerCLI command.Cli, remote string) ([]byte, error) {
signatureRows, adminRolesWithSigs, delegationRoles, err := lookupTrustInfo(dockerCLI, remote)
if err != nil {
return []byte{}, err
}

@@ -19,14 +19,14 @@ type revokeOptions struct {
forceYes bool
}

func newRevokeCommand(dockerCli command.Cli) *cobra.Command {
func newRevokeCommand(dockerCLI command.Cli) *cobra.Command {
options := revokeOptions{}
cmd := &cobra.Command{
Use: "revoke [OPTIONS] IMAGE[:TAG]",
Short: "Remove trust for an image",
Args: cli.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
return revokeTrust(dockerCli, args[0], options)
return revokeTrust(dockerCLI, args[0], options)
},
}
flags := cmd.Flags()
@@ -34,9 +34,9 @@ func newRevokeCommand(dockerCli command.Cli) *cobra.Command {
return cmd
}

func revokeTrust(cli command.Cli, remote string, options revokeOptions) error {
func revokeTrust(dockerCLI command.Cli, remote string, options revokeOptions) error {
ctx := context.Background()
imgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, image.AuthResolver(cli), remote)
imgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, image.AuthResolver(dockerCLI), remote)
if err != nil {
return err
}
@@ -45,14 +45,14 @@ func revokeTrust(cli command.Cli, remote string, options revokeOptions) error {
return fmt.Errorf("cannot use a digest reference for IMAGE:TAG")
}
if imgRefAndAuth.Tag() == "" && !options.forceYes {
deleteRemote := command.PromptForConfirmation(os.Stdin, cli.Out(), fmt.Sprintf("Please confirm you would like to delete all signature data for %s?", remote))
deleteRemote := command.PromptForConfirmation(os.Stdin, dockerCLI.Out(), fmt.Sprintf("Please confirm you would like to delete all signature data for %s?", remote))
if !deleteRemote {
fmt.Fprintf(cli.Out(), "\nAborting action.\n")
fmt.Fprintf(dockerCLI.Out(), "\nAborting action.\n")
return nil
}
}

notaryRepo, err := cli.NotaryClient(imgRefAndAuth, trust.ActionsPushAndPull)
notaryRepo, err := dockerCLI.NotaryClient(imgRefAndAuth, trust.ActionsPushAndPull)
if err != nil {
return err
}
@@ -64,7 +64,7 @@ func revokeTrust(cli command.Cli, remote string, options revokeOptions) error {
if err := revokeSignature(notaryRepo, tag); err != nil {
return errors.Wrapf(err, "could not remove signature for %s", remote)
}
fmt.Fprintf(cli.Out(), "Successfully deleted signature for %s\n", remote)
fmt.Fprintf(dockerCLI.Out(), "Successfully deleted signature for %s\n", remote)
return nil
}

@@ -14,6 +14,7 @@ import (
"github.com/docker/cli/cli/trust"
"github.com/docker/docker/api/types"
registrytypes "github.com/docker/docker/api/types/registry"
apiclient "github.com/docker/docker/client"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/theupdateframework/notary/client"
@@ -25,7 +26,7 @@ type signOptions struct {
imageName string
}

func newSignCommand(dockerCli command.Cli) *cobra.Command {
func newSignCommand(dockerCLI command.Cli) *cobra.Command {
options := signOptions{}
cmd := &cobra.Command{
Use: "sign IMAGE:TAG",
@@ -33,7 +34,7 @@ func newSignCommand(dockerCli command.Cli) *cobra.Command {
Args: cli.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
options.imageName = args[0]
return runSignImage(dockerCli, options)
return runSignImage(dockerCLI, options)
},
}
flags := cmd.Flags()
@@ -41,10 +42,10 @@ func newSignCommand(dockerCli command.Cli) *cobra.Command {
return cmd
}

func runSignImage(cli command.Cli, options signOptions) error {
func runSignImage(dockerCLI command.Cli, options signOptions) error {
imageName := options.imageName
ctx := context.Background()
imgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, image.AuthResolver(cli), imageName)
imgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, image.AuthResolver(dockerCLI), imageName)
if err != nil {
return err
}
@@ -52,7 +53,7 @@ func runSignImage(cli command.Cli, options signOptions) error {
return err
}

notaryRepo, err := cli.NotaryClient(imgRefAndAuth, trust.ActionsPushAndPull)
notaryRepo, err := dockerCLI.NotaryClient(imgRefAndAuth, trust.ActionsPushAndPull)
if err != nil {
return trust.NotaryError(imgRefAndAuth.Reference().Name(), err)
}
@@ -66,7 +67,7 @@ func runSignImage(cli command.Cli, options signOptions) error {
switch err.(type) {
case client.ErrRepoNotInitialized, client.ErrRepositoryNotExist:
// before initializing a new repo, check that the image exists locally:
if err := checkLocalImageExistence(ctx, cli, imageName); err != nil {
if err := checkLocalImageExistence(ctx, dockerCLI.Client(), imageName); err != nil {
return err
}

@@ -75,25 +76,25 @@ func runSignImage(cli command.Cli, options signOptions) error {
return trust.NotaryError(imgRefAndAuth.Reference().Name(), err)
}

fmt.Fprintf(cli.Out(), "Created signer: %s\n", imgRefAndAuth.AuthConfig().Username)
fmt.Fprintf(cli.Out(), "Finished initializing signed repository for %s\n", imageName)
fmt.Fprintf(dockerCLI.Out(), "Created signer: %s\n", imgRefAndAuth.AuthConfig().Username)
fmt.Fprintf(dockerCLI.Out(), "Finished initializing signed repository for %s\n", imageName)
default:
return trust.NotaryError(imgRefAndAuth.RepoInfo().Name.Name(), err)
}
}
requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(cli, imgRefAndAuth.RepoInfo().Index, "push")
requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(dockerCLI, imgRefAndAuth.RepoInfo().Index, "push")
target, err := createTarget(notaryRepo, imgRefAndAuth.Tag())
if err != nil || options.local {
switch err := err.(type) {
// If the error is nil then the local flag is set
case client.ErrNoSuchTarget, client.ErrRepositoryNotExist, nil:
// Fail fast if the image doesn't exist locally
if err := checkLocalImageExistence(ctx, cli, imageName); err != nil {
if err := checkLocalImageExistence(ctx, dockerCLI.Client(), imageName); err != nil {
return err
}
fmt.Fprintf(cli.Err(), "Signing and pushing trust data for local image %s, may overwrite remote trust data\n", imageName)
fmt.Fprintf(dockerCLI.Err(), "Signing and pushing trust data for local image %s, may overwrite remote trust data\n", imageName)

authConfig := command.ResolveAuthConfig(cli.ConfigFile(), imgRefAndAuth.RepoInfo().Index)
authConfig := command.ResolveAuthConfig(dockerCLI.ConfigFile(), imgRefAndAuth.RepoInfo().Index)
encodedAuth, err := registrytypes.EncodeAuthConfig(authConfig)
if err != nil {
return err
@@ -102,12 +103,12 @@ func runSignImage(cli command.Cli, options signOptions) error {
RegistryAuth: encodedAuth,
PrivilegeFunc: requestPrivilege,
}
return image.TrustedPush(ctx, cli, imgRefAndAuth.RepoInfo(), imgRefAndAuth.Reference(), *imgRefAndAuth.AuthConfig(), options)
return image.TrustedPush(ctx, dockerCLI, imgRefAndAuth.RepoInfo(), imgRefAndAuth.Reference(), *imgRefAndAuth.AuthConfig(), options)
default:
return err
}
}
return signAndPublishToTarget(cli.Out(), imgRefAndAuth, notaryRepo, target)
return signAndPublishToTarget(dockerCLI.Out(), imgRefAndAuth, notaryRepo, target)
}

func signAndPublishToTarget(out io.Writer, imgRefAndAuth trust.ImageRefAndAuth, notaryRepo client.Repository, target client.Target) error {
@@ -140,8 +141,8 @@ func validateTag(imgRefAndAuth trust.ImageRefAndAuth) error {
return nil
}

func checkLocalImageExistence(ctx context.Context, cli command.Cli, imageName string) error {
_, _, err := cli.Client().ImageInspectWithRaw(ctx, imageName)
func checkLocalImageExistence(ctx context.Context, apiClient apiclient.APIClient, imageName string) error {
_, _, err := apiClient.ImageInspectWithRaw(ctx, imageName)
return err
}

@@ -27,7 +27,7 @@ type signerAddOptions struct {
repos []string
}

func newSignerAddCommand(dockerCli command.Cli) *cobra.Command {
func newSignerAddCommand(dockerCLI command.Cli) *cobra.Command {
var options signerAddOptions
cmd := &cobra.Command{
Use: "add OPTIONS NAME REPOSITORY [REPOSITORY...] ",
@@ -36,7 +36,7 @@ func newSignerAddCommand(dockerCli command.Cli) *cobra.Command {
RunE: func(cmd *cobra.Command, args []string) error {
options.signer = args[0]
options.repos = args[1:]
return addSigner(dockerCli, options)
return addSigner(dockerCLI, options)
},
}
flags := cmd.Flags()
@@ -47,7 +47,7 @@ func newSignerAddCommand(dockerCli command.Cli) *cobra.Command {

var validSignerName = regexp.MustCompile(`^[a-z0-9][a-z0-9\_\-]*$`).MatchString

func addSigner(cli command.Cli, options signerAddOptions) error {
func addSigner(dockerCLI command.Cli, options signerAddOptions) error {
signerName := options.signer
if !validSignerName(signerName) {
return fmt.Errorf("signer name \"%s\" must start with lowercase alphanumeric characters and can include \"-\" or \"_\" after the first character", signerName)
@@ -65,12 +65,12 @@ func addSigner(cli command.Cli, options signerAddOptions) error {
}
var errRepos []string
for _, repoName := range options.repos {
fmt.Fprintf(cli.Out(), "Adding signer \"%s\" to %s...\n", signerName, repoName)
if err := addSignerToRepo(cli, signerName, repoName, signerPubKeys); err != nil {
fmt.Fprintln(cli.Err(), err.Error()+"\n")
fmt.Fprintf(dockerCLI.Out(), "Adding signer \"%s\" to %s...\n", signerName, repoName)
if err := addSignerToRepo(dockerCLI, signerName, repoName, signerPubKeys); err != nil {
fmt.Fprintln(dockerCLI.Err(), err.Error()+"\n")
errRepos = append(errRepos, repoName)
} else {
fmt.Fprintf(cli.Out(), "Successfully added signer: %s to %s\n\n", signerName, repoName)
fmt.Fprintf(dockerCLI.Out(), "Successfully added signer: %s to %s\n\n", signerName, repoName)
}
}
if len(errRepos) > 0 {
@@ -79,14 +79,14 @@ func addSigner(cli command.Cli, options signerAddOptions) error {
return nil
}

func addSignerToRepo(cli command.Cli, signerName string, repoName string, signerPubKeys []data.PublicKey) error {
func addSignerToRepo(dockerCLI command.Cli, signerName string, repoName string, signerPubKeys []data.PublicKey) error {
ctx := context.Background()
imgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, image.AuthResolver(cli), repoName)
imgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, image.AuthResolver(dockerCLI), repoName)
if err != nil {
return err
}

notaryRepo, err := cli.NotaryClient(imgRefAndAuth, trust.ActionsPushAndPull)
notaryRepo, err := dockerCLI.NotaryClient(imgRefAndAuth, trust.ActionsPushAndPull)
if err != nil {
return trust.NotaryError(imgRefAndAuth.Reference().Name(), err)
}
@@ -94,11 +94,11 @@ func addSignerToRepo(cli command.Cli, signerName string, repoName string, signer
if _, err = notaryRepo.ListTargets(); err != nil {
switch err.(type) {
case client.ErrRepoNotInitialized, client.ErrRepositoryNotExist:
fmt.Fprintf(cli.Out(), "Initializing signed repository for %s...\n", repoName)
fmt.Fprintf(dockerCLI.Out(), "Initializing signed repository for %s...\n", repoName)
if err := getOrGenerateRootKeyAndInitRepo(notaryRepo); err != nil {
return trust.NotaryError(repoName, err)
}
fmt.Fprintf(cli.Out(), "Successfully initialized %q\n", repoName)
fmt.Fprintf(dockerCLI.Out(), "Successfully initialized %q\n", repoName)
default:
return trust.NotaryError(repoName, err)
}

@@ -39,12 +39,12 @@ func newSignerRemoveCommand(dockerCli command.Cli) *cobra.Command {
return cmd
}

func removeSigner(cli command.Cli, options signerRemoveOptions) error {
func removeSigner(dockerCLI command.Cli, options signerRemoveOptions) error {
var errRepos []string
for _, repo := range options.repos {
fmt.Fprintf(cli.Out(), "Removing signer \"%s\" from %s...\n", options.signer, repo)
if _, err := removeSingleSigner(cli, repo, options.signer, options.forceYes); err != nil {
fmt.Fprintln(cli.Err(), err.Error()+"\n")
fmt.Fprintf(dockerCLI.Out(), "Removing signer \"%s\" from %s...\n", options.signer, repo)
if _, err := removeSingleSigner(dockerCLI, repo, options.signer, options.forceYes); err != nil {
fmt.Fprintln(dockerCLI.Err(), err.Error()+"\n")
errRepos = append(errRepos, repo)
}
}
@@ -78,9 +78,9 @@ func isLastSignerForReleases(roleWithSig data.Role, allRoles []client.RoleWithSi

// removeSingleSigner attempts to remove a single signer and returns whether signer removal happened.
// The signer not being removed doesn't necessarily raise an error e.g. user choosing "No" when prompted for confirmation.
func removeSingleSigner(cli command.Cli, repoName, signerName string, forceYes bool) (bool, error) {
func removeSingleSigner(dockerCLI command.Cli, repoName, signerName string, forceYes bool) (bool, error) {
ctx := context.Background()
imgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, image.AuthResolver(cli), repoName)
imgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, image.AuthResolver(dockerCLI), repoName)
if err != nil {
return false, err
}
@@ -89,7 +89,7 @@ func removeSingleSigner(cli command.Cli, repoName, signerName string, forceYes b
if signerDelegation == releasesRoleTUFName {
return false, errors.Errorf("releases is a reserved keyword and cannot be removed")
}
notaryRepo, err := cli.NotaryClient(imgRefAndAuth, trust.ActionsPushAndPull)
notaryRepo, err := dockerCLI.NotaryClient(imgRefAndAuth, trust.ActionsPushAndPull)
if err != nil {
return false, trust.NotaryError(imgRefAndAuth.Reference().Name(), err)
}
@@ -112,14 +112,14 @@ func removeSingleSigner(cli command.Cli, repoName, signerName string, forceYes b
return false, err
}
if ok, err := isLastSignerForReleases(role, allRoles); ok && !forceYes {
removeSigner := command.PromptForConfirmation(os.Stdin, cli.Out(), fmt.Sprintf("The signer \"%s\" signed the last released version of %s. "+
removeSigner := command.PromptForConfirmation(os.Stdin, dockerCLI.Out(), fmt.Sprintf("The signer \"%s\" signed the last released version of %s. "+
"Removing this signer will make %s unpullable. "+
"Are you sure you want to continue?",
signerName, repoName, repoName,
))

if !removeSigner {
fmt.Fprintf(cli.Out(), "\nAborting action.\n")
fmt.Fprintf(dockerCLI.Out(), "\nAborting action.\n")
return false, nil
}
} else if err != nil {
@@ -136,7 +136,7 @@ func removeSingleSigner(cli command.Cli, repoName, signerName string, forceYes b
return false, err
}

fmt.Fprintf(cli.Out(), "Successfully removed %s from %s\n\n", signerName, repoName)
fmt.Fprintf(dockerCLI.Out(), "Successfully removed %s from %s\n\n", signerName, repoName)

return true, nil
}

@@ -27,25 +27,20 @@ const (
func Services(
namespace Namespace,
config *composetypes.Config,
client client.CommonAPIClient,
apiClient client.CommonAPIClient,
) (map[string]swarm.ServiceSpec, error) {
result := make(map[string]swarm.ServiceSpec)

services := config.Services
volumes := config.Volumes
networks := config.Networks

for _, service := range services {
secrets, err := convertServiceSecrets(client, namespace, service.Secrets, config.Secrets)
for _, service := range config.Services {
secrets, err := convertServiceSecrets(apiClient, namespace, service.Secrets, config.Secrets)
if err != nil {
return nil, errors.Wrapf(err, "service %s", service.Name)
}
configs, err := convertServiceConfigObjs(client, namespace, service, config.Configs)
configs, err := convertServiceConfigObjs(apiClient, namespace, service, config.Configs)
if err != nil {
return nil, errors.Wrapf(err, "service %s", service.Name)
}

serviceSpec, err := Service(client.ClientVersion(), namespace, service, networks, volumes, secrets, configs)
serviceSpec, err := Service(apiClient.ClientVersion(), namespace, service, config.Networks, config.Volumes, secrets, configs)
if err != nil {
return nil, errors.Wrapf(err, "service %s", service.Name)
}
@@ -245,7 +240,7 @@ func convertServiceNetworks(

// TODO: fix secrets API so that SecretAPIClient is not required here
func convertServiceSecrets(
client client.SecretAPIClient,
apiClient client.SecretAPIClient,
namespace Namespace,
secrets []composetypes.ServiceSecretConfig,
secretSpecs map[string]composetypes.SecretConfig,
@@ -272,7 +267,7 @@ func convertServiceSecrets(
})
}

secrs, err := servicecli.ParseSecrets(client, refs)
secrs, err := servicecli.ParseSecrets(apiClient, refs)
if err != nil {
return nil, err
}
@@ -289,7 +284,7 @@ func convertServiceSecrets(
//
// TODO: fix configs API so that ConfigsAPIClient is not required here
func convertServiceConfigObjs(
client client.ConfigAPIClient,
apiClient client.ConfigAPIClient,
namespace Namespace,
service composetypes.ServiceConfig,
configSpecs map[string]composetypes.ConfigObjConfig,
@@ -348,7 +343,7 @@ func convertServiceConfigObjs(

}

confs, err := servicecli.ParseConfigs(client, refs)
confs, err := servicecli.ParseConfigs(apiClient, refs)
if err != nil {
return nil, err
}

@ -497,7 +497,7 @@ func TestConvertServiceSecrets(t *testing.T) {
|
|||
Name: "bar_secret",
|
||||
},
|
||||
}
|
||||
client := &fakeClient{
|
||||
apiClient := &fakeClient{
|
||||
secretListFunc: func(opts types.SecretListOptions) ([]swarm.Secret, error) {
|
||||
assert.Check(t, is.Contains(opts.Filters.Get("name"), "foo_secret"))
|
||||
assert.Check(t, is.Contains(opts.Filters.Get("name"), "bar_secret"))
|
||||
|
@ -507,7 +507,7 @@ func TestConvertServiceSecrets(t *testing.T) {
|
|||
}, nil
|
||||
},
|
||||
}
|
||||
refs, err := convertServiceSecrets(client, namespace, secrets, secretSpecs)
|
||||
refs, err := convertServiceSecrets(apiClient, namespace, secrets, secretSpecs)
|
||||
assert.NilError(t, err)
|
||||
expected := []*swarm.SecretReference{
|
||||
{
|
||||
|
@ -554,7 +554,7 @@ func TestConvertServiceConfigs(t *testing.T) {
|
|||
Name: "baz_config",
|
||||
},
|
||||
}
|
||||
client := &fakeClient{
|
||||
apiClient := &fakeClient{
|
||||
configListFunc: func(opts types.ConfigListOptions) ([]swarm.Config, error) {
|
||||
assert.Check(t, is.Contains(opts.Filters.Get("name"), "foo_config"))
|
||||
assert.Check(t, is.Contains(opts.Filters.Get("name"), "bar_config"))
|
||||
|
@ -566,7 +566,7 @@ func TestConvertServiceConfigs(t *testing.T) {
|
|||
}, nil
|
||||
},
|
||||
}
|
||||
refs, err := convertServiceConfigObjs(client, namespace, service, configSpecs)
|
||||
refs, err := convertServiceConfigObjs(apiClient, namespace, service, configSpecs)
|
||||
assert.NilError(t, err)
|
||||
expected := []*swarm.ConfigReference{
|
||||
{
|
||||
|
|
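The tests above bind their fake to apiClient instead of client for the same reason: the test files import the client package, and a local with the same name would shadow it. A self-contained sketch of the fake-client pattern; the secretLister interface, fakeClient struct, and listNames helper are illustrative names, not the cli's own types.

package main

import (
	"context"
	"fmt"
)

// secretLister is a tiny illustrative interface standing in for the real API client.
type secretLister interface {
	SecretNames(ctx context.Context) ([]string, error)
}

// fakeClient lets a test inject behaviour per call, like the fakes in the hunks above.
type fakeClient struct {
	secretListFunc func() ([]string, error)
}

func (c *fakeClient) SecretNames(context.Context) ([]string, error) {
	if c.secretListFunc != nil {
		return c.secretListFunc()
	}
	return nil, nil
}

// listNames accepts the interface, so tests can pass the fake. Naming the
// parameter apiClient avoids shadowing an imported client package.
func listNames(ctx context.Context, apiClient secretLister) ([]string, error) {
	return apiClient.SecretNames(ctx)
}

func main() {
	apiClient := &fakeClient{
		secretListFunc: func() ([]string, error) {
			return []string{"foo_secret", "bar_secret"}, nil
		},
	}
	names, err := listNames(context.Background(), apiClient)
	fmt.Println(names, err)
}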
|
@ -18,9 +18,9 @@ func TestConvertVolumeToMountAnonymousVolume(t *testing.T) {
|
|||
Type: mount.TypeVolume,
|
||||
Target: "/foo/bar",
|
||||
}
|
||||
mount, err := convertVolumeToMount(config, volumes{}, NewNamespace("foo"))
|
||||
mnt, err := convertVolumeToMount(config, volumes{}, NewNamespace("foo"))
|
||||
assert.NilError(t, err)
|
||||
assert.Check(t, is.DeepEqual(expected, mount))
|
||||
assert.Check(t, is.DeepEqual(expected, mnt))
|
||||
}
|
||||
|
||||
func TestConvertVolumeToMountAnonymousBind(t *testing.T) {
|
||||
|
@ -173,9 +173,9 @@ func TestConvertVolumeToMountNamedVolume(t *testing.T) {
|
|||
NoCopy: true,
|
||||
},
|
||||
}
|
||||
mount, err := convertVolumeToMount(config, stackVolumes, namespace)
|
||||
mnt, err := convertVolumeToMount(config, stackVolumes, namespace)
|
||||
assert.NilError(t, err)
|
||||
assert.Check(t, is.DeepEqual(expected, mount))
|
||||
assert.Check(t, is.DeepEqual(expected, mnt))
|
||||
}
|
||||
|
||||
func TestConvertVolumeToMountNamedVolumeWithNameCustomizd(t *testing.T) {
|
||||
|
@ -220,9 +220,9 @@ func TestConvertVolumeToMountNamedVolumeWithNameCustomizd(t *testing.T) {
|
|||
NoCopy: true,
|
||||
},
|
||||
}
|
||||
mount, err := convertVolumeToMount(config, stackVolumes, namespace)
|
||||
mnt, err := convertVolumeToMount(config, stackVolumes, namespace)
|
||||
assert.NilError(t, err)
|
||||
assert.Check(t, is.DeepEqual(expected, mount))
|
||||
assert.Check(t, is.DeepEqual(expected, mnt))
|
||||
}
|
||||
|
||||
func TestConvertVolumeToMountNamedVolumeExternal(t *testing.T) {
|
||||
|
@ -244,9 +244,9 @@ func TestConvertVolumeToMountNamedVolumeExternal(t *testing.T) {
|
|||
Source: "outside",
|
||||
Target: "/foo",
|
||||
}
|
||||
mount, err := convertVolumeToMount(config, stackVolumes, namespace)
|
||||
mnt, err := convertVolumeToMount(config, stackVolumes, namespace)
|
||||
assert.NilError(t, err)
|
||||
assert.Check(t, is.DeepEqual(expected, mount))
|
||||
assert.Check(t, is.DeepEqual(expected, mnt))
|
||||
}
|
||||
|
||||
func TestConvertVolumeToMountNamedVolumeExternalNoCopy(t *testing.T) {
|
||||
|
@ -273,9 +273,9 @@ func TestConvertVolumeToMountNamedVolumeExternalNoCopy(t *testing.T) {
|
|||
NoCopy: true,
|
||||
},
|
||||
}
|
||||
mount, err := convertVolumeToMount(config, stackVolumes, namespace)
|
||||
mnt, err := convertVolumeToMount(config, stackVolumes, namespace)
|
||||
assert.NilError(t, err)
|
||||
assert.Check(t, is.DeepEqual(expected, mount))
|
||||
assert.Check(t, is.DeepEqual(expected, mnt))
|
||||
}
|
||||
|
||||
func TestConvertVolumeToMountBind(t *testing.T) {
|
||||
|
@ -295,9 +295,9 @@ func TestConvertVolumeToMountBind(t *testing.T) {
|
|||
ReadOnly: true,
|
||||
Bind: &composetypes.ServiceVolumeBind{Propagation: "shared"},
|
||||
}
|
||||
mount, err := convertVolumeToMount(config, stackVolumes, namespace)
|
||||
mnt, err := convertVolumeToMount(config, stackVolumes, namespace)
|
||||
assert.NilError(t, err)
|
||||
assert.Check(t, is.DeepEqual(expected, mount))
|
||||
assert.Check(t, is.DeepEqual(expected, mnt))
|
||||
}
|
||||
|
||||
func TestConvertVolumeToMountVolumeDoesNotExist(t *testing.T) {
|
||||
|
@ -325,9 +325,9 @@ func TestConvertTmpfsToMountVolume(t *testing.T) {
|
|||
Target: "/foo/bar",
|
||||
TmpfsOptions: &mount.TmpfsOptions{SizeBytes: 1000},
|
||||
}
|
||||
mount, err := convertVolumeToMount(config, volumes{}, NewNamespace("foo"))
|
||||
mnt, err := convertVolumeToMount(config, volumes{}, NewNamespace("foo"))
|
||||
assert.NilError(t, err)
|
||||
assert.Check(t, is.DeepEqual(expected, mount))
|
||||
assert.Check(t, is.DeepEqual(expected, mnt))
|
||||
}
|
||||
|
||||
func TestConvertTmpfsToMountVolumeWithSource(t *testing.T) {
|
||||
|
@ -355,9 +355,9 @@ func TestConvertVolumeToMountAnonymousNpipe(t *testing.T) {
|
|||
Source: `\\.\pipe\foo`,
|
||||
Target: `\\.\pipe\foo`,
|
||||
}
|
||||
mount, err := convertVolumeToMount(config, volumes{}, NewNamespace("foo"))
|
||||
mnt, err := convertVolumeToMount(config, volumes{}, NewNamespace("foo"))
|
||||
assert.NilError(t, err)
|
||||
assert.Check(t, is.DeepEqual(expected, mount))
|
||||
assert.Check(t, is.DeepEqual(expected, mnt))
|
||||
}
|
||||
|
||||
func TestConvertVolumeMountClusterName(t *testing.T) {
|
||||
|
@ -389,9 +389,9 @@ func TestConvertVolumeMountClusterName(t *testing.T) {
|
|||
ClusterOptions: &mount.ClusterOptions{},
|
||||
}
|
||||
|
||||
mount, err := convertVolumeToMount(config, stackVolumes, NewNamespace("foo"))
|
||||
mnt, err := convertVolumeToMount(config, stackVolumes, NewNamespace("foo"))
|
||||
assert.NilError(t, err)
|
||||
assert.Check(t, is.DeepEqual(expected, mount))
|
||||
assert.Check(t, is.DeepEqual(expected, mnt))
|
||||
}
|
||||
|
||||
func TestConvertVolumeMountClusterGroup(t *testing.T) {
|
||||
|
@ -423,7 +423,7 @@ func TestConvertVolumeMountClusterGroup(t *testing.T) {
|
|||
ClusterOptions: &mount.ClusterOptions{},
|
||||
}
|
||||
|
||||
mount, err := convertVolumeToMount(config, stackVolumes, NewNamespace("foo"))
|
||||
mnt, err := convertVolumeToMount(config, stackVolumes, NewNamespace("foo"))
|
||||
assert.NilError(t, err)
|
||||
assert.Check(t, is.DeepEqual(expected, mount))
|
||||
assert.Check(t, is.DeepEqual(expected, mnt))
|
||||
}
|
||||
|
|
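In the volume tests, the result variable collided with the imported api/types/mount package, so mount becomes mnt. A small sketch of the same rename outside this commit; it assumes github.com/docker/docker/api/types/mount is available, and anonymousVolume is a hypothetical helper.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/mount"
)

// With the old naming the local was called "mount"; after the assignment the
// identifier refers to the value, hiding package members such as
// mount.TypeVolume. Calling it mnt keeps the package name usable.
func anonymousVolume(target string) mount.Mount {
	mnt := mount.Mount{
		Type:   mount.TypeVolume,
		Target: target,
	}
	return mnt
}

func main() {
	fmt.Printf("%+v\n", anonymousVolume("/foo/bar"))
}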
|
@ -39,8 +39,8 @@ type Options struct {
|
|||
|
||||
// WithDiscardEnvFiles sets the Options to discard the `env_file` section after resolving to
|
||||
// the `environment` section
|
||||
func WithDiscardEnvFiles(opts *Options) {
|
||||
opts.discardEnvFiles = true
|
||||
func WithDiscardEnvFiles(options *Options) {
|
||||
options.discardEnvFiles = true
|
||||
}
|
||||
|
||||
// ParseYAML reads the bytes from a file, parses the bytes into a mapping
|
||||
|
@ -62,12 +62,12 @@ func ParseYAML(source []byte) (map[string]interface{}, error) {
|
|||
}
|
||||
|
||||
// Load reads a ConfigDetails and returns a fully loaded configuration
|
||||
func Load(configDetails types.ConfigDetails, options ...func(*Options)) (*types.Config, error) {
|
||||
func Load(configDetails types.ConfigDetails, opt ...func(*Options)) (*types.Config, error) {
|
||||
if len(configDetails.ConfigFiles) < 1 {
|
||||
return nil, errors.Errorf("No files specified")
|
||||
}
|
||||
|
||||
opts := &Options{
|
||||
options := &Options{
|
||||
Interpolate: &interp.Options{
|
||||
Substitute: template.Substitute,
|
||||
LookupValue: configDetails.LookupEnv,
|
||||
|
@ -75,8 +75,8 @@ func Load(configDetails types.ConfigDetails, options ...func(*Options)) (*types.
|
|||
},
|
||||
}
|
||||
|
||||
for _, op := range options {
|
||||
op(opts)
|
||||
for _, op := range opt {
|
||||
op(options)
|
||||
}
|
||||
|
||||
configs := []*types.Config{}
|
||||
|
@ -96,14 +96,14 @@ func Load(configDetails types.ConfigDetails, options ...func(*Options)) (*types.
|
|||
return nil, err
|
||||
}
|
||||
|
||||
if !opts.SkipInterpolation {
|
||||
configDict, err = interpolateConfig(configDict, *opts.Interpolate)
|
||||
if !options.SkipInterpolation {
|
||||
configDict, err = interpolateConfig(configDict, *options.Interpolate)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if !opts.SkipValidation {
|
||||
if !options.SkipValidation {
|
||||
if err := schema.Validate(configDict, configDetails.Version); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -114,7 +114,7 @@ func Load(configDetails types.ConfigDetails, options ...func(*Options)) (*types.
|
|||
return nil, err
|
||||
}
|
||||
cfg.Filename = file.Filename
|
||||
if opts.discardEnvFiles {
|
||||
if options.discardEnvFiles {
|
||||
for i := range cfg.Services {
|
||||
cfg.Services[i].EnvFile = nil
|
||||
}
|
||||
|
@ -512,16 +512,16 @@ func resolveVolumePaths(volumes []types.ServiceVolumeConfig, workingDir string,
|
|||
}
|
||||
|
||||
// TODO: make this more robust
|
||||
func expandUser(path string, lookupEnv template.Mapping) string {
|
||||
if strings.HasPrefix(path, "~") {
|
||||
func expandUser(srcPath string, lookupEnv template.Mapping) string {
|
||||
if strings.HasPrefix(srcPath, "~") {
|
||||
home, ok := lookupEnv("HOME")
|
||||
if !ok {
|
||||
logrus.Warn("cannot expand '~', because the environment lacks HOME")
|
||||
return path
|
||||
return srcPath
|
||||
}
|
||||
return strings.Replace(path, "~", home, 1)
|
||||
return strings.Replace(srcPath, "~", home, 1)
|
||||
}
|
||||
return path
|
||||
return srcPath
|
||||
}
|
||||
|
||||
func transformUlimits(data interface{}) (interface{}, error) {
|
||||
|
|
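The loader hunks keep the same functional-options flow but reshuffle its names: the variadic becomes opt and the resolved struct becomes options, so neither collides with other identifiers in the file. A self-contained sketch of that pattern; Options, WithSkipValidation, and load are illustrative, not the loader's real API.

package main

import "fmt"

// Options is an illustrative configuration struct in the loader's style.
type Options struct {
	SkipValidation    bool
	SkipInterpolation bool
}

// WithSkipValidation follows the same shape as WithDiscardEnvFiles above:
// a function that mutates the Options it receives.
func WithSkipValidation(options *Options) {
	options.SkipValidation = true
}

// load builds defaults first, then applies each option in order, using the
// opt / options / op naming from the hunk above.
func load(source string, opt ...func(*Options)) (*Options, error) {
	options := &Options{}
	for _, op := range opt {
		op(options)
	}
	fmt.Println("loading", source)
	return options, nil
}

func main() {
	opts, err := load("docker-compose.yml", WithSkipValidation)
	fmt.Printf("%+v %v\n", *opts, err)
}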
|
@ -141,34 +141,34 @@ func (err validationError) Error() string {
|
|||
return fmt.Sprintf("%s %s", err.parent.Field(), description)
|
||||
}
|
||||
|
||||
func getMostSpecificError(errors []gojsonschema.ResultError) validationError {
|
||||
func getMostSpecificError(errs []gojsonschema.ResultError) validationError {
|
||||
mostSpecificError := 0
|
||||
for i, err := range errors {
|
||||
if specificity(err) > specificity(errors[mostSpecificError]) {
|
||||
for i, err := range errs {
|
||||
if specificity(err) > specificity(errs[mostSpecificError]) {
|
||||
mostSpecificError = i
|
||||
continue
|
||||
}
|
||||
|
||||
if specificity(err) == specificity(errors[mostSpecificError]) {
|
||||
if specificity(err) == specificity(errs[mostSpecificError]) {
|
||||
// Invalid type errors win in a tie-breaker for most specific field name
|
||||
if err.Type() == "invalid_type" && errors[mostSpecificError].Type() != "invalid_type" {
|
||||
if err.Type() == "invalid_type" && errs[mostSpecificError].Type() != "invalid_type" {
|
||||
mostSpecificError = i
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if mostSpecificError+1 == len(errors) {
|
||||
return validationError{parent: errors[mostSpecificError]}
|
||||
if mostSpecificError+1 == len(errs) {
|
||||
return validationError{parent: errs[mostSpecificError]}
|
||||
}
|
||||
|
||||
switch errors[mostSpecificError].Type() {
|
||||
switch errs[mostSpecificError].Type() {
|
||||
case "number_one_of", "number_any_of":
|
||||
return validationError{
|
||||
parent: errors[mostSpecificError],
|
||||
child: errors[mostSpecificError+1],
|
||||
parent: errs[mostSpecificError],
|
||||
child: errs[mostSpecificError+1],
|
||||
}
|
||||
default:
|
||||
return validationError{parent: errors[mostSpecificError]}
|
||||
return validationError{parent: errs[mostSpecificError]}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
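Here the parameter errors becomes errs, which frees the identifier of the errors package for the rest of the function. A standard-library sketch of the same situation; combine is an illustrative helper, not code from this commit.

package main

import (
	"errors"
	"fmt"
)

// A parameter named "errors" would shadow the errors package and make
// errors.New unreachable below; errs keeps both identifiers usable.
func combine(errs []error) error {
	if len(errs) == 0 {
		return nil
	}
	msg := errs[0].Error()
	for _, err := range errs[1:] {
		msg += "; " + err.Error()
	}
	return errors.New(msg)
}

func main() {
	fmt.Println(combine([]error{errors.New("first"), errors.New("second")}))
}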
|
@ -475,8 +475,8 @@ func parseMetadata(data []byte, name string) (Metadata, error) {
|
|||
return meta, nil
|
||||
}
|
||||
|
||||
func importEndpointTLS(tlsData *ContextTLSData, path string, data []byte) error {
|
||||
parts := strings.SplitN(strings.TrimPrefix(path, "tls/"), "/", 2)
|
||||
func importEndpointTLS(tlsData *ContextTLSData, tlsPath string, data []byte) error {
|
||||
parts := strings.SplitN(strings.TrimPrefix(tlsPath, "tls/"), "/", 2)
|
||||
if len(parts) != 2 {
|
||||
// TLS endpoints require archived file directory with 2 layers
|
||||
// i.e. tls/{endpointName}/{fileName}
|
||||
|
|
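Renaming path to tlsPath in importEndpointTLS leaves the path package reachable inside the function. A minimal standard-library sketch; endpointName is a hypothetical helper, not the store's API.

package main

import (
	"fmt"
	"path"
)

// If the parameter were called "path", package functions such as path.Dir
// and path.Base would be hidden for the rest of the body.
func endpointName(tlsPath string) string {
	// tls/{endpointName}/{fileName} -> {endpointName}
	return path.Base(path.Dir(tlsPath))
}

func main() {
	fmt.Println(endpointName("tls/docker/ca.pem")) // prints: docker
}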
|
@ -378,16 +378,16 @@ func hideUnsupportedFeatures(cmd *cobra.Command, details versionDetails) error {
|
|||
}
|
||||
|
||||
// Checks if a command or one of its ancestors is in the list
|
||||
func findCommand(cmd *cobra.Command, commands []string) bool {
|
||||
func findCommand(cmd *cobra.Command, cmds []string) bool {
|
||||
if cmd == nil {
|
||||
return false
|
||||
}
|
||||
for _, c := range commands {
|
||||
for _, c := range cmds {
|
||||
if c == cmd.Name() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return findCommand(cmd.Parent(), commands)
|
||||
return findCommand(cmd.Parent(), cmds)
|
||||
}
|
||||
|
||||
func isSupported(cmd *cobra.Command, details versionDetails) error {
|
||||
|
|
|
@ -10,7 +10,7 @@ import (
|
|||
// Any number of swarm function builders can be passed to augment it.
|
||||
func Swarm(swarmBuilders ...func(*swarm.Swarm)) *swarm.Swarm {
|
||||
t1 := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
|
||||
swarm := &swarm.Swarm{
|
||||
s := &swarm.Swarm{
|
||||
ClusterInfo: swarm.ClusterInfo{
|
||||
ID: "swarm",
|
||||
Meta: swarm.Meta{
|
||||
|
@ -25,15 +25,15 @@ func Swarm(swarmBuilders ...func(*swarm.Swarm)) *swarm.Swarm {
|
|||
}
|
||||
|
||||
for _, builder := range swarmBuilders {
|
||||
builder(swarm)
|
||||
builder(s)
|
||||
}
|
||||
|
||||
return swarm
|
||||
return s
|
||||
}
|
||||
|
||||
// Autolock set the swarm into autolock mode
|
||||
func Autolock() func(*swarm.Swarm) {
|
||||
return func(swarm *swarm.Swarm) {
|
||||
swarm.Spec.EncryptionConfig.AutoLockManagers = true
|
||||
return func(s *swarm.Swarm) {
|
||||
s.Spec.EncryptionConfig.AutoLockManagers = true
|
||||
}
|
||||
}
|
||||
|
|
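The builder above declared a local named after the swarm package it was building from, which revive flags even though the code compiled. A sketch of the same builder shape using net/http as a stand-in; Server and WithAddr are illustrative names only.

package main

import (
	"fmt"
	"net/http"
)

// Naming the local "http" (the old style) would shadow the net/http import,
// hiding identifiers like http.MethodGet after the assignment; a short name
// such as s avoids the collision.
func Server(builders ...func(*http.Server)) *http.Server {
	s := &http.Server{Addr: ":8080"}
	for _, build := range builders {
		build(s)
	}
	return s
}

// WithAddr is an option in the same style as Autolock above.
func WithAddr(addr string) func(*http.Server) {
	return func(s *http.Server) {
		s.Addr = addr
	}
}

func main() {
	srv := Server(WithAddr(":9090"))
	fmt.Println(srv.Addr)
}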
|
@ -4,7 +4,7 @@ import "github.com/docker/docker/api/types/volume"
|
|||
|
||||
// Volume creates a volume with default values.
|
||||
// Any number of volume function builders can be passed to augment it.
|
||||
func Volume(builders ...func(volume *volume.Volume)) *volume.Volume {
|
||||
func Volume(builders ...func(vol *volume.Volume)) *volume.Volume {
|
||||
vol := &volume.Volume{
|
||||
Name: "volume",
|
||||
Driver: "local",
|
||||
|
@ -20,22 +20,22 @@ func Volume(builders ...func(volume *volume.Volume)) *volume.Volume {
|
|||
}
|
||||
|
||||
// VolumeLabels sets the volume labels
|
||||
func VolumeLabels(labels map[string]string) func(volume *volume.Volume) {
|
||||
return func(volume *volume.Volume) {
|
||||
volume.Labels = labels
|
||||
func VolumeLabels(labels map[string]string) func(vol *volume.Volume) {
|
||||
return func(vol *volume.Volume) {
|
||||
vol.Labels = labels
|
||||
}
|
||||
}
|
||||
|
||||
// VolumeName sets the volume labels
|
||||
func VolumeName(name string) func(volume *volume.Volume) {
|
||||
return func(volume *volume.Volume) {
|
||||
volume.Name = name
|
||||
func VolumeName(name string) func(vol *volume.Volume) {
|
||||
return func(vol *volume.Volume) {
|
||||
vol.Name = name
|
||||
}
|
||||
}
|
||||
|
||||
// VolumeDriver sets the volume driver
|
||||
func VolumeDriver(name string) func(volume *volume.Volume) {
|
||||
return func(volume *volume.Volume) {
|
||||
volume.Driver = name
|
||||
func VolumeDriver(name string) func(vol *volume.Volume) {
|
||||
return func(vol *volume.Volume) {
|
||||
vol.Driver = name
|
||||
}
|
||||
}
|
||||
|
|
|
@ -41,11 +41,11 @@ type FakeCli struct {
|
|||
}
|
||||
|
||||
// NewFakeCli returns a fake for the command.Cli interface
|
||||
func NewFakeCli(client client.APIClient, opts ...func(*FakeCli)) *FakeCli {
|
||||
func NewFakeCli(apiClient client.APIClient, opts ...func(*FakeCli)) *FakeCli {
|
||||
outBuffer := new(bytes.Buffer)
|
||||
errBuffer := new(bytes.Buffer)
|
||||
c := &FakeCli{
|
||||
client: client,
|
||||
client: apiClient,
|
||||
out: streams.NewOut(outBuffer),
|
||||
outBuffer: outBuffer,
|
||||
err: errBuffer,
|
||||
|
@ -77,13 +77,13 @@ func (c *FakeCli) SetOut(out *streams.Out) {
|
|||
}
|
||||
|
||||
// SetConfigFile sets the "fake" config file
|
||||
func (c *FakeCli) SetConfigFile(configfile *configfile.ConfigFile) {
|
||||
c.configfile = configfile
|
||||
func (c *FakeCli) SetConfigFile(configFile *configfile.ConfigFile) {
|
||||
c.configfile = configFile
|
||||
}
|
||||
|
||||
// SetContextStore sets the "fake" context store
|
||||
func (c *FakeCli) SetContextStore(store store.Store) {
|
||||
c.contextStore = store
|
||||
func (c *FakeCli) SetContextStore(contextStore store.Store) {
|
||||
c.contextStore = contextStore
|
||||
}
|
||||
|
||||
// SetCurrentContext sets the "fake" current context
|
||||
|
@ -186,13 +186,13 @@ func (c *FakeCli) RegistryClient(bool) registryclient.RegistryClient {
|
|||
}
|
||||
|
||||
// SetManifestStore on the fake cli
|
||||
func (c *FakeCli) SetManifestStore(store manifeststore.Store) {
|
||||
c.manifestStore = store
|
||||
func (c *FakeCli) SetManifestStore(manifestStore manifeststore.Store) {
|
||||
c.manifestStore = manifestStore
|
||||
}
|
||||
|
||||
// SetRegistryClient on the fake cli
|
||||
func (c *FakeCli) SetRegistryClient(client registryclient.RegistryClient) {
|
||||
c.registryClient = client
|
||||
func (c *FakeCli) SetRegistryClient(registryClient registryclient.RegistryClient) {
|
||||
c.registryClient = registryClient
|
||||
}
|
||||
|
||||
// ContentTrustEnabled on the fake cli
|
||||
|
|