mirror of https://github.com/docker/cli.git
Review comments
Address code review comemnts and purge additional dead code.
Signed-off-by: Daniel Hiltgen <daniel.hiltgen@docker.com>
(cherry picked from commit f250152bf4
)
Signed-off-by: Daniel Hiltgen <daniel.hiltgen@docker.com>
This commit is contained in:
parent
f07f51f4c8
commit
b7ec4a42d9
4
Makefile
4
Makefile
|
@ -12,14 +12,14 @@ clean: ## remove build artifacts
|
|||
|
||||
.PHONY: test-unit
|
||||
test-unit: ## run unit test
|
||||
./scripts/test/unit $(shell go list ./... | grep -vE '/vendor/|/e2e/|/e2eengine/')
|
||||
./scripts/test/unit $(shell go list ./... | grep -vE '/vendor/|/e2e/')
|
||||
|
||||
.PHONY: test
|
||||
test: test-unit ## run tests
|
||||
|
||||
.PHONY: test-coverage
|
||||
test-coverage: ## run test coverage
|
||||
./scripts/test/unit-with-coverage $(shell go list ./... | grep -vE '/vendor/|/e2e/|/e2eengine/')
|
||||
./scripts/test/unit-with-coverage $(shell go list ./... | grep -vE '/vendor/|/e2e/')
|
||||
|
||||
.PHONY: lint
|
||||
lint: ## run all the lint tools
|
||||
|
|
|
@ -56,7 +56,7 @@ https://hub.docker.com/ then specify the file with the '--license' flag.
|
|||
|
||||
flags.StringVar(&options.licenseFile, "license", "", "License File")
|
||||
flags.StringVar(&options.version, "version", "", "Specify engine version (default is to use currently running version)")
|
||||
flags.StringVar(&options.registryPrefix, "registry-prefix", "docker.io/docker", "Override the default location where engine images are pulled")
|
||||
flags.StringVar(&options.registryPrefix, "registry-prefix", clitypes.RegistryPrefix, "Override the default location where engine images are pulled")
|
||||
flags.StringVar(&options.image, "engine-image", clitypes.EnterpriseEngineImage, "Specify engine image")
|
||||
flags.StringVar(&options.format, "format", "", "Pretty-print licenses using a Go template")
|
||||
flags.BoolVar(&options.displayOnly, "display-only", false, "only display the available licenses and exit")
|
||||
|
@ -68,7 +68,7 @@ https://hub.docker.com/ then specify the file with the '--license' flag.
|
|||
|
||||
func runActivate(cli command.Cli, options activateOptions) error {
|
||||
if !isRoot() {
|
||||
return errors.New("must be privileged to activate engine")
|
||||
return errors.New("this command must be run as a privileged user")
|
||||
}
|
||||
ctx := context.Background()
|
||||
client, err := cli.NewContainerizedEngineClient(options.sockPath)
|
||||
|
@ -107,16 +107,16 @@ func runActivate(cli command.Cli, options activateOptions) error {
|
|||
EngineVersion: options.version,
|
||||
}
|
||||
|
||||
err = client.ActivateEngine(ctx, opts, cli.Out(), authConfig,
|
||||
if err := client.ActivateEngine(ctx, opts, cli.Out(), authConfig,
|
||||
func(ctx context.Context) error {
|
||||
client := cli.Client()
|
||||
_, err := client.Ping(ctx)
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintln(cli.Out(), "To complete the activation, please restart docker with 'systemctl restart docker'")
|
||||
fmt.Fprintln(cli.Out(), `Succesfully activated engine.
|
||||
Restart docker with 'systemctl restart docker' to complete the activation.`)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
@ -5,6 +5,7 @@ import (
|
|||
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/cli/cli/trust"
|
||||
clitypes "github.com/docker/cli/types"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/docker/api/types"
|
||||
registrytypes "github.com/docker/docker/api/types/registry"
|
||||
|
@ -13,7 +14,7 @@ import (
|
|||
|
||||
func getRegistryAuth(cli command.Cli, registryPrefix string) (*types.AuthConfig, error) {
|
||||
if registryPrefix == "" {
|
||||
registryPrefix = "docker.io/docker"
|
||||
registryPrefix = clitypes.RegistryPrefix
|
||||
}
|
||||
distributionRef, err := reference.ParseNormalizedNamed(registryPrefix)
|
||||
if err != nil {
|
||||
|
|
|
@ -13,10 +13,6 @@ import (
|
|||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
const (
|
||||
releaseNotePrefix = "https://docs.docker.com/releasenotes"
|
||||
)
|
||||
|
||||
type checkOptions struct {
|
||||
registryPrefix string
|
||||
preReleases bool
|
||||
|
@ -39,7 +35,7 @@ func newCheckForUpdatesCommand(dockerCli command.Cli) *cobra.Command {
|
|||
},
|
||||
}
|
||||
flags := cmd.Flags()
|
||||
flags.StringVar(&options.registryPrefix, "registry-prefix", "docker.io/store/docker", "Override the existing location where engine images are pulled")
|
||||
flags.StringVar(&options.registryPrefix, "registry-prefix", clitypes.RegistryPrefix, "Override the existing location where engine images are pulled")
|
||||
flags.BoolVar(&options.downgrades, "downgrades", false, "Report downgrades (default omits older versions)")
|
||||
flags.BoolVar(&options.preReleases, "pre-releases", false, "Include pre-release versions")
|
||||
flags.BoolVar(&options.upgrades, "upgrades", true, "Report available upgrades")
|
||||
|
@ -52,7 +48,7 @@ func newCheckForUpdatesCommand(dockerCli command.Cli) *cobra.Command {
|
|||
|
||||
func runCheck(dockerCli command.Cli, options checkOptions) error {
|
||||
if !isRoot() {
|
||||
return errors.New("must be privileged to activate engine")
|
||||
return errors.New("this command must be run as a privileged user")
|
||||
}
|
||||
ctx := context.Background()
|
||||
client := dockerCli.Client()
|
||||
|
@ -119,7 +115,7 @@ func processVersions(currentVersion, verType string,
|
|||
availUpdates = append(availUpdates, clitypes.Update{
|
||||
Type: verType,
|
||||
Version: ver.Tag,
|
||||
Notes: fmt.Sprintf("%s/%s", releaseNotePrefix, ver.Tag),
|
||||
Notes: fmt.Sprintf("%s/%s", clitypes.ReleaseNotePrefix, ver.Tag),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
@ -6,6 +6,7 @@ import (
|
|||
|
||||
"github.com/docker/cli/cli"
|
||||
"github.com/docker/cli/cli/command"
|
||||
clitypes "github.com/docker/cli/types"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
@ -25,7 +26,7 @@ func newUpdateCommand(dockerCli command.Cli) *cobra.Command {
|
|||
|
||||
flags.StringVar(&options.EngineVersion, "version", "", "Specify engine version")
|
||||
flags.StringVar(&options.EngineImage, "engine-image", "", "Specify engine image")
|
||||
flags.StringVar(&options.RegistryPrefix, "registry-prefix", "", "Override the current location where engine images are pulled")
|
||||
flags.StringVar(&options.RegistryPrefix, "registry-prefix", clitypes.RegistryPrefix, "Override the current location where engine images are pulled")
|
||||
flags.StringVar(&options.sockPath, "containerd", "", "override default location of containerd endpoint")
|
||||
|
||||
return cmd
|
||||
|
@ -33,7 +34,7 @@ func newUpdateCommand(dockerCli command.Cli) *cobra.Command {
|
|||
|
||||
func runUpdate(dockerCli command.Cli, options extendedEngineInitOptions) error {
|
||||
if !isRoot() {
|
||||
return errors.New("must be privileged to activate engine")
|
||||
return errors.New("this command must be run as a privileged user")
|
||||
}
|
||||
ctx := context.Background()
|
||||
client, err := dockerCli.NewContainerizedEngineClient(options.sockPath)
|
||||
|
@ -41,11 +42,6 @@ func runUpdate(dockerCli command.Cli, options extendedEngineInitOptions) error {
|
|||
return errors.Wrap(err, "unable to access local containerd")
|
||||
}
|
||||
defer client.Close()
|
||||
if options.EngineImage == "" || options.RegistryPrefix == "" {
|
||||
if options.RegistryPrefix == "" {
|
||||
options.RegistryPrefix = "docker.io/store/docker"
|
||||
}
|
||||
}
|
||||
authConfig, err := getRegistryAuth(dockerCli, options.RegistryPrefix)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -59,6 +55,7 @@ func runUpdate(dockerCli command.Cli, options extendedEngineInitOptions) error {
|
|||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintln(dockerCli.Out(), "To complete the update, please restart docker with 'systemctl restart docker'")
|
||||
fmt.Fprintln(dockerCli.Out(), `Succesfully updated engine.
|
||||
Restart docker with 'systemctl restart docker' to complete the update.`)
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -28,7 +28,7 @@ func TestUpdateHappy(t *testing.T) {
|
|||
},
|
||||
)
|
||||
cmd := newUpdateCommand(testCli)
|
||||
cmd.Flags().Set("registry-prefix", "docker.io/docker")
|
||||
cmd.Flags().Set("registry-prefix", clitypes.RegistryPrefix)
|
||||
cmd.Flags().Set("version", "someversion")
|
||||
err := cmd.Execute()
|
||||
assert.NilError(t, err)
|
||||
|
|
|
@ -15,28 +15,6 @@ RUN apt-get update && apt-get install -y \
|
|||
iptables \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# TODO - consider replacing with an official image and a multi-stage build to pluck the binaries out
|
||||
#ARG CONTAINERD_VERSION=v1.1.2
|
||||
#ARG CONTAINERD_VERSION=47a128d
|
||||
#ARG CONTAINERD_VERSION=6c3e782f
|
||||
ARG CONTAINERD_VERSION=65839a47a88b0a1c5dc34981f1741eccefc9f2b0
|
||||
RUN git clone https://github.com/containerd/containerd.git /go/src/github.com/containerd/containerd && \
|
||||
cd /go/src/github.com/containerd/containerd && \
|
||||
git checkout ${CONTAINERD_VERSION} && \
|
||||
make && \
|
||||
make install
|
||||
COPY e2eengine/config.toml /etc/containerd/config.toml
|
||||
COPY --from=containerd-shim-process /bin/containerd-shim-process-v1 /bin/
|
||||
|
||||
|
||||
# TODO - consider replacing with an official image and a multi-stage build to pluck the binaries out
|
||||
ARG RUNC_VERSION=v1.0.0-rc5
|
||||
RUN git clone https://github.com/opencontainers/runc.git /go/src/github.com/opencontainers/runc && \
|
||||
cd /go/src/github.com/opencontainers/runc && \
|
||||
git checkout ${RUNC_VERSION} && \
|
||||
make && \
|
||||
make install
|
||||
|
||||
ARG COMPOSE_VERSION=1.21.2
|
||||
RUN curl -L https://github.com/docker/compose/releases/download/${COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose \
|
||||
&& chmod +x /usr/local/bin/docker-compose
|
||||
|
|
|
@ -1,43 +0,0 @@
|
|||
package check
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/docker/cli/e2eengine"
|
||||
|
||||
"gotest.tools/icmd"
|
||||
)
|
||||
|
||||
func TestDockerEngineOnContainerdAltRootConfig(t *testing.T) {
|
||||
defer func() {
|
||||
err := e2eengine.CleanupEngine(t)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to cleanup engine: %s", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Use a fixed version to prevent failures when development of the next version starts, and no image is available yet.
|
||||
targetVersion := "18.09.0-dev"
|
||||
|
||||
t.Log("First engine init")
|
||||
// First init
|
||||
result := icmd.RunCmd(icmd.Command("docker", "engine", "init", "--config-file", "/tmp/etc/docker/daemon.json", "--version", targetVersion),
|
||||
func(c *icmd.Cmd) {
|
||||
c.Env = append(c.Env, "DOCKER_CLI_EXPERIMENTAL=enabled")
|
||||
})
|
||||
result.Assert(t, icmd.Expected{
|
||||
Out: "Success! The docker engine is now running.",
|
||||
Err: "",
|
||||
ExitCode: 0,
|
||||
})
|
||||
|
||||
// Make sure update doesn't blow up with alternate config path
|
||||
t.Log("perform update")
|
||||
// Now update and succeed
|
||||
result = icmd.RunCmd(icmd.Command("docker", "engine", "update", "--version", targetVersion))
|
||||
result.Assert(t, icmd.Expected{
|
||||
Out: "Success! The docker engine is now running.",
|
||||
Err: "",
|
||||
ExitCode: 0,
|
||||
})
|
||||
}
|
|
@ -1,14 +0,0 @@
|
|||
root = "/var/lib/containerd"
|
||||
state = "/run/containerd"
|
||||
oom_score = 0
|
||||
|
||||
[grpc]
|
||||
address = "/run/containerd/containerd.sock"
|
||||
uid = 0
|
||||
gid = 0
|
||||
|
||||
[debug]
|
||||
address = "/run/containerd/debug.sock"
|
||||
uid = 0
|
||||
gid = 0
|
||||
level = "debug"
|
|
@ -1,86 +0,0 @@
|
|||
package multi
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/docker/cli/e2eengine"
|
||||
|
||||
"gotest.tools/icmd"
|
||||
)
|
||||
|
||||
func TestDockerEngineOnContainerdMultiTest(t *testing.T) {
|
||||
defer func() {
|
||||
err := e2eengine.CleanupEngine(t)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to cleanup engine: %s", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Use a fixed version to prevent failures when development of the next version starts, and no image is available yet.
|
||||
targetVersion := "18.09.0-dev"
|
||||
|
||||
t.Log("Attempt engine init without experimental")
|
||||
// First init
|
||||
result := icmd.RunCmd(icmd.Command("docker", "engine", "init", "--version", targetVersion),
|
||||
func(c *icmd.Cmd) {
|
||||
c.Env = append(c.Env, "DOCKER_CLI_EXPERIMENTAL=disabled")
|
||||
})
|
||||
result.Assert(t, icmd.Expected{
|
||||
Out: "",
|
||||
Err: "docker engine init is only supported",
|
||||
ExitCode: 1,
|
||||
})
|
||||
|
||||
t.Log("First engine init")
|
||||
// First init
|
||||
result = icmd.RunCmd(icmd.Command("docker", "engine", "init", "--version", targetVersion),
|
||||
func(c *icmd.Cmd) {
|
||||
c.Env = append(c.Env, "DOCKER_CLI_EXPERIMENTAL=enabled")
|
||||
})
|
||||
result.Assert(t, icmd.Expected{
|
||||
Out: "Success! The docker engine is now running.",
|
||||
Err: "",
|
||||
ExitCode: 0,
|
||||
})
|
||||
|
||||
t.Log("checking for updates")
|
||||
// Check for updates
|
||||
result = icmd.RunCmd(icmd.Command("docker", "engine", "check", "--downgrades", "--pre-releases"))
|
||||
result.Assert(t, icmd.Expected{
|
||||
Out: "VERSION",
|
||||
Err: "",
|
||||
ExitCode: 0,
|
||||
})
|
||||
|
||||
t.Log("attempt second init (should fail)")
|
||||
// Attempt to init a second time and fail
|
||||
result = icmd.RunCmd(icmd.Command("docker", "engine", "init"),
|
||||
func(c *icmd.Cmd) {
|
||||
c.Env = append(c.Env, "DOCKER_CLI_EXPERIMENTAL=enabled")
|
||||
})
|
||||
result.Assert(t, icmd.Expected{
|
||||
Out: "",
|
||||
Err: "engine already present",
|
||||
ExitCode: 1,
|
||||
})
|
||||
|
||||
t.Log("perform update")
|
||||
// Now update and succeed
|
||||
result = icmd.RunCmd(icmd.Command("docker", "engine", "update", "--version", targetVersion))
|
||||
result.Assert(t, icmd.Expected{
|
||||
Out: "Success! The docker engine is now running.",
|
||||
Err: "",
|
||||
ExitCode: 0,
|
||||
})
|
||||
|
||||
t.Log("remove engine")
|
||||
result = icmd.RunCmd(icmd.Command("docker", "engine", "rm"),
|
||||
func(c *icmd.Cmd) {
|
||||
c.Env = append(c.Env, "DOCKER_CLI_EXPERIMENTAL=enabled")
|
||||
})
|
||||
result.Assert(t, icmd.Expected{
|
||||
Out: "",
|
||||
Err: "",
|
||||
ExitCode: 0,
|
||||
})
|
||||
}
|
|
@ -1,50 +0,0 @@
|
|||
package e2eengine
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/containerd/containerd"
|
||||
"github.com/docker/cli/internal/containerizedengine"
|
||||
"github.com/docker/cli/types"
|
||||
)
|
||||
|
||||
type containerizedclient interface {
|
||||
types.ContainerizedClient
|
||||
GetEngine(context.Context) (containerd.Container, error)
|
||||
}
|
||||
|
||||
// CleanupEngine ensures the local engine has been removed between testcases
|
||||
func CleanupEngine(t *testing.T) error {
|
||||
t.Log("doing engine cleanup")
|
||||
ctx := context.Background()
|
||||
|
||||
client, err := containerizedengine.NewClient("")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// See if the engine exists first
|
||||
_, err = client.(containerizedclient).GetEngine(ctx)
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "not present") {
|
||||
t.Log("engine was not detected, no cleanup to perform")
|
||||
// Nothing to do, it's not defined
|
||||
return nil
|
||||
}
|
||||
t.Logf("failed to lookup engine: %s", err)
|
||||
// Any other error is not good...
|
||||
return err
|
||||
}
|
||||
// TODO Consider nuking the docker dir too so there's no cached content between test cases
|
||||
|
||||
// TODO - this needs refactoring still to actually work properly
|
||||
/*
|
||||
err = client.RemoveEngine(ctx)
|
||||
if err != nil {
|
||||
t.Logf("Failed to remove engine: %s", err)
|
||||
}
|
||||
*/
|
||||
return err
|
||||
}
|
|
@ -1,242 +0,0 @@
|
|||
package containerizedengine
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd"
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/namespaces"
|
||||
"github.com/containerd/containerd/runtime/restart"
|
||||
"github.com/docker/cli/internal/pkg/containerized"
|
||||
clitypes "github.com/docker/cli/types"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var _ clitypes.ContainerizedClient = &baseClient{}
|
||||
|
||||
// InitEngine is the main entrypoint for `docker engine init`
|
||||
func (c *baseClient) InitEngine(ctx context.Context, opts clitypes.EngineInitOptions, out clitypes.OutStream,
|
||||
authConfig *types.AuthConfig, healthfn func(context.Context) error) error {
|
||||
|
||||
ctx = namespaces.WithNamespace(ctx, engineNamespace)
|
||||
// Verify engine isn't already running
|
||||
_, err := c.GetEngine(ctx)
|
||||
if err == nil {
|
||||
return ErrEngineAlreadyPresent
|
||||
} else if err != ErrEngineNotPresent {
|
||||
return err
|
||||
}
|
||||
|
||||
imageName := fmt.Sprintf("%s/%s:%s", opts.RegistryPrefix, opts.EngineImage, opts.EngineVersion)
|
||||
// Look for desired image
|
||||
_, err = c.cclient.GetImage(ctx, imageName)
|
||||
if err != nil {
|
||||
if errdefs.IsNotFound(err) {
|
||||
_, err = c.pullWithAuth(ctx, imageName, out, authConfig)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "unable to pull image %s", imageName)
|
||||
}
|
||||
} else {
|
||||
return errors.Wrapf(err, "unable to check for image %s", imageName)
|
||||
}
|
||||
}
|
||||
|
||||
// Spin up the engine
|
||||
err = c.startEngineOnContainerd(ctx, imageName, opts.ConfigFile)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to create docker daemon")
|
||||
}
|
||||
|
||||
// Wait for the daemon to start, verify it's responsive
|
||||
fmt.Fprintf(out, "Waiting for engine to start... ")
|
||||
ctx, cancel := context.WithTimeout(ctx, engineWaitTimeout)
|
||||
defer cancel()
|
||||
if err := c.waitForEngine(ctx, out, healthfn); err != nil {
|
||||
// TODO once we have the logging strategy sorted out
|
||||
// this should likely gather the last few lines of logs to report
|
||||
// why the daemon failed to initialize
|
||||
return errors.Wrap(err, "failed to start docker daemon")
|
||||
}
|
||||
fmt.Fprintf(out, "Success! The docker engine is now running.\n")
|
||||
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
// GetEngine will return the containerd container running the engine (or error)
|
||||
func (c *baseClient) GetEngine(ctx context.Context) (containerd.Container, error) {
|
||||
ctx = namespaces.WithNamespace(ctx, engineNamespace)
|
||||
containers, err := c.cclient.Containers(ctx, "id=="+engineContainerName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(containers) == 0 {
|
||||
return nil, ErrEngineNotPresent
|
||||
}
|
||||
return containers[0], nil
|
||||
}
|
||||
|
||||
// getEngineImage will return the current image used by the engine
|
||||
func (c *baseClient) getEngineImage(engine containerd.Container) (string, error) {
|
||||
ctx := namespaces.WithNamespace(context.Background(), engineNamespace)
|
||||
image, err := engine.Image(ctx)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return image.Name(), nil
|
||||
}
|
||||
|
||||
var (
|
||||
engineWaitInterval = 500 * time.Millisecond
|
||||
engineWaitTimeout = 60 * time.Second
|
||||
)
|
||||
|
||||
// waitForEngine will wait for the engine to start
|
||||
func (c *baseClient) waitForEngine(ctx context.Context, out io.Writer, healthfn func(context.Context) error) error {
|
||||
ticker := time.NewTicker(engineWaitInterval)
|
||||
defer ticker.Stop()
|
||||
defer func() {
|
||||
fmt.Fprintf(out, "\n")
|
||||
}()
|
||||
|
||||
err := c.waitForEngineContainer(ctx, ticker)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintf(out, "waiting for engine to be responsive... ")
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
err = healthfn(ctx)
|
||||
if err == nil {
|
||||
fmt.Fprintf(out, "engine is online.")
|
||||
return nil
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return errors.Wrap(err, "timeout waiting for engine to be responsive")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *baseClient) waitForEngineContainer(ctx context.Context, ticker *time.Ticker) error {
|
||||
var ret error
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
engine, err := c.GetEngine(ctx)
|
||||
if engine != nil {
|
||||
return nil
|
||||
}
|
||||
ret = err
|
||||
case <-ctx.Done():
|
||||
return errors.Wrap(ret, "timeout waiting for engine to be responsive")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// RemoveEngine gracefully unwinds the current engine
|
||||
func (c *baseClient) RemoveEngine(ctx context.Context) error {
|
||||
engine, err := c.GetEngine(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return c.removeEngine(ctx, engine)
|
||||
}
|
||||
|
||||
func (c *baseClient) removeEngine(ctx context.Context, engine containerd.Container) error {
|
||||
ctx = namespaces.WithNamespace(ctx, engineNamespace)
|
||||
|
||||
// Make sure the container isn't being restarted while we unwind it
|
||||
stopLabel := map[string]string{}
|
||||
stopLabel[restart.StatusLabel] = string(containerd.Stopped)
|
||||
engine.SetLabels(ctx, stopLabel)
|
||||
|
||||
// Wind down the existing engine
|
||||
task, err := engine.Task(ctx, nil)
|
||||
if err != nil {
|
||||
if !errdefs.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
status, err := task.Status(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if status.Status == containerd.Running {
|
||||
// It's running, so kill it
|
||||
err := task.Kill(ctx, syscall.SIGTERM, []containerd.KillOpts{}...)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "task kill error")
|
||||
}
|
||||
|
||||
ch, err := task.Wait(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
timeout := time.NewTimer(engineWaitTimeout)
|
||||
select {
|
||||
case <-timeout.C:
|
||||
// TODO - consider a force flag in the future to allow a more aggressive
|
||||
// kill of the engine via
|
||||
// task.Kill(ctx, syscall.SIGKILL, containerd.WithKillAll)
|
||||
return ErrEngineShutdownTimeout
|
||||
case <-ch:
|
||||
}
|
||||
}
|
||||
if _, err := task.Delete(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
deleteOpts := []containerd.DeleteOpts{containerd.WithSnapshotCleanup}
|
||||
err = engine.Delete(ctx, deleteOpts...)
|
||||
if err != nil && errdefs.IsNotFound(err) {
|
||||
return nil
|
||||
}
|
||||
return errors.Wrap(err, "failed to remove existing engine container")
|
||||
}
|
||||
|
||||
// startEngineOnContainerd creates a new docker engine running on containerd
|
||||
func (c *baseClient) startEngineOnContainerd(ctx context.Context, imageName, configFile string) error {
|
||||
ctx = namespaces.WithNamespace(ctx, engineNamespace)
|
||||
image, err := c.cclient.GetImage(ctx, imageName)
|
||||
if err != nil {
|
||||
if errdefs.IsNotFound(err) {
|
||||
return fmt.Errorf("engine image missing: %s", imageName)
|
||||
}
|
||||
return errors.Wrap(err, "failed to check for engine image")
|
||||
}
|
||||
|
||||
// Make sure we have a valid config file
|
||||
err = c.verifyDockerConfig(configFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
engineSpec.Process.Args = append(engineSpec.Process.Args,
|
||||
"--config-file", configFile,
|
||||
)
|
||||
|
||||
cOpts := []containerd.NewContainerOpts{
|
||||
containerized.WithNewSnapshot(image),
|
||||
restart.WithStatus(containerd.Running),
|
||||
restart.WithLogPath("/var/log/engine.log"), // TODO - better!
|
||||
genSpec(),
|
||||
containerd.WithRuntime("io.containerd.runtime.process.v1", nil),
|
||||
}
|
||||
|
||||
_, err = c.cclient.NewContainer(
|
||||
ctx,
|
||||
engineContainerName,
|
||||
cOpts...,
|
||||
)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to create engine container")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@ -1,570 +0,0 @@
|
|||
package containerizedengine
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"syscall"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd"
|
||||
"github.com/containerd/containerd/cio"
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/oci"
|
||||
"github.com/docker/cli/cli/command"
|
||||
clitypes "github.com/docker/cli/types"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
"gotest.tools/assert"
|
||||
)
|
||||
|
||||
func healthfnHappy(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
func healthfnError(ctx context.Context) error {
|
||||
return fmt.Errorf("ping failure")
|
||||
}
|
||||
|
||||
func TestInitGetEngineFail(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
opts := clitypes.EngineInitOptions{
|
||||
EngineVersion: "engineversiongoeshere",
|
||||
RegistryPrefix: "registryprefixgoeshere",
|
||||
ConfigFile: "/tmp/configfilegoeshere",
|
||||
EngineImage: clitypes.CommunityEngineImage,
|
||||
}
|
||||
container := &fakeContainer{}
|
||||
client := baseClient{
|
||||
cclient: &fakeContainerdClient{
|
||||
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
|
||||
return []containerd.Container{container}, nil
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := client.InitEngine(ctx, opts, command.NewOutStream(&bytes.Buffer{}), &types.AuthConfig{}, healthfnHappy)
|
||||
assert.Assert(t, err == ErrEngineAlreadyPresent)
|
||||
}
|
||||
|
||||
func TestInitCheckImageFail(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
opts := clitypes.EngineInitOptions{
|
||||
EngineVersion: "engineversiongoeshere",
|
||||
RegistryPrefix: "registryprefixgoeshere",
|
||||
ConfigFile: "/tmp/configfilegoeshere",
|
||||
EngineImage: clitypes.CommunityEngineImage,
|
||||
}
|
||||
client := baseClient{
|
||||
cclient: &fakeContainerdClient{
|
||||
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
|
||||
return []containerd.Container{}, nil
|
||||
},
|
||||
getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
|
||||
return nil, fmt.Errorf("something went wrong")
|
||||
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := client.InitEngine(ctx, opts, command.NewOutStream(&bytes.Buffer{}), &types.AuthConfig{}, healthfnHappy)
|
||||
assert.ErrorContains(t, err, "unable to check for image")
|
||||
assert.ErrorContains(t, err, "something went wrong")
|
||||
}
|
||||
|
||||
func TestInitPullFail(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
opts := clitypes.EngineInitOptions{
|
||||
EngineVersion: "engineversiongoeshere",
|
||||
RegistryPrefix: "registryprefixgoeshere",
|
||||
ConfigFile: "/tmp/configfilegoeshere",
|
||||
EngineImage: clitypes.CommunityEngineImage,
|
||||
}
|
||||
client := baseClient{
|
||||
cclient: &fakeContainerdClient{
|
||||
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
|
||||
return []containerd.Container{}, nil
|
||||
},
|
||||
getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
|
||||
return nil, errdefs.ErrNotFound
|
||||
|
||||
},
|
||||
pullFunc: func(ctx context.Context, ref string, opts ...containerd.RemoteOpt) (containerd.Image, error) {
|
||||
return nil, fmt.Errorf("pull failure")
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := client.InitEngine(ctx, opts, command.NewOutStream(&bytes.Buffer{}), &types.AuthConfig{}, healthfnHappy)
|
||||
assert.ErrorContains(t, err, "unable to pull image")
|
||||
assert.ErrorContains(t, err, "pull failure")
|
||||
}
|
||||
|
||||
func TestInitStartFail(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
opts := clitypes.EngineInitOptions{
|
||||
EngineVersion: "engineversiongoeshere",
|
||||
RegistryPrefix: "registryprefixgoeshere",
|
||||
ConfigFile: "/tmp/configfilegoeshere",
|
||||
EngineImage: clitypes.CommunityEngineImage,
|
||||
}
|
||||
client := baseClient{
|
||||
cclient: &fakeContainerdClient{
|
||||
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
|
||||
return []containerd.Container{}, nil
|
||||
},
|
||||
getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
|
||||
return nil, errdefs.ErrNotFound
|
||||
|
||||
},
|
||||
pullFunc: func(ctx context.Context, ref string, opts ...containerd.RemoteOpt) (containerd.Image, error) {
|
||||
return nil, nil
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := client.InitEngine(ctx, opts, command.NewOutStream(&bytes.Buffer{}), &types.AuthConfig{}, healthfnHappy)
|
||||
assert.ErrorContains(t, err, "failed to create docker daemon")
|
||||
}
|
||||
|
||||
func TestGetEngineFail(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
client := baseClient{
|
||||
cclient: &fakeContainerdClient{
|
||||
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
|
||||
return nil, fmt.Errorf("container failure")
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
_, err := client.GetEngine(ctx)
|
||||
assert.ErrorContains(t, err, "failure")
|
||||
}
|
||||
|
||||
func TestGetEngineNotPresent(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
client := baseClient{
|
||||
cclient: &fakeContainerdClient{
|
||||
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
|
||||
return []containerd.Container{}, nil
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
_, err := client.GetEngine(ctx)
|
||||
assert.Assert(t, err == ErrEngineNotPresent)
|
||||
}
|
||||
|
||||
func TestGetEngineFound(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
container := &fakeContainer{}
|
||||
client := baseClient{
|
||||
cclient: &fakeContainerdClient{
|
||||
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
|
||||
return []containerd.Container{container}, nil
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
c, err := client.GetEngine(ctx)
|
||||
assert.NilError(t, err)
|
||||
assert.Equal(t, c, container)
|
||||
}
|
||||
|
||||
func TestGetEngineImageFail(t *testing.T) {
|
||||
client := baseClient{}
|
||||
container := &fakeContainer{
|
||||
imageFunc: func(context.Context) (containerd.Image, error) {
|
||||
return nil, fmt.Errorf("failure")
|
||||
},
|
||||
}
|
||||
|
||||
_, err := client.getEngineImage(container)
|
||||
assert.ErrorContains(t, err, "failure")
|
||||
}
|
||||
|
||||
func TestGetEngineImagePass(t *testing.T) {
|
||||
client := baseClient{}
|
||||
image := &fakeImage{
|
||||
nameFunc: func() string {
|
||||
return "imagenamehere"
|
||||
},
|
||||
}
|
||||
container := &fakeContainer{
|
||||
imageFunc: func(context.Context) (containerd.Image, error) {
|
||||
return image, nil
|
||||
},
|
||||
}
|
||||
|
||||
name, err := client.getEngineImage(container)
|
||||
assert.NilError(t, err)
|
||||
assert.Equal(t, name, "imagenamehere")
|
||||
}
|
||||
|
||||
func TestWaitForEngineNeverShowsUp(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
|
||||
defer cancel()
|
||||
engineWaitInterval = 1 * time.Millisecond
|
||||
client := baseClient{
|
||||
cclient: &fakeContainerdClient{
|
||||
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
|
||||
return []containerd.Container{}, nil
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := client.waitForEngine(ctx, command.NewOutStream(&bytes.Buffer{}), healthfnError)
|
||||
assert.ErrorContains(t, err, "timeout waiting")
|
||||
}
|
||||
|
||||
func TestWaitForEnginePingFail(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
|
||||
defer cancel()
|
||||
engineWaitInterval = 1 * time.Millisecond
|
||||
container := &fakeContainer{}
|
||||
client := baseClient{
|
||||
cclient: &fakeContainerdClient{
|
||||
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
|
||||
return []containerd.Container{container}, nil
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := client.waitForEngine(ctx, command.NewOutStream(&bytes.Buffer{}), healthfnError)
|
||||
assert.ErrorContains(t, err, "ping fail")
|
||||
}
|
||||
|
||||
func TestWaitForEngineHealthy(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
|
||||
defer cancel()
|
||||
engineWaitInterval = 1 * time.Millisecond
|
||||
container := &fakeContainer{}
|
||||
client := baseClient{
|
||||
cclient: &fakeContainerdClient{
|
||||
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
|
||||
return []containerd.Container{container}, nil
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := client.waitForEngine(ctx, command.NewOutStream(&bytes.Buffer{}), healthfnHappy)
|
||||
assert.NilError(t, err)
|
||||
}
|
||||
|
||||
func TestRemoveEngineBadTaskBadDelete(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
client := baseClient{}
|
||||
container := &fakeContainer{
|
||||
deleteFunc: func(context.Context, ...containerd.DeleteOpts) error {
|
||||
return fmt.Errorf("delete failure")
|
||||
},
|
||||
taskFunc: func(context.Context, cio.Attach) (containerd.Task, error) {
|
||||
return nil, errdefs.ErrNotFound
|
||||
},
|
||||
}
|
||||
|
||||
err := client.removeEngine(ctx, container)
|
||||
assert.ErrorContains(t, err, "failed to remove existing engine")
|
||||
assert.ErrorContains(t, err, "delete failure")
|
||||
}
|
||||
|
||||
func TestRemoveEngineTaskNoStatus(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
client := baseClient{}
|
||||
task := &fakeTask{
|
||||
statusFunc: func(context.Context) (containerd.Status, error) {
|
||||
return containerd.Status{}, fmt.Errorf("task status failure")
|
||||
},
|
||||
}
|
||||
container := &fakeContainer{
|
||||
taskFunc: func(context.Context, cio.Attach) (containerd.Task, error) {
|
||||
return task, nil
|
||||
},
|
||||
}
|
||||
|
||||
err := client.removeEngine(ctx, container)
|
||||
assert.ErrorContains(t, err, "task status failure")
|
||||
}
|
||||
|
||||
func TestRemoveEngineTaskNotRunningDeleteFail(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
client := baseClient{}
|
||||
task := &fakeTask{
|
||||
statusFunc: func(context.Context) (containerd.Status, error) {
|
||||
return containerd.Status{Status: containerd.Unknown}, nil
|
||||
},
|
||||
deleteFunc: func(context.Context, ...containerd.ProcessDeleteOpts) (*containerd.ExitStatus, error) {
|
||||
return nil, fmt.Errorf("task delete failure")
|
||||
},
|
||||
}
|
||||
container := &fakeContainer{
|
||||
taskFunc: func(context.Context, cio.Attach) (containerd.Task, error) {
|
||||
return task, nil
|
||||
},
|
||||
}
|
||||
|
||||
err := client.removeEngine(ctx, container)
|
||||
assert.ErrorContains(t, err, "task delete failure")
|
||||
}
|
||||
|
||||
func TestRemoveEngineTaskRunningKillFail(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
client := baseClient{}
|
||||
task := &fakeTask{
|
||||
statusFunc: func(context.Context) (containerd.Status, error) {
|
||||
return containerd.Status{Status: containerd.Running}, nil
|
||||
},
|
||||
killFunc: func(context.Context, syscall.Signal, ...containerd.KillOpts) error {
|
||||
return fmt.Errorf("task kill failure")
|
||||
},
|
||||
}
|
||||
container := &fakeContainer{
|
||||
taskFunc: func(context.Context, cio.Attach) (containerd.Task, error) {
|
||||
return task, nil
|
||||
},
|
||||
}
|
||||
|
||||
err := client.removeEngine(ctx, container)
|
||||
assert.ErrorContains(t, err, "task kill failure")
|
||||
}
|
||||
|
||||
func TestRemoveEngineTaskRunningWaitFail(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
client := baseClient{}
|
||||
task := &fakeTask{
|
||||
statusFunc: func(context.Context) (containerd.Status, error) {
|
||||
return containerd.Status{Status: containerd.Running}, nil
|
||||
},
|
||||
waitFunc: func(context.Context) (<-chan containerd.ExitStatus, error) {
|
||||
return nil, fmt.Errorf("task wait failure")
|
||||
},
|
||||
}
|
||||
container := &fakeContainer{
|
||||
taskFunc: func(context.Context, cio.Attach) (containerd.Task, error) {
|
||||
return task, nil
|
||||
},
|
||||
}
|
||||
|
||||
err := client.removeEngine(ctx, container)
|
||||
assert.ErrorContains(t, err, "task wait failure")
|
||||
}
|
||||
|
||||
func TestRemoveEngineTaskRunningHappyPath(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
client := baseClient{}
|
||||
ch := make(chan containerd.ExitStatus, 1)
|
||||
task := &fakeTask{
|
||||
statusFunc: func(context.Context) (containerd.Status, error) {
|
||||
return containerd.Status{Status: containerd.Running}, nil
|
||||
},
|
||||
waitFunc: func(context.Context) (<-chan containerd.ExitStatus, error) {
|
||||
ch <- containerd.ExitStatus{}
|
||||
return ch, nil
|
||||
},
|
||||
}
|
||||
container := &fakeContainer{
|
||||
taskFunc: func(context.Context, cio.Attach) (containerd.Task, error) {
|
||||
return task, nil
|
||||
},
|
||||
}
|
||||
|
||||
err := client.removeEngine(ctx, container)
|
||||
assert.NilError(t, err)
|
||||
}
|
||||
|
||||
func TestRemoveEngineTaskKillTimeout(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
ch := make(chan containerd.ExitStatus, 1)
|
||||
client := baseClient{}
|
||||
engineWaitTimeout = 10 * time.Millisecond
|
||||
task := &fakeTask{
|
||||
statusFunc: func(context.Context) (containerd.Status, error) {
|
||||
return containerd.Status{Status: containerd.Running}, nil
|
||||
},
|
||||
waitFunc: func(context.Context) (<-chan containerd.ExitStatus, error) {
|
||||
//ch <- containerd.ExitStatus{} // let it timeout
|
||||
return ch, nil
|
||||
},
|
||||
}
|
||||
container := &fakeContainer{
|
||||
taskFunc: func(context.Context, cio.Attach) (containerd.Task, error) {
|
||||
return task, nil
|
||||
},
|
||||
}
|
||||
|
||||
err := client.removeEngine(ctx, container)
|
||||
assert.Assert(t, err == ErrEngineShutdownTimeout)
|
||||
}
|
||||
|
||||
func TestStartEngineOnContainerdImageErr(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
imageName := "testnamegoeshere"
|
||||
configFile := "/tmp/configfilegoeshere"
|
||||
client := baseClient{
|
||||
cclient: &fakeContainerdClient{
|
||||
getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
|
||||
return nil, fmt.Errorf("some image lookup failure")
|
||||
|
||||
},
|
||||
},
|
||||
}
|
||||
err := client.startEngineOnContainerd(ctx, imageName, configFile)
|
||||
assert.ErrorContains(t, err, "some image lookup failure")
|
||||
}
|
||||
|
||||
func TestStartEngineOnContainerdImageNotFound(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
imageName := "testnamegoeshere"
|
||||
configFile := "/tmp/configfilegoeshere"
|
||||
client := baseClient{
|
||||
cclient: &fakeContainerdClient{
|
||||
getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
|
||||
return nil, errdefs.ErrNotFound
|
||||
|
||||
},
|
||||
},
|
||||
}
|
||||
err := client.startEngineOnContainerd(ctx, imageName, configFile)
|
||||
assert.ErrorContains(t, err, "engine image missing")
|
||||
}
|
||||
|
||||
func TestStartEngineOnContainerdHappy(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
imageName := "testnamegoeshere"
|
||||
configFile := "/tmp/configfilegoeshere"
|
||||
ch := make(chan containerd.ExitStatus, 1)
|
||||
streams := cio.Streams{}
|
||||
task := &fakeTask{
|
||||
statusFunc: func(context.Context) (containerd.Status, error) {
|
||||
return containerd.Status{Status: containerd.Running}, nil
|
||||
},
|
||||
waitFunc: func(context.Context) (<-chan containerd.ExitStatus, error) {
|
||||
ch <- containerd.ExitStatus{}
|
||||
return ch, nil
|
||||
},
|
||||
}
|
||||
container := &fakeContainer{
|
||||
newTaskFunc: func(ctx context.Context, creator cio.Creator, opts ...containerd.NewTaskOpts) (containerd.Task, error) {
|
||||
if streams.Stdout != nil {
|
||||
streams.Stdout.Write([]byte("{}"))
|
||||
}
|
||||
return task, nil
|
||||
},
|
||||
}
|
||||
client := baseClient{
|
||||
cclient: &fakeContainerdClient{
|
||||
getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
|
||||
return nil, nil
|
||||
|
||||
},
|
||||
newContainerFunc: func(ctx context.Context, id string, opts ...containerd.NewContainerOpts) (containerd.Container, error) {
|
||||
return container, nil
|
||||
},
|
||||
},
|
||||
}
|
||||
err := client.startEngineOnContainerd(ctx, imageName, configFile)
|
||||
assert.NilError(t, err)
|
||||
}
|
||||
|
||||
func TestGetEngineConfigFilePathBadSpec(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
client := baseClient{}
|
||||
container := &fakeContainer{
|
||||
specFunc: func(context.Context) (*oci.Spec, error) {
|
||||
return nil, fmt.Errorf("spec error")
|
||||
},
|
||||
}
|
||||
_, err := client.getEngineConfigFilePath(ctx, container)
|
||||
assert.ErrorContains(t, err, "spec error")
|
||||
}
|
||||
|
||||
func TestGetEngineConfigFilePathDistinct(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
client := baseClient{}
|
||||
container := &fakeContainer{
|
||||
specFunc: func(context.Context) (*oci.Spec, error) {
|
||||
return &oci.Spec{
|
||||
Process: &specs.Process{
|
||||
Args: []string{
|
||||
"--another-flag",
|
||||
"foo",
|
||||
"--config-file",
|
||||
"configpath",
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
},
|
||||
}
|
||||
configFile, err := client.getEngineConfigFilePath(ctx, container)
|
||||
assert.NilError(t, err)
|
||||
assert.Assert(t, err, configFile == "configpath")
|
||||
}
|
||||
|
||||
func TestGetEngineConfigFilePathEquals(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
client := baseClient{}
|
||||
container := &fakeContainer{
|
||||
specFunc: func(context.Context) (*oci.Spec, error) {
|
||||
return &oci.Spec{
|
||||
Process: &specs.Process{
|
||||
Args: []string{
|
||||
"--another-flag=foo",
|
||||
"--config-file=configpath",
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
},
|
||||
}
|
||||
configFile, err := client.getEngineConfigFilePath(ctx, container)
|
||||
assert.NilError(t, err)
|
||||
assert.Assert(t, err, configFile == "configpath")
|
||||
}
|
||||
|
||||
func TestGetEngineConfigFilePathMalformed1(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
client := baseClient{}
|
||||
container := &fakeContainer{
|
||||
specFunc: func(context.Context) (*oci.Spec, error) {
|
||||
return &oci.Spec{
|
||||
Process: &specs.Process{
|
||||
Args: []string{
|
||||
"--another-flag",
|
||||
"--config-file",
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
},
|
||||
}
|
||||
_, err := client.getEngineConfigFilePath(ctx, container)
|
||||
assert.Assert(t, err == ErrMalformedConfigFileParam)
|
||||
}
|
||||
|
||||
// getEngineConfigFilePath will extract the config file location from the engine flags
|
||||
func (c baseClient) getEngineConfigFilePath(ctx context.Context, engine containerd.Container) (string, error) {
|
||||
spec, err := engine.Spec(ctx)
|
||||
configFile := ""
|
||||
if err != nil {
|
||||
return configFile, err
|
||||
}
|
||||
for i := 0; i < len(spec.Process.Args); i++ {
|
||||
arg := spec.Process.Args[i]
|
||||
if strings.HasPrefix(arg, "--config-file") {
|
||||
if strings.Contains(arg, "=") {
|
||||
split := strings.SplitN(arg, "=", 2)
|
||||
configFile = split[1]
|
||||
} else {
|
||||
if i+1 >= len(spec.Process.Args) {
|
||||
return configFile, ErrMalformedConfigFileParam
|
||||
}
|
||||
configFile = spec.Process.Args[i+1]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if configFile == "" {
|
||||
// TODO - any more diagnostics to offer?
|
||||
return configFile, ErrEngineConfigLookupFailure
|
||||
}
|
||||
return configFile, nil
|
||||
}
|
|
@ -1,16 +0,0 @@
|
|||
// +build !windows
|
||||
|
||||
package containerizedengine
|
||||
|
||||
import (
|
||||
"github.com/containerd/containerd"
|
||||
"github.com/containerd/containerd/oci"
|
||||
"github.com/docker/cli/internal/pkg/containerized"
|
||||
)
|
||||
|
||||
func genSpec() containerd.NewContainerOpts {
|
||||
return containerd.WithSpec(&engineSpec,
|
||||
containerized.WithAllCapabilities,
|
||||
oci.WithParentCgroupDevices,
|
||||
)
|
||||
}
|
|
@ -1,14 +0,0 @@
|
|||
// +build windows
|
||||
|
||||
package containerizedengine
|
||||
|
||||
import (
|
||||
"github.com/containerd/containerd"
|
||||
"github.com/docker/cli/internal/pkg/containerized"
|
||||
)
|
||||
|
||||
func genSpec() containerd.NewContainerOpts {
|
||||
return containerd.WithSpec(&engineSpec,
|
||||
containerized.WithAllCapabilities,
|
||||
)
|
||||
}
|
|
@ -1,35 +0,0 @@
|
|||
package containerizedengine
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path"
|
||||
)
|
||||
|
||||
func (c baseClient) verifyDockerConfig(configFile string) error {
|
||||
|
||||
// TODO - in the future consider leveraging containerd and a host runtime
|
||||
// to create the file. For now, just create it locally since we have to be
|
||||
// local to talk to containerd
|
||||
|
||||
configDir := path.Dir(configFile)
|
||||
err := os.MkdirAll(configDir, 0644)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fd, err := os.OpenFile(configFile, os.O_RDWR|os.O_CREATE, 0755)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer fd.Close()
|
||||
|
||||
info, err := fd.Stat()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if info.Size() == 0 {
|
||||
_, err := fd.Write([]byte("{}"))
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -1,12 +0,0 @@
|
|||
// +build !windows
|
||||
|
||||
package containerizedengine
|
||||
|
||||
import (
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
var (
|
||||
// SIGKILL maps to unix.SIGKILL
|
||||
SIGKILL = unix.SIGKILL
|
||||
)
|
|
@ -1,12 +0,0 @@
|
|||
// +build windows
|
||||
|
||||
package containerizedengine
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
var (
|
||||
// SIGKILL all signals are ignored by containerd kill windows
|
||||
SIGKILL = syscall.Signal(0)
|
||||
)
|
|
@ -14,6 +14,10 @@ const (
|
|||
containerdSockPath = "/run/containerd/containerd.sock"
|
||||
engineContainerName = "dockerd"
|
||||
engineNamespace = "com.docker"
|
||||
|
||||
// runtimeMetadataName is the name of the runtime metadata file
|
||||
// When stored as a label on the container it is prefixed by "com.docker."
|
||||
runtimeMetadataName = "distribution_based_engine"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -32,9 +36,6 @@ var (
|
|||
// ErrEngineShutdownTimeout returned if the engine failed to shutdown in time
|
||||
ErrEngineShutdownTimeout = errors.New("timeout waiting for engine to exit")
|
||||
|
||||
// ErrEngineImageMissingTag returned if the engine image is missing the version tag
|
||||
ErrEngineImageMissingTag = errors.New("malformed engine image missing tag")
|
||||
|
||||
engineSpec = specs.Spec{
|
||||
Root: &specs.Root{
|
||||
Path: "rootfs",
|
||||
|
@ -64,12 +65,6 @@ var (
|
|||
NoNewPrivileges: false,
|
||||
},
|
||||
}
|
||||
|
||||
// RuntimeMetadataName is the name of the runtime metadata file
|
||||
RuntimeMetadataName = "distribution_based_engine"
|
||||
|
||||
// ReleaseNotePrefix is where to point users to for release notes
|
||||
ReleaseNotePrefix = "https://docs.docker.com/releasenotes"
|
||||
)
|
||||
|
||||
type baseClient struct {
|
||||
|
|
|
@ -40,21 +40,21 @@ func (c *baseClient) DoUpdate(ctx context.Context, opts clitypes.EngineInitOptio
|
|||
// current engine version and automatically apply it so users
|
||||
// could stay in sync by simply having a scheduled
|
||||
// `docker engine update`
|
||||
return fmt.Errorf("please pick the version you want to update to")
|
||||
return fmt.Errorf("pick the version you want to update to with --version")
|
||||
}
|
||||
|
||||
localMetadata, err := c.GetCurrentRuntimeMetadata(ctx, "")
|
||||
if err == nil {
|
||||
if opts.EngineImage == "" {
|
||||
if strings.Contains(strings.ToLower(localMetadata.Platform), "enterprise") {
|
||||
opts.EngineImage = "engine-enterprise"
|
||||
if strings.Contains(strings.ToLower(localMetadata.Platform), "community") {
|
||||
opts.EngineImage = clitypes.CommunityEngineImage
|
||||
} else {
|
||||
opts.EngineImage = "engine-community"
|
||||
opts.EngineImage = clitypes.EnterpriseEngineImage
|
||||
}
|
||||
}
|
||||
}
|
||||
if opts.EngineImage == "" {
|
||||
return fmt.Errorf("please pick the engine image to update with (engine-community or engine-enterprise)")
|
||||
return fmt.Errorf("unable to determine the installed engine version. Specify which engine image to update with --engine-image set to 'engine-community' or 'engine-enterprise'")
|
||||
}
|
||||
|
||||
imageName := fmt.Sprintf("%s/%s:%s", opts.RegistryPrefix, opts.EngineImage, opts.EngineVersion)
|
||||
|
@ -80,16 +80,15 @@ func (c *baseClient) DoUpdate(ctx context.Context, opts clitypes.EngineInitOptio
|
|||
// Grab current metadata for comparison purposes
|
||||
if localMetadata != nil {
|
||||
if localMetadata.Platform != newMetadata.Platform {
|
||||
fmt.Fprintf(out, "\nNotice: you have switched to \"%s\". Please refer to %s for update instructions.\n\n", newMetadata.Platform, c.GetReleaseNotesURL(imageName))
|
||||
fmt.Fprintf(out, "\nNotice: you have switched to \"%s\". Refer to %s for update instructions.\n\n", newMetadata.Platform, getReleaseNotesURL(imageName))
|
||||
}
|
||||
}
|
||||
|
||||
err = c.cclient.Install(ctx, image, containerd.WithInstallReplace, containerd.WithInstallPath("/usr"))
|
||||
if err != nil {
|
||||
if err := c.cclient.Install(ctx, image, containerd.WithInstallReplace, containerd.WithInstallPath("/usr")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return c.WriteRuntimeMetadata(ctx, "", newMetadata)
|
||||
return c.WriteRuntimeMetadata("", newMetadata)
|
||||
}
|
||||
|
||||
var defaultDockerRoot = "/var/lib/docker"
|
||||
|
@ -99,7 +98,7 @@ func (c *baseClient) GetCurrentRuntimeMetadata(_ context.Context, dockerRoot str
|
|||
if dockerRoot == "" {
|
||||
dockerRoot = defaultDockerRoot
|
||||
}
|
||||
filename := filepath.Join(dockerRoot, RuntimeMetadataName+".json")
|
||||
filename := filepath.Join(dockerRoot, runtimeMetadataName+".json")
|
||||
|
||||
data, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
|
@ -113,11 +112,12 @@ func (c *baseClient) GetCurrentRuntimeMetadata(_ context.Context, dockerRoot str
|
|||
return &res, nil
|
||||
}
|
||||
|
||||
func (c *baseClient) WriteRuntimeMetadata(_ context.Context, dockerRoot string, metadata *RuntimeMetadata) error {
|
||||
// WriteRuntimeMetadata stores the metadata on the local system
|
||||
func (c *baseClient) WriteRuntimeMetadata(dockerRoot string, metadata *RuntimeMetadata) error {
|
||||
if dockerRoot == "" {
|
||||
dockerRoot = defaultDockerRoot
|
||||
}
|
||||
filename := filepath.Join(dockerRoot, RuntimeMetadataName+".json")
|
||||
filename := filepath.Join(dockerRoot, runtimeMetadataName+".json")
|
||||
|
||||
data, err := json.Marshal(metadata)
|
||||
if err != nil {
|
||||
|
@ -154,9 +154,9 @@ func (c *baseClient) PreflightCheck(ctx context.Context, image containerd.Image)
|
|||
return nil, fmt.Errorf("unknown image %s config media type %s", image.Name(), ic.MediaType)
|
||||
}
|
||||
|
||||
metadataString, ok := config.Labels[RuntimeMetadataName]
|
||||
metadataString, ok := config.Labels["com.docker."+runtimeMetadataName]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("image %s does not contain runtime metadata label %s", image.Name(), RuntimeMetadataName)
|
||||
return nil, fmt.Errorf("image %s does not contain runtime metadata label %s", image.Name(), runtimeMetadataName)
|
||||
}
|
||||
err = json.Unmarshal([]byte(metadataString), &metadata)
|
||||
if err != nil {
|
||||
|
@ -165,7 +165,7 @@ func (c *baseClient) PreflightCheck(ctx context.Context, image containerd.Image)
|
|||
|
||||
// Current CLI only supports host install runtime
|
||||
if metadata.Runtime != "host_install" {
|
||||
return nil, fmt.Errorf("unsupported runtime: %s\nPlease consult the release notes at %s for upgrade instructions", metadata.Runtime, c.GetReleaseNotesURL(image.Name()))
|
||||
return nil, fmt.Errorf("unsupported daemon image: %s\nConsult the release notes at %s for upgrade instructions", metadata.Runtime, getReleaseNotesURL(image.Name()))
|
||||
}
|
||||
|
||||
// Verify local containerd is new enough
|
||||
|
@ -183,8 +183,8 @@ func (c *baseClient) PreflightCheck(ctx context.Context, image containerd.Image)
|
|||
return nil, err
|
||||
}
|
||||
if lv.LessThan(mv) {
|
||||
return nil, fmt.Errorf("local containerd is too old: %s - this engine version requires %s or newer.\nPlease consult the release notes at %s for upgrade instructions",
|
||||
localVersion.Version, metadata.ContainerdMinVersion, c.GetReleaseNotesURL(image.Name()))
|
||||
return nil, fmt.Errorf("local containerd is too old: %s - this engine version requires %s or newer.\nConsult the release notes at %s for upgrade instructions",
|
||||
localVersion.Version, metadata.ContainerdMinVersion, getReleaseNotesURL(image.Name()))
|
||||
}
|
||||
} // If omitted on metadata, no hard dependency on containerd version beyond 18.09 baseline
|
||||
|
||||
|
@ -192,9 +192,9 @@ func (c *baseClient) PreflightCheck(ctx context.Context, image containerd.Image)
|
|||
return &metadata, nil
|
||||
}
|
||||
|
||||
// GetReleaseNotesURL returns a release notes url
|
||||
// getReleaseNotesURL returns a release notes url
|
||||
// If the image name does not contain a version tag, the base release notes URL is returned
|
||||
func (c *baseClient) GetReleaseNotesURL(imageName string) string {
|
||||
func getReleaseNotesURL(imageName string) string {
|
||||
versionTag := ""
|
||||
distributionRef, err := reference.ParseNormalizedNamed(imageName)
|
||||
if err == nil {
|
||||
|
@ -203,5 +203,5 @@ func (c *baseClient) GetReleaseNotesURL(imageName string) string {
|
|||
versionTag = taggedRef.Tag()
|
||||
}
|
||||
}
|
||||
return fmt.Sprintf("%s/%s", ReleaseNotePrefix, versionTag)
|
||||
return fmt.Sprintf("%s/%s", clitypes.ReleaseNotePrefix, versionTag)
|
||||
}
|
||||
|
|
|
@ -19,6 +19,10 @@ import (
|
|||
"gotest.tools/assert"
|
||||
)
|
||||
|
||||
func healthfnHappy(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestActivateConfigFailure(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
registryPrefix := "registryprefixgoeshere"
|
||||
|
@ -108,7 +112,7 @@ func TestDoUpdateNoVersion(t *testing.T) {
|
|||
}
|
||||
client := baseClient{}
|
||||
err := client.DoUpdate(ctx, opts, command.NewOutStream(&bytes.Buffer{}), &types.AuthConfig{}, healthfnHappy)
|
||||
assert.ErrorContains(t, err, "please pick the version you")
|
||||
assert.ErrorContains(t, err, "pick the version you")
|
||||
}
|
||||
|
||||
func TestDoUpdateImageMiscError(t *testing.T) {
|
||||
|
@ -186,26 +190,29 @@ func TestActivateDoUpdateVerifyImageName(t *testing.T) {
|
|||
EngineVersion: "engineversiongoeshere",
|
||||
RegistryPrefix: "registryprefixgoeshere",
|
||||
ConfigFile: "/tmp/configfilegoeshere",
|
||||
//EngineImage: clitypes.EnterpriseEngineImage,
|
||||
}
|
||||
|
||||
tmpdir, err := ioutil.TempDir("", "docker-root")
|
||||
assert.NilError(t, err)
|
||||
defer os.RemoveAll(tmpdir)
|
||||
tmpDockerRoot := defaultDockerRoot
|
||||
defaultDockerRoot = tmpdir
|
||||
defer func() {
|
||||
defaultDockerRoot = tmpDockerRoot
|
||||
}()
|
||||
metadata := RuntimeMetadata{Platform: "platformgoeshere"}
|
||||
err = client.WriteRuntimeMetadata(ctx, tmpdir, &metadata)
|
||||
err = client.WriteRuntimeMetadata(tmpdir, &metadata)
|
||||
assert.NilError(t, err)
|
||||
|
||||
err = client.ActivateEngine(ctx, opts, command.NewOutStream(&bytes.Buffer{}), &types.AuthConfig{}, healthfnHappy)
|
||||
assert.ErrorContains(t, err, "check for image")
|
||||
assert.ErrorContains(t, err, "something went wrong")
|
||||
expectedImage := fmt.Sprintf("%s/%s:%s", opts.RegistryPrefix, "engine-community", opts.EngineVersion)
|
||||
expectedImage := fmt.Sprintf("%s/%s:%s", opts.RegistryPrefix, "engine-enterprise", opts.EngineVersion)
|
||||
assert.Assert(t, requestedImage == expectedImage, "%s != %s", requestedImage, expectedImage)
|
||||
|
||||
// Redo with enterprise set
|
||||
metadata = RuntimeMetadata{Platform: "Docker Engine - Enterprise"}
|
||||
err = client.WriteRuntimeMetadata(ctx, tmpdir, &metadata)
|
||||
err = client.WriteRuntimeMetadata(tmpdir, &metadata)
|
||||
assert.NilError(t, err)
|
||||
|
||||
err = client.ActivateEngine(ctx, opts, command.NewOutStream(&bytes.Buffer{}), &types.AuthConfig{}, healthfnHappy)
|
||||
|
@@ -230,7 +237,7 @@ func TestGetCurrentRuntimeMetadataBadJson(t *testing.T) {
	tmpdir, err := ioutil.TempDir("", "docker-root")
	assert.NilError(t, err)
	defer os.RemoveAll(tmpdir)
	filename := filepath.Join(tmpdir, RuntimeMetadataName+".json")
	filename := filepath.Join(tmpdir, runtimeMetadataName+".json")
	err = ioutil.WriteFile(filename, []byte("not json"), 0644)
	assert.NilError(t, err)
	client := baseClient{}
@@ -245,7 +252,7 @@ func TestGetCurrentRuntimeMetadataHappyPath(t *testing.T) {
	defer os.RemoveAll(tmpdir)
	client := baseClient{}
	metadata := RuntimeMetadata{Platform: "platformgoeshere"}
	err = client.WriteRuntimeMetadata(ctx, tmpdir, &metadata)
	err = client.WriteRuntimeMetadata(tmpdir, &metadata)
	assert.NilError(t, err)

	res, err := client.GetCurrentRuntimeMetadata(ctx, tmpdir)
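As a rough illustration of what these metadata tests exercise, the sketch below writes and re-reads a platform record under a temporary docker root. It is not the actual helper being changed here: the JSON field name, the "runtime-metadata.json" file name, and all identifiers are assumptions made for the example.

package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

// runtimeMetadata stands in for the real RuntimeMetadata type; the JSON tag is assumed.
type runtimeMetadata struct {
	Platform string `json:"platform"`
}

// writeRuntimeMetadata persists the record as JSON inside the given docker root.
func writeRuntimeMetadata(dockerRoot string, md *runtimeMetadata) error {
	data, err := json.Marshal(md)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(filepath.Join(dockerRoot, "runtime-metadata.json"), data, 0644)
}

// readRuntimeMetadata loads the record back from the same location.
func readRuntimeMetadata(dockerRoot string) (*runtimeMetadata, error) {
	data, err := ioutil.ReadFile(filepath.Join(dockerRoot, "runtime-metadata.json"))
	if err != nil {
		return nil, err
	}
	var md runtimeMetadata
	if err := json.Unmarshal(data, &md); err != nil {
		return nil, err
	}
	return &md, nil
}

func main() {
	dir, err := ioutil.TempDir("", "docker-root")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)
	if err := writeRuntimeMetadata(dir, &runtimeMetadata{Platform: "Docker Engine - Enterprise"}); err != nil {
		panic(err)
	}
	md, err := readRuntimeMetadata(dir)
	if err != nil {
		panic(err)
	}
	fmt.Println(md.Platform) // Docker Engine - Enterprise
}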
@@ -254,14 +261,13 @@ func TestGetCurrentRuntimeMetadataHappyPath(t *testing.T) {
}

func TestGetReleaseNotesURL(t *testing.T) {
	client := baseClient{}
	imageName := "bogus image name #$%&@!"
	url := client.GetReleaseNotesURL(imageName)
	assert.Equal(t, url, ReleaseNotePrefix+"/")
	url := getReleaseNotesURL(imageName)
	assert.Equal(t, url, clitypes.ReleaseNotePrefix+"/")
	imageName = "foo.bar/valid/repowithouttag"
	url = client.GetReleaseNotesURL(imageName)
	assert.Equal(t, url, ReleaseNotePrefix+"/")
	url = getReleaseNotesURL(imageName)
	assert.Equal(t, url, clitypes.ReleaseNotePrefix+"/")
	imageName = "foo.bar/valid/repowithouttag:tag123"
	url = client.GetReleaseNotesURL(imageName)
	assert.Equal(t, url, ReleaseNotePrefix+"/tag123")
	url = getReleaseNotesURL(imageName)
	assert.Equal(t, url, clitypes.ReleaseNotePrefix+"/tag123")
}
@@ -32,19 +32,21 @@ func GetEngineVersions(ctx context.Context, registryClient registryclient.Regist
}

func getEngineImage(registryPrefix string, serverVersion types.Version) string {
	communityImage := "engine-community"
	enterpriseImage := "engine-enterprise"
	platform := strings.ToLower(serverVersion.Platform.Name)
	if platform != "" {
		if strings.Contains(platform, "enterprise") {
			return path.Join(registryPrefix, enterpriseImage)
			return path.Join(registryPrefix, clitypes.EnterpriseEngineImage)
		}
		return path.Join(registryPrefix, communityImage)
		return path.Join(registryPrefix, clitypes.CommunityEngineImage)
	}

	// TODO This check is only applicable for early 18.09 builds that had some packaging bugs
	// and can be removed once we're no longer testing with them
	if strings.Contains(serverVersion.Version, "ee") {
		return path.Join(registryPrefix, enterpriseImage)
		return path.Join(registryPrefix, clitypes.EnterpriseEngineImage)
	}
	return path.Join(registryPrefix, communityImage)

	return path.Join(registryPrefix, clitypes.CommunityEngineImage)
}

func parseTags(tags []string, currentVersion string) (clitypes.AvailableVersions, error) {
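A self-contained sketch of the selection logic shown in this hunk: an explicit platform name wins, otherwise an "ee" version string is treated as enterprise. The constant values mirror strings that appear elsewhere in the diff ("docker.io/store/docker", "engine-enterprise", "engine-community"); the function name and sample inputs are made up for illustration.

package main

import (
	"fmt"
	"path"
	"strings"
)

const (
	registryPrefix  = "docker.io/store/docker"
	enterpriseImage = "engine-enterprise"
	communityImage  = "engine-community"
)

// pickEngineImage mirrors the decision flow above without the docker API types:
// check the reported platform name first, then fall back to sniffing the version string.
func pickEngineImage(platformName, version string) string {
	platform := strings.ToLower(platformName)
	if platform != "" {
		if strings.Contains(platform, "enterprise") {
			return path.Join(registryPrefix, enterpriseImage)
		}
		return path.Join(registryPrefix, communityImage)
	}
	if strings.Contains(version, "ee") {
		return path.Join(registryPrefix, enterpriseImage)
	}
	return path.Join(registryPrefix, communityImage)
}

func main() {
	fmt.Println(pickEngineImage("Docker Engine - Enterprise", "18.09.0")) // docker.io/store/docker/engine-enterprise
	fmt.Println(pickEngineImage("", "18.09.0-ee"))                        // docker.io/store/docker/engine-enterprise
	fmt.Println(pickEngineImage("Docker Engine - Community", "18.09.0"))  // docker.io/store/docker/engine-community
}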
@@ -1,5 +0,0 @@
#!/usr/bin/env bash
set -eu -o pipefail

# TODO fetch images?
./scripts/test/engine/wrapper
@@ -1,107 +0,0 @@
#!/usr/bin/env bash
# Run engine specific integration tests against the latest containerd-in-docker
set -eu -o pipefail

function container_ip {
    local cid=$1
    local network=$2
    docker inspect \
        -f "{{.NetworkSettings.Networks.${network}.IPAddress}}" "$cid"
}

function fetch_images {
    ## TODO - not yet implemented
    ./scripts/test/engine/load-image fetch-only
}

function setup {
    ### start containerd and log to a file
    echo "Starting containerd in the background"
    containerd 2&> /tmp/containerd.err &
    echo "Waiting for containerd to be responsive"
    # shellcheck disable=SC2034
    for i in $(seq 1 60); do
        if ctr namespace ls > /dev/null; then
            break
        fi
        sleep 1
    done
    ctr namespace ls > /dev/null
    echo "containerd is ready"

    # TODO Once https://github.com/moby/moby/pull/33355 or equivalent
    # is merged, then this can be optimized to preload the image
    # saved during the build phase
}

function cleanup {
    #### if testexit is non-zero dump the containerd logs with a banner
    if [ "${testexit}" -ne 0 ] ; then
        echo "FAIL: dumping containerd logs"
        echo ""
        cat /tmp/containerd.err
        if [ -f /var/log/engine.log ] ; then
            echo ""
            echo "FAIL: dumping engine log"
            echo ""
        else
            echo ""
            echo "FAIL: engine log missing"
            echo ""
        fi
        echo "FAIL: remaining namespaces"
        ctr namespace ls || /bin/tru
        echo "FAIL: remaining containers"
        ctr --namespace docker container ls || /bin/tru
        echo "FAIL: remaining tasks"
        ctr --namespace docker task ls || /bin/tru
        echo "FAIL: remaining snapshots"
        ctr --namespace docker snapshots ls || /bin/tru
        echo "FAIL: remaining images"
        ctr --namespace docker image ls || /bin/tru
    fi
}

function runtests {
    # shellcheck disable=SC2086
    env -i \
        GOPATH="$GOPATH" \
        PATH="$PWD/build/:${PATH}" \
        VERSION=${VERSION} \
        "$(which go)" test -p 1 -parallel 1 -v ./e2eengine/... ${TESTFLAGS-}
}

cmd=${1-}

case "$cmd" in
    setup)
        setup
        exit
        ;;
    cleanup)
        cleanup
        exit
        ;;
    fetch-images)
        fetch_images
        exit
        ;;
    test)
        runtests
        ;;
    run|"")
        testexit=0
        runtests || testexit=$?
        cleanup
        exit $testexit
        ;;
    shell)
        $SHELL
        ;;
    *)
        echo "Unknown command: $cmd"
        echo "Usage: "
        echo " $0 [setup | cleanup | test | run]"
        exit 1
        ;;
esac
@@ -1,18 +0,0 @@
#!/usr/bin/env bash
# Setup, run and teardown engine test suite in containers.
set -eu -o pipefail

./scripts/test/engine/run setup

testexit=0

test_cmd="test"
if [[ -n "${TEST_DEBUG-}" ]]; then
    test_cmd="shell"
fi

./scripts/test/engine/run "$test_cmd" || testexit="$?"

export testexit
./scripts/test/engine/run cleanup
exit "$testexit"
@@ -14,6 +14,12 @@ const (

	// EnterpriseEngineImage is the repo name for the enterprise engine
	EnterpriseEngineImage = "engine-enterprise"

	// RegistryPrefix is the default prefix used to pull engine images
	RegistryPrefix = "docker.io/store/docker"

	// ReleaseNotePrefix is where to point users to for release notes
	ReleaseNotePrefix = "https://docs.docker.com/releasenotes"
)

// ContainerizedClient can be used to manage the lifecycle of
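To show how these defaults are typically combined, a small illustrative snippet follows, mirroring the format strings used in the tests and in getReleaseNotesURL above. The local constant names are placeholders for this sketch, not the exported clitypes identifiers.

package main

import "fmt"

const (
	registryPrefix    = "docker.io/store/docker"
	enterpriseImage   = "engine-enterprise"
	releaseNotePrefix = "https://docs.docker.com/releasenotes"
)

func main() {
	version := "18.09.0" // hypothetical engine version for the example
	image := fmt.Sprintf("%s/%s:%s", registryPrefix, enterpriseImage, version)
	notes := fmt.Sprintf("%s/%s", releaseNotePrefix, version)
	fmt.Println(image) // docker.io/store/docker/engine-enterprise:18.09.0
	fmt.Println(notes) // https://docs.docker.com/releasenotes/18.09.0
}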
@@ -25,11 +31,6 @@ type ContainerizedClient interface {
		out OutStream,
		authConfig *types.AuthConfig,
		healthfn func(context.Context) error) error
	InitEngine(ctx context.Context,
		opts EngineInitOptions,
		out OutStream,
		authConfig *types.AuthConfig,
		healthfn func(context.Context) error) error
	DoUpdate(ctx context.Context,
		opts EngineInitOptions,
		out OutStream,