mirror of https://github.com/docker/cli.git
Add engine commands built on containerd
This new collection of commands supports initializing a local engine using containerd, updating that engine, and activating the EE product.

Signed-off-by: Daniel Hiltgen <daniel.hiltgen@docker.com>
This commit is contained in:
parent 11a312118f
commit fd2f1b3b66

Makefile | 4
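The diff wires a new `docker engine` command group (init, activate, check, update, rm, registered in NewEngineCommand below) into the CLI. As a hedged sketch of how the group is meant to be driven — command and flag names are taken from this diff, while the version number and license file path are placeholder values:

    $ docker engine init --config-file /etc/docker/daemon.json    # start a containerd-based engine
    $ docker engine check --upgrades --pre-releases                # list available engine versions
    $ docker engine activate --license ./docker-ee.lic             # apply an EE license non-interactively
    $ docker engine update --version 18.09.1                       # move to another engine version
    $ docker engine rm                                             # remove the engine, leaving state files on the host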
@@ -12,14 +12,14 @@ clean: ## remove build artifacts
 
 .PHONY: test-unit
 test-unit: ## run unit test
-	./scripts/test/unit $(shell go list ./... | grep -vE '/vendor/|/e2e/')
+	./scripts/test/unit $(shell go list ./... | grep -vE '/vendor/|/e2e/|/e2eengine/')
 
 .PHONY: test
 test: test-unit ## run tests
 
 .PHONY: test-coverage
 test-coverage: ## run test coverage
-	./scripts/test/unit-with-coverage $(shell go list ./... | grep -vE '/vendor/|/e2e/')
+	./scripts/test/unit-with-coverage $(shell go list ./... | grep -vE '/vendor/|/e2e/|/e2eengine/')
 
 .PHONY: lint
 lint: ## run all the lint tools
@@ -19,6 +19,7 @@ import (
 	manifeststore "github.com/docker/cli/cli/manifest/store"
 	registryclient "github.com/docker/cli/cli/registry/client"
 	"github.com/docker/cli/cli/trust"
+	"github.com/docker/cli/internal/containerizedengine"
 	dopts "github.com/docker/cli/opts"
 	"github.com/docker/docker/api"
 	"github.com/docker/docker/api/types"
@@ -54,6 +55,7 @@ type Cli interface {
 	ManifestStore() manifeststore.Store
 	RegistryClient(bool) registryclient.RegistryClient
 	ContentTrustEnabled() bool
+	NewContainerizedEngineClient(sockPath string) (containerizedengine.Client, error)
 }
 
 // DockerCli is an instance the docker command line client.
@@ -229,6 +231,11 @@ func (cli *DockerCli) NotaryClient(imgRefAndAuth trust.ImageRefAndAuth, actions
 	return trust.GetNotaryRepository(cli.In(), cli.Out(), UserAgent(), imgRefAndAuth.RepoInfo(), imgRefAndAuth.AuthConfig(), actions...)
 }
 
+// NewContainerizedEngineClient returns a containerized engine client
+func (cli *DockerCli) NewContainerizedEngineClient(sockPath string) (containerizedengine.Client, error) {
+	return containerizedengine.NewClient(sockPath)
+}
+
 // ServerInfo stores details about the supported features and platform of the
 // server
 type ServerInfo struct {
@@ -8,6 +8,7 @@ import (
 	"github.com/docker/cli/cli/command/checkpoint"
 	"github.com/docker/cli/cli/command/config"
 	"github.com/docker/cli/cli/command/container"
+	"github.com/docker/cli/cli/command/engine"
 	"github.com/docker/cli/cli/command/image"
 	"github.com/docker/cli/cli/command/manifest"
 	"github.com/docker/cli/cli/command/network"
@@ -84,6 +85,9 @@ func AddCommands(cmd *cobra.Command, dockerCli command.Cli) {
 		// volume
 		volume.NewVolumeCommand(dockerCli),
 
+		// engine
+		engine.NewEngineCommand(dockerCli),
+
 		// legacy commands may be hidden
 		hide(system.NewEventsCommand(dockerCli)),
 		hide(system.NewInfoCommand(dockerCli)),
@@ -0,0 +1,181 @@
package engine

import (
    "context"
    "fmt"

    "github.com/docker/cli/cli/command"
    "github.com/docker/cli/cli/command/formatter"
    "github.com/docker/cli/internal/containerizedengine"
    "github.com/docker/cli/internal/licenseutils"
    "github.com/docker/docker/api/types"
    "github.com/docker/licensing/model"
    "github.com/pkg/errors"
    "github.com/spf13/cobra"
)

type activateOptions struct {
    licenseFile    string
    version        string
    registryPrefix string
    format         string
    image          string
    quiet          bool
    displayOnly    bool
    sockPath       string
}

// newActivateCommand creates a new `docker engine activate` command
func newActivateCommand(dockerCli command.Cli) *cobra.Command {
    var options activateOptions

    cmd := &cobra.Command{
        Use:   "activate [OPTIONS]",
        Short: "Activate Enterprise Edition",
        Long: `Activate Enterprise Edition.

With this command you may apply an existing Docker enterprise license, or
interactively download one from Docker. In the interactive exchange, you can
sign up for a new trial, or download an existing license. If you are
currently running a Community Edition engine, the daemon will be updated to
the Enterprise Edition Docker engine with additional capabilities and long
term support.

For more information about different Docker Enterprise license types visit
https://www.docker.com/licenses

For non-interactive scriptable deployments, download your license from
https://hub.docker.com/ then specify the file with the '--license' flag.
`,
        RunE: func(cmd *cobra.Command, args []string) error {
            return runActivate(dockerCli, options)
        },
    }

    flags := cmd.Flags()

    flags.StringVar(&options.licenseFile, "license", "", "License File")
    flags.StringVar(&options.version, "version", "", "Specify engine version (default is to use currently running version)")
    flags.StringVar(&options.registryPrefix, "registry-prefix", "docker.io/docker", "Override the default location where engine images are pulled")
    flags.StringVar(&options.image, "engine-image", containerizedengine.EnterpriseEngineImage, "Specify engine image")
    flags.StringVar(&options.format, "format", "", "Pretty-print licenses using a Go template")
    flags.BoolVar(&options.displayOnly, "display-only", false, "only display the available licenses and exit")
    flags.BoolVar(&options.quiet, "quiet", false, "Only display available licenses by ID")
    flags.StringVar(&options.sockPath, "containerd", "", "override default location of containerd endpoint")

    return cmd
}

func runActivate(cli command.Cli, options activateOptions) error {
    ctx := context.Background()
    client, err := cli.NewContainerizedEngineClient(options.sockPath)
    if err != nil {
        return errors.Wrap(err, "unable to access local containerd")
    }
    defer client.Close()

    authConfig, err := getRegistryAuth(cli, options.registryPrefix)
    if err != nil {
        return err
    }

    var license *model.IssuedLicense

    // Lookup on hub if no license provided via params
    if options.licenseFile == "" {
        if license, err = getLicenses(ctx, authConfig, cli, options); err != nil {
            return err
        }
        if options.displayOnly {
            return nil
        }
    } else {
        if license, err = licenseutils.LoadLocalIssuedLicense(ctx, options.licenseFile); err != nil {
            return err
        }
    }
    if err = licenseutils.ApplyLicense(ctx, cli.Client(), license); err != nil {
        return err
    }

    opts := containerizedengine.EngineInitOptions{
        RegistryPrefix: options.registryPrefix,
        EngineImage:    options.image,
        EngineVersion:  options.version,
    }

    return client.ActivateEngine(ctx, opts, cli.Out(), authConfig,
        func(ctx context.Context) error {
            client := cli.Client()
            _, err := client.Ping(ctx)
            return err
        })
}

func getLicenses(ctx context.Context, authConfig *types.AuthConfig, cli command.Cli, options activateOptions) (*model.IssuedLicense, error) {
    user, err := licenseutils.Login(ctx, authConfig)
    if err != nil {
        return nil, err
    }
    fmt.Fprintf(cli.Out(), "Looking for existing licenses for %s...\n", user.User.Username)
    subs, err := user.GetAvailableLicenses(ctx)
    if err != nil {
        return nil, err
    }
    if len(subs) == 0 {
        return doTrialFlow(ctx, cli, user)
    }

    format := options.format
    if len(format) == 0 {
        format = formatter.TableFormatKey
    }

    updatesCtx := formatter.Context{
        Output: cli.Out(),
        Format: formatter.NewSubscriptionsFormat(format, options.quiet),
        Trunc:  false,
    }
    if err := formatter.SubscriptionsWrite(updatesCtx, subs); err != nil {
        return nil, err
    }
    if options.displayOnly {
        return nil, nil
    }
    fmt.Fprintf(cli.Out(), "Please pick a license by number: ")
    var num int
    if _, err := fmt.Fscan(cli.In(), &num); err != nil {
        return nil, errors.Wrap(err, "failed to read user input")
    }
    if num < 0 || num >= len(subs) {
        return nil, fmt.Errorf("invalid choice")
    }
    return user.GetIssuedLicense(ctx, subs[num].ID)
}

func doTrialFlow(ctx context.Context, cli command.Cli, user licenseutils.HubUser) (*model.IssuedLicense, error) {
    if !command.PromptForConfirmation(cli.In(), cli.Out(),
        "No existing licenses found, would you like to set up a new Enterprise Basic Trial license?") {
        return nil, fmt.Errorf("you must have an existing enterprise license or generate a new trial to use the Enterprise Docker Engine")
    }
    targetID := user.User.ID
    // If the user is a member of any organizations, allow trials generated against them
    if len(user.Orgs) > 0 {
        fmt.Fprintf(cli.Out(), "%d\t%s\n", 0, user.User.Username)
        for i, org := range user.Orgs {
            fmt.Fprintf(cli.Out(), "%d\t%s\n", i+1, org.Orgname)
        }
        fmt.Fprintf(cli.Out(), "Please choose an account to generate the trial in:")
        var num int
        if _, err := fmt.Fscan(cli.In(), &num); err != nil {
            return nil, errors.Wrap(err, "failed to read user input")
        }
        if num < 0 || num > len(user.Orgs) {
            return nil, fmt.Errorf("invalid choice")
        }
        if num > 0 {
            targetID = user.Orgs[num-1].ID
        }
    }
    return user.GenerateTrialLicense(ctx, targetID)
}
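The long help above describes both an interactive license exchange and a scriptable path. A minimal sketch of the scriptable path, assuming a license file already downloaded from https://hub.docker.com/ (the file name is a placeholder):

    $ docker engine activate --license ./docker-ee.lic
    $ docker engine activate --display-only     # only list the licenses visible to the logged-in user, change nothing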
@@ -0,0 +1,37 @@
package engine

import (
    "fmt"
    "testing"

    "github.com/docker/cli/internal/containerizedengine"
    "gotest.tools/assert"
)

func TestActivateNoContainerd(t *testing.T) {
    testCli.SetContainerizedEngineClient(
        func(string) (containerizedengine.Client, error) {
            return nil, fmt.Errorf("some error")
        },
    )
    cmd := newActivateCommand(testCli)
    cmd.Flags().Set("license", "invalidpath")
    cmd.SilenceUsage = true
    cmd.SilenceErrors = true
    err := cmd.Execute()
    assert.ErrorContains(t, err, "unable to access local containerd")
}

func TestActivateBadLicense(t *testing.T) {
    testCli.SetContainerizedEngineClient(
        func(string) (containerizedengine.Client, error) {
            return &fakeContainerizedEngineClient{}, nil
        },
    )
    cmd := newActivateCommand(testCli)
    cmd.SilenceUsage = true
    cmd.SilenceErrors = true
    cmd.Flags().Set("license", "invalidpath")
    err := cmd.Execute()
    assert.Error(t, err, "open invalidpath: no such file or directory")
}
@@ -0,0 +1,33 @@
package engine

import (
    "context"

    "github.com/docker/cli/cli/command"
    "github.com/docker/cli/cli/trust"
    "github.com/docker/distribution/reference"
    "github.com/docker/docker/api/types"
    registrytypes "github.com/docker/docker/api/types/registry"
    "github.com/pkg/errors"
)

func getRegistryAuth(cli command.Cli, registryPrefix string) (*types.AuthConfig, error) {
    if registryPrefix == "" {
        registryPrefix = "docker.io/docker"
    }
    distributionRef, err := reference.ParseNormalizedNamed(registryPrefix)
    if err != nil {
        return nil, errors.Wrapf(err, "failed to parse image name: %s", registryPrefix)
    }
    imgRefAndAuth, err := trust.GetImageReferencesAndAuth(context.Background(), nil, authResolver(cli), distributionRef.String())
    if err != nil {
        return nil, errors.Wrap(err, "failed to get imgRefAndAuth")
    }
    return imgRefAndAuth.AuthConfig(), nil
}

func authResolver(cli command.Cli) func(ctx context.Context, index *registrytypes.IndexInfo) types.AuthConfig {
    return func(ctx context.Context, index *registrytypes.IndexInfo) types.AuthConfig {
        return command.ResolveAuthConfig(ctx, cli, index)
    }
}
@@ -0,0 +1,133 @@
package engine

import (
    "context"
    "fmt"

    "github.com/docker/cli/cli"
    "github.com/docker/cli/cli/command"
    "github.com/docker/cli/cli/command/formatter"
    "github.com/docker/cli/internal/containerizedengine"
    "github.com/pkg/errors"
    "github.com/spf13/cobra"
)

const (
    releaseNotePrefix = "https://docs.docker.com/releasenotes"
)

type checkOptions struct {
    registryPrefix string
    preReleases    bool
    downgrades     bool
    upgrades       bool
    format         string
    quiet          bool
    sockPath       string
}

func newCheckForUpdatesCommand(dockerCli command.Cli) *cobra.Command {
    var options checkOptions

    cmd := &cobra.Command{
        Use:   "check [OPTIONS]",
        Short: "Check for available engine updates",
        Args:  cli.NoArgs,
        RunE: func(cmd *cobra.Command, args []string) error {
            return runCheck(dockerCli, options)
        },
    }
    flags := cmd.Flags()
    flags.StringVar(&options.registryPrefix, "registry-prefix", "", "Override the existing location where engine images are pulled")
    flags.BoolVar(&options.downgrades, "downgrades", false, "Report downgrades (default omits older versions)")
    flags.BoolVar(&options.preReleases, "pre-releases", false, "Include pre-release versions")
    flags.BoolVar(&options.upgrades, "upgrades", true, "Report available upgrades")
    flags.StringVar(&options.format, "format", "", "Pretty-print updates using a Go template")
    flags.BoolVarP(&options.quiet, "quiet", "q", false, "Only display available versions")
    flags.StringVar(&options.sockPath, "containerd", "", "override default location of containerd endpoint")

    return cmd
}

func runCheck(dockerCli command.Cli, options checkOptions) error {
    ctx := context.Background()
    client, err := dockerCli.NewContainerizedEngineClient(options.sockPath)
    if err != nil {
        return errors.Wrap(err, "unable to access local containerd")
    }
    defer client.Close()
    currentOpts, err := client.GetCurrentEngineVersion(ctx)
    if err != nil {
        return err
    }

    // override with user provided prefix if specified
    if options.registryPrefix != "" {
        currentOpts.RegistryPrefix = options.registryPrefix
    }
    imageName := currentOpts.RegistryPrefix + "/" + currentOpts.EngineImage
    currentVersion := currentOpts.EngineVersion
    versions, err := client.GetEngineVersions(ctx, dockerCli.RegistryClient(false), currentVersion, imageName)
    if err != nil {
        return err
    }

    availUpdates := []containerizedengine.Update{
        {Type: "current", Version: currentVersion},
    }
    if len(versions.Patches) > 0 {
        availUpdates = append(availUpdates,
            processVersions(
                currentVersion,
                "patch",
                options.preReleases,
                versions.Patches)...)
    }
    if options.upgrades {
        availUpdates = append(availUpdates,
            processVersions(
                currentVersion,
                "upgrade",
                options.preReleases,
                versions.Upgrades)...)
    }
    if options.downgrades {
        availUpdates = append(availUpdates,
            processVersions(
                currentVersion,
                "downgrade",
                options.preReleases,
                versions.Downgrades)...)
    }

    format := options.format
    if len(format) == 0 {
        format = formatter.TableFormatKey
    }

    updatesCtx := formatter.Context{
        Output: dockerCli.Out(),
        Format: formatter.NewUpdatesFormat(format, options.quiet),
        Trunc:  false,
    }
    return formatter.UpdatesWrite(updatesCtx, availUpdates)
}

func processVersions(currentVersion, verType string,
    includePrerelease bool,
    versions []containerizedengine.DockerVersion) []containerizedengine.Update {
    availUpdates := []containerizedengine.Update{}
    for _, ver := range versions {
        if !includePrerelease && ver.Prerelease() != "" {
            continue
        }
        if ver.Tag != currentVersion {
            availUpdates = append(availUpdates, containerizedengine.Update{
                Type:    verType,
                Version: ver.Tag,
                Notes:   fmt.Sprintf("%s/%s", releaseNotePrefix, ver.Tag),
            })
        }
    }
    return availUpdates
}
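A sketch of how runCheck is meant to be exercised from the command line; the defaults follow the flag definitions above (upgrades and patches are reported, downgrades and pre-releases are omitted), and the output uses the TYPE/VERSION/NOTES table produced by the updates formatter:

    $ docker engine check
    $ docker engine check --downgrades --pre-releases
    $ docker engine check -q        # only print the version column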
@@ -0,0 +1,143 @@
package engine

import (
    "context"
    "fmt"
    "testing"

    registryclient "github.com/docker/cli/cli/registry/client"
    "github.com/docker/cli/internal/containerizedengine"
    "github.com/docker/cli/internal/test"
    "github.com/docker/docker/client"
    ver "github.com/hashicorp/go-version"
    "gotest.tools/assert"
    "gotest.tools/golden"
)

var (
    testCli = test.NewFakeCli(&client.Client{})
)

func TestCheckForUpdatesNoContainerd(t *testing.T) {
    testCli.SetContainerizedEngineClient(
        func(string) (containerizedengine.Client, error) {
            return nil, fmt.Errorf("some error")
        },
    )
    cmd := newCheckForUpdatesCommand(testCli)
    cmd.SilenceUsage = true
    cmd.SilenceErrors = true
    err := cmd.Execute()
    assert.ErrorContains(t, err, "unable to access local containerd")
}

func TestCheckForUpdatesNoCurrentVersion(t *testing.T) {
    retErr := fmt.Errorf("some failure")
    getCurrentEngineVersionFunc := func(ctx context.Context) (containerizedengine.EngineInitOptions, error) {
        return containerizedengine.EngineInitOptions{}, retErr
    }
    testCli.SetContainerizedEngineClient(
        func(string) (containerizedengine.Client, error) {
            return &fakeContainerizedEngineClient{
                getCurrentEngineVersionFunc: getCurrentEngineVersionFunc,
            }, nil
        },
    )
    cmd := newCheckForUpdatesCommand(testCli)
    cmd.SilenceUsage = true
    cmd.SilenceErrors = true
    err := cmd.Execute()
    assert.Assert(t, err == retErr)
}

func TestCheckForUpdatesGetEngineVersionsFail(t *testing.T) {
    retErr := fmt.Errorf("some failure")
    getEngineVersionsFunc := func(ctx context.Context,
        registryClient registryclient.RegistryClient,
        currentVersion, imageName string) (containerizedengine.AvailableVersions, error) {
        return containerizedengine.AvailableVersions{}, retErr
    }
    testCli.SetContainerizedEngineClient(
        func(string) (containerizedengine.Client, error) {
            return &fakeContainerizedEngineClient{
                getEngineVersionsFunc: getEngineVersionsFunc,
            }, nil
        },
    )
    cmd := newCheckForUpdatesCommand(testCli)
    cmd.SilenceUsage = true
    cmd.SilenceErrors = true
    err := cmd.Execute()
    assert.Assert(t, err == retErr)
}

func TestCheckForUpdatesGetEngineVersionsHappy(t *testing.T) {
    getCurrentEngineVersionFunc := func(ctx context.Context) (containerizedengine.EngineInitOptions, error) {
        return containerizedengine.EngineInitOptions{
            EngineImage:   "current engine",
            EngineVersion: "1.1.0",
        }, nil
    }
    getEngineVersionsFunc := func(ctx context.Context,
        registryClient registryclient.RegistryClient,
        currentVersion, imageName string) (containerizedengine.AvailableVersions, error) {
        return containerizedengine.AvailableVersions{
            Downgrades: parseVersions(t, "1.0.1", "1.0.2", "1.0.3-beta1"),
            Patches:    parseVersions(t, "1.1.1", "1.1.2", "1.1.3-beta1"),
            Upgrades:   parseVersions(t, "1.2.0", "2.0.0", "2.1.0-beta1"),
        }, nil
    }
    testCli.SetContainerizedEngineClient(
        func(string) (containerizedengine.Client, error) {
            return &fakeContainerizedEngineClient{
                getEngineVersionsFunc:       getEngineVersionsFunc,
                getCurrentEngineVersionFunc: getCurrentEngineVersionFunc,
            }, nil
        },
    )
    cmd := newCheckForUpdatesCommand(testCli)
    cmd.Flags().Set("pre-releases", "true")
    cmd.Flags().Set("downgrades", "true")
    err := cmd.Execute()
    assert.NilError(t, err)
    golden.Assert(t, testCli.OutBuffer().String(), "check-all.golden")

    testCli.OutBuffer().Reset()
    cmd.Flags().Set("pre-releases", "false")
    cmd.Flags().Set("downgrades", "true")
    err = cmd.Execute()
    assert.NilError(t, err)
    fmt.Println(testCli.OutBuffer().String())
    golden.Assert(t, testCli.OutBuffer().String(), "check-no-prerelease.golden")

    testCli.OutBuffer().Reset()
    cmd.Flags().Set("pre-releases", "false")
    cmd.Flags().Set("downgrades", "false")
    err = cmd.Execute()
    assert.NilError(t, err)
    fmt.Println(testCli.OutBuffer().String())
    golden.Assert(t, testCli.OutBuffer().String(), "check-no-downgrades.golden")

    testCli.OutBuffer().Reset()
    cmd.Flags().Set("pre-releases", "false")
    cmd.Flags().Set("downgrades", "false")
    cmd.Flags().Set("upgrades", "false")
    err = cmd.Execute()
    assert.NilError(t, err)
    fmt.Println(testCli.OutBuffer().String())
    golden.Assert(t, testCli.OutBuffer().String(), "check-patches-only.golden")
}

func makeVersion(t *testing.T, tag string) containerizedengine.DockerVersion {
    v, err := ver.NewVersion(tag)
    assert.NilError(t, err)
    return containerizedengine.DockerVersion{Version: *v, Tag: tag}
}

func parseVersions(t *testing.T, tags ...string) []containerizedengine.DockerVersion {
    ret := make([]containerizedengine.DockerVersion, len(tags))
    for i, tag := range tags {
        ret[i] = makeVersion(t, tag)
    }
    return ret
}
@@ -0,0 +1,105 @@
package engine

import (
    "context"

    "github.com/containerd/containerd"
    registryclient "github.com/docker/cli/cli/registry/client"
    "github.com/docker/cli/internal/containerizedengine"
    "github.com/docker/docker/api/types"
)

type (
    fakeContainerizedEngineClient struct {
        closeFunc          func() error
        activateEngineFunc func(ctx context.Context,
            opts containerizedengine.EngineInitOptions,
            out containerizedengine.OutStream,
            authConfig *types.AuthConfig,
            healthfn func(context.Context) error) error
        initEngineFunc func(ctx context.Context,
            opts containerizedengine.EngineInitOptions,
            out containerizedengine.OutStream,
            authConfig *types.AuthConfig,
            healthfn func(context.Context) error) error
        doUpdateFunc func(ctx context.Context,
            opts containerizedengine.EngineInitOptions,
            out containerizedengine.OutStream,
            authConfig *types.AuthConfig,
            healthfn func(context.Context) error) error
        getEngineVersionsFunc func(ctx context.Context,
            registryClient registryclient.RegistryClient,
            currentVersion,
            imageName string) (containerizedengine.AvailableVersions, error)

        getEngineFunc               func(ctx context.Context) (containerd.Container, error)
        removeEngineFunc            func(ctx context.Context, engine containerd.Container) error
        getCurrentEngineVersionFunc func(ctx context.Context) (containerizedengine.EngineInitOptions, error)
    }
)

func (w *fakeContainerizedEngineClient) Close() error {
    if w.closeFunc != nil {
        return w.closeFunc()
    }
    return nil
}

func (w *fakeContainerizedEngineClient) ActivateEngine(ctx context.Context,
    opts containerizedengine.EngineInitOptions,
    out containerizedengine.OutStream,
    authConfig *types.AuthConfig,
    healthfn func(context.Context) error) error {
    if w.activateEngineFunc != nil {
        return w.activateEngineFunc(ctx, opts, out, authConfig, healthfn)
    }
    return nil
}
func (w *fakeContainerizedEngineClient) InitEngine(ctx context.Context,
    opts containerizedengine.EngineInitOptions,
    out containerizedengine.OutStream,
    authConfig *types.AuthConfig,
    healthfn func(context.Context) error) error {
    if w.initEngineFunc != nil {
        return w.initEngineFunc(ctx, opts, out, authConfig, healthfn)
    }
    return nil
}
func (w *fakeContainerizedEngineClient) DoUpdate(ctx context.Context,
    opts containerizedengine.EngineInitOptions,
    out containerizedengine.OutStream,
    authConfig *types.AuthConfig,
    healthfn func(context.Context) error) error {
    if w.doUpdateFunc != nil {
        return w.doUpdateFunc(ctx, opts, out, authConfig, healthfn)
    }
    return nil
}
func (w *fakeContainerizedEngineClient) GetEngineVersions(ctx context.Context,
    registryClient registryclient.RegistryClient,
    currentVersion, imageName string) (containerizedengine.AvailableVersions, error) {

    if w.getEngineVersionsFunc != nil {
        return w.getEngineVersionsFunc(ctx, registryClient, currentVersion, imageName)
    }
    return containerizedengine.AvailableVersions{}, nil
}

func (w *fakeContainerizedEngineClient) GetEngine(ctx context.Context) (containerd.Container, error) {
    if w.getEngineFunc != nil {
        return w.getEngineFunc(ctx)
    }
    return nil, nil
}
func (w *fakeContainerizedEngineClient) RemoveEngine(ctx context.Context, engine containerd.Container) error {
    if w.removeEngineFunc != nil {
        return w.removeEngineFunc(ctx, engine)
    }
    return nil
}
func (w *fakeContainerizedEngineClient) GetCurrentEngineVersion(ctx context.Context) (containerizedengine.EngineInitOptions, error) {
    if w.getCurrentEngineVersionFunc != nil {
        return w.getCurrentEngineVersionFunc(ctx)
    }
    return containerizedengine.EngineInitOptions{}, nil
}
@@ -0,0 +1,25 @@
package engine

import (
    "github.com/docker/cli/cli"
    "github.com/docker/cli/cli/command"
    "github.com/spf13/cobra"
)

// NewEngineCommand returns a cobra command for `engine` subcommands
func NewEngineCommand(dockerCli command.Cli) *cobra.Command {
    cmd := &cobra.Command{
        Use:   "engine COMMAND",
        Short: "Manage the docker engine",
        Args:  cli.NoArgs,
        RunE:  command.ShowHelp(dockerCli.Err()),
    }
    cmd.AddCommand(
        newInitCommand(dockerCli),
        newActivateCommand(dockerCli),
        newCheckForUpdatesCommand(dockerCli),
        newUpdateCommand(dockerCli),
        newRmCommand(dockerCli),
    )
    return cmd
}
@@ -0,0 +1,14 @@
package engine

import (
    "testing"

    "gotest.tools/assert"
)

func TestNewEngineCommand(t *testing.T) {
    cmd := NewEngineCommand(testCli)

    subcommands := cmd.Commands()
    assert.Assert(t, len(subcommands) == 5)
}
@@ -0,0 +1,62 @@
package engine

import (
    "context"

    "github.com/docker/cli/cli"
    "github.com/docker/cli/cli/command"
    "github.com/docker/cli/internal/containerizedengine"
    "github.com/pkg/errors"
    "github.com/spf13/cobra"
)

type extendedEngineInitOptions struct {
    containerizedengine.EngineInitOptions
    sockPath string
}

func newInitCommand(dockerCli command.Cli) *cobra.Command {
    var options extendedEngineInitOptions

    cmd := &cobra.Command{
        Use:   "init [OPTIONS]",
        Short: "Initialize a local engine",
        Long: `This command will initialize a local engine running on containerd.

Configuration of the engine is managed through the daemon.json configuration
file on the host and may be pre-created before running the 'init' command.
`,
        Args: cli.NoArgs,
        RunE: func(cmd *cobra.Command, args []string) error {
            return runInit(dockerCli, options)
        },
        Annotations: map[string]string{"experimentalCLI": ""},
    }
    flags := cmd.Flags()
    flags.StringVar(&options.EngineVersion, "version", cli.Version, "Specify engine version")
    flags.StringVar(&options.EngineImage, "engine-image", containerizedengine.CommunityEngineImage, "Specify engine image")
    flags.StringVar(&options.RegistryPrefix, "registry-prefix", "docker.io/docker", "Override the default location where engine images are pulled")
    flags.StringVar(&options.ConfigFile, "config-file", "/etc/docker/daemon.json", "Specify the location of the daemon configuration file on the host")
    flags.StringVar(&options.sockPath, "containerd", "", "override default location of containerd endpoint")

    return cmd
}

func runInit(dockerCli command.Cli, options extendedEngineInitOptions) error {
    ctx := context.Background()
    client, err := dockerCli.NewContainerizedEngineClient(options.sockPath)
    if err != nil {
        return errors.Wrap(err, "unable to access local containerd")
    }
    defer client.Close()
    authConfig, err := getRegistryAuth(dockerCli, options.RegistryPrefix)
    if err != nil {
        return err
    }
    return client.InitEngine(ctx, options.EngineInitOptions, dockerCli.Out(), authConfig,
        func(ctx context.Context) error {
            client := dockerCli.Client()
            _, err := client.Ping(ctx)
            return err
        })
}
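Because configuration is read from daemon.json on the host, a typical init run pre-creates that file and points the command at it. A sketch using the flag defaults defined above (the engine version shown is a placeholder; --version defaults to the CLI's own version):

    $ docker engine init --config-file /etc/docker/daemon.json --registry-prefix docker.io/docker --version 18.09.0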
@@ -0,0 +1,33 @@
package engine

import (
    "fmt"
    "testing"

    "github.com/docker/cli/internal/containerizedengine"
    "gotest.tools/assert"
)

func TestInitNoContainerd(t *testing.T) {
    testCli.SetContainerizedEngineClient(
        func(string) (containerizedengine.Client, error) {
            return nil, fmt.Errorf("some error")
        },
    )
    cmd := newInitCommand(testCli)
    cmd.SilenceUsage = true
    cmd.SilenceErrors = true
    err := cmd.Execute()
    assert.ErrorContains(t, err, "unable to access local containerd")
}

func TestInitHappy(t *testing.T) {
    testCli.SetContainerizedEngineClient(
        func(string) (containerizedengine.Client, error) {
            return &fakeContainerizedEngineClient{}, nil
        },
    )
    cmd := newInitCommand(testCli)
    err := cmd.Execute()
    assert.NilError(t, err)
}
@@ -0,0 +1,54 @@
package engine

import (
    "context"

    "github.com/docker/cli/cli"
    "github.com/docker/cli/cli/command"
    "github.com/pkg/errors"
    "github.com/spf13/cobra"
)

// TODO - consider adding a "purge" flag that also removes
// configuration files and the docker root dir.

type rmOptions struct {
    sockPath string
}

func newRmCommand(dockerCli command.Cli) *cobra.Command {
    var options rmOptions
    cmd := &cobra.Command{
        Use:   "rm [OPTIONS]",
        Short: "Remove the local engine",
        Long: `This command will remove the local engine running on containerd.

No state files will be removed from the host filesystem.
`,
        Args: cli.NoArgs,
        RunE: func(cmd *cobra.Command, args []string) error {
            return runRm(dockerCli, options)
        },
        Annotations: map[string]string{"experimentalCLI": ""},
    }
    flags := cmd.Flags()
    flags.StringVar(&options.sockPath, "containerd", "", "override default location of containerd endpoint")

    return cmd
}

func runRm(dockerCli command.Cli, options rmOptions) error {
    ctx := context.Background()
    client, err := dockerCli.NewContainerizedEngineClient(options.sockPath)
    if err != nil {
        return errors.Wrap(err, "unable to access local containerd")
    }
    defer client.Close()

    engine, err := client.GetEngine(ctx)
    if err != nil {
        return err
    }

    return client.RemoveEngine(ctx, engine)
}
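A sketch of the removal flow; per the help text above, state files on the host are left in place. The socket path in the second line is a placeholder for a non-default containerd endpoint:

    $ docker engine rm
    $ docker engine rm --containerd /run/containerd/containerd.sock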
@@ -0,0 +1,33 @@
package engine

import (
    "fmt"
    "testing"

    "github.com/docker/cli/internal/containerizedengine"
    "gotest.tools/assert"
)

func TestRmNoContainerd(t *testing.T) {
    testCli.SetContainerizedEngineClient(
        func(string) (containerizedengine.Client, error) {
            return nil, fmt.Errorf("some error")
        },
    )
    cmd := newRmCommand(testCli)
    cmd.SilenceUsage = true
    cmd.SilenceErrors = true
    err := cmd.Execute()
    assert.ErrorContains(t, err, "unable to access local containerd")
}

func TestRmHappy(t *testing.T) {
    testCli.SetContainerizedEngineClient(
        func(string) (containerizedengine.Client, error) {
            return &fakeContainerizedEngineClient{}, nil
        },
    )
    cmd := newRmCommand(testCli)
    err := cmd.Execute()
    assert.NilError(t, err)
}
@@ -0,0 +1,11 @@
TYPE                VERSION             NOTES
current             1.1.0
patch               1.1.1               https://docs.docker.com/releasenotes/1.1.1
patch               1.1.2               https://docs.docker.com/releasenotes/1.1.2
patch               1.1.3-beta1         https://docs.docker.com/releasenotes/1.1.3-beta1
upgrade             1.2.0               https://docs.docker.com/releasenotes/1.2.0
upgrade             2.0.0               https://docs.docker.com/releasenotes/2.0.0
upgrade             2.1.0-beta1         https://docs.docker.com/releasenotes/2.1.0-beta1
downgrade           1.0.1               https://docs.docker.com/releasenotes/1.0.1
downgrade           1.0.2               https://docs.docker.com/releasenotes/1.0.2
downgrade           1.0.3-beta1         https://docs.docker.com/releasenotes/1.0.3-beta1
@@ -0,0 +1,6 @@
TYPE                VERSION             NOTES
current             1.1.0
patch               1.1.1               https://docs.docker.com/releasenotes/1.1.1
patch               1.1.2               https://docs.docker.com/releasenotes/1.1.2
upgrade             1.2.0               https://docs.docker.com/releasenotes/1.2.0
upgrade             2.0.0               https://docs.docker.com/releasenotes/2.0.0
@@ -0,0 +1,8 @@
TYPE                VERSION             NOTES
current             1.1.0
patch               1.1.1               https://docs.docker.com/releasenotes/1.1.1
patch               1.1.2               https://docs.docker.com/releasenotes/1.1.2
upgrade             1.2.0               https://docs.docker.com/releasenotes/1.2.0
upgrade             2.0.0               https://docs.docker.com/releasenotes/2.0.0
downgrade           1.0.1               https://docs.docker.com/releasenotes/1.0.1
downgrade           1.0.2               https://docs.docker.com/releasenotes/1.0.2
@@ -0,0 +1,4 @@
TYPE                VERSION             NOTES
current             1.1.0
patch               1.1.1               https://docs.docker.com/releasenotes/1.1.1
patch               1.1.2               https://docs.docker.com/releasenotes/1.1.2
@@ -0,0 +1,68 @@
package engine

import (
    "context"
    "fmt"

    "github.com/docker/cli/cli"
    "github.com/docker/cli/cli/command"
    "github.com/pkg/errors"
    "github.com/spf13/cobra"
)

func newUpdateCommand(dockerCli command.Cli) *cobra.Command {
    var options extendedEngineInitOptions

    cmd := &cobra.Command{
        Use:   "update [OPTIONS]",
        Short: "Update a local engine",
        Args:  cli.NoArgs,
        RunE: func(cmd *cobra.Command, args []string) error {
            return runUpdate(dockerCli, options)
        },
    }
    flags := cmd.Flags()

    flags.StringVar(&options.EngineVersion, "version", "", "Specify engine version")
    flags.StringVar(&options.EngineImage, "engine-image", "", "Specify engine image")
    flags.StringVar(&options.RegistryPrefix, "registry-prefix", "", "Override the current location where engine images are pulled")
    flags.StringVar(&options.sockPath, "containerd", "", "override default location of containerd endpoint")

    return cmd
}

func runUpdate(dockerCli command.Cli, options extendedEngineInitOptions) error {
    ctx := context.Background()
    client, err := dockerCli.NewContainerizedEngineClient(options.sockPath)
    if err != nil {
        return errors.Wrap(err, "unable to access local containerd")
    }
    defer client.Close()
    if options.EngineImage == "" || options.RegistryPrefix == "" {
        currentOpts, err := client.GetCurrentEngineVersion(ctx)
        if err != nil {
            return err
        }
        if options.EngineImage == "" {
            options.EngineImage = currentOpts.EngineImage
        }
        if options.RegistryPrefix == "" {
            options.RegistryPrefix = currentOpts.RegistryPrefix
        }
    }
    authConfig, err := getRegistryAuth(dockerCli, options.RegistryPrefix)
    if err != nil {
        return err
    }

    if err := client.DoUpdate(ctx, options.EngineInitOptions, dockerCli.Out(), authConfig,
        func(ctx context.Context) error {
            client := dockerCli.Client()
            _, err := client.Ping(ctx)
            return err
        }); err != nil {
        return err
    }
    fmt.Fprintln(dockerCli.Out(), "Success! The docker engine is now running.")
    return nil
}
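A sketch of an update run; as implemented in runUpdate above, omitting --engine-image or --registry-prefix reuses the values reported for the currently running engine (the version number is a placeholder):

    $ docker engine update --version 18.09.1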
@@ -0,0 +1,35 @@
package engine

import (
    "fmt"
    "testing"

    "github.com/docker/cli/internal/containerizedengine"
    "gotest.tools/assert"
)

func TestUpdateNoContainerd(t *testing.T) {
    testCli.SetContainerizedEngineClient(
        func(string) (containerizedengine.Client, error) {
            return nil, fmt.Errorf("some error")
        },
    )
    cmd := newUpdateCommand(testCli)
    cmd.SilenceUsage = true
    cmd.SilenceErrors = true
    err := cmd.Execute()
    assert.ErrorContains(t, err, "unable to access local containerd")
}

func TestUpdateHappy(t *testing.T) {
    testCli.SetContainerizedEngineClient(
        func(string) (containerizedengine.Client, error) {
            return &fakeContainerizedEngineClient{}, nil
        },
    )
    cmd := newUpdateCommand(testCli)
    cmd.Flags().Set("registry-prefix", "docker.io/docker")
    cmd.Flags().Set("version", "someversion")
    err := cmd.Execute()
    assert.NilError(t, err)
}
@@ -0,0 +1,154 @@
package formatter

import (
    "time"

    "github.com/docker/cli/internal/licenseutils"
    "github.com/docker/licensing/model"
)

const (
    defaultSubscriptionsTableFormat = "table {{.Num}}\t{{.Owner}}\t{{.ProductID}}\t{{.Expires}}\t{{.ComponentsString}}"
    defaultSubscriptionsQuietFormat = "{{.Num}}:{{.Summary}}"

    numHeader               = "NUM"
    ownerHeader             = "OWNER"
    licenseNameHeader       = "NAME"
    idHeader                = "ID"
    dockerIDHeader          = "DOCKER ID"
    productIDHeader         = "PRODUCT ID"
    productRatePlanHeader   = "PRODUCT RATE PLAN"
    productRatePlanIDHeader = "PRODUCT RATE PLAN ID"
    startHeader             = "START"
    expiresHeader           = "EXPIRES"
    stateHeader             = "STATE"
    eusaHeader              = "EUSA"
    pricingComponentsHeader = "PRICING COMPONENTS"
)

// NewSubscriptionsFormat returns a Format for rendering using a license Context
func NewSubscriptionsFormat(source string, quiet bool) Format {
    switch source {
    case TableFormatKey:
        if quiet {
            return defaultSubscriptionsQuietFormat
        }
        return defaultSubscriptionsTableFormat
    case RawFormatKey:
        if quiet {
            return `license: {{.ID}}`
        }
        return `license: {{.ID}}\nname: {{.Name}}\nowner: {{.Owner}}\ncomponents: {{.ComponentsString}}\n`
    }
    return Format(source)
}

// SubscriptionsWrite writes the context
func SubscriptionsWrite(ctx Context, subs []licenseutils.LicenseDisplay) error {
    render := func(format func(subContext subContext) error) error {
        for _, sub := range subs {
            licenseCtx := &licenseContext{trunc: ctx.Trunc, l: sub}
            if err := format(licenseCtx); err != nil {
                return err
            }
        }
        return nil
    }
    licenseCtx := licenseContext{}
    licenseCtx.header = map[string]string{
        "Num":               numHeader,
        "Owner":             ownerHeader,
        "Name":              licenseNameHeader,
        "ID":                idHeader,
        "DockerID":          dockerIDHeader,
        "ProductID":         productIDHeader,
        "ProductRatePlan":   productRatePlanHeader,
        "ProductRatePlanID": productRatePlanIDHeader,
        "Start":             startHeader,
        "Expires":           expiresHeader,
        "State":             stateHeader,
        "Eusa":              eusaHeader,
        "ComponentsString":  pricingComponentsHeader,
    }
    return ctx.Write(&licenseCtx, render)
}

type licenseContext struct {
    HeaderContext
    trunc bool
    l     licenseutils.LicenseDisplay
}

func (c *licenseContext) MarshalJSON() ([]byte, error) {
    return marshalJSON(c)
}

func (c *licenseContext) Num() int {
    return c.l.Num
}

func (c *licenseContext) Owner() string {
    return c.l.Owner
}

func (c *licenseContext) ComponentsString() string {
    return c.l.ComponentsString
}

func (c *licenseContext) Summary() string {
    return c.l.String()
}

func (c *licenseContext) Name() string {
    return c.l.Name
}

func (c *licenseContext) ID() string {
    return c.l.ID
}

func (c *licenseContext) DockerID() string {
    return c.l.DockerID
}

func (c *licenseContext) ProductID() string {
    return c.l.ProductID
}

func (c *licenseContext) ProductRatePlan() string {
    return c.l.ProductRatePlan
}

func (c *licenseContext) ProductRatePlanID() string {
    return c.l.ProductRatePlanID
}

func (c *licenseContext) Start() *time.Time {
    return c.l.Start
}

func (c *licenseContext) Expires() *time.Time {
    return c.l.Expires
}

func (c *licenseContext) State() string {
    return c.l.State
}

func (c *licenseContext) Eusa() *model.EusaState {
    return c.l.Eusa
}

func (c *licenseContext) PricingComponents() []model.SubscriptionPricingComponent {
    // Dereference the pricing component pointers in the pricing components
    // so it can be rendered properly with the template formatter

    var ret []model.SubscriptionPricingComponent
    for _, spc := range c.l.PricingComponents {
        if spc == nil {
            continue
        }
        ret = append(ret, *spc)
    }
    return ret
}
@@ -0,0 +1,256 @@
package formatter

import (
    "bytes"
    "encoding/json"
    "strings"
    "testing"
    "time"

    "github.com/docker/cli/internal/licenseutils"
    "github.com/docker/licensing/model"
    "gotest.tools/assert"
    is "gotest.tools/assert/cmp"
)

func TestSubscriptionContextWrite(t *testing.T) {
    cases := []struct {
        context  Context
        expected string
    }{
        // Errors
        {
            Context{Format: "{{InvalidFunction}}"},
            `Template parsing error: template: :1: function "InvalidFunction" not defined
`,
        },
        {
            Context{Format: "{{nil}}"},
            `Template parsing error: template: :1:2: executing "" at <nil>: nil is not a command
`,
        },
        // Table format
        {
            Context{Format: NewSubscriptionsFormat("table", false)},
            `NUM                 OWNER               PRODUCT ID          EXPIRES                         PRICING COMPONENTS
1                   owner1              productid1          2020-01-01 10:00:00 +0000 UTC   compstring
2                   owner2              productid2          2020-01-01 10:00:00 +0000 UTC   compstring
`,
        },
        {
            Context{Format: NewSubscriptionsFormat("table", true)},
            `1:License Name: name1	Quantity: 10 nodes	Expiration date: 2020-01-01
2:License Name: name2	Quantity: 20 nodes	Expiration date: 2020-01-01
`,
        },
        {
            Context{Format: NewSubscriptionsFormat("table {{.Owner}}", false)},
            `OWNER
owner1
owner2
`,
        },
        {
            Context{Format: NewSubscriptionsFormat("table {{.Owner}}", true)},
            `OWNER
owner1
owner2
`,
        },
        // Raw Format
        {
            Context{Format: NewSubscriptionsFormat("raw", false)},
            `license: id1
name: name1
owner: owner1
components: compstring

license: id2
name: name2
owner: owner2
components: compstring

`,
        },
        {
            Context{Format: NewSubscriptionsFormat("raw", true)},
            `license: id1
license: id2
`,
        },
        // Custom Format
        {
            Context{Format: NewSubscriptionsFormat("{{.Owner}}", false)},
            `owner1
owner2
`,
        },
    }

    expiration, _ := time.Parse(time.RFC822, "01 Jan 20 10:00 UTC")

    for _, testcase := range cases {
        subscriptions := []licenseutils.LicenseDisplay{
            {
                Num:   1,
                Owner: "owner1",
                Subscription: model.Subscription{
                    ID:        "id1",
                    Name:      "name1",
                    ProductID: "productid1",
                    Expires:   &expiration,
                    PricingComponents: model.PricingComponents{
                        &model.SubscriptionPricingComponent{
                            Name:  "nodes",
                            Value: 10,
                        },
                    },
                },
                ComponentsString: "compstring",
            },
            {
                Num:   2,
                Owner: "owner2",
                Subscription: model.Subscription{
                    ID:        "id2",
                    Name:      "name2",
                    ProductID: "productid2",
                    Expires:   &expiration,
                    PricingComponents: model.PricingComponents{
                        &model.SubscriptionPricingComponent{
                            Name:  "nodes",
                            Value: 20,
                        },
                    },
                },
                ComponentsString: "compstring",
            },
        }
        out := &bytes.Buffer{}
        testcase.context.Output = out
        err := SubscriptionsWrite(testcase.context, subscriptions)
        if err != nil {
            assert.Error(t, err, testcase.expected)
        } else {
            assert.Check(t, is.Equal(testcase.expected, out.String()))
        }
    }
}

func TestSubscriptionContextWriteJSON(t *testing.T) {
    expiration, _ := time.Parse(time.RFC822, "01 Jan 20 10:00 UTC")
    subscriptions := []licenseutils.LicenseDisplay{
        {
            Num:   1,
            Owner: "owner1",
            Subscription: model.Subscription{
                ID:        "id1",
                Name:      "name1",
                ProductID: "productid1",
                Expires:   &expiration,
                PricingComponents: model.PricingComponents{
                    &model.SubscriptionPricingComponent{
                        Name:  "nodes",
                        Value: 10,
                    },
                },
            },
            ComponentsString: "compstring",
        },
        {
            Num:   2,
            Owner: "owner2",
            Subscription: model.Subscription{
                ID:        "id2",
                Name:      "name2",
                ProductID: "productid2",
                Expires:   &expiration,
                PricingComponents: model.PricingComponents{
                    &model.SubscriptionPricingComponent{
                        Name:  "nodes",
                        Value: 20,
                    },
                },
            },
            ComponentsString: "compstring",
        },
    }
    expectedJSONs := []map[string]interface{}{
        {
            "Owner":            "owner1",
            "ComponentsString": "compstring",
            "Expires":          "2020-01-01T10:00:00Z",
            "DockerID":         "",
            "Eusa":             nil,
            "ID":               "id1",
            "Start":            nil,
            "Name":             "name1",
            "Num":              float64(1),
            "PricingComponents": []interface{}{
                map[string]interface{}{
                    "name":  "nodes",
                    "value": float64(10),
                },
            },
            "ProductID":         "productid1",
            "ProductRatePlan":   "",
            "ProductRatePlanID": "",
            "State":             "",
            "Summary":           "License Name: name1\tQuantity: 10 nodes\tExpiration date: 2020-01-01",
        },
        {
            "Owner":            "owner2",
            "ComponentsString": "compstring",
            "Expires":          "2020-01-01T10:00:00Z",
            "DockerID":         "",
            "Eusa":             nil,
            "ID":               "id2",
            "Start":            nil,
            "Name":             "name2",
            "Num":              float64(2),
            "PricingComponents": []interface{}{
                map[string]interface{}{
                    "name":  "nodes",
                    "value": float64(20),
                },
            },
            "ProductID":         "productid2",
            "ProductRatePlan":   "",
            "ProductRatePlanID": "",
            "State":             "",
            "Summary":           "License Name: name2\tQuantity: 20 nodes\tExpiration date: 2020-01-01",
        },
    }

    out := &bytes.Buffer{}
    err := SubscriptionsWrite(Context{Format: "{{json .}}", Output: out}, subscriptions)
    if err != nil {
        t.Fatal(err)
    }
    for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") {
        var m map[string]interface{}
        if err := json.Unmarshal([]byte(line), &m); err != nil {
            t.Fatal(err)
        }
        assert.Check(t, is.DeepEqual(expectedJSONs[i], m))
    }
}

func TestSubscriptionContextWriteJSONField(t *testing.T) {
    subscriptions := []licenseutils.LicenseDisplay{
        {Num: 1, Owner: "owner1"},
        {Num: 2, Owner: "owner2"},
    }
    out := &bytes.Buffer{}
    err := SubscriptionsWrite(Context{Format: "{{json .Owner}}", Output: out}, subscriptions)
    if err != nil {
        t.Fatal(err)
    }
    for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") {
        var s string
        if err := json.Unmarshal([]byte(line), &s); err != nil {
            t.Fatal(err)
        }
        assert.Check(t, is.Equal(subscriptions[i].Owner, s))
    }
}
@ -0,0 +1,73 @@
package formatter

import (
	"github.com/docker/cli/internal/containerizedengine"
)

const (
	defaultUpdatesTableFormat = "table {{.Type}}\t{{.Version}}\t{{.Notes}}"
	defaultUpdatesQuietFormat = "{{.Version}}"

	updatesTypeHeader = "TYPE"
	versionHeader     = "VERSION"
	notesHeader       = "NOTES"
)

// NewUpdatesFormat returns a Format for rendering using a updates context
func NewUpdatesFormat(source string, quiet bool) Format {
	switch source {
	case TableFormatKey:
		if quiet {
			return defaultUpdatesQuietFormat
		}
		return defaultUpdatesTableFormat
	case RawFormatKey:
		if quiet {
			return `update_version: {{.Version}}`
		}
		return `update_version: {{.Version}}\ntype: {{.Type}}\nnotes: {{.Notes}}\n`
	}
	return Format(source)
}

// UpdatesWrite writes the context
func UpdatesWrite(ctx Context, availableUpdates []containerizedengine.Update) error {
	render := func(format func(subContext subContext) error) error {
		for _, update := range availableUpdates {
			updatesCtx := &updateContext{trunc: ctx.Trunc, u: update}
			if err := format(updatesCtx); err != nil {
				return err
			}
		}
		return nil
	}
	updatesCtx := updateContext{}
	updatesCtx.header = map[string]string{
		"Type":    updatesTypeHeader,
		"Version": versionHeader,
		"Notes":   notesHeader,
	}
	return ctx.Write(&updatesCtx, render)
}

type updateContext struct {
	HeaderContext
	trunc bool
	u     containerizedengine.Update
}

func (c *updateContext) MarshalJSON() ([]byte, error) {
	return marshalJSON(c)
}

func (c *updateContext) Type() string {
	return c.u.Type
}

func (c *updateContext) Version() string {
	return c.u.Version
}

func (c *updateContext) Notes() string {
	return c.u.Notes
}
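// Usage sketch (illustrative, not part of the commit): how a caller might
// render a set of available updates with the formatter introduced above.
// The Update values and the format choice are hypothetical; Context,
// NewUpdatesFormat and UpdatesWrite are the symbols defined in this file.
//
//	updates := []containerizedengine.Update{
//		{Type: "community", Version: "18.09.0", Notes: "https://docs.docker.com/releasenotes"},
//	}
//	updatesCtx := formatter.Context{
//		Output: dockerCli.Out(),
//		Format: formatter.NewUpdatesFormat("table", false),
//	}
//	err := formatter.UpdatesWrite(updatesCtx, updates)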
@ -0,0 +1,143 @@
package formatter

import (
	"bytes"
	"encoding/json"
	"strings"
	"testing"

	"github.com/docker/cli/internal/containerizedengine"
	"gotest.tools/assert"
	is "gotest.tools/assert/cmp"
)

func TestUpdateContextWrite(t *testing.T) {
	cases := []struct {
		context  Context
		expected string
	}{
		// Errors
		{
			Context{Format: "{{InvalidFunction}}"},
			`Template parsing error: template: :1: function "InvalidFunction" not defined
`,
		},
		{
			Context{Format: "{{nil}}"},
			`Template parsing error: template: :1:2: executing "" at <nil>: nil is not a command
`,
		},
		// Table format
		{
			Context{Format: NewUpdatesFormat("table", false)},
			`TYPE VERSION NOTES
updateType1 version1 description 1
updateType2 version2 description 2
`,
		},
		{
			Context{Format: NewUpdatesFormat("table", true)},
			`version1
version2
`,
		},
		{
			Context{Format: NewUpdatesFormat("table {{.Version}}", false)},
			`VERSION
version1
version2
`,
		},
		{
			Context{Format: NewUpdatesFormat("table {{.Version}}", true)},
			`VERSION
version1
version2
`,
		},
		// Raw Format
		{
			Context{Format: NewUpdatesFormat("raw", false)},
			`update_version: version1
type: updateType1
notes: description 1

update_version: version2
type: updateType2
notes: description 2

`,
		},
		{
			Context{Format: NewUpdatesFormat("raw", true)},
			`update_version: version1
update_version: version2
`,
		},
		// Custom Format
		{
			Context{Format: NewUpdatesFormat("{{.Version}}", false)},
			`version1
version2
`,
		},
	}

	for _, testcase := range cases {
		updates := []containerizedengine.Update{
			{Type: "updateType1", Version: "version1", Notes: "description 1"},
			{Type: "updateType2", Version: "version2", Notes: "description 2"},
		}
		out := &bytes.Buffer{}
		testcase.context.Output = out
		err := UpdatesWrite(testcase.context, updates)
		if err != nil {
			assert.Error(t, err, testcase.expected)
		} else {
			assert.Check(t, is.Equal(testcase.expected, out.String()))
		}
	}
}

func TestUpdateContextWriteJSON(t *testing.T) {
	updates := []containerizedengine.Update{
		{Type: "updateType1", Version: "version1", Notes: "note1"},
		{Type: "updateType2", Version: "version2", Notes: "note2"},
	}
	expectedJSONs := []map[string]interface{}{
		{"Version": "version1", "Notes": "note1", "Type": "updateType1"},
		{"Version": "version2", "Notes": "note2", "Type": "updateType2"},
	}

	out := &bytes.Buffer{}
	err := UpdatesWrite(Context{Format: "{{json .}}", Output: out}, updates)
	if err != nil {
		t.Fatal(err)
	}
	for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") {
		var m map[string]interface{}
		if err := json.Unmarshal([]byte(line), &m); err != nil {
			t.Fatal(err)
		}
		assert.Check(t, is.DeepEqual(expectedJSONs[i], m))
	}
}

func TestUpdateContextWriteJSONField(t *testing.T) {
	updates := []containerizedengine.Update{
		{Type: "updateType1", Version: "version1"},
		{Type: "updateType2", Version: "version2"},
	}
	out := &bytes.Buffer{}
	err := UpdatesWrite(Context{Format: "{{json .Type}}", Output: out}, updates)
	if err != nil {
		t.Fatal(err)
	}
	for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") {
		var s string
		if err := json.Unmarshal([]byte(line), &s); err != nil {
			t.Fatal(err)
		}
		assert.Check(t, is.Equal(updates[i].Type, s))
	}
}
@ -15,6 +15,7 @@ type fakeRegistryClient struct {
	getManifestListFunc func(ctx context.Context, ref reference.Named) ([]manifesttypes.ImageManifest, error)
	mountBlobFunc       func(ctx context.Context, source reference.Canonical, target reference.Named) error
	putManifestFunc     func(ctx context.Context, source reference.Named, mf distribution.Manifest) (digest.Digest, error)
	getTagsFunc         func(ctx context.Context, ref reference.Named) ([]string, error)
}

func (c *fakeRegistryClient) GetManifest(ctx context.Context, ref reference.Named) (manifesttypes.ImageManifest, error) {
@ -45,4 +46,11 @@ func (c *fakeRegistryClient) PutManifest(ctx context.Context, ref reference.Name
	return digest.Digest(""), nil
}

func (c *fakeRegistryClient) GetTags(ctx context.Context, ref reference.Named) ([]string, error) {
	if c.getTagsFunc != nil {
		return c.getTagsFunc(ctx, ref)
	}
	return nil, nil
}

var _ client.RegistryClient = &fakeRegistryClient{}
@ -11,6 +11,7 @@ import (
	"runtime"
	"strings"

	"github.com/docker/cli/cli/debug"
	"github.com/docker/distribution/reference"
	"github.com/docker/docker/api/types"
	registrytypes "github.com/docker/docker/api/types/registry"
@ -26,9 +27,10 @@ func ElectAuthServer(ctx context.Context, cli Cli) string {
	// example a Linux client might be interacting with a Windows daemon, hence
	// the default registry URL might be Windows specific.
	serverAddress := registry.IndexServer
	if info, err := cli.Client().Info(ctx); err != nil {
	if info, err := cli.Client().Info(ctx); err != nil && debug.IsEnabled() {
		// Only report the warning if we're in debug mode to prevent nagging during engine initialization workflows
		fmt.Fprintf(cli.Err(), "Warning: failed to get default registry endpoint from daemon (%v). Using system default: %s\n", err, serverAddress)
	} else if info.IndexServerAddress == "" {
	} else if info.IndexServerAddress == "" && debug.IsEnabled() {
		fmt.Fprintf(cli.Err(), "Warning: Empty registry endpoint from daemon. Using system default: %s\n", serverAddress)
	} else {
		serverAddress = info.IndexServerAddress
@ -125,6 +125,11 @@ func runLogin(dockerCli command.Cli, opts loginOptions) error { //nolint: gocycl
	}

	response, err = clnt.RegistryLogin(ctx, *authConfig)
	if err != nil && client.IsErrConnectionFailed(err) {
		// If the server isn't responding (yet) attempt to login purely client side
		response, err = loginClientSide(ctx, *authConfig)
	}
	// If we (still) have an error, give up
	if err != nil {
		return err
	}
@ -167,3 +172,17 @@ func loginWithCredStoreCreds(ctx context.Context, dockerCli command.Cli, authCon
	}
	return response, err
}

func loginClientSide(ctx context.Context, auth types.AuthConfig) (registrytypes.AuthenticateOKBody, error) {
	svc, err := registry.NewService(registry.ServiceOptions{})
	if err != nil {
		return registrytypes.AuthenticateOKBody{}, err
	}

	status, token, err := svc.Auth(ctx, &auth, command.UserAgent())

	return registrytypes.AuthenticateOKBody{
		Status:        status,
		IdentityToken: token,
	}, err
}
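// Flow sketch (illustrative, not part of the commit): the login path above now
// degrades gracefully when no daemon is reachable, which matters during engine
// initialization when nothing is listening on the Docker socket yet. ctx, clnt
// and authConfig stand in for the values already in scope in runLogin.
//
//	response, err := clnt.RegistryLogin(ctx, *authConfig)
//	if err != nil && client.IsErrConnectionFailed(err) {
//		// no daemon to proxy the login through; authenticate against the
//		// registry directly from the CLI process instead
//		response, err = loginClientSide(ctx, *authConfig)
//	}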
@ -13,6 +13,7 @@ import (
	// Prevents a circular import with "github.com/docker/cli/internal/test"

	. "github.com/docker/cli/cli/command"
	"github.com/docker/cli/cli/debug"
	"github.com/docker/cli/internal/test"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
@ -78,6 +79,8 @@ func TestElectAuthServer(t *testing.T) {
			},
		},
	}
	// Enable debug to see warnings we're checking for
	debug.Enable()
	for _, tc := range testCases {
		cli := test.NewFakeCli(&fakeClient{infoFunc: tc.infoFunc})
		server := ElectAuthServer(context.Background(), cli)
@ -7,6 +7,7 @@ import (
	"strings"

	manifesttypes "github.com/docker/cli/cli/manifest/types"
	"github.com/docker/cli/cli/trust"
	"github.com/docker/distribution"
	"github.com/docker/distribution/reference"
	distributionclient "github.com/docker/distribution/registry/client"
@ -24,6 +25,7 @@ type RegistryClient interface {
	GetManifestList(ctx context.Context, ref reference.Named) ([]manifesttypes.ImageManifest, error)
	MountBlob(ctx context.Context, source reference.Canonical, target reference.Named) error
	PutManifest(ctx context.Context, ref reference.Named, manifest distribution.Manifest) (digest.Digest, error)
	GetTags(ctx context.Context, ref reference.Named) ([]string, error)
}

// NewRegistryClient returns a new RegistryClient with a resolver
@ -122,6 +124,19 @@ func (c *client) PutManifest(ctx context.Context, ref reference.Named, manifest
	return dgst, errors.Wrapf(err, "failed to put manifest %s", ref)
}

func (c *client) GetTags(ctx context.Context, ref reference.Named) ([]string, error) {
	repoEndpoint, err := newDefaultRepositoryEndpoint(ref, c.insecureRegistry)
	if err != nil {
		return nil, err
	}

	repo, err := c.getRepositoryForReference(ctx, ref, repoEndpoint)
	if err != nil {
		return nil, err
	}
	return repo.Tags(ctx).All(ctx)
}

func (c *client) getRepositoryForReference(ctx context.Context, ref reference.Named, repoEndpoint repositoryEndpoint) (distribution.Repository, error) {
	httpTransport, err := c.getHTTPTransportForRepoEndpoint(ctx, repoEndpoint)
	if err != nil {
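// Usage sketch (illustrative, not part of this hunk): CLI code can list the
// tags of a repository through the new interface method. dockerCli is assumed
// to be a command.Cli and the reference string is a placeholder.
//
//	ref, err := reference.ParseNormalizedNamed("docker.io/docker/engine-community")
//	if err != nil {
//		return err
//	}
//	tags, err := dockerCli.RegistryClient(false).GetTags(ctx, ref)
//	if err != nil {
//		return err
//	}
//	fmt.Fprintln(dockerCli.Out(), strings.Join(tags, "\n"))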
@ -181,3 +196,16 @@ func getManifestOptionsFromReference(ref reference.Named) (digest.Digest, []dist
	}
	return "", nil, errors.Errorf("%s no tag or digest", ref)
}

// GetRegistryAuth returns the auth config given an input image
func GetRegistryAuth(ctx context.Context, resolver AuthConfigResolver, imageName string) (*types.AuthConfig, error) {
	distributionRef, err := reference.ParseNormalizedNamed(imageName)
	if err != nil {
		return nil, fmt.Errorf("Failed to parse image name: %s: %s", imageName, err)
	}
	imgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, nil, resolver, distributionRef.String())
	if err != nil {
		return nil, fmt.Errorf("Failed to get imgRefAndAuth: %s", err)
	}
	return imgRefAndAuth.AuthConfig(), nil
}
@ -105,7 +105,7 @@ shellcheck: build_shell_validate_image ## run shellcheck validation
	docker run -ti --rm $(ENVVARS) $(MOUNTS) $(VALIDATE_IMAGE_NAME) make shellcheck

.PHONY: test-e2e ## run e2e tests
test-e2e: test-e2e-non-experimental test-e2e-experimental
test-e2e: test-e2e-non-experimental test-e2e-experimental test-e2e-containerized

.PHONY: test-e2e-experimental
test-e2e-experimental: build_e2e_image
@ -115,6 +115,14 @@ test-e2e-experimental: build_e2e_image
test-e2e-non-experimental: build_e2e_image
	docker run --rm -v /var/run/docker.sock:/var/run/docker.sock $(E2E_IMAGE_NAME)

.PHONY: test-e2e-containerized
test-e2e-containerized: build_e2e_image
	docker run --rm --privileged \
		-v /var/lib/docker \
		-v /var/lib/containerd \
		-v /lib/modules:/lib/modules \
		$(E2E_IMAGE_NAME) /go/src/github.com/docker/cli/scripts/test/engine/entry

.PHONY: help
help: ## print this help
	@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
@ -1,4 +1,7 @@
ARG GO_VERSION=1.10.3

FROM docker/containerd-shim-process:a4d1531 AS containerd-shim-process

# Use Debian based image as docker-compose requires glibc.
FROM golang:${GO_VERSION}
@ -6,8 +9,34 @@ RUN apt-get update && apt-get install -y \
    build-essential \
    curl \
    openssl \
    btrfs-tools \
    libapparmor-dev \
    libseccomp-dev \
    iptables \
    && rm -rf /var/lib/apt/lists/*

# TODO - consider replacing with an official image and a multi-stage build to pluck the binaries out
#ARG CONTAINERD_VERSION=v1.1.2
#ARG CONTAINERD_VERSION=47a128d
#ARG CONTAINERD_VERSION=6c3e782f
ARG CONTAINERD_VERSION=65839a47a88b0a1c5dc34981f1741eccefc9f2b0
RUN git clone https://github.com/containerd/containerd.git /go/src/github.com/containerd/containerd && \
    cd /go/src/github.com/containerd/containerd && \
    git checkout ${CONTAINERD_VERSION} && \
    make && \
    make install
COPY e2eengine/config.toml /etc/containerd/config.toml
COPY --from=containerd-shim-process /bin/containerd-shim-process-v1 /bin/


# TODO - consider replacing with an official image and a multi-stage build to pluck the binaries out
ARG RUNC_VERSION=v1.0.0-rc5
RUN git clone https://github.com/opencontainers/runc.git /go/src/github.com/opencontainers/runc && \
    cd /go/src/github.com/opencontainers/runc && \
    git checkout ${RUNC_VERSION} && \
    make && \
    make install

ARG COMPOSE_VERSION=1.21.2
RUN curl -L https://github.com/docker/compose/releases/download/${COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose \
    && chmod +x /usr/local/bin/docker-compose
@ -0,0 +1,42 @@
package check

import (
	"os"
	"testing"

	"github.com/docker/cli/e2eengine"

	"gotest.tools/icmd"
)

func TestDockerEngineOnContainerdAltRootConfig(t *testing.T) {
	defer func() {
		err := e2eengine.CleanupEngine(t)
		if err != nil {
			t.Errorf("Failed to cleanup engine: %s", err)
		}
	}()

	t.Log("First engine init")
	// First init
	result := icmd.RunCmd(icmd.Command("docker", "engine", "init", "--config-file", "/tmp/etc/docker/daemon.json"),
		func(c *icmd.Cmd) {
			c.Env = append(c.Env, "DOCKER_CLI_EXPERIMENTAL=enabled")
		})
	result.Assert(t, icmd.Expected{
		Out:      "Success! The docker engine is now running.",
		Err:      "",
		ExitCode: 0,
	})

	// Make sure update doesn't blow up with alternate config path
	t.Log("perform update")
	// Now update and succeed
	targetVersion := os.Getenv("VERSION")
	result = icmd.RunCmd(icmd.Command("docker", "engine", "update", "--version", targetVersion))
	result.Assert(t, icmd.Expected{
		Out:      "Success! The docker engine is now running.",
		Err:      "",
		ExitCode: 0,
	})
}
@ -0,0 +1,14 @@
root = "/var/lib/containerd"
state = "/run/containerd"
oom_score = 0

[grpc]
  address = "/run/containerd/containerd.sock"
  uid = 0
  gid = 0

[debug]
  address = "/run/containerd/debug.sock"
  uid = 0
  gid = 0
  level = "debug"
@ -0,0 +1,85 @@
package multi

import (
	"os"
	"testing"

	"github.com/docker/cli/e2eengine"

	"gotest.tools/icmd"
)

func TestDockerEngineOnContainerdMultiTest(t *testing.T) {
	defer func() {
		err := e2eengine.CleanupEngine(t)
		if err != nil {
			t.Errorf("Failed to cleanup engine: %s", err)
		}
	}()

	t.Log("Attempt engine init without experimental")
	// First init
	result := icmd.RunCmd(icmd.Command("docker", "engine", "init"),
		func(c *icmd.Cmd) {
			c.Env = append(c.Env, "DOCKER_CLI_EXPERIMENTAL=disabled")
		})
	result.Assert(t, icmd.Expected{
		Out:      "",
		Err:      "docker engine init is only supported",
		ExitCode: 1,
	})

	t.Log("First engine init")
	// First init
	result = icmd.RunCmd(icmd.Command("docker", "engine", "init"),
		func(c *icmd.Cmd) {
			c.Env = append(c.Env, "DOCKER_CLI_EXPERIMENTAL=enabled")
		})
	result.Assert(t, icmd.Expected{
		Out:      "Success! The docker engine is now running.",
		Err:      "",
		ExitCode: 0,
	})

	t.Log("checking for updates")
	// Check for updates
	result = icmd.RunCmd(icmd.Command("docker", "engine", "check", "--downgrades", "--pre-releases"))
	result.Assert(t, icmd.Expected{
		Out:      "VERSION",
		Err:      "",
		ExitCode: 0,
	})

	t.Log("attempt second init (should fail)")
	// Attempt to init a second time and fail
	result = icmd.RunCmd(icmd.Command("docker", "engine", "init"),
		func(c *icmd.Cmd) {
			c.Env = append(c.Env, "DOCKER_CLI_EXPERIMENTAL=enabled")
		})
	result.Assert(t, icmd.Expected{
		Out:      "",
		Err:      "engine already present",
		ExitCode: 1,
	})

	t.Log("perform update")
	// Now update and succeed
	targetVersion := os.Getenv("VERSION")
	result = icmd.RunCmd(icmd.Command("docker", "engine", "update", "--version", targetVersion))
	result.Assert(t, icmd.Expected{
		Out:      "Success! The docker engine is now running.",
		Err:      "",
		ExitCode: 0,
	})

	t.Log("remove engine")
	result = icmd.RunCmd(icmd.Command("docker", "engine", "rm"),
		func(c *icmd.Cmd) {
			c.Env = append(c.Env, "DOCKER_CLI_EXPERIMENTAL=enabled")
		})
	result.Assert(t, icmd.Expected{
		Out:      "",
		Err:      "",
		ExitCode: 0,
	})
}
@ -0,0 +1,39 @@
package e2eengine

import (
	"context"
	"strings"
	"testing"

	"github.com/docker/cli/internal/containerizedengine"
)

// CleanupEngine ensures the local engine has been removed between testcases
func CleanupEngine(t *testing.T) error {
	t.Log("doing engine cleanup")
	ctx := context.Background()

	client, err := containerizedengine.NewClient("")
	if err != nil {
		return err
	}

	// See if the engine exists first
	engine, err := client.GetEngine(ctx)
	if err != nil {
		if strings.Contains(err.Error(), "not present") {
			t.Log("engine was not detected, no cleanup to perform")
			// Nothing to do, it's not defined
			return nil
		}
		t.Logf("failed to lookup engine: %s", err)
		// Any other error is not good...
		return err
	}
	// TODO Consider nuking the docker dir too so there's no cached content between test cases
	err = client.RemoveEngine(ctx, engine)
	if err != nil {
		t.Logf("Failed to remove engine: %s", err)
	}
	return err
}
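// Usage sketch (illustrative): new e2eengine test packages are expected to
// defer CleanupEngine so an engine left behind by one testcase cannot leak
// into the next. TestSomething is a hypothetical test name.
//
//	func TestSomething(t *testing.T) {
//		defer func() {
//			if err := e2eengine.CleanupEngine(t); err != nil {
//				t.Errorf("Failed to cleanup engine: %s", err)
//			}
//		}()
//		// exercise `docker engine ...` commands here
//	}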
@ -0,0 +1,348 @@
package containerizedengine

import (
	"bytes"
	"context"
	"syscall"

	"github.com/containerd/containerd"
	containerdtypes "github.com/containerd/containerd/api/types"
	"github.com/containerd/containerd/cio"
	"github.com/containerd/containerd/containers"
	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/oci"
	prototypes "github.com/gogo/protobuf/types"
	"github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/opencontainers/runtime-spec/specs-go"
)

type (
	fakeContainerdClient struct {
		containersFunc       func(ctx context.Context, filters ...string) ([]containerd.Container, error)
		newContainerFunc     func(ctx context.Context, id string, opts ...containerd.NewContainerOpts) (containerd.Container, error)
		pullFunc             func(ctx context.Context, ref string, opts ...containerd.RemoteOpt) (containerd.Image, error)
		getImageFunc         func(ctx context.Context, ref string) (containerd.Image, error)
		contentStoreFunc     func() content.Store
		containerServiceFunc func() containers.Store
	}
	fakeContainer struct {
		idFunc         func() string
		infoFunc       func(context.Context) (containers.Container, error)
		deleteFunc     func(context.Context, ...containerd.DeleteOpts) error
		newTaskFunc    func(context.Context, cio.Creator, ...containerd.NewTaskOpts) (containerd.Task, error)
		specFunc       func(context.Context) (*oci.Spec, error)
		taskFunc       func(context.Context, cio.Attach) (containerd.Task, error)
		imageFunc      func(context.Context) (containerd.Image, error)
		labelsFunc     func(context.Context) (map[string]string, error)
		setLabelsFunc  func(context.Context, map[string]string) (map[string]string, error)
		extensionsFunc func(context.Context) (map[string]prototypes.Any, error)
		updateFunc     func(context.Context, ...containerd.UpdateContainerOpts) error
	}
	fakeImage struct {
		nameFunc         func() string
		targetFunc       func() ocispec.Descriptor
		unpackFunc       func(context.Context, string) error
		rootFSFunc       func(ctx context.Context) ([]digest.Digest, error)
		sizeFunc         func(ctx context.Context) (int64, error)
		configFunc       func(ctx context.Context) (ocispec.Descriptor, error)
		isUnpackedFunc   func(context.Context, string) (bool, error)
		contentStoreFunc func() content.Store
	}
	fakeTask struct {
		idFunc          func() string
		pidFunc         func() uint32
		startFunc       func(context.Context) error
		deleteFunc      func(context.Context, ...containerd.ProcessDeleteOpts) (*containerd.ExitStatus, error)
		killFunc        func(context.Context, syscall.Signal, ...containerd.KillOpts) error
		waitFunc        func(context.Context) (<-chan containerd.ExitStatus, error)
		closeIOFunc     func(context.Context, ...containerd.IOCloserOpts) error
		resizeFunc      func(ctx context.Context, w, h uint32) error
		ioFunc          func() cio.IO
		statusFunc      func(context.Context) (containerd.Status, error)
		pauseFunc       func(context.Context) error
		resumeFunc      func(context.Context) error
		execFunc        func(context.Context, string, *specs.Process, cio.Creator) (containerd.Process, error)
		pidsFunc        func(context.Context) ([]containerd.ProcessInfo, error)
		checkpointFunc  func(context.Context, ...containerd.CheckpointTaskOpts) (containerd.Image, error)
		updateFunc      func(context.Context, ...containerd.UpdateTaskOpts) error
		loadProcessFunc func(context.Context, string, cio.Attach) (containerd.Process, error)
		metricsFunc     func(context.Context) (*containerdtypes.Metric, error)
	}

	testOutStream struct {
		bytes.Buffer
	}
)

func (w *fakeContainerdClient) Containers(ctx context.Context, filters ...string) ([]containerd.Container, error) {
	if w.containersFunc != nil {
		return w.containersFunc(ctx, filters...)
	}
	return []containerd.Container{}, nil
}
func (w *fakeContainerdClient) NewContainer(ctx context.Context, id string, opts ...containerd.NewContainerOpts) (containerd.Container, error) {
	if w.newContainerFunc != nil {
		return w.newContainerFunc(ctx, id, opts...)
	}
	return nil, nil
}
func (w *fakeContainerdClient) Pull(ctx context.Context, ref string, opts ...containerd.RemoteOpt) (containerd.Image, error) {
	if w.pullFunc != nil {
		return w.pullFunc(ctx, ref, opts...)
	}
	return nil, nil
}
func (w *fakeContainerdClient) GetImage(ctx context.Context, ref string) (containerd.Image, error) {
	if w.getImageFunc != nil {
		return w.getImageFunc(ctx, ref)
	}
	return nil, nil
}
func (w *fakeContainerdClient) ContentStore() content.Store {
	if w.contentStoreFunc != nil {
		return w.contentStoreFunc()
	}
	return nil
}
func (w *fakeContainerdClient) ContainerService() containers.Store {
	if w.containerServiceFunc != nil {
		return w.containerServiceFunc()
	}
	return nil
}
func (w *fakeContainerdClient) Close() error {
	return nil
}

func (c *fakeContainer) ID() string {
	if c.idFunc != nil {
		return c.idFunc()
	}
	return ""
}
func (c *fakeContainer) Info(ctx context.Context) (containers.Container, error) {
	if c.infoFunc != nil {
		return c.infoFunc(ctx)
	}
	return containers.Container{}, nil
}
func (c *fakeContainer) Delete(ctx context.Context, opts ...containerd.DeleteOpts) error {
	if c.deleteFunc != nil {
		return c.deleteFunc(ctx, opts...)
	}
	return nil
}
func (c *fakeContainer) NewTask(ctx context.Context, ioc cio.Creator, opts ...containerd.NewTaskOpts) (containerd.Task, error) {
	if c.newTaskFunc != nil {
		return c.newTaskFunc(ctx, ioc, opts...)
	}
	return nil, nil
}
func (c *fakeContainer) Spec(ctx context.Context) (*oci.Spec, error) {
	if c.specFunc != nil {
		return c.specFunc(ctx)
	}
	return nil, nil
}
func (c *fakeContainer) Task(ctx context.Context, attach cio.Attach) (containerd.Task, error) {
	if c.taskFunc != nil {
		return c.taskFunc(ctx, attach)
	}
	return nil, nil
}
func (c *fakeContainer) Image(ctx context.Context) (containerd.Image, error) {
	if c.imageFunc != nil {
		return c.imageFunc(ctx)
	}
	return nil, nil
}
func (c *fakeContainer) Labels(ctx context.Context) (map[string]string, error) {
	if c.labelsFunc != nil {
		return c.labelsFunc(ctx)
	}
	return nil, nil
}
func (c *fakeContainer) SetLabels(ctx context.Context, labels map[string]string) (map[string]string, error) {
	if c.setLabelsFunc != nil {
		return c.setLabelsFunc(ctx, labels)
	}
	return nil, nil
}
func (c *fakeContainer) Extensions(ctx context.Context) (map[string]prototypes.Any, error) {
	if c.extensionsFunc != nil {
		return c.extensionsFunc(ctx)
	}
	return nil, nil
}
func (c *fakeContainer) Update(ctx context.Context, opts ...containerd.UpdateContainerOpts) error {
	if c.updateFunc != nil {
		return c.updateFunc(ctx, opts...)
	}
	return nil
}

func (i *fakeImage) Name() string {
	if i.nameFunc != nil {
		return i.nameFunc()
	}
	return ""
}
func (i *fakeImage) Target() ocispec.Descriptor {
	if i.targetFunc != nil {
		return i.targetFunc()
	}
	return ocispec.Descriptor{}
}
func (i *fakeImage) Unpack(ctx context.Context, name string) error {
	if i.unpackFunc != nil {
		return i.unpackFunc(ctx, name)
	}
	return nil
}
func (i *fakeImage) RootFS(ctx context.Context) ([]digest.Digest, error) {
	if i.rootFSFunc != nil {
		return i.rootFSFunc(ctx)
	}
	return nil, nil
}
func (i *fakeImage) Size(ctx context.Context) (int64, error) {
	if i.sizeFunc != nil {
		return i.sizeFunc(ctx)
	}
	return 0, nil
}
func (i *fakeImage) Config(ctx context.Context) (ocispec.Descriptor, error) {
	if i.configFunc != nil {
		return i.configFunc(ctx)
	}
	return ocispec.Descriptor{}, nil
}
func (i *fakeImage) IsUnpacked(ctx context.Context, name string) (bool, error) {
	if i.isUnpackedFunc != nil {
		return i.isUnpackedFunc(ctx, name)
	}
	return false, nil
}
func (i *fakeImage) ContentStore() content.Store {
	if i.contentStoreFunc != nil {
		return i.contentStoreFunc()
	}
	return nil
}

func (t *fakeTask) ID() string {
	if t.idFunc != nil {
		return t.idFunc()
	}
	return ""
}
func (t *fakeTask) Pid() uint32 {
	if t.pidFunc != nil {
		return t.pidFunc()
	}
	return 0
}
func (t *fakeTask) Start(ctx context.Context) error {
	if t.startFunc != nil {
		return t.startFunc(ctx)
	}
	return nil
}
func (t *fakeTask) Delete(ctx context.Context, opts ...containerd.ProcessDeleteOpts) (*containerd.ExitStatus, error) {
	if t.deleteFunc != nil {
		return t.deleteFunc(ctx, opts...)
	}
	return nil, nil
}
func (t *fakeTask) Kill(ctx context.Context, signal syscall.Signal, opts ...containerd.KillOpts) error {
	if t.killFunc != nil {
		return t.killFunc(ctx, signal, opts...)
	}
	return nil
}
func (t *fakeTask) Wait(ctx context.Context) (<-chan containerd.ExitStatus, error) {
	if t.waitFunc != nil {
		return t.waitFunc(ctx)
	}
	return nil, nil
}
func (t *fakeTask) CloseIO(ctx context.Context, opts ...containerd.IOCloserOpts) error {
	if t.closeIOFunc != nil {
		return t.closeIOFunc(ctx, opts...)
	}
	return nil
}
func (t *fakeTask) Resize(ctx context.Context, w, h uint32) error {
	if t.resizeFunc != nil {
		return t.resizeFunc(ctx, w, h)
	}
	return nil
}
func (t *fakeTask) IO() cio.IO {
	if t.ioFunc != nil {
		return t.ioFunc()
	}
	return nil
}
func (t *fakeTask) Status(ctx context.Context) (containerd.Status, error) {
	if t.statusFunc != nil {
		return t.statusFunc(ctx)
	}
	return containerd.Status{}, nil
}
func (t *fakeTask) Pause(ctx context.Context) error {
	if t.pauseFunc != nil {
		return t.pauseFunc(ctx)
	}
	return nil
}
func (t *fakeTask) Resume(ctx context.Context) error {
	if t.resumeFunc != nil {
		return t.resumeFunc(ctx)
	}
	return nil
}
func (t *fakeTask) Exec(ctx context.Context, cmd string, proc *specs.Process, ioc cio.Creator) (containerd.Process, error) {
	if t.execFunc != nil {
		return t.execFunc(ctx, cmd, proc, ioc)
	}
	return nil, nil
}
func (t *fakeTask) Pids(ctx context.Context) ([]containerd.ProcessInfo, error) {
	if t.pidsFunc != nil {
		return t.pidsFunc(ctx)
	}
	return nil, nil
}
func (t *fakeTask) Checkpoint(ctx context.Context, opts ...containerd.CheckpointTaskOpts) (containerd.Image, error) {
	if t.checkpointFunc != nil {
		return t.checkpointFunc(ctx, opts...)
	}
	return nil, nil
}
func (t *fakeTask) Update(ctx context.Context, opts ...containerd.UpdateTaskOpts) error {
	if t.updateFunc != nil {
		return t.updateFunc(ctx, opts...)
	}
	return nil
}
func (t *fakeTask) LoadProcess(ctx context.Context, name string, attach cio.Attach) (containerd.Process, error) {
	if t.loadProcessFunc != nil {
		return t.loadProcessFunc(ctx, name, attach)
	}
	return nil, nil
}
func (t *fakeTask) Metrics(ctx context.Context) (*containerdtypes.Metric, error) {
	if t.metricsFunc != nil {
		return t.metricsFunc(ctx)
	}
	return nil, nil
}

func (o *testOutStream) FD() uintptr {
	return 0
}
func (o *testOutStream) IsTerminal() bool {
	return false
}
@ -0,0 +1,77 @@
package containerizedengine

import (
	"context"
	"io"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/remotes/docker"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/pkg/jsonmessage"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// NewClient returns a new containerizedengine client
// This client can be used to manage the lifecycle of
// dockerd running as a container on containerd.
func NewClient(sockPath string) (Client, error) {
	if sockPath == "" {
		sockPath = containerdSockPath
	}
	cclient, err := containerd.New(sockPath)
	if err != nil {
		return nil, err
	}
	return baseClient{
		cclient: cclient,
	}, nil
}

// Close will close the underlying clients
func (c baseClient) Close() error {
	return c.cclient.Close()
}

func (c baseClient) pullWithAuth(ctx context.Context, imageName string, out OutStream,
	authConfig *types.AuthConfig) (containerd.Image, error) {

	resolver := docker.NewResolver(docker.ResolverOptions{
		Credentials: func(string) (string, string, error) {
			return authConfig.Username, authConfig.Password, nil
		},
	})

	ongoing := newJobs(imageName)
	pctx, stopProgress := context.WithCancel(ctx)
	progress := make(chan struct{})
	bufin, bufout := io.Pipe()

	go func() {
		showProgress(pctx, ongoing, c.cclient.ContentStore(), bufout)
	}()

	go func() {
		jsonmessage.DisplayJSONMessagesToStream(bufin, out, nil)
		close(progress)
	}()

	h := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
		if desc.MediaType != images.MediaTypeDockerSchema1Manifest {
			ongoing.add(desc)
		}
		return nil, nil
	})

	image, err := c.cclient.Pull(ctx, imageName,
		containerd.WithResolver(resolver),
		containerd.WithImageHandler(h),
		containerd.WithPullUnpack)
	stopProgress()

	if err != nil {
		return nil, err
	}
	<-progress
	return image, nil
}
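// Usage sketch (illustrative, not part of the commit): callers such as the
// `docker engine` subcommands obtain this client through the Cli interface
// and are responsible for closing it; an empty sockPath falls back to the
// default containerd socket as shown in NewClient above.
//
//	client, err := dockerCli.NewContainerizedEngineClient("")
//	if err != nil {
//		return errors.Wrap(err, "unable to access local containerd")
//	}
//	defer client.Close()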
@ -0,0 +1,43 @@
package containerizedengine

import (
	"context"
	"fmt"
	"testing"

	"github.com/containerd/containerd"
	"github.com/docker/docker/api/types"
	"gotest.tools/assert"
)

func TestPullWithAuthPullFail(t *testing.T) {
	ctx := context.Background()
	client := baseClient{
		cclient: &fakeContainerdClient{
			pullFunc: func(ctx context.Context, ref string, opts ...containerd.RemoteOpt) (containerd.Image, error) {
				return nil, fmt.Errorf("pull failure")
			},
		},
	}
	imageName := "testnamegoeshere"

	_, err := client.pullWithAuth(ctx, imageName, &testOutStream{}, &types.AuthConfig{})
	assert.ErrorContains(t, err, "pull failure")
}

func TestPullWithAuthPullPass(t *testing.T) {
	ctx := context.Background()
	client := baseClient{
		cclient: &fakeContainerdClient{
			pullFunc: func(ctx context.Context, ref string, opts ...containerd.RemoteOpt) (containerd.Image, error) {
				return nil, nil
			},
		},
	}
	imageName := "testnamegoeshere"

	_, err := client.pullWithAuth(ctx, imageName, &testOutStream{}, &types.AuthConfig{})
	assert.NilError(t, err)
}
@ -0,0 +1,261 @@
package containerizedengine

import (
	"context"
	"fmt"
	"io"
	"strings"
	"syscall"
	"time"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/namespaces"
	"github.com/containerd/containerd/runtime/restart"
	"github.com/docker/cli/internal/pkg/containerized"
	"github.com/docker/docker/api/types"
	"github.com/pkg/errors"
)

// InitEngine is the main entrypoint for `docker engine init`
func (c baseClient) InitEngine(ctx context.Context, opts EngineInitOptions, out OutStream,
	authConfig *types.AuthConfig, healthfn func(context.Context) error) error {

	ctx = namespaces.WithNamespace(ctx, engineNamespace)
	// Verify engine isn't already running
	_, err := c.GetEngine(ctx)
	if err == nil {
		return ErrEngineAlreadyPresent
	} else if err != ErrEngineNotPresent {
		return err
	}

	imageName := fmt.Sprintf("%s/%s:%s", opts.RegistryPrefix, opts.EngineImage, opts.EngineVersion)
	// Look for desired image
	_, err = c.cclient.GetImage(ctx, imageName)
	if err != nil {
		if errdefs.IsNotFound(err) {
			_, err = c.pullWithAuth(ctx, imageName, out, authConfig)
			if err != nil {
				return errors.Wrapf(err, "unable to pull image %s", imageName)
			}
		} else {
			return errors.Wrapf(err, "unable to check for image %s", imageName)
		}
	}

	// Spin up the engine
	err = c.startEngineOnContainerd(ctx, imageName, opts.ConfigFile)
	if err != nil {
		return errors.Wrap(err, "failed to create docker daemon")
	}

	// Wait for the daemon to start, verify it's responsive
	fmt.Fprintf(out, "Waiting for engine to start... ")
	ctx, cancel := context.WithTimeout(ctx, engineWaitTimeout)
	defer cancel()
	if err := c.waitForEngine(ctx, out, healthfn); err != nil {
		// TODO once we have the logging strategy sorted out
		// this should likely gather the last few lines of logs to report
		// why the daemon failed to initialize
		return errors.Wrap(err, "failed to start docker daemon")
	}
	fmt.Fprintf(out, "Success! The docker engine is now running.\n")

	return nil

}

// GetEngine will return the containerd container running the engine (or error)
func (c baseClient) GetEngine(ctx context.Context) (containerd.Container, error) {
	ctx = namespaces.WithNamespace(ctx, engineNamespace)
	containers, err := c.cclient.Containers(ctx, "id=="+engineContainerName)
	if err != nil {
		return nil, err
	}
	if len(containers) == 0 {
		return nil, ErrEngineNotPresent
	}
	return containers[0], nil
}

// getEngineImage will return the current image used by the engine
func (c baseClient) getEngineImage(engine containerd.Container) (string, error) {
	ctx := namespaces.WithNamespace(context.Background(), engineNamespace)
	image, err := engine.Image(ctx)
	if err != nil {
		return "", err
	}
	return image.Name(), nil
}

// getEngineConfigFilePath will extract the config file location from the engine flags
func (c baseClient) getEngineConfigFilePath(ctx context.Context, engine containerd.Container) (string, error) {
	spec, err := engine.Spec(ctx)
	configFile := ""
	if err != nil {
		return configFile, err
	}
	for i := 0; i < len(spec.Process.Args); i++ {
		arg := spec.Process.Args[i]
		if strings.HasPrefix(arg, "--config-file") {
			if strings.Contains(arg, "=") {
				split := strings.SplitN(arg, "=", 2)
				configFile = split[1]
			} else {
				if i+1 >= len(spec.Process.Args) {
					return configFile, ErrMalformedConfigFileParam
				}
				configFile = spec.Process.Args[i+1]
			}
		}
	}

	if configFile == "" {
		// TODO - any more diagnostics to offer?
		return configFile, ErrEngineConfigLookupFailure
	}
	return configFile, nil
}
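// Worked example (illustrative): given a daemon spec whose args carry either
// form of the flag, the loop above resolves the same path. The args slices
// below are hypothetical.
//
//	spec.Process.Args = []string{"dockerd", "--config-file=/etc/docker/daemon.json"}
//	// -> configFile == "/etc/docker/daemon.json" (split on "=")
//
//	spec.Process.Args = []string{"dockerd", "--config-file", "/etc/docker/daemon.json"}
//	// -> configFile == "/etc/docker/daemon.json" (taken from the next argument)
//
//	spec.Process.Args = []string{"dockerd", "--config-file"}
//	// -> ErrMalformedConfigFileParam, since no value follows the flag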

var (
	engineWaitInterval = 500 * time.Millisecond
	engineWaitTimeout  = 60 * time.Second
)

// waitForEngine will wait for the engine to start
func (c baseClient) waitForEngine(ctx context.Context, out io.Writer, healthfn func(context.Context) error) error {
	ticker := time.NewTicker(engineWaitInterval)
	defer ticker.Stop()
	defer func() {
		fmt.Fprintf(out, "\n")
	}()

	err := c.waitForEngineContainer(ctx, ticker)
	if err != nil {
		return err
	}
	fmt.Fprintf(out, "waiting for engine to be responsive... ")
	for {
		select {
		case <-ticker.C:
			err = healthfn(ctx)
			if err == nil {
				fmt.Fprintf(out, "engine is online.")
				return nil
			}
		case <-ctx.Done():
			return errors.Wrap(err, "timeout waiting for engine to be responsive")
		}
	}
}

func (c baseClient) waitForEngineContainer(ctx context.Context, ticker *time.Ticker) error {
	var ret error
	for {
		select {
		case <-ticker.C:
			engine, err := c.GetEngine(ctx)
			if engine != nil {
				return nil
			}
			ret = err
		case <-ctx.Done():
			return errors.Wrap(ret, "timeout waiting for engine to be responsive")
		}
	}
}

// RemoveEngine gracefully unwinds the current engine
func (c baseClient) RemoveEngine(ctx context.Context, engine containerd.Container) error {
	ctx = namespaces.WithNamespace(ctx, engineNamespace)

	// Make sure the container isn't being restarted while we unwind it
	stopLabel := map[string]string{}
	stopLabel[restart.StatusLabel] = string(containerd.Stopped)
	engine.SetLabels(ctx, stopLabel)

	// Wind down the existing engine
	task, err := engine.Task(ctx, nil)
	if err != nil {
		if !errdefs.IsNotFound(err) {
			return err
		}
	} else {
		status, err := task.Status(ctx)
		if err != nil {
			return err
		}
		if status.Status == containerd.Running {
			// It's running, so kill it
			err := task.Kill(ctx, syscall.SIGTERM, []containerd.KillOpts{}...)
			if err != nil {
				return errors.Wrap(err, "task kill error")
			}

			ch, err := task.Wait(ctx)
			if err != nil {
				return err
			}
			timeout := time.NewTimer(engineWaitTimeout)
			select {
			case <-timeout.C:
				// TODO - consider a force flag in the future to allow a more aggressive
				// kill of the engine via
				// task.Kill(ctx, syscall.SIGKILL, containerd.WithKillAll)
				return ErrEngineShutdownTimeout
			case <-ch:
			}
		}
		if _, err := task.Delete(ctx); err != nil {
			return err
		}
	}
	deleteOpts := []containerd.DeleteOpts{containerd.WithSnapshotCleanup}
	err = engine.Delete(ctx, deleteOpts...)
	if err != nil && errdefs.IsNotFound(err) {
		return nil
	}
	return errors.Wrap(err, "failed to remove existing engine container")
}

// startEngineOnContainerd creates a new docker engine running on containerd
func (c baseClient) startEngineOnContainerd(ctx context.Context, imageName, configFile string) error {
	ctx = namespaces.WithNamespace(ctx, engineNamespace)
	image, err := c.cclient.GetImage(ctx, imageName)
	if err != nil {
		if errdefs.IsNotFound(err) {
			return fmt.Errorf("engine image missing: %s", imageName)
		}
		return errors.Wrap(err, "failed to check for engine image")
	}

	// Make sure we have a valid config file
	err = c.verifyDockerConfig(configFile)
	if err != nil {
		return err
	}

	engineSpec.Process.Args = append(engineSpec.Process.Args,
		"--config-file", configFile,
	)

	cOpts := []containerd.NewContainerOpts{
		containerized.WithNewSnapshot(image),
		restart.WithStatus(containerd.Running),
		restart.WithLogPath("/var/log/engine.log"), // TODO - better!
		genSpec(),
		containerd.WithRuntime("io.containerd.runtime.process.v1", nil),
	}

	_, err = c.cclient.NewContainer(
		ctx,
		engineContainerName,
		cOpts...,
	)
	if err != nil {
		return errors.Wrap(err, "failed to create engine container")
	}

	return nil
}
@ -0,0 +1,537 @@
package containerizedengine

import (
    "context"
    "fmt"
    "syscall"
    "testing"
    "time"

    "github.com/containerd/containerd"
    "github.com/containerd/containerd/cio"
    "github.com/containerd/containerd/errdefs"
    "github.com/containerd/containerd/oci"
    "github.com/docker/docker/api/types"
    "github.com/opencontainers/runtime-spec/specs-go"
    "gotest.tools/assert"
)

func healthfnHappy(ctx context.Context) error {
    return nil
}
func healthfnError(ctx context.Context) error {
    return fmt.Errorf("ping failure")
}

func TestInitGetEngineFail(t *testing.T) {
    ctx := context.Background()
    opts := EngineInitOptions{
        EngineVersion:  "engineversiongoeshere",
        RegistryPrefix: "registryprefixgoeshere",
        ConfigFile:     "/tmp/configfilegoeshere",
        EngineImage:    CommunityEngineImage,
    }
    container := &fakeContainer{}
    client := baseClient{
        cclient: &fakeContainerdClient{
            containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
                return []containerd.Container{container}, nil
            },
        },
    }

    err := client.InitEngine(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
    assert.Assert(t, err == ErrEngineAlreadyPresent)
}

func TestInitCheckImageFail(t *testing.T) {
    ctx := context.Background()
    opts := EngineInitOptions{
        EngineVersion:  "engineversiongoeshere",
        RegistryPrefix: "registryprefixgoeshere",
        ConfigFile:     "/tmp/configfilegoeshere",
        EngineImage:    CommunityEngineImage,
    }
    client := baseClient{
        cclient: &fakeContainerdClient{
            containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
                return []containerd.Container{}, nil
            },
            getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
                return nil, fmt.Errorf("something went wrong")

            },
        },
    }

    err := client.InitEngine(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
    assert.ErrorContains(t, err, "unable to check for image")
    assert.ErrorContains(t, err, "something went wrong")
}

func TestInitPullFail(t *testing.T) {
    ctx := context.Background()
    opts := EngineInitOptions{
        EngineVersion:  "engineversiongoeshere",
        RegistryPrefix: "registryprefixgoeshere",
        ConfigFile:     "/tmp/configfilegoeshere",
        EngineImage:    CommunityEngineImage,
    }
    client := baseClient{
        cclient: &fakeContainerdClient{
            containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
                return []containerd.Container{}, nil
            },
            getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
                return nil, errdefs.ErrNotFound

            },
            pullFunc: func(ctx context.Context, ref string, opts ...containerd.RemoteOpt) (containerd.Image, error) {
                return nil, fmt.Errorf("pull failure")
            },
        },
    }

    err := client.InitEngine(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
    assert.ErrorContains(t, err, "unable to pull image")
    assert.ErrorContains(t, err, "pull failure")
}

func TestInitStartFail(t *testing.T) {
    ctx := context.Background()
    opts := EngineInitOptions{
        EngineVersion:  "engineversiongoeshere",
        RegistryPrefix: "registryprefixgoeshere",
        ConfigFile:     "/tmp/configfilegoeshere",
        EngineImage:    CommunityEngineImage,
    }
    client := baseClient{
        cclient: &fakeContainerdClient{
            containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
                return []containerd.Container{}, nil
            },
            getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
                return nil, errdefs.ErrNotFound

            },
            pullFunc: func(ctx context.Context, ref string, opts ...containerd.RemoteOpt) (containerd.Image, error) {
                return nil, nil
            },
        },
    }

    err := client.InitEngine(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
    assert.ErrorContains(t, err, "failed to create docker daemon")
}

func TestGetEngineFail(t *testing.T) {
    ctx := context.Background()
    client := baseClient{
        cclient: &fakeContainerdClient{
            containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
                return nil, fmt.Errorf("container failure")
            },
        },
    }

    _, err := client.GetEngine(ctx)
    assert.ErrorContains(t, err, "failure")
}

func TestGetEngineNotPresent(t *testing.T) {
    ctx := context.Background()
    client := baseClient{
        cclient: &fakeContainerdClient{
            containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
                return []containerd.Container{}, nil
            },
        },
    }

    _, err := client.GetEngine(ctx)
    assert.Assert(t, err == ErrEngineNotPresent)
}

func TestGetEngineFound(t *testing.T) {
    ctx := context.Background()
    container := &fakeContainer{}
    client := baseClient{
        cclient: &fakeContainerdClient{
            containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
                return []containerd.Container{container}, nil
            },
        },
    }

    c, err := client.GetEngine(ctx)
    assert.NilError(t, err)
    assert.Equal(t, c, container)
}

func TestGetEngineImageFail(t *testing.T) {
    client := baseClient{}
    container := &fakeContainer{
        imageFunc: func(context.Context) (containerd.Image, error) {
            return nil, fmt.Errorf("failure")
        },
    }

    _, err := client.getEngineImage(container)
    assert.ErrorContains(t, err, "failure")
}

func TestGetEngineImagePass(t *testing.T) {
    client := baseClient{}
    image := &fakeImage{
        nameFunc: func() string {
            return "imagenamehere"
        },
    }
    container := &fakeContainer{
        imageFunc: func(context.Context) (containerd.Image, error) {
            return image, nil
        },
    }

    name, err := client.getEngineImage(container)
    assert.NilError(t, err)
    assert.Equal(t, name, "imagenamehere")
}

func TestWaitForEngineNeverShowsUp(t *testing.T) {
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
    defer cancel()
    engineWaitInterval = 1 * time.Millisecond
    client := baseClient{
        cclient: &fakeContainerdClient{
            containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
                return []containerd.Container{}, nil
            },
        },
    }

    err := client.waitForEngine(ctx, &testOutStream{}, healthfnError)
    assert.ErrorContains(t, err, "timeout waiting")
}

func TestWaitForEnginePingFail(t *testing.T) {
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
    defer cancel()
    engineWaitInterval = 1 * time.Millisecond
    container := &fakeContainer{}
    client := baseClient{
        cclient: &fakeContainerdClient{
            containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
                return []containerd.Container{container}, nil
            },
        },
    }

    err := client.waitForEngine(ctx, &testOutStream{}, healthfnError)
    assert.ErrorContains(t, err, "ping fail")
}

func TestWaitForEngineHealthy(t *testing.T) {
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
    defer cancel()
    engineWaitInterval = 1 * time.Millisecond
    container := &fakeContainer{}
    client := baseClient{
        cclient: &fakeContainerdClient{
            containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
                return []containerd.Container{container}, nil
            },
        },
    }

    err := client.waitForEngine(ctx, &testOutStream{}, healthfnHappy)
    assert.NilError(t, err)
}

func TestRemoveEngineBadTaskBadDelete(t *testing.T) {
    ctx := context.Background()
    client := baseClient{}
    container := &fakeContainer{
        deleteFunc: func(context.Context, ...containerd.DeleteOpts) error {
            return fmt.Errorf("delete failure")
        },
        taskFunc: func(context.Context, cio.Attach) (containerd.Task, error) {
            return nil, errdefs.ErrNotFound
        },
    }

    err := client.RemoveEngine(ctx, container)
    assert.ErrorContains(t, err, "failed to remove existing engine")
    assert.ErrorContains(t, err, "delete failure")
}

func TestRemoveEngineTaskNoStatus(t *testing.T) {
    ctx := context.Background()
    client := baseClient{}
    task := &fakeTask{
        statusFunc: func(context.Context) (containerd.Status, error) {
            return containerd.Status{}, fmt.Errorf("task status failure")
        },
    }
    container := &fakeContainer{
        taskFunc: func(context.Context, cio.Attach) (containerd.Task, error) {
            return task, nil
        },
    }

    err := client.RemoveEngine(ctx, container)
    assert.ErrorContains(t, err, "task status failure")
}

func TestRemoveEngineTaskNotRunningDeleteFail(t *testing.T) {
    ctx := context.Background()
    client := baseClient{}
    task := &fakeTask{
        statusFunc: func(context.Context) (containerd.Status, error) {
            return containerd.Status{Status: containerd.Unknown}, nil
        },
        deleteFunc: func(context.Context, ...containerd.ProcessDeleteOpts) (*containerd.ExitStatus, error) {
            return nil, fmt.Errorf("task delete failure")
        },
    }
    container := &fakeContainer{
        taskFunc: func(context.Context, cio.Attach) (containerd.Task, error) {
            return task, nil
        },
    }

    err := client.RemoveEngine(ctx, container)
    assert.ErrorContains(t, err, "task delete failure")
}

func TestRemoveEngineTaskRunningKillFail(t *testing.T) {
    ctx := context.Background()
    client := baseClient{}
    task := &fakeTask{
        statusFunc: func(context.Context) (containerd.Status, error) {
            return containerd.Status{Status: containerd.Running}, nil
        },
        killFunc: func(context.Context, syscall.Signal, ...containerd.KillOpts) error {
            return fmt.Errorf("task kill failure")
        },
    }
    container := &fakeContainer{
        taskFunc: func(context.Context, cio.Attach) (containerd.Task, error) {
            return task, nil
        },
    }

    err := client.RemoveEngine(ctx, container)
    assert.ErrorContains(t, err, "task kill failure")
}

func TestRemoveEngineTaskRunningWaitFail(t *testing.T) {
    ctx := context.Background()
    client := baseClient{}
    task := &fakeTask{
        statusFunc: func(context.Context) (containerd.Status, error) {
            return containerd.Status{Status: containerd.Running}, nil
        },
        waitFunc: func(context.Context) (<-chan containerd.ExitStatus, error) {
            return nil, fmt.Errorf("task wait failure")
        },
    }
    container := &fakeContainer{
        taskFunc: func(context.Context, cio.Attach) (containerd.Task, error) {
            return task, nil
        },
    }

    err := client.RemoveEngine(ctx, container)
    assert.ErrorContains(t, err, "task wait failure")
}

func TestRemoveEngineTaskRunningHappyPath(t *testing.T) {
    ctx := context.Background()
    client := baseClient{}
    ch := make(chan containerd.ExitStatus, 1)
    task := &fakeTask{
        statusFunc: func(context.Context) (containerd.Status, error) {
            return containerd.Status{Status: containerd.Running}, nil
        },
        waitFunc: func(context.Context) (<-chan containerd.ExitStatus, error) {
            ch <- containerd.ExitStatus{}
            return ch, nil
        },
    }
    container := &fakeContainer{
        taskFunc: func(context.Context, cio.Attach) (containerd.Task, error) {
            return task, nil
        },
    }

    err := client.RemoveEngine(ctx, container)
    assert.NilError(t, err)
}

func TestRemoveEngineTaskKillTimeout(t *testing.T) {
    ctx := context.Background()
    ch := make(chan containerd.ExitStatus, 1)
    client := baseClient{}
    engineWaitTimeout = 10 * time.Millisecond
    task := &fakeTask{
        statusFunc: func(context.Context) (containerd.Status, error) {
            return containerd.Status{Status: containerd.Running}, nil
        },
        waitFunc: func(context.Context) (<-chan containerd.ExitStatus, error) {
            //ch <- containerd.ExitStatus{} // let it timeout
            return ch, nil
        },
    }
    container := &fakeContainer{
        taskFunc: func(context.Context, cio.Attach) (containerd.Task, error) {
            return task, nil
        },
    }

    err := client.RemoveEngine(ctx, container)
    assert.Assert(t, err == ErrEngineShutdownTimeout)
}

func TestStartEngineOnContainerdImageErr(t *testing.T) {
    ctx := context.Background()
    imageName := "testnamegoeshere"
    configFile := "/tmp/configfilegoeshere"
    client := baseClient{
        cclient: &fakeContainerdClient{
            getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
                return nil, fmt.Errorf("some image lookup failure")

            },
        },
    }
    err := client.startEngineOnContainerd(ctx, imageName, configFile)
    assert.ErrorContains(t, err, "some image lookup failure")
}

func TestStartEngineOnContainerdImageNotFound(t *testing.T) {
    ctx := context.Background()
    imageName := "testnamegoeshere"
    configFile := "/tmp/configfilegoeshere"
    client := baseClient{
        cclient: &fakeContainerdClient{
            getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
                return nil, errdefs.ErrNotFound

            },
        },
    }
    err := client.startEngineOnContainerd(ctx, imageName, configFile)
    assert.ErrorContains(t, err, "engine image missing")
}

func TestStartEngineOnContainerdHappy(t *testing.T) {
    ctx := context.Background()
    imageName := "testnamegoeshere"
    configFile := "/tmp/configfilegoeshere"
    ch := make(chan containerd.ExitStatus, 1)
    streams := cio.Streams{}
    task := &fakeTask{
        statusFunc: func(context.Context) (containerd.Status, error) {
            return containerd.Status{Status: containerd.Running}, nil
        },
        waitFunc: func(context.Context) (<-chan containerd.ExitStatus, error) {
            ch <- containerd.ExitStatus{}
            return ch, nil
        },
    }
    container := &fakeContainer{
        newTaskFunc: func(ctx context.Context, creator cio.Creator, opts ...containerd.NewTaskOpts) (containerd.Task, error) {
            if streams.Stdout != nil {
                streams.Stdout.Write([]byte("{}"))
            }
            return task, nil
        },
    }
    client := baseClient{
        cclient: &fakeContainerdClient{
            getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
                return nil, nil

            },
            newContainerFunc: func(ctx context.Context, id string, opts ...containerd.NewContainerOpts) (containerd.Container, error) {
                return container, nil
            },
        },
    }
    err := client.startEngineOnContainerd(ctx, imageName, configFile)
    assert.NilError(t, err)
}

func TestGetEngineConfigFilePathBadSpec(t *testing.T) {
    ctx := context.Background()
    client := baseClient{}
    container := &fakeContainer{
        specFunc: func(context.Context) (*oci.Spec, error) {
            return nil, fmt.Errorf("spec error")
        },
    }
    _, err := client.getEngineConfigFilePath(ctx, container)
    assert.ErrorContains(t, err, "spec error")
}

func TestGetEngineConfigFilePathDistinct(t *testing.T) {
    ctx := context.Background()
    client := baseClient{}
    container := &fakeContainer{
        specFunc: func(context.Context) (*oci.Spec, error) {
            return &oci.Spec{
                Process: &specs.Process{
                    Args: []string{
                        "--another-flag",
                        "foo",
                        "--config-file",
                        "configpath",
                    },
                },
            }, nil
        },
    }
    configFile, err := client.getEngineConfigFilePath(ctx, container)
    assert.NilError(t, err)
    assert.Assert(t, err, configFile == "configpath")
}

func TestGetEngineConfigFilePathEquals(t *testing.T) {
    ctx := context.Background()
    client := baseClient{}
    container := &fakeContainer{
        specFunc: func(context.Context) (*oci.Spec, error) {
            return &oci.Spec{
                Process: &specs.Process{
                    Args: []string{
                        "--another-flag=foo",
                        "--config-file=configpath",
                    },
                },
            }, nil
        },
    }
    configFile, err := client.getEngineConfigFilePath(ctx, container)
    assert.NilError(t, err)
    assert.Assert(t, err, configFile == "configpath")
}

func TestGetEngineConfigFilePathMalformed1(t *testing.T) {
    ctx := context.Background()
    client := baseClient{}
    container := &fakeContainer{
        specFunc: func(context.Context) (*oci.Spec, error) {
            return &oci.Spec{
                Process: &specs.Process{
                    Args: []string{
                        "--another-flag",
                        "--config-file",
                    },
                },
            }, nil
        },
    }
    _, err := client.getEngineConfigFilePath(ctx, container)
    assert.Assert(t, err == ErrMalformedConfigFileParam)
}

@ -0,0 +1,16 @@
// +build !windows

package containerizedengine

import (
    "github.com/containerd/containerd"
    "github.com/containerd/containerd/oci"
    "github.com/docker/cli/internal/pkg/containerized"
)

func genSpec() containerd.NewContainerOpts {
    return containerd.WithSpec(&engineSpec,
        containerized.WithAllCapabilities,
        oci.WithParentCgroupDevices,
    )
}

@ -0,0 +1,14 @@
// +build windows

package containerizedengine

import (
    "github.com/containerd/containerd"
    "github.com/docker/cli/internal/pkg/containerized"
)

func genSpec() containerd.NewContainerOpts {
    return containerd.WithSpec(&engineSpec,
        containerized.WithAllCapabilities,
    )
}

@ -0,0 +1,35 @@
package containerizedengine

import (
    "os"
    "path"
)

func (c baseClient) verifyDockerConfig(configFile string) error {

    // TODO - in the future consider leveraging containerd and a host runtime
    // to create the file. For now, just create it locally since we have to be
    // local to talk to containerd

    configDir := path.Dir(configFile)
    err := os.MkdirAll(configDir, 0644)
    if err != nil {
        return err
    }

    fd, err := os.OpenFile(configFile, os.O_RDWR|os.O_CREATE, 0755)
    if err != nil {
        return err
    }
    defer fd.Close()

    info, err := fd.Stat()
    if err != nil {
        return err
    }
    if info.Size() == 0 {
        _, err := fd.Write([]byte("{}"))
        return err
    }
    return nil
}

@ -0,0 +1,215 @@
package containerizedengine

import (
    "context"
    "encoding/json"
    "fmt"
    "io"
    "strings"
    "sync"
    "time"

    "github.com/containerd/containerd/content"
    "github.com/containerd/containerd/errdefs"
    "github.com/containerd/containerd/remotes"
    "github.com/docker/docker/pkg/jsonmessage"
    digest "github.com/opencontainers/go-digest"
    ocispec "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/sirupsen/logrus"
)

func showProgress(ctx context.Context, ongoing *jobs, cs content.Store, out io.WriteCloser) {
    var (
        ticker   = time.NewTicker(100 * time.Millisecond)
        start    = time.Now()
        enc      = json.NewEncoder(out)
        statuses = map[string]statusInfo{}
        done     bool
    )
    defer ticker.Stop()

outer:
    for {
        select {
        case <-ticker.C:

            resolved := "resolved"
            if !ongoing.isResolved() {
                resolved = "resolving"
            }
            statuses[ongoing.name] = statusInfo{
                Ref:    ongoing.name,
                Status: resolved,
            }
            keys := []string{ongoing.name}

            activeSeen := map[string]struct{}{}
            if !done {
                active, err := cs.ListStatuses(ctx, "")
                if err != nil {
                    logrus.Debugf("active check failed: %s", err)
                    continue
                }
                // update status of active entries!
                for _, active := range active {
                    statuses[active.Ref] = statusInfo{
                        Ref:       active.Ref,
                        Status:    "downloading",
                        Offset:    active.Offset,
                        Total:     active.Total,
                        StartedAt: active.StartedAt,
                        UpdatedAt: active.UpdatedAt,
                    }
                    activeSeen[active.Ref] = struct{}{}
                }
            }

            err := updateNonActive(ctx, ongoing, cs, statuses, keys, activeSeen, &done, start)
            if err != nil {
                continue outer
            }

            var ordered []statusInfo
            for _, key := range keys {
                ordered = append(ordered, statuses[key])
            }

            for _, si := range ordered {
                jm := si.JSONMessage()
                err := enc.Encode(jm)
                if err != nil {
                    logrus.Debugf("failed to encode progress message: %s", err)
                }
            }

            if done {
                out.Close()
                return
            }
        case <-ctx.Done():
            done = true // allow ui to update once more
        }
    }
}

func updateNonActive(ctx context.Context, ongoing *jobs, cs content.Store, statuses map[string]statusInfo, keys []string, activeSeen map[string]struct{}, done *bool, start time.Time) error {

    for _, j := range ongoing.jobs() {
        key := remotes.MakeRefKey(ctx, j)
        keys = append(keys, key)
        if _, ok := activeSeen[key]; ok {
            continue
        }

        status, ok := statuses[key]
        if !*done && (!ok || status.Status == "downloading") {
            info, err := cs.Info(ctx, j.Digest)
            if err != nil {
                if !errdefs.IsNotFound(err) {
                    logrus.Debugf("failed to get content info: %s", err)
                    return err
                }
                statuses[key] = statusInfo{
                    Ref:    key,
                    Status: "waiting",
                }
            } else if info.CreatedAt.After(start) {
                statuses[key] = statusInfo{
                    Ref:       key,
                    Status:    "done",
                    Offset:    info.Size,
                    Total:     info.Size,
                    UpdatedAt: info.CreatedAt,
                }
            } else {
                statuses[key] = statusInfo{
                    Ref:    key,
                    Status: "exists",
                }
            }
        } else if *done {
            if ok {
                if status.Status != "done" && status.Status != "exists" {
                    status.Status = "done"
                    statuses[key] = status
                }
            } else {
                statuses[key] = statusInfo{
                    Ref:    key,
                    Status: "done",
                }
            }
        }
    }
    return nil
}

type jobs struct {
    name     string
    added    map[digest.Digest]struct{}
    descs    []ocispec.Descriptor
    mu       sync.Mutex
    resolved bool
}

func newJobs(name string) *jobs {
    return &jobs{
        name:  name,
        added: map[digest.Digest]struct{}{},
    }
}

func (j *jobs) add(desc ocispec.Descriptor) {
    j.mu.Lock()
    defer j.mu.Unlock()
    j.resolved = true

    if _, ok := j.added[desc.Digest]; ok {
        return
    }
    j.descs = append(j.descs, desc)
    j.added[desc.Digest] = struct{}{}
}

func (j *jobs) jobs() []ocispec.Descriptor {
    j.mu.Lock()
    defer j.mu.Unlock()

    var descs []ocispec.Descriptor
    return append(descs, j.descs...)
}

func (j *jobs) isResolved() bool {
    j.mu.Lock()
    defer j.mu.Unlock()
    return j.resolved
}

// statusInfo holds the status info for an upload or download
type statusInfo struct {
    Ref       string
    Status    string
    Offset    int64
    Total     int64
    StartedAt time.Time
    UpdatedAt time.Time
}

func (s statusInfo) JSONMessage() jsonmessage.JSONMessage {
    // Shorten the ID to use up less width on the display
    id := s.Ref
    if strings.Contains(id, ":") {
        split := strings.SplitN(id, ":", 2)
        id = split[1]
    }
    id = fmt.Sprintf("%.12s", id)

    return jsonmessage.JSONMessage{
        ID:     id,
        Status: s.Status,
        Progress: &jsonmessage.JSONProgress{
            Current: s.Offset,
            Total:   s.Total,
        },
    }
}

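The helpers above are designed to be driven from an image pull: a jobs tracker is fed descriptors through an image handler while showProgress streams jsonmessage updates back to the caller. The actual pull wiring lives elsewhere in this change; the following is only a rough sketch of that pattern, and the wrapper name and pull options used here are assumptions rather than part of the commit.

package containerizedengine

import (
    "context"
    "io"

    "github.com/containerd/containerd"
    "github.com/containerd/containerd/images"
    ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// pullWithProgress (hypothetical) pulls ref while reporting progress: every
// descriptor seen by the image handler is added to the jobs tracker, and
// showProgress turns content-store status into JSON progress messages on out.
func (c baseClient) pullWithProgress(ctx context.Context, ref string, out io.WriteCloser) (containerd.Image, error) {
    ongoing := newJobs(ref)
    go showProgress(ctx, ongoing, c.cclient.ContentStore(), out)

    handler := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
        ongoing.add(desc)
        return nil, nil
    })
    return c.cclient.Pull(ctx, ref,
        containerd.WithImageHandler(handler),
        containerd.WithPullUnpack)
}
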
@ -0,0 +1,12 @@
// +build !windows

package containerizedengine

import (
    "golang.org/x/sys/unix"
)

var (
    // SIGKILL maps to unix.SIGKILL
    SIGKILL = unix.SIGKILL
)

@ -0,0 +1,12 @@
// +build windows

package containerizedengine

import (
    "syscall"
)

var (
    // SIGKILL is a placeholder on windows: all signals are ignored by containerd kill on windows
    SIGKILL = syscall.Signal(0)
)

@ -0,0 +1,159 @@
package containerizedengine

import (
    "context"
    "errors"
    "io"

    "github.com/containerd/containerd"
    "github.com/containerd/containerd/containers"
    "github.com/containerd/containerd/content"
    registryclient "github.com/docker/cli/cli/registry/client"
    "github.com/docker/docker/api/types"
    ver "github.com/hashicorp/go-version"
    specs "github.com/opencontainers/runtime-spec/specs-go"
)

const (
    // CommunityEngineImage is the repo name for the community engine
    CommunityEngineImage = "engine-community"

    // EnterpriseEngineImage is the repo name for the enterprise engine
    EnterpriseEngineImage = "engine-enterprise"

    containerdSockPath  = "/run/containerd/containerd.sock"
    engineContainerName = "dockerd"
    engineNamespace     = "docker"

    // Used to signal the containerd-proxy if it should manage
    proxyLabel = "com.docker/containerd-proxy.scope"
)

var (
    // ErrEngineAlreadyPresent returned when engine already present and should not be
    ErrEngineAlreadyPresent = errors.New("engine already present, use the update command to change versions")

    // ErrEngineNotPresent returned when the engine is not present and should be
    ErrEngineNotPresent = errors.New("engine not present")

    // ErrMalformedConfigFileParam returned if the engine config file parameter is malformed
    ErrMalformedConfigFileParam = errors.New("malformed --config-file param on engine")

    // ErrEngineConfigLookupFailure returned if unable to lookup existing engine configuration
    ErrEngineConfigLookupFailure = errors.New("unable to lookup existing engine configuration")

    // ErrEngineShutdownTimeout returned if the engine failed to shutdown in time
    ErrEngineShutdownTimeout = errors.New("timeout waiting for engine to exit")

    // ErrEngineImageMissingTag returned if the engine image is missing the version tag
    ErrEngineImageMissingTag = errors.New("malformed engine image missing tag")

    engineSpec = specs.Spec{
        Root: &specs.Root{
            Path: "rootfs",
        },
        Process: &specs.Process{
            Cwd: "/",
            Args: []string{
                // In general, configuration should be driven by the config file, not these flags
                // TODO - consider moving more of these to the config file, and make sure the defaults are set if not present.
                "/sbin/dockerd",
                "-s",
                "overlay2",
                "--containerd",
                "/run/containerd/containerd.sock",
                "--default-runtime",
                "containerd",
                "--add-runtime",
                "containerd=runc",
            },
            User: specs.User{
                UID: 0,
                GID: 0,
            },
            Env: []string{
                "PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin",
            },
            NoNewPrivileges: false,
        },
    }
)

// Client can be used to manage the lifecycle of
// dockerd running as a container on containerd.
type Client interface {
    Close() error
    ActivateEngine(ctx context.Context,
        opts EngineInitOptions,
        out OutStream,
        authConfig *types.AuthConfig,
        healthfn func(context.Context) error) error
    InitEngine(ctx context.Context,
        opts EngineInitOptions,
        out OutStream,
        authConfig *types.AuthConfig,
        healthfn func(context.Context) error) error
    DoUpdate(ctx context.Context,
        opts EngineInitOptions,
        out OutStream,
        authConfig *types.AuthConfig,
        healthfn func(context.Context) error) error
    GetEngineVersions(ctx context.Context, registryClient registryclient.RegistryClient, currentVersion, imageName string) (AvailableVersions, error)

    GetEngine(ctx context.Context) (containerd.Container, error)
    RemoveEngine(ctx context.Context, engine containerd.Container) error
    GetCurrentEngineVersion(ctx context.Context) (EngineInitOptions, error)
}
type baseClient struct {
    cclient containerdClient
}

// EngineInitOptions contains the configuration settings
// used during initialization of a containerized docker engine
type EngineInitOptions struct {
    RegistryPrefix string
    EngineImage    string
    EngineVersion  string
    ConfigFile     string
    scope          string
}

// containerdClient abstracts the containerd client to aid in testability
type containerdClient interface {
    Containers(ctx context.Context, filters ...string) ([]containerd.Container, error)
    NewContainer(ctx context.Context, id string, opts ...containerd.NewContainerOpts) (containerd.Container, error)
    Pull(ctx context.Context, ref string, opts ...containerd.RemoteOpt) (containerd.Image, error)
    GetImage(ctx context.Context, ref string) (containerd.Image, error)
    Close() error
    ContentStore() content.Store
    ContainerService() containers.Store
}

// AvailableVersions groups the available versions which were discovered
type AvailableVersions struct {
    Downgrades []DockerVersion
    Patches    []DockerVersion
    Upgrades   []DockerVersion
}

// DockerVersion wraps a semantic version to retain the original tag
// since the docker date based versions don't strictly follow semantic
// versioning (leading zeros, etc.)
type DockerVersion struct {
    ver.Version
    Tag string
}

// Update stores available updates for rendering in a table
type Update struct {
    Type    string
    Version string
    Notes   string
}

// OutStream is an output stream used to write normal program output.
type OutStream interface {
    io.Writer
    FD() uintptr
    IsTerminal() bool
}

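To make the contract above concrete, here is a minimal, hypothetical caller sketch (not part of this commit) that uses only the Client, OutStream and EngineInitOptions definitions from this file to report which engine image and version are currently installed.

package containerizedengine

import (
    "context"
    "fmt"
)

// printCurrentEngine is an illustrative helper: it asks the Client for the
// currently installed engine and writes the fully qualified image reference
// it was derived from to the given output stream.
func printCurrentEngine(ctx context.Context, client Client, out OutStream) error {
    opts, err := client.GetCurrentEngineVersion(ctx)
    if err != nil {
        return err
    }
    _, err = fmt.Fprintf(out, "%s/%s:%s\n", opts.RegistryPrefix, opts.EngineImage, opts.EngineVersion)
    return err
}
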
@ -0,0 +1,130 @@
package containerizedengine

import (
    "context"
    "fmt"
    "path"
    "strings"

    "github.com/containerd/containerd/errdefs"
    "github.com/containerd/containerd/namespaces"
    "github.com/docker/cli/internal/pkg/containerized"
    "github.com/docker/distribution/reference"
    "github.com/docker/docker/api/types"
    "github.com/pkg/errors"
)

// GetCurrentEngineVersion determines the current type of engine (image) and version
func (c baseClient) GetCurrentEngineVersion(ctx context.Context) (EngineInitOptions, error) {
    ctx = namespaces.WithNamespace(ctx, engineNamespace)
    ret := EngineInitOptions{}
    currentEngine := CommunityEngineImage
    engine, err := c.GetEngine(ctx)
    if err != nil {
        if err == ErrEngineNotPresent {
            return ret, errors.Wrap(err, "failed to find existing engine")
        }
        return ret, err
    }
    imageName, err := c.getEngineImage(engine)
    if err != nil {
        return ret, err
    }
    distributionRef, err := reference.ParseNormalizedNamed(imageName)
    if err != nil {
        return ret, errors.Wrapf(err, "failed to parse image name: %s", imageName)
    }

    if strings.Contains(distributionRef.Name(), EnterpriseEngineImage) {
        currentEngine = EnterpriseEngineImage
    }
    taggedRef, ok := distributionRef.(reference.NamedTagged)
    if !ok {
        return ret, ErrEngineImageMissingTag
    }
    ret.EngineImage = currentEngine
    ret.EngineVersion = taggedRef.Tag()
    ret.RegistryPrefix = reference.Domain(taggedRef) + "/" + path.Dir(reference.Path(taggedRef))
    return ret, nil
}

// ActivateEngine will switch the image from the CE to EE image
func (c baseClient) ActivateEngine(ctx context.Context, opts EngineInitOptions, out OutStream,
    authConfig *types.AuthConfig, healthfn func(context.Context) error) error {

    // set the proxy scope to "ee" for activate flows
    opts.scope = "ee"

    ctx = namespaces.WithNamespace(ctx, engineNamespace)

    // If version is unspecified, use the existing engine version
    if opts.EngineVersion == "" {
        currentOpts, err := c.GetCurrentEngineVersion(ctx)
        if err != nil {
            return err
        }
        opts.EngineVersion = currentOpts.EngineVersion
        if currentOpts.EngineImage == EnterpriseEngineImage {
            // This is a "no-op" activation so the only change would be the license - don't update the engine itself
            return nil
        }
    }
    return c.DoUpdate(ctx, opts, out, authConfig, healthfn)
}

// DoUpdate performs the underlying engine update
func (c baseClient) DoUpdate(ctx context.Context, opts EngineInitOptions, out OutStream,
    authConfig *types.AuthConfig, healthfn func(context.Context) error) error {

    ctx = namespaces.WithNamespace(ctx, engineNamespace)
    if opts.EngineVersion == "" {
        // TODO - Future enhancement: This could be improved to be
        // smart about figuring out the latest patch rev for the
        // current engine version and automatically apply it so users
        // could stay in sync by simply having a scheduled
        // `docker engine update`
        return fmt.Errorf("please pick the version you want to update to")
    }

    imageName := fmt.Sprintf("%s/%s:%s", opts.RegistryPrefix, opts.EngineImage, opts.EngineVersion)

    // Look for desired image
    image, err := c.cclient.GetImage(ctx, imageName)
    if err != nil {
        if errdefs.IsNotFound(err) {
            image, err = c.pullWithAuth(ctx, imageName, out, authConfig)
            if err != nil {
                return errors.Wrapf(err, "unable to pull image %s", imageName)
            }
        } else {
            return errors.Wrapf(err, "unable to check for image %s", imageName)
        }
    }

    // Gather information about the existing engine so we can recreate it
    engine, err := c.GetEngine(ctx)
    if err != nil {
        if err == ErrEngineNotPresent {
            return errors.Wrap(err, "unable to find existing engine - please use init")
        }
        return err
    }

    // TODO verify the image has changed and don't update if nothing has changed

    err = containerized.AtomicImageUpdate(ctx, engine, image, func() error {
        ctx, cancel := context.WithTimeout(ctx, engineWaitTimeout)
        defer cancel()
        return c.waitForEngine(ctx, out, healthfn)
    })
    if err == nil && opts.scope != "" {
        var labels map[string]string
        labels, err = engine.Labels(ctx)
        if err != nil {
            return err
        }
        labels[proxyLabel] = opts.scope
        _, err = engine.SetLabels(ctx, labels)
    }
    return err
}

@ -0,0 +1,318 @@
package containerizedengine

import (
    "context"
    "fmt"
    "testing"

    "github.com/containerd/containerd"
    "github.com/containerd/containerd/cio"
    "github.com/containerd/containerd/errdefs"
    "github.com/docker/docker/api/types"
    "gotest.tools/assert"
)

func TestGetCurrentEngineVersionHappy(t *testing.T) {
    ctx := context.Background()
    image := &fakeImage{
        nameFunc: func() string {
            return "acme.com/dockermirror/" + CommunityEngineImage + ":engineversion"
        },
    }
    container := &fakeContainer{
        imageFunc: func(context.Context) (containerd.Image, error) {
            return image, nil
        },
    }
    client := baseClient{
        cclient: &fakeContainerdClient{
            containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
                return []containerd.Container{container}, nil
            },
        },
    }

    opts, err := client.GetCurrentEngineVersion(ctx)
    assert.NilError(t, err)
    assert.Equal(t, opts.EngineImage, CommunityEngineImage)
    assert.Equal(t, opts.RegistryPrefix, "acme.com/dockermirror")
    assert.Equal(t, opts.EngineVersion, "engineversion")
}

func TestGetCurrentEngineVersionEnterpriseHappy(t *testing.T) {
    ctx := context.Background()
    image := &fakeImage{
        nameFunc: func() string {
            return "docker.io/docker/" + EnterpriseEngineImage + ":engineversion"
        },
    }
    container := &fakeContainer{
        imageFunc: func(context.Context) (containerd.Image, error) {
            return image, nil
        },
    }
    client := baseClient{
        cclient: &fakeContainerdClient{
            containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
                return []containerd.Container{container}, nil
            },
        },
    }

    opts, err := client.GetCurrentEngineVersion(ctx)
    assert.NilError(t, err)
    assert.Equal(t, opts.EngineImage, EnterpriseEngineImage)
    assert.Equal(t, opts.EngineVersion, "engineversion")
    assert.Equal(t, opts.RegistryPrefix, "docker.io/docker")
}

func TestGetCurrentEngineVersionNoEngine(t *testing.T) {
    ctx := context.Background()
    client := baseClient{
        cclient: &fakeContainerdClient{
            containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
                return []containerd.Container{}, nil
            },
        },
    }

    _, err := client.GetCurrentEngineVersion(ctx)
    assert.ErrorContains(t, err, "failed to find existing engine")
}

func TestGetCurrentEngineVersionMiscEngineError(t *testing.T) {
    ctx := context.Background()
    expectedError := fmt.Errorf("some container lookup error")
    client := baseClient{
        cclient: &fakeContainerdClient{
            containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
                return nil, expectedError
            },
        },
    }

    _, err := client.GetCurrentEngineVersion(ctx)
    assert.Assert(t, err == expectedError)
}

func TestGetCurrentEngineVersionImageFailure(t *testing.T) {
    ctx := context.Background()
    container := &fakeContainer{
        imageFunc: func(context.Context) (containerd.Image, error) {
            return nil, fmt.Errorf("container image failure")
        },
    }
    client := baseClient{
        cclient: &fakeContainerdClient{
            containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
                return []containerd.Container{container}, nil
            },
        },
    }

    _, err := client.GetCurrentEngineVersion(ctx)
    assert.ErrorContains(t, err, "container image failure")
}

func TestGetCurrentEngineVersionMalformed(t *testing.T) {
    ctx := context.Background()
    image := &fakeImage{
        nameFunc: func() string {
            return "imagename"
        },
    }
    container := &fakeContainer{
        imageFunc: func(context.Context) (containerd.Image, error) {
            return image, nil
        },
    }
    client := baseClient{
        cclient: &fakeContainerdClient{
            containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
                return []containerd.Container{container}, nil
            },
        },
    }

    _, err := client.GetCurrentEngineVersion(ctx)
    assert.Assert(t, err == ErrEngineImageMissingTag)
}

func TestActivateNoEngine(t *testing.T) {
    ctx := context.Background()
    client := baseClient{
        cclient: &fakeContainerdClient{
            containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
                return []containerd.Container{}, nil
            },
        },
    }
    opts := EngineInitOptions{
        EngineVersion:  "engineversiongoeshere",
        RegistryPrefix: "registryprefixgoeshere",
        ConfigFile:     "/tmp/configfilegoeshere",
        EngineImage:    EnterpriseEngineImage,
    }

    err := client.ActivateEngine(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
    assert.ErrorContains(t, err, "unable to find")
}

func TestActivateNoChange(t *testing.T) {
    ctx := context.Background()
    registryPrefix := "registryprefixgoeshere"
    image := &fakeImage{
        nameFunc: func() string {
            return registryPrefix + "/" + EnterpriseEngineImage + ":engineversion"
        },
    }
    container := &fakeContainer{
        imageFunc: func(context.Context) (containerd.Image, error) {
            return image, nil
        },
        taskFunc: func(context.Context, cio.Attach) (containerd.Task, error) {
            return nil, errdefs.ErrNotFound
        },
        labelsFunc: func(context.Context) (map[string]string, error) {
            return map[string]string{}, nil
        },
    }
    client := baseClient{
        cclient: &fakeContainerdClient{
            containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
                return []containerd.Container{container}, nil
            },
        },
    }
    opts := EngineInitOptions{
        EngineVersion:  "engineversiongoeshere",
        RegistryPrefix: "registryprefixgoeshere",
        ConfigFile:     "/tmp/configfilegoeshere",
        EngineImage:    EnterpriseEngineImage,
    }

    err := client.ActivateEngine(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
    assert.NilError(t, err)
}

func TestActivateDoUpdateFail(t *testing.T) {
    ctx := context.Background()
    registryPrefix := "registryprefixgoeshere"
    image := &fakeImage{
        nameFunc: func() string {
            return registryPrefix + "/ce-engine:engineversion"
        },
    }
    container := &fakeContainer{
        imageFunc: func(context.Context) (containerd.Image, error) {
            return image, nil
        },
    }
    client := baseClient{
        cclient: &fakeContainerdClient{
            containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
                return []containerd.Container{container}, nil
            },
            getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
                return nil, fmt.Errorf("something went wrong")

            },
        },
    }
    opts := EngineInitOptions{
        EngineVersion:  "engineversiongoeshere",
        RegistryPrefix: "registryprefixgoeshere",
        ConfigFile:     "/tmp/configfilegoeshere",
        EngineImage:    EnterpriseEngineImage,
    }

    err := client.ActivateEngine(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
    assert.ErrorContains(t, err, "check for image")
    assert.ErrorContains(t, err, "something went wrong")
}

func TestDoUpdateNoVersion(t *testing.T) {
    ctx := context.Background()
    opts := EngineInitOptions{
        EngineVersion:  "",
        RegistryPrefix: "registryprefixgoeshere",
        ConfigFile:     "/tmp/configfilegoeshere",
        EngineImage:    EnterpriseEngineImage,
    }
    client := baseClient{}
    err := client.DoUpdate(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
    assert.ErrorContains(t, err, "please pick the version you")
}

func TestDoUpdateImageMiscError(t *testing.T) {
    ctx := context.Background()
    opts := EngineInitOptions{
        EngineVersion:  "engineversiongoeshere",
        RegistryPrefix: "registryprefixgoeshere",
        ConfigFile:     "/tmp/configfilegoeshere",
        EngineImage:    "testnamegoeshere",
    }
    client := baseClient{
        cclient: &fakeContainerdClient{
            getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
                return nil, fmt.Errorf("something went wrong")

            },
        },
    }
    err := client.DoUpdate(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
    assert.ErrorContains(t, err, "check for image")
    assert.ErrorContains(t, err, "something went wrong")
}

func TestDoUpdatePullFail(t *testing.T) {
    ctx := context.Background()
    opts := EngineInitOptions{
        EngineVersion:  "engineversiongoeshere",
        RegistryPrefix: "registryprefixgoeshere",
        ConfigFile:     "/tmp/configfilegoeshere",
        EngineImage:    "testnamegoeshere",
    }
    client := baseClient{
        cclient: &fakeContainerdClient{
            getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
                return nil, errdefs.ErrNotFound

            },
            pullFunc: func(ctx context.Context, ref string, opts ...containerd.RemoteOpt) (containerd.Image, error) {
                return nil, fmt.Errorf("pull failure")
            },
        },
    }
    err := client.DoUpdate(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
    assert.ErrorContains(t, err, "unable to pull")
    assert.ErrorContains(t, err, "pull failure")
}

func TestDoUpdateEngineMissing(t *testing.T) {
    ctx := context.Background()
    opts := EngineInitOptions{
        EngineVersion:  "engineversiongoeshere",
        RegistryPrefix: "registryprefixgoeshere",
        ConfigFile:     "/tmp/configfilegoeshere",
        EngineImage:    "testnamegoeshere",
    }
    image := &fakeImage{
        nameFunc: func() string {
            return "imagenamehere"
        },
    }
    client := baseClient{
        cclient: &fakeContainerdClient{
            getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
                return image, nil

            },
            containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
                return []containerd.Container{}, nil
            },
        },
    }
    err := client.DoUpdate(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
    assert.ErrorContains(t, err, "unable to find existing engine")
}

@ -0,0 +1,72 @@
package containerizedengine

import (
    "context"
    "sort"

    registryclient "github.com/docker/cli/cli/registry/client"
    "github.com/docker/distribution/reference"
    ver "github.com/hashicorp/go-version"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
)

// GetEngineVersions reports the versions of the engine that are available
func (c baseClient) GetEngineVersions(ctx context.Context, registryClient registryclient.RegistryClient, currentVersion, imageName string) (AvailableVersions, error) {
    imageRef, err := reference.ParseNormalizedNamed(imageName)
    if err != nil {
        return AvailableVersions{}, err
    }

    tags, err := registryClient.GetTags(ctx, imageRef)
    if err != nil {
        return AvailableVersions{}, err
    }

    return parseTags(tags, currentVersion)
}

func parseTags(tags []string, currentVersion string) (AvailableVersions, error) {
    var ret AvailableVersions
    currentVer, err := ver.NewVersion(currentVersion)
    if err != nil {
        return ret, errors.Wrapf(err, "failed to parse existing version %s", currentVersion)
    }
    downgrades := []DockerVersion{}
    patches := []DockerVersion{}
    upgrades := []DockerVersion{}
    currentSegments := currentVer.Segments()
    for _, tag := range tags {
        tmp, err := ver.NewVersion(tag)
        if err != nil {
            logrus.Debugf("Unable to parse %s: %s", tag, err)
            continue
        }
        testVersion := DockerVersion{Version: *tmp, Tag: tag}
        if testVersion.LessThan(currentVer) {
            downgrades = append(downgrades, testVersion)
            continue
        }
        testSegments := testVersion.Segments()
        // lib always provides min 3 segments
        if testSegments[0] == currentSegments[0] &&
            testSegments[1] == currentSegments[1] {
            patches = append(patches, testVersion)
        } else {
            upgrades = append(upgrades, testVersion)
        }
    }
    sort.Slice(downgrades, func(i, j int) bool {
        return downgrades[i].Version.LessThan(&downgrades[j].Version)
    })
    sort.Slice(patches, func(i, j int) bool {
        return patches[i].Version.LessThan(&patches[j].Version)
    })
    sort.Slice(upgrades, func(i, j int) bool {
        return upgrades[i].Version.LessThan(&upgrades[j].Version)
    })
    ret.Downgrades = downgrades
    ret.Patches = patches
    ret.Upgrades = upgrades
    return ret, nil
}

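As a quick illustration of how parseTags buckets registry tags (the values below are made up, not taken from this change): relative to a current version of 18.09.0, older releases and pre-releases land in Downgrades, releases with the same major.minor land in Patches, newer minors land in Upgrades, and tags that do not parse as versions (such as "latest") are skipped.

package containerizedengine

import "fmt"

// exampleParseTags is a hypothetical usage sketch for parseTags.
func exampleParseTags() {
    tags := []string{"18.03.1", "18.09.1", "18.09.0-beta1", "19.03.0", "latest"}
    available, err := parseTags(tags, "18.09.0")
    if err != nil {
        panic(err)
    }
    // Expected grouping for these inputs: 2 downgrades, 1 patch, 1 upgrade.
    fmt.Println(len(available.Downgrades), len(available.Patches), len(available.Upgrades))
}
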
@ -0,0 +1,80 @@
package containerizedengine

import (
    "context"
    "testing"

    "gotest.tools/assert"
)

func TestGetEngineVersionsBadImage(t *testing.T) {
    ctx := context.Background()
    client := baseClient{}

    currentVersion := "currentversiongoeshere"
    imageName := "this is an illegal image $%^&"
    _, err := client.GetEngineVersions(ctx, nil, currentVersion, imageName)
    assert.ErrorContains(t, err, "invalid reference format")
}

func TestParseTagsSimple(t *testing.T) {
    tags := []string{"1.0.0", "1.1.2", "1.1.1", "1.2.2"}
    currentVersion := "1.1.0"
    res, err := parseTags(tags, currentVersion)
    assert.NilError(t, err)

    assert.Assert(t, err, "already present")
    assert.Assert(t, len(res.Downgrades) == 1 && res.Downgrades[0].Tag == "1.0.0")
    assert.Assert(t, len(res.Patches) == 2 && res.Patches[0].Tag == "1.1.1" && res.Patches[1].Tag == "1.1.2")
    assert.Assert(t, len(res.Upgrades) == 1 && res.Upgrades[0].Tag == "1.2.2")
}

func TestParseConfirmMinSegments(t *testing.T) {
    tags := []string{"1", "1.1.1", "2"}
    currentVersion := "1.1"
    res, err := parseTags(tags, currentVersion)
    assert.NilError(t, err)

    assert.Assert(t, err, "already present")
    assert.Assert(t, len(res.Downgrades) == 1 && res.Downgrades[0].Tag == "1")
    assert.Assert(t, len(res.Patches) == 1 && res.Patches[0].Tag == "1.1.1")
    assert.Assert(t, len(res.Upgrades) == 1 && res.Upgrades[0].Tag == "2")
}

func TestParseTagsFilterPrerelease(t *testing.T) {
    tags := []string{"1.0.0", "1.1.1", "1.2.2", "1.1.0-beta1"}
    currentVersion := "1.1.0"
    res, err := parseTags(tags, currentVersion)
    assert.NilError(t, err)

    assert.Assert(t, err, "already present")
    assert.Assert(t, len(res.Downgrades) == 2 && res.Downgrades[0].Tag == "1.0.0")
    assert.Assert(t, len(res.Patches) == 1 && res.Patches[0].Tag == "1.1.1")
    assert.Assert(t, len(res.Upgrades) == 1 && res.Upgrades[0].Tag == "1.2.2")
}

func TestParseTagsBadTag(t *testing.T) {
    tags := []string{"1.0.0", "1.1.1", "1.2.2", "notasemanticversion"}
    currentVersion := "1.1.0"
    res, err := parseTags(tags, currentVersion)
    assert.NilError(t, err)

    assert.Assert(t, err, "already present")
    assert.Assert(t, len(res.Downgrades) == 1 && res.Downgrades[0].Tag == "1.0.0")
    assert.Assert(t, len(res.Patches) == 1 && res.Patches[0].Tag == "1.1.1")
    assert.Assert(t, len(res.Upgrades) == 1 && res.Upgrades[0].Tag == "1.2.2")
}

func TestParseBadCurrent(t *testing.T) {
    tags := []string{"1.0.0", "1.1.2", "1.1.1", "1.2.2"}
    currentVersion := "notasemanticversion"
    _, err := parseTags(tags, currentVersion)
    assert.ErrorContains(t, err, "failed to parse existing")
}

func TestParseBadCurrent2(t *testing.T) {
    tags := []string{"1.0.0", "1.1.2", "1.1.1", "1.2.2"}
    currentVersion := ""
    _, err := parseTags(tags, currentVersion)
    assert.ErrorContains(t, err, "failed to parse existing")
}

@@ -0,0 +1,104 @@
package licenseutils

import (
	"context"

	"github.com/docker/licensing"
	"github.com/docker/licensing/model"
)

type (
	fakeLicensingClient struct {
		loginViaAuthFunc                 func(ctx context.Context, username, password string) (authToken string, err error)
		getHubUserOrgsFunc               func(ctx context.Context, authToken string) (orgs []model.Org, err error)
		getHubUserByNameFunc             func(ctx context.Context, username string) (user *model.User, err error)
		verifyLicenseFunc                func(ctx context.Context, license model.IssuedLicense) (res *model.CheckResponse, err error)
		generateNewTrialSubscriptionFunc func(ctx context.Context, authToken, dockerID, email string) (subscriptionID string, err error)
		listSubscriptionsFunc            func(ctx context.Context, authToken, dockerID string) (response []*model.Subscription, err error)
		listSubscriptionsDetailsFunc     func(ctx context.Context, authToken, dockerID string) (response []*model.SubscriptionDetail, err error)
		downloadLicenseFromHubFunc       func(ctx context.Context, authToken, subscriptionID string) (license *model.IssuedLicense, err error)
		parseLicenseFunc                 func(license []byte) (parsedLicense *model.IssuedLicense, err error)
		storeLicenseFunc                 func(ctx context.Context, dclnt licensing.WrappedDockerClient, licenses *model.IssuedLicense, localRootDir string) error
		loadLocalLicenseFunc             func(ctx context.Context, dclnt licensing.WrappedDockerClient) (*model.Subscription, error)
	}
)

func (c *fakeLicensingClient) LoginViaAuth(ctx context.Context, username, password string) (authToken string, err error) {
	if c.loginViaAuthFunc != nil {
		return c.loginViaAuthFunc(ctx, username, password)
	}
	return "", nil
}

func (c *fakeLicensingClient) GetHubUserOrgs(ctx context.Context, authToken string) (orgs []model.Org, err error) {
	if c.getHubUserOrgsFunc != nil {
		return c.getHubUserOrgsFunc(ctx, authToken)
	}
	return nil, nil
}

func (c *fakeLicensingClient) GetHubUserByName(ctx context.Context, username string) (user *model.User, err error) {
	if c.getHubUserByNameFunc != nil {
		return c.getHubUserByNameFunc(ctx, username)
	}
	return nil, nil
}

func (c *fakeLicensingClient) VerifyLicense(ctx context.Context, license model.IssuedLicense) (res *model.CheckResponse, err error) {
	if c.verifyLicenseFunc != nil {
		return c.verifyLicenseFunc(ctx, license)
	}
	return nil, nil
}

func (c *fakeLicensingClient) GenerateNewTrialSubscription(ctx context.Context, authToken, dockerID, email string) (subscriptionID string, err error) {
	if c.generateNewTrialSubscriptionFunc != nil {
		return c.generateNewTrialSubscriptionFunc(ctx, authToken, dockerID, email)
	}
	return "", nil
}

func (c *fakeLicensingClient) ListSubscriptions(ctx context.Context, authToken, dockerID string) (response []*model.Subscription, err error) {
	if c.listSubscriptionsFunc != nil {
		return c.listSubscriptionsFunc(ctx, authToken, dockerID)
	}
	return nil, nil
}

func (c *fakeLicensingClient) ListSubscriptionsDetails(ctx context.Context, authToken, dockerID string) (response []*model.SubscriptionDetail, err error) {
	if c.listSubscriptionsDetailsFunc != nil {
		return c.listSubscriptionsDetailsFunc(ctx, authToken, dockerID)
	}
	return nil, nil
}

func (c *fakeLicensingClient) DownloadLicenseFromHub(ctx context.Context, authToken, subscriptionID string) (license *model.IssuedLicense, err error) {
	if c.downloadLicenseFromHubFunc != nil {
		return c.downloadLicenseFromHubFunc(ctx, authToken, subscriptionID)
	}
	return nil, nil
}

func (c *fakeLicensingClient) ParseLicense(license []byte) (parsedLicense *model.IssuedLicense, err error) {
	if c.parseLicenseFunc != nil {
		return c.parseLicenseFunc(license)
	}
	return nil, nil
}

func (c *fakeLicensingClient) StoreLicense(ctx context.Context, dclnt licensing.WrappedDockerClient, licenses *model.IssuedLicense, localRootDir string) error {
	if c.storeLicenseFunc != nil {
		return c.storeLicenseFunc(ctx, dclnt, licenses, localRootDir)
	}
	return nil
}

func (c *fakeLicensingClient) LoadLocalLicense(ctx context.Context, dclnt licensing.WrappedDockerClient) (*model.Subscription, error) {
	if c.loadLocalLicenseFunc != nil {
		return c.loadLocalLicenseFunc(ctx, dclnt)
	}
	return nil, nil
}
@@ -0,0 +1,25 @@
package licenseutils

import (
	"github.com/docker/licensing/model"
)

var (
	// licensingDefaultBaseURI is the default license server base URL
	licensingDefaultBaseURI = "https://store.docker.com"

	// licensingPublicKey is the official public license key for store.docker.com
	// nolint: lll
licensingPublicKey = "LS0tLS1CRUdJTiBQVUJMSUMgS0VZLS0tLS0Ka2lkOiBKN0xEOjY3VlI6TDVIWjpVN0JBOjJPNEc6NEFMMzpPRjJOOkpIR0I6RUZUSDo1Q1ZROk1GRU86QUVJVAoKTUlJQ0lqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FnOEFNSUlDQ2dLQ0FnRUF5ZEl5K2xVN283UGNlWSs0K3MrQwpRNU9FZ0N5RjhDeEljUUlXdUs4NHBJaVpjaVk2NzMweUNZbndMU0tUbHcrVTZVQy9RUmVXUmlvTU5ORTVEczVUCllFWGJHRzZvbG0ycWRXYkJ3Y0NnKzJVVUgvT2NCOVd1UDZnUlBIcE1GTXN4RHpXd3ZheThKVXVIZ1lVTFVwbTEKSXYrbXE3bHA1blEvUnhyVDBLWlJBUVRZTEVNRWZHd20zaE1PL2dlTFBTK2hnS1B0SUhsa2c2L1djb3hUR29LUAo3OWQvd2FIWXhHTmw3V2hTbmVpQlN4YnBiUUFLazIxbGc3OThYYjd2WnlFQVRETXJSUjlNZUU2QWRqNUhKcFkzCkNveVJBUENtYUtHUkNLNHVvWlNvSXUwaEZWbEtVUHliYncwMDBHTyt3YTJLTjhVd2dJSW0waTVJMXVXOUdrcTQKempCeTV6aGdxdVVYYkc5YldQQU9ZcnE1UWE4MUR4R2NCbEp5SFlBcCtERFBFOVRHZzR6WW1YakpueFpxSEVkdQpHcWRldlo4WE1JMHVrZmtHSUkxNHdVT2lNSUlJclhsRWNCZi80Nkk4Z1FXRHp4eWNaZS9KR1grTEF1YXlYcnlyClVGZWhWTlVkWlVsOXdYTmFKQitrYUNxejVRd2FSOTNzR3crUVNmdEQwTnZMZTdDeU9IK0U2dmc2U3QvTmVUdmcKdjhZbmhDaVhJbFo4SE9mSXdOZTd0RUYvVWN6NU9iUHlrbTN0eWxyTlVqdDBWeUFtdHRhY1ZJMmlHaWhjVVBybQprNGxWSVo3VkQvTFNXK2k3eW9TdXJ0cHNQWGNlMnBLRElvMzBsSkdoTy8zS1VtbDJTVVpDcXpKMXlFbUtweXNICjVIRFc5Y3NJRkNBM2RlQWpmWlV2TjdVQ0F3RUFBUT09Ci0tLS0tRU5EIFBVQkxJQyBLRVktLS0tLQo="
)

type (
	// LicenseDisplay stores license details for display
	LicenseDisplay struct {
		model.Subscription
		Num              int
		Owner            string
		ComponentsString string
	}
)
@@ -0,0 +1,189 @@
package licenseutils

import (
	"context"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
	"strings"

	"github.com/docker/docker/api/types"
	"github.com/docker/licensing"
	"github.com/docker/licensing/model"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

// HubUser wraps a licensing client and holds key information
// for a user to avoid multiple lookups
type HubUser struct {
	client licensing.Client
	token  string
	User   model.User
	Orgs   []model.Org
}

// GetOrgByID finds the org by the ID in the user's list of orgs
func (u HubUser) GetOrgByID(orgID string) (model.Org, error) {
	for _, org := range u.Orgs {
		if org.ID == orgID {
			return org, nil
		}
	}
	return model.Org{}, fmt.Errorf("org %s not found", orgID)
}

// Login to the license server and return a client that can be used to look up and download license files or generate new trial licenses
func Login(ctx context.Context, authConfig *types.AuthConfig) (HubUser, error) {
	baseURI, err := url.Parse(licensingDefaultBaseURI)
	if err != nil {
		return HubUser{}, err
	}

	lclient, err := licensing.New(&licensing.Config{
		BaseURI:    *baseURI,
		HTTPClient: &http.Client{},
		PublicKey:  licensingPublicKey,
	})
	if err != nil {
		return HubUser{}, err
	}

	// For licensing we know they must have a valid login session
	if authConfig.Username == "" {
		return HubUser{}, fmt.Errorf("you must be logged in to access licenses. Please use 'docker login' then try again")
	}
	token, err := lclient.LoginViaAuth(ctx, authConfig.Username, authConfig.Password)
	if err != nil {
		return HubUser{}, err
	}
	user, err := lclient.GetHubUserByName(ctx, authConfig.Username)
	if err != nil {
		return HubUser{}, err
	}
	orgs, err := lclient.GetHubUserOrgs(ctx, token)
	if err != nil {
		return HubUser{}, err
	}
	return HubUser{
		client: lclient,
		token:  token,
		User:   *user,
		Orgs:   orgs,
	}, nil
}

// GetAvailableLicenses finds all available licenses for a given account and their orgs
func (u HubUser) GetAvailableLicenses(ctx context.Context) ([]LicenseDisplay, error) {
	subs, err := u.client.ListSubscriptions(ctx, u.token, u.User.ID)
	if err != nil {
		return nil, err
	}
	for _, org := range u.Orgs {
		orgSub, err := u.client.ListSubscriptions(ctx, u.token, org.ID)
		if err != nil {
			return nil, err
		}
		subs = append(subs, orgSub...)
	}

	// Convert the SubscriptionDetails to a more user-friendly type to render in the CLI

	res := []LicenseDisplay{}

	// Filter out expired licenses
	i := 0
	for _, s := range subs {
		if s.State != "expired" && s.Expires != nil {
			owner := ""
			if s.DockerID == u.User.ID {
				owner = u.User.Username
			} else {
				ownerOrg, err := u.GetOrgByID(s.DockerID)
				if err == nil {
					owner = ownerOrg.Orgname
				} else {
					owner = "unknown"
					logrus.Debugf("Unable to lookup org ID %s: %s", s.DockerID, err)
				}
			}
			comps := []string{}
			for _, pc := range s.PricingComponents {
				comps = append(comps, fmt.Sprintf("%s:%d", pc.Name, pc.Value))
			}
			res = append(res, LicenseDisplay{
				Subscription:     *s,
				Num:              i,
				Owner:            owner,
				ComponentsString: strings.Join(comps, ","),
			})
			i++
		}
	}

	return res, nil
}

// GenerateTrialLicense will generate a new trial license for the specified user or org
func (u HubUser) GenerateTrialLicense(ctx context.Context, targetID string) (*model.IssuedLicense, error) {
	subID, err := u.client.GenerateNewTrialSubscription(ctx, u.token, targetID, u.User.Email)
	if err != nil {
		return nil, err
	}
	return u.client.DownloadLicenseFromHub(ctx, u.token, subID)
}

// GetIssuedLicense will download a license by ID
func (u HubUser) GetIssuedLicense(ctx context.Context, ID string) (*model.IssuedLicense, error) {
	return u.client.DownloadLicenseFromHub(ctx, u.token, ID)
}

// LoadLocalIssuedLicense will load a local license file
func LoadLocalIssuedLicense(ctx context.Context, filename string) (*model.IssuedLicense, error) {
	baseURI, err := url.Parse(licensingDefaultBaseURI)
	if err != nil {
		return nil, err
	}

	lclient, err := licensing.New(&licensing.Config{
		BaseURI:    *baseURI,
		HTTPClient: &http.Client{},
		PublicKey:  licensingPublicKey,
	})
	if err != nil {
		return nil, err
	}
	return doLoadLocalIssuedLicense(ctx, filename, lclient)
}

func doLoadLocalIssuedLicense(ctx context.Context, filename string, lclient licensing.Client) (*model.IssuedLicense, error) {
	var license model.IssuedLicense
	data, err := ioutil.ReadFile(filename)
	if err != nil {
		return nil, err
	}

	err = json.Unmarshal(data, &license)
	if err != nil {
		return nil, errors.Wrap(err, "malformed license file")
	}

	_, err = lclient.VerifyLicense(ctx, license)
	if err != nil {
		return nil, err
	}

	return &license, nil
}

// ApplyLicense will store a license on the local system
func ApplyLicense(ctx context.Context, dclient licensing.WrappedDockerClient, license *model.IssuedLicense) error {
	info, err := dclient.Info(ctx)
	if err != nil {
		return err
	}
	return licensing.StoreLicense(ctx, dclient, license, info.DockerRootDir)
}
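A minimal sketch of the intended call sequence, assuming it is compiled inside this repository (the package is internal, with an assumed path of github.com/docker/cli/internal/licenseutils) and with placeholder credentials that would normally come from the CLI's stored auth:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types"

	// assumed in-repo import path for the package added above
	"github.com/docker/cli/internal/licenseutils"
)

func main() {
	ctx := context.Background()
	// hypothetical credentials; the CLI would take these from its configured auth
	user, err := licenseutils.Login(ctx, &types.AuthConfig{Username: "someuser", Password: "secret"})
	if err != nil {
		log.Fatal(err)
	}
	available, err := user.GetAvailableLicenses(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("found %d non-expired licenses\n", len(available))
	// A trial can then be requested for the user's own account (or one of user.Orgs).
	if _, err := user.GenerateTrialLicense(ctx, user.User.ID); err != nil {
		log.Fatal(err)
	}
}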
@@ -0,0 +1,234 @@
package licenseutils

import (
	"context"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"
	"time"

	"github.com/docker/docker/api/types"
	"github.com/docker/licensing/model"
	"gotest.tools/assert"
)

func TestLoginNoAuth(t *testing.T) {
	ctx := context.Background()

	_, err := Login(ctx, &types.AuthConfig{})

	assert.ErrorContains(t, err, "must be logged in")
}

func TestGetOrgByID(t *testing.T) {
	orgs := []model.Org{
		{ID: "id1"},
		{ID: "id2"},
	}
	u := HubUser{
		Orgs: orgs,
	}
	o, err := u.GetOrgByID("id1")
	assert.NilError(t, err)
	assert.Assert(t, o.ID == "id1")
	o, err = u.GetOrgByID("id2")
	assert.NilError(t, err)
	assert.Assert(t, o.ID == "id2")
	o, err = u.GetOrgByID("id3")
	assert.ErrorContains(t, err, "not found")
}

func TestGetAvailableLicensesListFail(t *testing.T) {
	ctx := context.Background()
	user := HubUser{
		client: &fakeLicensingClient{
			listSubscriptionsFunc: func(ctx context.Context, authToken, dockerID string) (response []*model.Subscription, err error) {
				return nil, fmt.Errorf("list subscriptions error")
			},
		},
	}
	_, err := user.GetAvailableLicenses(ctx)
	assert.ErrorContains(t, err, "list subscriptions error")
}

func TestGetAvailableLicensesOrgFail(t *testing.T) {
	ctx := context.Background()
	user := HubUser{
		Orgs: []model.Org{
			{ID: "orgid"},
		},
		client: &fakeLicensingClient{
			listSubscriptionsFunc: func(ctx context.Context, authToken, dockerID string) (response []*model.Subscription, err error) {
				if dockerID == "orgid" {
					return nil, fmt.Errorf("list subscriptions org error")
				}
				return nil, nil
			},
		},
	}
	_, err := user.GetAvailableLicenses(ctx)
	assert.ErrorContains(t, err, "list subscriptions org error")
}

func TestGetAvailableLicensesHappy(t *testing.T) {
	ctx := context.Background()
	expiration := time.Now().Add(3600 * time.Second)
	user := HubUser{
		User: model.User{
			ID:       "userid",
			Username: "username",
		},
		Orgs: []model.Org{
			{
				ID:      "orgid",
				Orgname: "orgname",
			},
		},
		client: &fakeLicensingClient{
			listSubscriptionsFunc: func(ctx context.Context, authToken, dockerID string) (response []*model.Subscription, err error) {
				if dockerID == "orgid" {
					return []*model.Subscription{
						{
							State:   "expired",
							Expires: &expiration,
						},
						{
							State:    "active",
							DockerID: "orgid",
							Expires:  &expiration,
						},
						{
							State:    "active",
							DockerID: "invalidid",
							Expires:  &expiration,
						},
					}, nil
				} else if dockerID == "userid" {
					return []*model.Subscription{
						{
							State: "expired",
						},
						{
							State:    "active",
							DockerID: "userid",
							Expires:  &expiration,
							PricingComponents: model.PricingComponents{
								{
									Name:  "comp1",
									Value: 1,
								},
								{
									Name:  "comp2",
									Value: 2,
								},
							},
						},
					}, nil
				}
				return nil, nil
			},
		},
	}
	subs, err := user.GetAvailableLicenses(ctx)
	assert.NilError(t, err)
	assert.Assert(t, len(subs) == 3)
	assert.Assert(t, subs[0].Owner == "username")
	assert.Assert(t, subs[0].State == "active")
	assert.Assert(t, subs[0].ComponentsString == "comp1:1,comp2:2")
	assert.Assert(t, subs[1].Owner == "orgname")
	assert.Assert(t, subs[1].State == "active")
	assert.Assert(t, subs[2].Owner == "unknown")
	assert.Assert(t, subs[2].State == "active")
}

func TestGenerateTrialFail(t *testing.T) {
	ctx := context.Background()
	user := HubUser{
		client: &fakeLicensingClient{
			generateNewTrialSubscriptionFunc: func(ctx context.Context, authToken, dockerID, email string) (subscriptionID string, err error) {
				return "", fmt.Errorf("generate trial failure")
			},
		},
	}
	targetID := "targetidgoeshere"
	_, err := user.GenerateTrialLicense(ctx, targetID)
	assert.ErrorContains(t, err, "generate trial failure")
}

func TestGenerateTrialHappy(t *testing.T) {
	ctx := context.Background()
	user := HubUser{
		client: &fakeLicensingClient{
			generateNewTrialSubscriptionFunc: func(ctx context.Context, authToken, dockerID, email string) (subscriptionID string, err error) {
				return "subid", nil
			},
		},
	}
	targetID := "targetidgoeshere"
	_, err := user.GenerateTrialLicense(ctx, targetID)
	assert.NilError(t, err)
}

func TestGetIssuedLicense(t *testing.T) {
	ctx := context.Background()
	user := HubUser{
		client: &fakeLicensingClient{},
	}
	id := "idgoeshere"
	_, err := user.GetIssuedLicense(ctx, id)
	assert.NilError(t, err)
}

func TestLoadLocalIssuedLicenseNotExist(t *testing.T) {
	ctx := context.Background()
	tmpdir, err := ioutil.TempDir("", "licensing-test")
	assert.NilError(t, err)
	defer os.RemoveAll(tmpdir)
	filename := filepath.Join(tmpdir, "subscription.lic")
	_, err = LoadLocalIssuedLicense(ctx, filename)
	assert.ErrorContains(t, err, "no such file")
}

func TestLoadLocalIssuedLicenseNotJson(t *testing.T) {
	ctx := context.Background()
	tmpdir, err := ioutil.TempDir("", "licensing-test")
	assert.NilError(t, err)
	defer os.RemoveAll(tmpdir)
	filename := filepath.Join(tmpdir, "subscription.lic")
	err = ioutil.WriteFile(filename, []byte("not json"), 0644)
	assert.NilError(t, err)
	_, err = LoadLocalIssuedLicense(ctx, filename)
	assert.ErrorContains(t, err, "malformed license file")
}

func TestLoadLocalIssuedLicenseNoVerify(t *testing.T) {
	lclient := &fakeLicensingClient{
		verifyLicenseFunc: func(ctx context.Context, license model.IssuedLicense) (res *model.CheckResponse, err error) {
			return nil, fmt.Errorf("verification failed")
		},
	}
	ctx := context.Background()
	tmpdir, err := ioutil.TempDir("", "licensing-test")
	assert.NilError(t, err)
	defer os.RemoveAll(tmpdir)
	filename := filepath.Join(tmpdir, "subscription.lic")
	err = ioutil.WriteFile(filename, []byte("{}"), 0644)
	assert.NilError(t, err)
	_, err = doLoadLocalIssuedLicense(ctx, filename, lclient)
	assert.ErrorContains(t, err, "verification failed")
}

func TestLoadLocalIssuedLicenseHappy(t *testing.T) {
	lclient := &fakeLicensingClient{}
	ctx := context.Background()
	tmpdir, err := ioutil.TempDir("", "licensing-test")
	assert.NilError(t, err)
	defer os.RemoveAll(tmpdir)
	filename := filepath.Join(tmpdir, "subscription.lic")
	err = ioutil.WriteFile(filename, []byte("{}"), 0644)
	assert.NilError(t, err)
	_, err = doLoadLocalIssuedLicense(ctx, filename, lclient)
	assert.NilError(t, err)
}
@@ -0,0 +1,61 @@
package containerized

import (
	"context"

	"github.com/containerd/containerd/containers"
	"github.com/containerd/containerd/oci"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

// WithAllCapabilities enables all capabilities required to run privileged containers
func WithAllCapabilities(_ context.Context, _ oci.Client, c *containers.Container, s *specs.Spec) error {
	caps := []string{
		"CAP_CHOWN",
		"CAP_DAC_OVERRIDE",
		"CAP_DAC_READ_SEARCH",
		"CAP_FOWNER",
		"CAP_FSETID",
		"CAP_KILL",
		"CAP_SETGID",
		"CAP_SETUID",
		"CAP_SETPCAP",
		"CAP_LINUX_IMMUTABLE",
		"CAP_NET_BIND_SERVICE",
		"CAP_NET_BROADCAST",
		"CAP_NET_ADMIN",
		"CAP_NET_RAW",
		"CAP_IPC_LOCK",
		"CAP_IPC_OWNER",
		"CAP_SYS_MODULE",
		"CAP_SYS_RAWIO",
		"CAP_SYS_CHROOT",
		"CAP_SYS_PTRACE",
		"CAP_SYS_PACCT",
		"CAP_SYS_ADMIN",
		"CAP_SYS_BOOT",
		"CAP_SYS_NICE",
		"CAP_SYS_RESOURCE",
		"CAP_SYS_TIME",
		"CAP_SYS_TTY_CONFIG",
		"CAP_MKNOD",
		"CAP_LEASE",
		"CAP_AUDIT_WRITE",
		"CAP_AUDIT_CONTROL",
		"CAP_SETFCAP",
		"CAP_MAC_OVERRIDE",
		"CAP_MAC_ADMIN",
		"CAP_SYSLOG",
		"CAP_WAKE_ALARM",
		"CAP_BLOCK_SUSPEND",
		"CAP_AUDIT_READ",
	}
	if s.Process.Capabilities == nil {
		s.Process.Capabilities = &specs.LinuxCapabilities{}
	}
	s.Process.Capabilities.Bounding = caps
	s.Process.Capabilities.Effective = caps
	s.Process.Capabilities.Inheritable = caps
	s.Process.Capabilities.Permitted = caps
	return nil
}
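WithAllCapabilities matches containerd's oci.SpecOpts signature, so it can be chained with the stock spec options when creating the engine container. A minimal sketch under that assumption, using an assumed in-repo import path and a hypothetical image and container name:

package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"
	"github.com/containerd/containerd/oci"

	// assumed in-repo import path for the package added above
	"github.com/docker/cli/internal/pkg/containerized"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
	ctx := namespaces.WithNamespace(context.Background(), "docker")

	// hypothetical engine image reference
	image, err := client.GetImage(ctx, "docker.io/docker/engine-community:18.09.0")
	if err != nil {
		log.Fatal(err)
	}
	// Chain the privileged capability set into the generated OCI spec.
	ctr, err := client.NewContainer(ctx, "docker-engine",
		containerized.WithNewSnapshot(image),
		containerd.WithNewSpec(oci.WithImageConfig(image), containerized.WithAllCapabilities),
	)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("created container", ctr.ID())
}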
@@ -0,0 +1,21 @@
package containerized

import (
	"context"
	"testing"

	"github.com/containerd/containerd/containers"
	specs "github.com/opencontainers/runtime-spec/specs-go"
	"gotest.tools/assert"
)

func TestWithAllCapabilities(t *testing.T) {
	c := &containers.Container{}
	s := &specs.Spec{
		Process: &specs.Process{},
	}
	ctx := context.Background()
	err := WithAllCapabilities(ctx, nil, c, s)
	assert.NilError(t, err)
	assert.Assert(t, len(s.Process.Capabilities.Bounding) > 0)
}
@@ -0,0 +1,74 @@
package containerized

import (
	"context"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/errdefs"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

// AtomicImageUpdate will perform an update of the given container with the new image
// and verify success via the provided healthcheckFn. If the healthcheck fails, the
// container will be reverted to the prior image
func AtomicImageUpdate(ctx context.Context, container containerd.Container, image containerd.Image, healthcheckFn func() error) error {
	updateCompleted := false
	err := pauseAndRun(ctx, container, func() error {
		if err := container.Update(ctx, WithUpgrade(image)); err != nil {
			return errors.Wrap(err, "failed to update to new image")
		}
		updateCompleted = true
		task, err := container.Task(ctx, nil)
		if err != nil {
			if errdefs.IsNotFound(err) {
				return nil
			}
			return errors.Wrap(err, "failed to lookup task")
		}
		return task.Kill(ctx, sigTERM)
	})
	if err != nil {
		if updateCompleted {
			logrus.WithError(err).Error("failed to update, rolling back")
			return rollBack(ctx, container)
		}
		return err
	}
	if err := healthcheckFn(); err != nil {
		logrus.WithError(err).Error("failed health check, rolling back")
		return rollBack(ctx, container)
	}
	return nil
}

func rollBack(ctx context.Context, container containerd.Container) error {
	return pauseAndRun(ctx, container, func() error {
		if err := container.Update(ctx, WithRollback); err != nil {
			return err
		}
		task, err := container.Task(ctx, nil)
		if err != nil {
			if errdefs.IsNotFound(err) {
				return nil
			}
			return errors.Wrap(err, "failed to lookup task")
		}
		return task.Kill(ctx, sigTERM)
	})
}

func pauseAndRun(ctx context.Context, container containerd.Container, fn func() error) error {
	task, err := container.Task(ctx, nil)
	if err != nil {
		if errdefs.IsNotFound(err) {
			return fn()
		}
		return errors.Wrap(err, "failed to lookup task")
	}
	if err := task.Pause(ctx); err != nil {
		return errors.Wrap(err, "failed to pause task")
	}
	defer task.Resume(ctx)
	return fn()
}
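A minimal sketch of driving AtomicImageUpdate from a containerd client, again with an assumed in-repo import path and hypothetical container and image names; the health check callback decides whether the update sticks, and returning an error from it triggers the rollback path:

package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"

	// assumed in-repo import path for the package added above
	"github.com/docker/cli/internal/pkg/containerized"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
	ctx := namespaces.WithNamespace(context.Background(), "docker")

	ctr, err := client.LoadContainer(ctx, "docker-engine") // hypothetical container ID
	if err != nil {
		log.Fatal(err)
	}
	newImage, err := client.GetImage(ctx, "docker.io/docker/engine-community:18.09.1") // hypothetical tag
	if err != nil {
		log.Fatal(err)
	}
	// Returning an error from the health check rolls the container back to the prior image.
	if err := containerized.AtomicImageUpdate(ctx, ctr, newImage, func() error {
		return nil // e.g. ping the engine API here
	}); err != nil {
		log.Fatal(err)
	}
}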
@@ -0,0 +1,12 @@
// +build !windows

package containerized

import (
	"golang.org/x/sys/unix"
)

var (
	// sigTERM maps to unix.SIGTERM
	sigTERM = unix.SIGTERM
)
@@ -0,0 +1,12 @@
// +build windows

package containerized

import (
	"syscall"
)

var (
	// sigTERM is a no-op signal; all signals are ignored by containerd kill on Windows
	sigTERM = syscall.Signal(0)
)
@@ -0,0 +1,158 @@
package containerized

import (
	"context"
	"errors"
	"fmt"
	"strings"
	"time"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/containers"
	"github.com/containerd/containerd/diff/apply"
	"github.com/containerd/containerd/mount"
	"github.com/containerd/containerd/rootfs"
	"github.com/containerd/containerd/snapshots"
	"github.com/opencontainers/image-spec/identity"
)

const (
	gcRoot           = "containerd.io/gc.root"
	timestampFormat  = "01-02-2006-15:04:05"
	previousRevision = "docker.com/revision.previous"
	imageLabel       = "docker.com/revision.image"
)

// ErrNoPreviousRevision returned if the container has no previous revision
var ErrNoPreviousRevision = errors.New("no previous revision")

// WithNewSnapshot creates a new snapshot managed by containerized
func WithNewSnapshot(i containerd.Image) containerd.NewContainerOpts {
	return func(ctx context.Context, client *containerd.Client, c *containers.Container) error {
		if c.Snapshotter == "" {
			c.Snapshotter = containerd.DefaultSnapshotter
		}
		r, err := create(ctx, client, i, c.ID, "")
		if err != nil {
			return err
		}
		c.SnapshotKey = r.Key
		c.Image = i.Name()
		return nil
	}
}

// WithUpgrade upgrades an existing container's image to a new one
func WithUpgrade(i containerd.Image) containerd.UpdateContainerOpts {
	return func(ctx context.Context, client *containerd.Client, c *containers.Container) error {
		revision, err := save(ctx, client, i, c)
		if err != nil {
			return err
		}
		c.Image = i.Name()
		c.SnapshotKey = revision.Key
		return nil
	}
}

// WithRollback rolls back to the previous container's revision
func WithRollback(ctx context.Context, client *containerd.Client, c *containers.Container) error {
	prev, err := previous(ctx, client, c)
	if err != nil {
		return err
	}
	ss := client.SnapshotService(c.Snapshotter)
	sInfo, err := ss.Stat(ctx, prev.Key)
	if err != nil {
		return err
	}
	snapshotImage, ok := sInfo.Labels[imageLabel]
	if !ok {
		return fmt.Errorf("snapshot %s does not have a service image label", prev.Key)
	}
	if snapshotImage == "" {
		return fmt.Errorf("snapshot %s has an empty service image label", prev.Key)
	}
	c.Image = snapshotImage
	c.SnapshotKey = prev.Key
	return nil
}

func newRevision(id string) *revision {
	now := time.Now()
	return &revision{
		Timestamp: now,
		Key:       fmt.Sprintf("boss.io.%s.%s", id, now.Format(timestampFormat)),
	}
}

type revision struct {
	Timestamp time.Time
	Key       string
	mounts    []mount.Mount
}

// nolint: interfacer
func create(ctx context.Context, client *containerd.Client, i containerd.Image, id string, previous string) (*revision, error) {
	diffIDs, err := i.RootFS(ctx)
	if err != nil {
		return nil, err
	}
	var (
		parent = identity.ChainID(diffIDs).String()
		r      = newRevision(id)
	)
	labels := map[string]string{
		gcRoot:     r.Timestamp.Format(time.RFC3339),
		imageLabel: i.Name(),
	}
	if previous != "" {
		labels[previousRevision] = previous
	}
	mounts, err := client.SnapshotService(containerd.DefaultSnapshotter).Prepare(ctx, r.Key, parent, snapshots.WithLabels(labels))
	if err != nil {
		return nil, err
	}
	r.mounts = mounts
	return r, nil
}

func save(ctx context.Context, client *containerd.Client, updatedImage containerd.Image, c *containers.Container) (*revision, error) {
	snapshot, err := create(ctx, client, updatedImage, c.ID, c.SnapshotKey)
	if err != nil {
		return nil, err
	}
	service := client.SnapshotService(c.Snapshotter)
	// create a diff from the existing snapshot
	diff, err := rootfs.CreateDiff(ctx, c.SnapshotKey, service, client.DiffService())
	if err != nil {
		return nil, err
	}
	applier := apply.NewFileSystemApplier(client.ContentStore())
	if _, err := applier.Apply(ctx, diff, snapshot.mounts); err != nil {
		return nil, err
	}
	return snapshot, nil
}

// nolint: interfacer
func previous(ctx context.Context, client *containerd.Client, c *containers.Container) (*revision, error) {
	service := client.SnapshotService(c.Snapshotter)
	sInfo, err := service.Stat(ctx, c.SnapshotKey)
	if err != nil {
		return nil, err
	}
	key := sInfo.Labels[previousRevision]
	if key == "" {
		return nil, ErrNoPreviousRevision
	}
	parts := strings.Split(key, ".")
	timestamp, err := time.Parse(timestampFormat, parts[len(parts)-1])
	if err != nil {
		return nil, err
	}
	return &revision{
		Timestamp: timestamp,
		Key:       key,
	}, nil
}
@@ -12,6 +12,7 @@ import (
 	manifeststore "github.com/docker/cli/cli/manifest/store"
 	registryclient "github.com/docker/cli/cli/registry/client"
 	"github.com/docker/cli/cli/trust"
+	"github.com/docker/cli/internal/containerizedengine"
 	"github.com/docker/docker/client"
 	notaryclient "github.com/theupdateframework/notary/client"
 )
@@ -19,22 +20,24 @@
 // NotaryClientFuncType defines a function that returns a fake notary client
 type NotaryClientFuncType func(imgRefAndAuth trust.ImageRefAndAuth, actions []string) (notaryclient.Repository, error)
 type clientInfoFuncType func() command.ClientInfo
+type containerizedEngineFuncType func(string) (containerizedengine.Client, error)

 // FakeCli emulates the default DockerCli
 type FakeCli struct {
 	command.DockerCli
 	client           client.APIClient
 	configfile       *configfile.ConfigFile
 	out              *command.OutStream
 	outBuffer        *bytes.Buffer
 	err              *bytes.Buffer
 	in               *command.InStream
 	server           command.ServerInfo
 	clientInfoFunc   clientInfoFuncType
 	notaryClientFunc NotaryClientFuncType
 	manifestStore    manifeststore.Store
 	registryClient   registryclient.RegistryClient
 	contentTrust     bool
+	containerizedEngineClientFunc containerizedEngineFuncType
 }

 // NewFakeCli returns a fake for the command.Cli interface
@@ -167,3 +170,16 @@ func (c *FakeCli) ContentTrustEnabled() bool {
 func EnableContentTrust(c *FakeCli) {
 	c.contentTrust = true
 }
+
+// NewContainerizedEngineClient returns a containerized engine client
+func (c *FakeCli) NewContainerizedEngineClient(sockPath string) (containerizedengine.Client, error) {
+	if c.containerizedEngineClientFunc != nil {
+		return c.containerizedEngineClientFunc(sockPath)
+	}
+	return nil, fmt.Errorf("no containerized engine client available unless defined")
+}
+
+// SetContainerizedEngineClient on the fake cli
+func (c *FakeCli) SetContainerizedEngineClient(containerizedEngineClientFunc containerizedEngineFuncType) {
+	c.containerizedEngineClientFunc = containerizedEngineClientFunc
+}
@@ -0,0 +1,5 @@
#!/usr/bin/env bash
set -eu -o pipefail

# TODO fetch images?
./scripts/test/engine/wrapper
@@ -0,0 +1,107 @@
#!/usr/bin/env bash
# Run engine specific integration tests against the latest containerd-in-docker
set -eu -o pipefail

function container_ip {
    local cid=$1
    local network=$2
    docker inspect \
        -f "{{.NetworkSettings.Networks.${network}.IPAddress}}" "$cid"
}

function fetch_images {
    ## TODO - not yet implemented
    ./scripts/test/engine/load-image fetch-only
}

function setup {
    ### start containerd and log to a file
    echo "Starting containerd in the background"
    containerd &> /tmp/containerd.err &
    echo "Waiting for containerd to be responsive"
    # shellcheck disable=SC2034
    for i in $(seq 1 60); do
        if ctr namespace ls > /dev/null; then
            break
        fi
        sleep 1
    done
    ctr namespace ls > /dev/null
    echo "containerd is ready"

    # TODO Once https://github.com/moby/moby/pull/33355 or equivalent
    # is merged, then this can be optimized to preload the image
    # saved during the build phase
}

function cleanup {
    #### if testexit is non-zero dump the containerd logs with a banner
    if [ "${testexit}" -ne 0 ] ; then
        echo "FAIL: dumping containerd logs"
        echo ""
        cat /tmp/containerd.err
        if [ -f /var/log/engine.log ] ; then
            echo ""
            echo "FAIL: dumping engine log"
            echo ""
            cat /var/log/engine.log
        else
            echo ""
            echo "FAIL: engine log missing"
            echo ""
        fi
        echo "FAIL: remaining namespaces"
        ctr namespace ls || /bin/true
        echo "FAIL: remaining containers"
        ctr --namespace docker container ls || /bin/true
        echo "FAIL: remaining tasks"
        ctr --namespace docker task ls || /bin/true
        echo "FAIL: remaining snapshots"
        ctr --namespace docker snapshots ls || /bin/true
        echo "FAIL: remaining images"
        ctr --namespace docker image ls || /bin/true
    fi
}

function runtests {
    # shellcheck disable=SC2086
    env -i \
        GOPATH="$GOPATH" \
        PATH="$PWD/build/:${PATH}" \
        VERSION=${VERSION} \
        "$(which go)" test -p 1 -parallel 1 -v ./e2eengine/... ${TESTFLAGS-}
}

cmd=${1-}

case "$cmd" in
    setup)
        setup
        exit
        ;;
    cleanup)
        cleanup
        exit
        ;;
    fetch-images)
        fetch_images
        exit
        ;;
    test)
        runtests
        ;;
    run|"")
        testexit=0
        runtests || testexit=$?
        cleanup
        exit $testexit
        ;;
    shell)
        $SHELL
        ;;
    *)
        echo "Unknown command: $cmd"
        echo "Usage: "
        echo "  $0 [setup | cleanup | test | run]"
        exit 1
        ;;
esac
@@ -0,0 +1,18 @@
#!/usr/bin/env bash
# Setup, run and teardown engine test suite in containers.
set -eu -o pipefail

./scripts/test/engine/run setup

testexit=0

test_cmd="test"
if [[ -n "${TEST_DEBUG-}" ]]; then
    test_cmd="shell"
fi

./scripts/test/engine/run "$test_cmd" || testexit="$?"

export testexit
./scripts/test/engine/run cleanup
exit "$testexit"
@@ -5,6 +5,7 @@ set -eu -o pipefail
 # reduces the runtime from 200s down to 23s
 go test -i "$@"

+echo "mode: atomic" > coverage.txt
 for pkg in "$@"; do
     ./scripts/test/unit \
         -cover \
@@ -13,7 +14,7 @@ for pkg in "$@"; do
         "${pkg}"

     if test -f profile.out; then
-        cat profile.out >> coverage.txt
+        grep -v "^mode:" < profile.out >> coverage.txt || true
         rm profile.out
     fi
 done