Merge pull request #1260 from dhiltgen/ce_q3

Add CLI support for running dockerd in a container on containerd
Michael Crosby, 2018-08-20 13:39:49 -04:00 (committed via GitHub)
commit 0c444c521f
408 changed files with 83936 additions and 76 deletions

View File

@ -12,14 +12,14 @@ clean: ## remove build artifacts
.PHONY: test-unit
test-unit: ## run unit test
-./scripts/test/unit $(shell go list ./... | grep -vE '/vendor/|/e2e/')
+./scripts/test/unit $(shell go list ./... | grep -vE '/vendor/|/e2e/|/e2eengine/')
.PHONY: test
test: test-unit ## run tests
.PHONY: test-coverage
test-coverage: ## run test coverage
-./scripts/test/unit-with-coverage $(shell go list ./... | grep -vE '/vendor/|/e2e/')
+./scripts/test/unit-with-coverage $(shell go list ./... | grep -vE '/vendor/|/e2e/|/e2eengine/')
.PHONY: lint
lint: ## run all the lint tools

View File

@ -19,6 +19,7 @@ import (
manifeststore "github.com/docker/cli/cli/manifest/store"
registryclient "github.com/docker/cli/cli/registry/client"
"github.com/docker/cli/cli/trust"
"github.com/docker/cli/internal/containerizedengine"
dopts "github.com/docker/cli/opts"
"github.com/docker/docker/api"
"github.com/docker/docker/api/types"
@ -54,6 +55,7 @@ type Cli interface {
ManifestStore() manifeststore.Store
RegistryClient(bool) registryclient.RegistryClient
ContentTrustEnabled() bool
NewContainerizedEngineClient(sockPath string) (containerizedengine.Client, error)
}
// DockerCli is an instance of the docker command line client.
@ -229,6 +231,11 @@ func (cli *DockerCli) NotaryClient(imgRefAndAuth trust.ImageRefAndAuth, actions
return trust.GetNotaryRepository(cli.In(), cli.Out(), UserAgent(), imgRefAndAuth.RepoInfo(), imgRefAndAuth.AuthConfig(), actions...)
}
// NewContainerizedEngineClient returns a containerized engine client
func (cli *DockerCli) NewContainerizedEngineClient(sockPath string) (containerizedengine.Client, error) {
return containerizedengine.NewClient(sockPath)
}
// ServerInfo stores details about the supported features and platform of the
// server
type ServerInfo struct {

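Note: every engine subcommand added later in this diff consumes the new NewContainerizedEngineClient method with the same acquire/wrap/close pattern. A minimal sketch of that pattern, using a hypothetical withEngineClient helper that is not part of this diff:

package enginehelpers // hypothetical package inside the docker/cli tree

import (
	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/internal/containerizedengine"
	"github.com/pkg/errors"
)

// withEngineClient opens the containerized-engine client, wraps connection
// failures with the message the commands below all use, and always closes
// the client when the callback returns.
func withEngineClient(cli command.Cli, sockPath string, fn func(containerizedengine.Client) error) error {
	client, err := cli.NewContainerizedEngineClient(sockPath)
	if err != nil {
		return errors.Wrap(err, "unable to access local containerd")
	}
	defer client.Close()
	return fn(client)
}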
View File

@ -8,6 +8,7 @@ import (
"github.com/docker/cli/cli/command/checkpoint"
"github.com/docker/cli/cli/command/config"
"github.com/docker/cli/cli/command/container"
"github.com/docker/cli/cli/command/engine"
"github.com/docker/cli/cli/command/image"
"github.com/docker/cli/cli/command/manifest"
"github.com/docker/cli/cli/command/network"
@ -84,6 +85,9 @@ func AddCommands(cmd *cobra.Command, dockerCli command.Cli) {
// volume
volume.NewVolumeCommand(dockerCli),
// engine
engine.NewEngineCommand(dockerCli),
// legacy commands may be hidden
hide(system.NewEventsCommand(dockerCli)),
hide(system.NewInfoCommand(dockerCli)),

View File

@ -0,0 +1,181 @@
package engine
import (
"context"
"fmt"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/command/formatter"
"github.com/docker/cli/internal/containerizedengine"
"github.com/docker/cli/internal/licenseutils"
"github.com/docker/docker/api/types"
"github.com/docker/licensing/model"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
type activateOptions struct {
licenseFile string
version string
registryPrefix string
format string
image string
quiet bool
displayOnly bool
sockPath string
}
// newActivateCommand creates a new `docker engine activate` command
func newActivateCommand(dockerCli command.Cli) *cobra.Command {
var options activateOptions
cmd := &cobra.Command{
Use: "activate [OPTIONS]",
Short: "Activate Enterprise Edition",
Long: `Activate Enterprise Edition.
With this command you may apply an existing Docker Enterprise license, or
interactively download one from Docker. In the interactive exchange, you can
sign up for a new trial, or download an existing license. If you are
currently running a Community Edition engine, the daemon will be updated to
the Enterprise Edition Docker engine with additional capabilities and long
term support.

For more information about the different Docker Enterprise license types, visit
https://www.docker.com/licenses

For non-interactive scriptable deployments, download your license from
https://hub.docker.com/, then specify the file with the '--license' flag.
`,
RunE: func(cmd *cobra.Command, args []string) error {
return runActivate(dockerCli, options)
},
}
flags := cmd.Flags()
flags.StringVar(&options.licenseFile, "license", "", "License File")
flags.StringVar(&options.version, "version", "", "Specify engine version (default is to use currently running version)")
flags.StringVar(&options.registryPrefix, "registry-prefix", "docker.io/docker", "Override the default location where engine images are pulled")
flags.StringVar(&options.image, "engine-image", containerizedengine.EnterpriseEngineImage, "Specify engine image")
flags.StringVar(&options.format, "format", "", "Pretty-print licenses using a Go template")
flags.BoolVar(&options.displayOnly, "display-only", false, "Only display the available licenses and exit")
flags.BoolVar(&options.quiet, "quiet", false, "Only display available licenses by ID")
flags.StringVar(&options.sockPath, "containerd", "", "Override default location of containerd endpoint")
return cmd
}
func runActivate(cli command.Cli, options activateOptions) error {
ctx := context.Background()
client, err := cli.NewContainerizedEngineClient(options.sockPath)
if err != nil {
return errors.Wrap(err, "unable to access local containerd")
}
defer client.Close()
authConfig, err := getRegistryAuth(cli, options.registryPrefix)
if err != nil {
return err
}
var license *model.IssuedLicense
// Lookup on hub if no license provided via params
if options.licenseFile == "" {
if license, err = getLicenses(ctx, authConfig, cli, options); err != nil {
return err
}
if options.displayOnly {
return nil
}
} else {
if license, err = licenseutils.LoadLocalIssuedLicense(ctx, options.licenseFile); err != nil {
return err
}
}
if err = licenseutils.ApplyLicense(ctx, cli.Client(), license); err != nil {
return err
}
opts := containerizedengine.EngineInitOptions{
RegistryPrefix: options.registryPrefix,
EngineImage: options.image,
EngineVersion: options.version,
}
return client.ActivateEngine(ctx, opts, cli.Out(), authConfig,
func(ctx context.Context) error {
client := cli.Client()
_, err := client.Ping(ctx)
return err
})
}
func getLicenses(ctx context.Context, authConfig *types.AuthConfig, cli command.Cli, options activateOptions) (*model.IssuedLicense, error) {
user, err := licenseutils.Login(ctx, authConfig)
if err != nil {
return nil, err
}
fmt.Fprintf(cli.Out(), "Looking for existing licenses for %s...\n", user.User.Username)
subs, err := user.GetAvailableLicenses(ctx)
if err != nil {
return nil, err
}
if len(subs) == 0 {
return doTrialFlow(ctx, cli, user)
}
format := options.format
if len(format) == 0 {
format = formatter.TableFormatKey
}
updatesCtx := formatter.Context{
Output: cli.Out(),
Format: formatter.NewSubscriptionsFormat(format, options.quiet),
Trunc: false,
}
if err := formatter.SubscriptionsWrite(updatesCtx, subs); err != nil {
return nil, err
}
if options.displayOnly {
return nil, nil
}
fmt.Fprintf(cli.Out(), "Please pick a license by number: ")
var num int
if _, err := fmt.Fscan(cli.In(), &num); err != nil {
return nil, errors.Wrap(err, "failed to read user input")
}
if num < 0 || num >= len(subs) {
return nil, fmt.Errorf("invalid choice")
}
return user.GetIssuedLicense(ctx, subs[num].ID)
}
func doTrialFlow(ctx context.Context, cli command.Cli, user licenseutils.HubUser) (*model.IssuedLicense, error) {
if !command.PromptForConfirmation(cli.In(), cli.Out(),
"No existing licenses found, would you like to set up a new Enterprise Basic Trial license?") {
return nil, fmt.Errorf("you must have an existing enterprise license or generate a new trial to use the Enterprise Docker Engine")
}
targetID := user.User.ID
// If the user is a member of any organizations, allow trials generated against them
if len(user.Orgs) > 0 {
fmt.Fprintf(cli.Out(), "%d\t%s\n", 0, user.User.Username)
for i, org := range user.Orgs {
fmt.Fprintf(cli.Out(), "%d\t%s\n", i+1, org.Orgname)
}
fmt.Fprintf(cli.Out(), "Please choose an account to generate the trial in:")
var num int
if _, err := fmt.Fscan(cli.In(), &num); err != nil {
return nil, errors.Wrap(err, "failed to read user input")
}
if num < 0 || num > len(user.Orgs) {
return nil, fmt.Errorf("invalid choice")
}
if num > 0 {
targetID = user.Orgs[num-1].ID
}
}
return user.GenerateTrialLicense(ctx, targetID)
}

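Note: the account picker in doTrialFlow maps choice 0 to the personal account and choices 1..N to user.Orgs[choice-1]. A standalone sketch of that index mapping (a hypothetical helper, shown only to make the off-by-one explicit):

package main

import "fmt"

// resolveTrialTarget mirrors the selection logic in doTrialFlow above:
// 0 selects the personal account ID, 1..len(orgIDs) select orgIDs[num-1].
func resolveTrialTarget(userID string, orgIDs []string, num int) (string, error) {
	if num < 0 || num > len(orgIDs) {
		return "", fmt.Errorf("invalid choice")
	}
	if num == 0 {
		return userID, nil
	}
	return orgIDs[num-1], nil
}

func main() {
	id, _ := resolveTrialTarget("user-id", []string{"org-a", "org-b"}, 2)
	fmt.Println(id) // org-b
}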
View File

@ -0,0 +1,37 @@
package engine
import (
"fmt"
"testing"
"github.com/docker/cli/internal/containerizedengine"
"gotest.tools/assert"
)
func TestActivateNoContainerd(t *testing.T) {
testCli.SetContainerizedEngineClient(
func(string) (containerizedengine.Client, error) {
return nil, fmt.Errorf("some error")
},
)
cmd := newActivateCommand(testCli)
cmd.Flags().Set("license", "invalidpath")
cmd.SilenceUsage = true
cmd.SilenceErrors = true
err := cmd.Execute()
assert.ErrorContains(t, err, "unable to access local containerd")
}
func TestActivateBadLicense(t *testing.T) {
testCli.SetContainerizedEngineClient(
func(string) (containerizedengine.Client, error) {
return &fakeContainerizedEngineClient{}, nil
},
)
cmd := newActivateCommand(testCli)
cmd.SilenceUsage = true
cmd.SilenceErrors = true
cmd.Flags().Set("license", "invalidpath")
err := cmd.Execute()
assert.Error(t, err, "open invalidpath: no such file or directory")
}

View File

@ -0,0 +1,33 @@
package engine
import (
"context"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/trust"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
registrytypes "github.com/docker/docker/api/types/registry"
"github.com/pkg/errors"
)
func getRegistryAuth(cli command.Cli, registryPrefix string) (*types.AuthConfig, error) {
if registryPrefix == "" {
registryPrefix = "docker.io/docker"
}
distributionRef, err := reference.ParseNormalizedNamed(registryPrefix)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse image name: %s", registryPrefix)
}
imgRefAndAuth, err := trust.GetImageReferencesAndAuth(context.Background(), nil, authResolver(cli), distributionRef.String())
if err != nil {
return nil, errors.Wrap(err, "failed to get imgRefAndAuth")
}
return imgRefAndAuth.AuthConfig(), nil
}
func authResolver(cli command.Cli) func(ctx context.Context, index *registrytypes.IndexInfo) types.AuthConfig {
return func(ctx context.Context, index *registrytypes.IndexInfo) types.AuthConfig {
return command.ResolveAuthConfig(ctx, cli, index)
}
}

cli/command/engine/check.go Normal file (133 lines)
View File

@ -0,0 +1,133 @@
package engine
import (
"context"
"fmt"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/command/formatter"
"github.com/docker/cli/internal/containerizedengine"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
const (
releaseNotePrefix = "https://docs.docker.com/releasenotes"
)
type checkOptions struct {
registryPrefix string
preReleases bool
downgrades bool
upgrades bool
format string
quiet bool
sockPath string
}
func newCheckForUpdatesCommand(dockerCli command.Cli) *cobra.Command {
var options checkOptions
cmd := &cobra.Command{
Use: "check [OPTIONS]",
Short: "Check for available engine updates",
Args: cli.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
return runCheck(dockerCli, options)
},
}
flags := cmd.Flags()
flags.StringVar(&options.registryPrefix, "registry-prefix", "", "Override the current location where engine images are pulled")
flags.BoolVar(&options.downgrades, "downgrades", false, "Report downgrades (default omits older versions)")
flags.BoolVar(&options.preReleases, "pre-releases", false, "Include pre-release versions")
flags.BoolVar(&options.upgrades, "upgrades", true, "Report available upgrades")
flags.StringVar(&options.format, "format", "", "Pretty-print updates using a Go template")
flags.BoolVarP(&options.quiet, "quiet", "q", false, "Only display available versions")
flags.StringVar(&options.sockPath, "containerd", "", "Override default location of containerd endpoint")
return cmd
}
func runCheck(dockerCli command.Cli, options checkOptions) error {
ctx := context.Background()
client, err := dockerCli.NewContainerizedEngineClient(options.sockPath)
if err != nil {
return errors.Wrap(err, "unable to access local containerd")
}
defer client.Close()
currentOpts, err := client.GetCurrentEngineVersion(ctx)
if err != nil {
return err
}
// override with user provided prefix if specified
if options.registryPrefix != "" {
currentOpts.RegistryPrefix = options.registryPrefix
}
imageName := currentOpts.RegistryPrefix + "/" + currentOpts.EngineImage
currentVersion := currentOpts.EngineVersion
versions, err := client.GetEngineVersions(ctx, dockerCli.RegistryClient(false), currentVersion, imageName)
if err != nil {
return err
}
availUpdates := []containerizedengine.Update{
{Type: "current", Version: currentVersion},
}
if len(versions.Patches) > 0 {
availUpdates = append(availUpdates,
processVersions(
currentVersion,
"patch",
options.preReleases,
versions.Patches)...)
}
if options.upgrades {
availUpdates = append(availUpdates,
processVersions(
currentVersion,
"upgrade",
options.preReleases,
versions.Upgrades)...)
}
if options.downgrades {
availUpdates = append(availUpdates,
processVersions(
currentVersion,
"downgrade",
options.preReleases,
versions.Downgrades)...)
}
format := options.format
if len(format) == 0 {
format = formatter.TableFormatKey
}
updatesCtx := formatter.Context{
Output: dockerCli.Out(),
Format: formatter.NewUpdatesFormat(format, options.quiet),
Trunc: false,
}
return formatter.UpdatesWrite(updatesCtx, availUpdates)
}
func processVersions(currentVersion, verType string,
includePrerelease bool,
versions []containerizedengine.DockerVersion) []containerizedengine.Update {
availUpdates := []containerizedengine.Update{}
for _, ver := range versions {
if !includePrerelease && ver.Prerelease() != "" {
continue
}
if ver.Tag != currentVersion {
availUpdates = append(availUpdates, containerizedengine.Update{
Type: verType,
Version: ver.Tag,
Notes: fmt.Sprintf("%s/%s", releaseNotePrefix, ver.Tag),
})
}
}
return availUpdates
}

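Note: the prerelease filter in processVersions relies on github.com/hashicorp/go-version, where a tag like "1.1.3-beta1" reports a non-empty Prerelease() and is therefore skipped unless --pre-releases is set. A small self-contained sketch of that behavior:

package main

import (
	"fmt"

	ver "github.com/hashicorp/go-version"
)

// Demonstrates why "1.1.3-beta1" is filtered out by default while "1.1.2"
// is kept: only tags with an empty Prerelease() pass the check.
func main() {
	for _, tag := range []string{"1.1.2", "1.1.3-beta1"} {
		v, err := ver.NewVersion(tag)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s prerelease=%q\n", tag, v.Prerelease())
	}
	// Output:
	// 1.1.2 prerelease=""
	// 1.1.3-beta1 prerelease="beta1"
}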
View File

@ -0,0 +1,143 @@
package engine
import (
"context"
"fmt"
"testing"
registryclient "github.com/docker/cli/cli/registry/client"
"github.com/docker/cli/internal/containerizedengine"
"github.com/docker/cli/internal/test"
"github.com/docker/docker/client"
ver "github.com/hashicorp/go-version"
"gotest.tools/assert"
"gotest.tools/golden"
)
var (
testCli = test.NewFakeCli(&client.Client{})
)
func TestCheckForUpdatesNoContainerd(t *testing.T) {
testCli.SetContainerizedEngineClient(
func(string) (containerizedengine.Client, error) {
return nil, fmt.Errorf("some error")
},
)
cmd := newCheckForUpdatesCommand(testCli)
cmd.SilenceUsage = true
cmd.SilenceErrors = true
err := cmd.Execute()
assert.ErrorContains(t, err, "unable to access local containerd")
}
func TestCheckForUpdatesNoCurrentVersion(t *testing.T) {
retErr := fmt.Errorf("some failure")
getCurrentEngineVersionFunc := func(ctx context.Context) (containerizedengine.EngineInitOptions, error) {
return containerizedengine.EngineInitOptions{}, retErr
}
testCli.SetContainerizedEngineClient(
func(string) (containerizedengine.Client, error) {
return &fakeContainerizedEngineClient{
getCurrentEngineVersionFunc: getCurrentEngineVersionFunc,
}, nil
},
)
cmd := newCheckForUpdatesCommand(testCli)
cmd.SilenceUsage = true
cmd.SilenceErrors = true
err := cmd.Execute()
assert.Assert(t, err == retErr)
}
func TestCheckForUpdatesGetEngineVersionsFail(t *testing.T) {
retErr := fmt.Errorf("some failure")
getEngineVersionsFunc := func(ctx context.Context,
registryClient registryclient.RegistryClient,
currentVersion, imageName string) (containerizedengine.AvailableVersions, error) {
return containerizedengine.AvailableVersions{}, retErr
}
testCli.SetContainerizedEngineClient(
func(string) (containerizedengine.Client, error) {
return &fakeContainerizedEngineClient{
getEngineVersionsFunc: getEngineVersionsFunc,
}, nil
},
)
cmd := newCheckForUpdatesCommand(testCli)
cmd.SilenceUsage = true
cmd.SilenceErrors = true
err := cmd.Execute()
assert.Assert(t, err == retErr)
}
func TestCheckForUpdatesGetEngineVersionsHappy(t *testing.T) {
getCurrentEngineVersionFunc := func(ctx context.Context) (containerizedengine.EngineInitOptions, error) {
return containerizedengine.EngineInitOptions{
EngineImage: "current engine",
EngineVersion: "1.1.0",
}, nil
}
getEngineVersionsFunc := func(ctx context.Context,
registryClient registryclient.RegistryClient,
currentVersion, imageName string) (containerizedengine.AvailableVersions, error) {
return containerizedengine.AvailableVersions{
Downgrades: parseVersions(t, "1.0.1", "1.0.2", "1.0.3-beta1"),
Patches: parseVersions(t, "1.1.1", "1.1.2", "1.1.3-beta1"),
Upgrades: parseVersions(t, "1.2.0", "2.0.0", "2.1.0-beta1"),
}, nil
}
testCli.SetContainerizedEngineClient(
func(string) (containerizedengine.Client, error) {
return &fakeContainerizedEngineClient{
getEngineVersionsFunc: getEngineVersionsFunc,
getCurrentEngineVersionFunc: getCurrentEngineVersionFunc,
}, nil
},
)
cmd := newCheckForUpdatesCommand(testCli)
cmd.Flags().Set("pre-releases", "true")
cmd.Flags().Set("downgrades", "true")
err := cmd.Execute()
assert.NilError(t, err)
golden.Assert(t, testCli.OutBuffer().String(), "check-all.golden")
testCli.OutBuffer().Reset()
cmd.Flags().Set("pre-releases", "false")
cmd.Flags().Set("downgrades", "true")
err = cmd.Execute()
assert.NilError(t, err)
fmt.Println(testCli.OutBuffer().String())
golden.Assert(t, testCli.OutBuffer().String(), "check-no-prerelease.golden")
testCli.OutBuffer().Reset()
cmd.Flags().Set("pre-releases", "false")
cmd.Flags().Set("downgrades", "false")
err = cmd.Execute()
assert.NilError(t, err)
fmt.Println(testCli.OutBuffer().String())
golden.Assert(t, testCli.OutBuffer().String(), "check-no-downgrades.golden")
testCli.OutBuffer().Reset()
cmd.Flags().Set("pre-releases", "false")
cmd.Flags().Set("downgrades", "false")
cmd.Flags().Set("upgrades", "false")
err = cmd.Execute()
assert.NilError(t, err)
fmt.Println(testCli.OutBuffer().String())
golden.Assert(t, testCli.OutBuffer().String(), "check-patches-only.golden")
}
func makeVersion(t *testing.T, tag string) containerizedengine.DockerVersion {
v, err := ver.NewVersion(tag)
assert.NilError(t, err)
return containerizedengine.DockerVersion{Version: *v, Tag: tag}
}
func parseVersions(t *testing.T, tags ...string) []containerizedengine.DockerVersion {
ret := make([]containerizedengine.DockerVersion, len(tags))
for i, tag := range tags {
ret[i] = makeVersion(t, tag)
}
return ret
}

View File

@ -0,0 +1,105 @@
package engine
import (
"context"
"github.com/containerd/containerd"
registryclient "github.com/docker/cli/cli/registry/client"
"github.com/docker/cli/internal/containerizedengine"
"github.com/docker/docker/api/types"
)
type (
fakeContainerizedEngineClient struct {
closeFunc func() error
activateEngineFunc func(ctx context.Context,
opts containerizedengine.EngineInitOptions,
out containerizedengine.OutStream,
authConfig *types.AuthConfig,
healthfn func(context.Context) error) error
initEngineFunc func(ctx context.Context,
opts containerizedengine.EngineInitOptions,
out containerizedengine.OutStream,
authConfig *types.AuthConfig,
healthfn func(context.Context) error) error
doUpdateFunc func(ctx context.Context,
opts containerizedengine.EngineInitOptions,
out containerizedengine.OutStream,
authConfig *types.AuthConfig,
healthfn func(context.Context) error) error
getEngineVersionsFunc func(ctx context.Context,
registryClient registryclient.RegistryClient,
currentVersion,
imageName string) (containerizedengine.AvailableVersions, error)
getEngineFunc func(ctx context.Context) (containerd.Container, error)
removeEngineFunc func(ctx context.Context, engine containerd.Container) error
getCurrentEngineVersionFunc func(ctx context.Context) (containerizedengine.EngineInitOptions, error)
}
)
func (w *fakeContainerizedEngineClient) Close() error {
if w.closeFunc != nil {
return w.closeFunc()
}
return nil
}
func (w *fakeContainerizedEngineClient) ActivateEngine(ctx context.Context,
opts containerizedengine.EngineInitOptions,
out containerizedengine.OutStream,
authConfig *types.AuthConfig,
healthfn func(context.Context) error) error {
if w.activateEngineFunc != nil {
return w.activateEngineFunc(ctx, opts, out, authConfig, healthfn)
}
return nil
}
func (w *fakeContainerizedEngineClient) InitEngine(ctx context.Context,
opts containerizedengine.EngineInitOptions,
out containerizedengine.OutStream,
authConfig *types.AuthConfig,
healthfn func(context.Context) error) error {
if w.initEngineFunc != nil {
return w.initEngineFunc(ctx, opts, out, authConfig, healthfn)
}
return nil
}
func (w *fakeContainerizedEngineClient) DoUpdate(ctx context.Context,
opts containerizedengine.EngineInitOptions,
out containerizedengine.OutStream,
authConfig *types.AuthConfig,
healthfn func(context.Context) error) error {
if w.doUpdateFunc != nil {
return w.doUpdateFunc(ctx, opts, out, authConfig, healthfn)
}
return nil
}
func (w *fakeContainerizedEngineClient) GetEngineVersions(ctx context.Context,
registryClient registryclient.RegistryClient,
currentVersion, imageName string) (containerizedengine.AvailableVersions, error) {
if w.getEngineVersionsFunc != nil {
return w.getEngineVersionsFunc(ctx, registryClient, currentVersion, imageName)
}
return containerizedengine.AvailableVersions{}, nil
}
func (w *fakeContainerizedEngineClient) GetEngine(ctx context.Context) (containerd.Container, error) {
if w.getEngineFunc != nil {
return w.getEngineFunc(ctx)
}
return nil, nil
}
func (w *fakeContainerizedEngineClient) RemoveEngine(ctx context.Context, engine containerd.Container) error {
if w.removeEngineFunc != nil {
return w.removeEngineFunc(ctx, engine)
}
return nil
}
func (w *fakeContainerizedEngineClient) GetCurrentEngineVersion(ctx context.Context) (containerizedengine.EngineInitOptions, error) {
if w.getCurrentEngineVersionFunc != nil {
return w.getCurrentEngineVersionFunc(ctx)
}
return containerizedengine.EngineInitOptions{}, nil
}

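Note: the registry client fake later in this diff pins itself to its interface with a `var _ client.RegistryClient = &fakeRegistryClient{}` assertion; the same compile-time guard would apply here. A sketch (hypothetical addition, not in the diff):

// Compile-time check that the fake keeps satisfying the Client interface,
// mirroring the assertion used for fakeRegistryClient elsewhere in this PR.
var _ containerizedengine.Client = &fakeContainerizedEngineClient{}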
cli/command/engine/cmd.go Normal file (25 lines)
View File

@ -0,0 +1,25 @@
package engine
import (
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/spf13/cobra"
)
// NewEngineCommand returns a cobra command for `engine` subcommands
func NewEngineCommand(dockerCli command.Cli) *cobra.Command {
cmd := &cobra.Command{
Use: "engine COMMAND",
Short: "Manage the docker engine",
Args: cli.NoArgs,
RunE: command.ShowHelp(dockerCli.Err()),
}
cmd.AddCommand(
newInitCommand(dockerCli),
newActivateCommand(dockerCli),
newCheckForUpdatesCommand(dockerCli),
newUpdateCommand(dockerCli),
newRmCommand(dockerCli),
)
return cmd
}

View File

@ -0,0 +1,14 @@
package engine
import (
"testing"
"gotest.tools/assert"
)
func TestNewEngineCommand(t *testing.T) {
cmd := NewEngineCommand(testCli)
subcommands := cmd.Commands()
assert.Assert(t, len(subcommands) == 5)
}

View File

@ -0,0 +1,62 @@
package engine
import (
"context"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/internal/containerizedengine"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
type extendedEngineInitOptions struct {
containerizedengine.EngineInitOptions
sockPath string
}
func newInitCommand(dockerCli command.Cli) *cobra.Command {
var options extendedEngineInitOptions
cmd := &cobra.Command{
Use: "init [OPTIONS]",
Short: "Initialize a local engine",
Long: `This command will initialize a local engine running on containerd.

Configuration of the engine is managed through the daemon.json configuration
file on the host, which may be pre-created before running the 'init' command.
`,
Args: cli.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
return runInit(dockerCli, options)
},
Annotations: map[string]string{"experimentalCLI": ""},
}
flags := cmd.Flags()
flags.StringVar(&options.EngineVersion, "version", cli.Version, "Specify engine version")
flags.StringVar(&options.EngineImage, "engine-image", containerizedengine.CommunityEngineImage, "Specify engine image")
flags.StringVar(&options.RegistryPrefix, "registry-prefix", "docker.io/docker", "Override the default location where engine images are pulled")
flags.StringVar(&options.ConfigFile, "config-file", "/etc/docker/daemon.json", "Specify the location of the daemon configuration file on the host")
flags.StringVar(&options.sockPath, "containerd", "", "Override default location of containerd endpoint")
return cmd
}
func runInit(dockerCli command.Cli, options extendedEngineInitOptions) error {
ctx := context.Background()
client, err := dockerCli.NewContainerizedEngineClient(options.sockPath)
if err != nil {
return errors.Wrap(err, "unable to access local containerd")
}
defer client.Close()
authConfig, err := getRegistryAuth(dockerCli, options.RegistryPrefix)
if err != nil {
return err
}
return client.InitEngine(ctx, options.EngineInitOptions, dockerCli.Out(), authConfig,
func(ctx context.Context) error {
client := dockerCli.Client()
_, err := client.Ping(ctx)
return err
})
}

View File

@ -0,0 +1,33 @@
package engine
import (
"fmt"
"testing"
"github.com/docker/cli/internal/containerizedengine"
"gotest.tools/assert"
)
func TestInitNoContainerd(t *testing.T) {
testCli.SetContainerizedEngineClient(
func(string) (containerizedengine.Client, error) {
return nil, fmt.Errorf("some error")
},
)
cmd := newInitCommand(testCli)
cmd.SilenceUsage = true
cmd.SilenceErrors = true
err := cmd.Execute()
assert.ErrorContains(t, err, "unable to access local containerd")
}
func TestInitHappy(t *testing.T) {
testCli.SetContainerizedEngineClient(
func(string) (containerizedengine.Client, error) {
return &fakeContainerizedEngineClient{}, nil
},
)
cmd := newInitCommand(testCli)
err := cmd.Execute()
assert.NilError(t, err)
}

cli/command/engine/rm.go Normal file (54 lines)
View File

@ -0,0 +1,54 @@
package engine
import (
"context"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
// TODO - consider adding a "purge" flag that also removes
// configuration files and the docker root dir.
type rmOptions struct {
sockPath string
}
func newRmCommand(dockerCli command.Cli) *cobra.Command {
var options rmOptions
cmd := &cobra.Command{
Use: "rm [OPTIONS]",
Short: "Remove the local engine",
Long: `This command will remove the local engine running on containerd.
No state files will be removed from the host filesystem.
`,
Args: cli.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
return runRm(dockerCli, options)
},
Annotations: map[string]string{"experimentalCLI": ""},
}
flags := cmd.Flags()
flags.StringVar(&options.sockPath, "containerd", "", "Override default location of containerd endpoint")
return cmd
}
func runRm(dockerCli command.Cli, options rmOptions) error {
ctx := context.Background()
client, err := dockerCli.NewContainerizedEngineClient(options.sockPath)
if err != nil {
return errors.Wrap(err, "unable to access local containerd")
}
defer client.Close()
engine, err := client.GetEngine(ctx)
if err != nil {
return err
}
return client.RemoveEngine(ctx, engine)
}

View File

@ -0,0 +1,33 @@
package engine
import (
"fmt"
"testing"
"github.com/docker/cli/internal/containerizedengine"
"gotest.tools/assert"
)
func TestRmNoContainerd(t *testing.T) {
testCli.SetContainerizedEngineClient(
func(string) (containerizedengine.Client, error) {
return nil, fmt.Errorf("some error")
},
)
cmd := newRmCommand(testCli)
cmd.SilenceUsage = true
cmd.SilenceErrors = true
err := cmd.Execute()
assert.ErrorContains(t, err, "unable to access local containerd")
}
func TestRmHappy(t *testing.T) {
testCli.SetContainerizedEngineClient(
func(string) (containerizedengine.Client, error) {
return &fakeContainerizedEngineClient{}, nil
},
)
cmd := newRmCommand(testCli)
err := cmd.Execute()
assert.NilError(t, err)
}

View File

@ -0,0 +1,11 @@
TYPE VERSION NOTES
current 1.1.0
patch 1.1.1 https://docs.docker.com/releasenotes/1.1.1
patch 1.1.2 https://docs.docker.com/releasenotes/1.1.2
patch 1.1.3-beta1 https://docs.docker.com/releasenotes/1.1.3-beta1
upgrade 1.2.0 https://docs.docker.com/releasenotes/1.2.0
upgrade 2.0.0 https://docs.docker.com/releasenotes/2.0.0
upgrade 2.1.0-beta1 https://docs.docker.com/releasenotes/2.1.0-beta1
downgrade 1.0.1 https://docs.docker.com/releasenotes/1.0.1
downgrade 1.0.2 https://docs.docker.com/releasenotes/1.0.2
downgrade 1.0.3-beta1 https://docs.docker.com/releasenotes/1.0.3-beta1

View File

@ -0,0 +1,6 @@
TYPE VERSION NOTES
current 1.1.0
patch 1.1.1 https://docs.docker.com/releasenotes/1.1.1
patch 1.1.2 https://docs.docker.com/releasenotes/1.1.2
upgrade 1.2.0 https://docs.docker.com/releasenotes/1.2.0
upgrade 2.0.0 https://docs.docker.com/releasenotes/2.0.0

View File

@ -0,0 +1,8 @@
TYPE VERSION NOTES
current 1.1.0
patch 1.1.1 https://docs.docker.com/releasenotes/1.1.1
patch 1.1.2 https://docs.docker.com/releasenotes/1.1.2
upgrade 1.2.0 https://docs.docker.com/releasenotes/1.2.0
upgrade 2.0.0 https://docs.docker.com/releasenotes/2.0.0
downgrade 1.0.1 https://docs.docker.com/releasenotes/1.0.1
downgrade 1.0.2 https://docs.docker.com/releasenotes/1.0.2

View File

@ -0,0 +1,4 @@
TYPE VERSION NOTES
current 1.1.0
patch 1.1.1 https://docs.docker.com/releasenotes/1.1.1
patch 1.1.2 https://docs.docker.com/releasenotes/1.1.2

View File

@ -0,0 +1,68 @@
package engine
import (
"context"
"fmt"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
func newUpdateCommand(dockerCli command.Cli) *cobra.Command {
var options extendedEngineInitOptions
cmd := &cobra.Command{
Use: "update [OPTIONS]",
Short: "Update a local engine",
Args: cli.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
return runUpdate(dockerCli, options)
},
}
flags := cmd.Flags()
flags.StringVar(&options.EngineVersion, "version", "", "Specify engine version")
flags.StringVar(&options.EngineImage, "engine-image", "", "Specify engine image")
flags.StringVar(&options.RegistryPrefix, "registry-prefix", "", "Override the current location where engine images are pulled")
flags.StringVar(&options.sockPath, "containerd", "", "Override default location of containerd endpoint")
return cmd
}
func runUpdate(dockerCli command.Cli, options extendedEngineInitOptions) error {
ctx := context.Background()
client, err := dockerCli.NewContainerizedEngineClient(options.sockPath)
if err != nil {
return errors.Wrap(err, "unable to access local containerd")
}
defer client.Close()
if options.EngineImage == "" || options.RegistryPrefix == "" {
currentOpts, err := client.GetCurrentEngineVersion(ctx)
if err != nil {
return err
}
if options.EngineImage == "" {
options.EngineImage = currentOpts.EngineImage
}
if options.RegistryPrefix == "" {
options.RegistryPrefix = currentOpts.RegistryPrefix
}
}
authConfig, err := getRegistryAuth(dockerCli, options.RegistryPrefix)
if err != nil {
return err
}
if err := client.DoUpdate(ctx, options.EngineInitOptions, dockerCli.Out(), authConfig,
func(ctx context.Context) error {
client := dockerCli.Client()
_, err := client.Ping(ctx)
return err
}); err != nil {
return err
}
fmt.Fprintln(dockerCli.Out(), "Success! The docker engine is now running.")
return nil
}

View File

@ -0,0 +1,35 @@
package engine
import (
"fmt"
"testing"
"github.com/docker/cli/internal/containerizedengine"
"gotest.tools/assert"
)
func TestUpdateNoContainerd(t *testing.T) {
testCli.SetContainerizedEngineClient(
func(string) (containerizedengine.Client, error) {
return nil, fmt.Errorf("some error")
},
)
cmd := newUpdateCommand(testCli)
cmd.SilenceUsage = true
cmd.SilenceErrors = true
err := cmd.Execute()
assert.ErrorContains(t, err, "unable to access local containerd")
}
func TestUpdateHappy(t *testing.T) {
testCli.SetContainerizedEngineClient(
func(string) (containerizedengine.Client, error) {
return &fakeContainerizedEngineClient{}, nil
},
)
cmd := newUpdateCommand(testCli)
cmd.Flags().Set("registry-prefix", "docker.io/docker")
cmd.Flags().Set("version", "someversion")
err := cmd.Execute()
assert.NilError(t, err)
}

View File

@ -0,0 +1,154 @@
package formatter
import (
"time"
"github.com/docker/cli/internal/licenseutils"
"github.com/docker/licensing/model"
)
const (
defaultSubscriptionsTableFormat = "table {{.Num}}\t{{.Owner}}\t{{.ProductID}}\t{{.Expires}}\t{{.ComponentsString}}"
defaultSubscriptionsQuietFormat = "{{.Num}}:{{.Summary}}"
numHeader = "NUM"
ownerHeader = "OWNER"
licenseNameHeader = "NAME"
idHeader = "ID"
dockerIDHeader = "DOCKER ID"
productIDHeader = "PRODUCT ID"
productRatePlanHeader = "PRODUCT RATE PLAN"
productRatePlanIDHeader = "PRODUCT RATE PLAN ID"
startHeader = "START"
expiresHeader = "EXPIRES"
stateHeader = "STATE"
eusaHeader = "EUSA"
pricingComponentsHeader = "PRICING COMPONENTS"
)
// NewSubscriptionsFormat returns a Format for rendering using a license Context
func NewSubscriptionsFormat(source string, quiet bool) Format {
switch source {
case TableFormatKey:
if quiet {
return defaultSubscriptionsQuietFormat
}
return defaultSubscriptionsTableFormat
case RawFormatKey:
if quiet {
return `license: {{.ID}}`
}
return `license: {{.ID}}\nname: {{.Name}}\nowner: {{.Owner}}\ncomponents: {{.ComponentsString}}\n`
}
return Format(source)
}
// SubscriptionsWrite writes the context
func SubscriptionsWrite(ctx Context, subs []licenseutils.LicenseDisplay) error {
render := func(format func(subContext subContext) error) error {
for _, sub := range subs {
licenseCtx := &licenseContext{trunc: ctx.Trunc, l: sub}
if err := format(licenseCtx); err != nil {
return err
}
}
return nil
}
licenseCtx := licenseContext{}
licenseCtx.header = map[string]string{
"Num": numHeader,
"Owner": ownerHeader,
"Name": licenseNameHeader,
"ID": idHeader,
"DockerID": dockerIDHeader,
"ProductID": productIDHeader,
"ProductRatePlan": productRatePlanHeader,
"ProductRatePlanID": productRatePlanIDHeader,
"Start": startHeader,
"Expires": expiresHeader,
"State": stateHeader,
"Eusa": eusaHeader,
"ComponentsString": pricingComponentsHeader,
}
return ctx.Write(&licenseCtx, render)
}
type licenseContext struct {
HeaderContext
trunc bool
l licenseutils.LicenseDisplay
}
func (c *licenseContext) MarshalJSON() ([]byte, error) {
return marshalJSON(c)
}
func (c *licenseContext) Num() int {
return c.l.Num
}
func (c *licenseContext) Owner() string {
return c.l.Owner
}
func (c *licenseContext) ComponentsString() string {
return c.l.ComponentsString
}
func (c *licenseContext) Summary() string {
return c.l.String()
}
func (c *licenseContext) Name() string {
return c.l.Name
}
func (c *licenseContext) ID() string {
return c.l.ID
}
func (c *licenseContext) DockerID() string {
return c.l.DockerID
}
func (c *licenseContext) ProductID() string {
return c.l.ProductID
}
func (c *licenseContext) ProductRatePlan() string {
return c.l.ProductRatePlan
}
func (c *licenseContext) ProductRatePlanID() string {
return c.l.ProductRatePlanID
}
func (c *licenseContext) Start() *time.Time {
return c.l.Start
}
func (c *licenseContext) Expires() *time.Time {
return c.l.Expires
}
func (c *licenseContext) State() string {
return c.l.State
}
func (c *licenseContext) Eusa() *model.EusaState {
return c.l.Eusa
}
func (c *licenseContext) PricingComponents() []model.SubscriptionPricingComponent {
// Dereference the pricing component pointers in the pricing components
// so they can be rendered properly with the template formatter
var ret []model.SubscriptionPricingComponent
for _, spc := range c.l.PricingComponents {
if spc == nil {
continue
}
ret = append(ret, *spc)
}
return ret
}

View File

@ -0,0 +1,256 @@
package formatter
import (
"bytes"
"encoding/json"
"strings"
"testing"
"time"
"github.com/docker/cli/internal/licenseutils"
"github.com/docker/licensing/model"
"gotest.tools/assert"
is "gotest.tools/assert/cmp"
)
func TestSubscriptionContextWrite(t *testing.T) {
cases := []struct {
context Context
expected string
}{
// Errors
{
Context{Format: "{{InvalidFunction}}"},
`Template parsing error: template: :1: function "InvalidFunction" not defined
`,
},
{
Context{Format: "{{nil}}"},
`Template parsing error: template: :1:2: executing "" at <nil>: nil is not a command
`,
},
// Table format
{
Context{Format: NewSubscriptionsFormat("table", false)},
`NUM OWNER PRODUCT ID EXPIRES PRICING COMPONENTS
1 owner1 productid1 2020-01-01 10:00:00 +0000 UTC compstring
2 owner2 productid2 2020-01-01 10:00:00 +0000 UTC compstring
`,
},
{
Context{Format: NewSubscriptionsFormat("table", true)},
`1:License Name: name1 Quantity: 10 nodes Expiration date: 2020-01-01
2:License Name: name2 Quantity: 20 nodes Expiration date: 2020-01-01
`,
},
{
Context{Format: NewSubscriptionsFormat("table {{.Owner}}", false)},
`OWNER
owner1
owner2
`,
},
{
Context{Format: NewSubscriptionsFormat("table {{.Owner}}", true)},
`OWNER
owner1
owner2
`,
},
// Raw Format
{
Context{Format: NewSubscriptionsFormat("raw", false)},
`license: id1
name: name1
owner: owner1
components: compstring
license: id2
name: name2
owner: owner2
components: compstring
`,
},
{
Context{Format: NewSubscriptionsFormat("raw", true)},
`license: id1
license: id2
`,
},
// Custom Format
{
Context{Format: NewSubscriptionsFormat("{{.Owner}}", false)},
`owner1
owner2
`,
},
}
expiration, _ := time.Parse(time.RFC822, "01 Jan 20 10:00 UTC")
for _, testcase := range cases {
subscriptions := []licenseutils.LicenseDisplay{
{
Num: 1,
Owner: "owner1",
Subscription: model.Subscription{
ID: "id1",
Name: "name1",
ProductID: "productid1",
Expires: &expiration,
PricingComponents: model.PricingComponents{
&model.SubscriptionPricingComponent{
Name: "nodes",
Value: 10,
},
},
},
ComponentsString: "compstring",
},
{
Num: 2,
Owner: "owner2",
Subscription: model.Subscription{
ID: "id2",
Name: "name2",
ProductID: "productid2",
Expires: &expiration,
PricingComponents: model.PricingComponents{
&model.SubscriptionPricingComponent{
Name: "nodes",
Value: 20,
},
},
},
ComponentsString: "compstring",
},
}
out := &bytes.Buffer{}
testcase.context.Output = out
err := SubscriptionsWrite(testcase.context, subscriptions)
if err != nil {
assert.Error(t, err, testcase.expected)
} else {
assert.Check(t, is.Equal(testcase.expected, out.String()))
}
}
}
func TestSubscriptionContextWriteJSON(t *testing.T) {
expiration, _ := time.Parse(time.RFC822, "01 Jan 20 10:00 UTC")
subscriptions := []licenseutils.LicenseDisplay{
{
Num: 1,
Owner: "owner1",
Subscription: model.Subscription{
ID: "id1",
Name: "name1",
ProductID: "productid1",
Expires: &expiration,
PricingComponents: model.PricingComponents{
&model.SubscriptionPricingComponent{
Name: "nodes",
Value: 10,
},
},
},
ComponentsString: "compstring",
},
{
Num: 2,
Owner: "owner2",
Subscription: model.Subscription{
ID: "id2",
Name: "name2",
ProductID: "productid2",
Expires: &expiration,
PricingComponents: model.PricingComponents{
&model.SubscriptionPricingComponent{
Name: "nodes",
Value: 20,
},
},
},
ComponentsString: "compstring",
},
}
expectedJSONs := []map[string]interface{}{
{
"Owner": "owner1",
"ComponentsString": "compstring",
"Expires": "2020-01-01T10:00:00Z",
"DockerID": "",
"Eusa": nil,
"ID": "id1",
"Start": nil,
"Name": "name1",
"Num": float64(1),
"PricingComponents": []interface{}{
map[string]interface{}{
"name": "nodes",
"value": float64(10),
},
},
"ProductID": "productid1",
"ProductRatePlan": "",
"ProductRatePlanID": "",
"State": "",
"Summary": "License Name: name1\tQuantity: 10 nodes\tExpiration date: 2020-01-01",
},
{
"Owner": "owner2",
"ComponentsString": "compstring",
"Expires": "2020-01-01T10:00:00Z",
"DockerID": "",
"Eusa": nil,
"ID": "id2",
"Start": nil,
"Name": "name2",
"Num": float64(2),
"PricingComponents": []interface{}{
map[string]interface{}{
"name": "nodes",
"value": float64(20),
},
},
"ProductID": "productid2",
"ProductRatePlan": "",
"ProductRatePlanID": "",
"State": "",
"Summary": "License Name: name2\tQuantity: 20 nodes\tExpiration date: 2020-01-01",
},
}
out := &bytes.Buffer{}
err := SubscriptionsWrite(Context{Format: "{{json .}}", Output: out}, subscriptions)
if err != nil {
t.Fatal(err)
}
for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") {
var m map[string]interface{}
if err := json.Unmarshal([]byte(line), &m); err != nil {
t.Fatal(err)
}
assert.Check(t, is.DeepEqual(expectedJSONs[i], m))
}
}
func TestSubscriptionContextWriteJSONField(t *testing.T) {
subscriptions := []licenseutils.LicenseDisplay{
{Num: 1, Owner: "owner1"},
{Num: 2, Owner: "owner2"},
}
out := &bytes.Buffer{}
err := SubscriptionsWrite(Context{Format: "{{json .Owner}}", Output: out}, subscriptions)
if err != nil {
t.Fatal(err)
}
for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") {
var s string
if err := json.Unmarshal([]byte(line), &s); err != nil {
t.Fatal(err)
}
assert.Check(t, is.Equal(subscriptions[i].Owner, s))
}
}

View File

@ -0,0 +1,73 @@
package formatter
import (
"github.com/docker/cli/internal/containerizedengine"
)
const (
defaultUpdatesTableFormat = "table {{.Type}}\t{{.Version}}\t{{.Notes}}"
defaultUpdatesQuietFormat = "{{.Version}}"
updatesTypeHeader = "TYPE"
versionHeader = "VERSION"
notesHeader = "NOTES"
)
// NewUpdatesFormat returns a Format for rendering using an updates context
func NewUpdatesFormat(source string, quiet bool) Format {
switch source {
case TableFormatKey:
if quiet {
return defaultUpdatesQuietFormat
}
return defaultUpdatesTableFormat
case RawFormatKey:
if quiet {
return `update_version: {{.Version}}`
}
return `update_version: {{.Version}}\ntype: {{.Type}}\nnotes: {{.Notes}}\n`
}
return Format(source)
}
// UpdatesWrite writes the context
func UpdatesWrite(ctx Context, availableUpdates []containerizedengine.Update) error {
render := func(format func(subContext subContext) error) error {
for _, update := range availableUpdates {
updatesCtx := &updateContext{trunc: ctx.Trunc, u: update}
if err := format(updatesCtx); err != nil {
return err
}
}
return nil
}
updatesCtx := updateContext{}
updatesCtx.header = map[string]string{
"Type": updatesTypeHeader,
"Version": versionHeader,
"Notes": notesHeader,
}
return ctx.Write(&updatesCtx, render)
}
type updateContext struct {
HeaderContext
trunc bool
u containerizedengine.Update
}
func (c *updateContext) MarshalJSON() ([]byte, error) {
return marshalJSON(c)
}
func (c *updateContext) Type() string {
return c.u.Type
}
func (c *updateContext) Version() string {
return c.u.Version
}
func (c *updateContext) Notes() string {
return c.u.Notes
}

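Note: a hedged usage sketch of the new updates formatter (a standalone program assumed to live inside the docker/cli tree, since containerizedengine is an internal package; the Update values are illustrative). TableFormatKey selects the table format above; passing quiet=true would print only the bare versions.

package main

import (
	"os"

	"github.com/docker/cli/cli/command/formatter"
	"github.com/docker/cli/internal/containerizedengine"
)

// Renders one update row with the default table format defined above.
func main() {
	ctx := formatter.Context{
		Output: os.Stdout,
		Format: formatter.NewUpdatesFormat(formatter.TableFormatKey, false),
	}
	_ = formatter.UpdatesWrite(ctx, []containerizedengine.Update{
		{Type: "patch", Version: "1.1.1", Notes: "https://docs.docker.com/releasenotes/1.1.1"},
	})
}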
View File

@ -0,0 +1,143 @@
package formatter
import (
"bytes"
"encoding/json"
"strings"
"testing"
"github.com/docker/cli/internal/containerizedengine"
"gotest.tools/assert"
is "gotest.tools/assert/cmp"
)
func TestUpdateContextWrite(t *testing.T) {
cases := []struct {
context Context
expected string
}{
// Errors
{
Context{Format: "{{InvalidFunction}}"},
`Template parsing error: template: :1: function "InvalidFunction" not defined
`,
},
{
Context{Format: "{{nil}}"},
`Template parsing error: template: :1:2: executing "" at <nil>: nil is not a command
`,
},
// Table format
{
Context{Format: NewUpdatesFormat("table", false)},
`TYPE VERSION NOTES
updateType1 version1 description 1
updateType2 version2 description 2
`,
},
{
Context{Format: NewUpdatesFormat("table", true)},
`version1
version2
`,
},
{
Context{Format: NewUpdatesFormat("table {{.Version}}", false)},
`VERSION
version1
version2
`,
},
{
Context{Format: NewUpdatesFormat("table {{.Version}}", true)},
`VERSION
version1
version2
`,
},
// Raw Format
{
Context{Format: NewUpdatesFormat("raw", false)},
`update_version: version1
type: updateType1
notes: description 1
update_version: version2
type: updateType2
notes: description 2
`,
},
{
Context{Format: NewUpdatesFormat("raw", true)},
`update_version: version1
update_version: version2
`,
},
// Custom Format
{
Context{Format: NewUpdatesFormat("{{.Version}}", false)},
`version1
version2
`,
},
}
for _, testcase := range cases {
updates := []containerizedengine.Update{
{Type: "updateType1", Version: "version1", Notes: "description 1"},
{Type: "updateType2", Version: "version2", Notes: "description 2"},
}
out := &bytes.Buffer{}
testcase.context.Output = out
err := UpdatesWrite(testcase.context, updates)
if err != nil {
assert.Error(t, err, testcase.expected)
} else {
assert.Check(t, is.Equal(testcase.expected, out.String()))
}
}
}
func TestUpdateContextWriteJSON(t *testing.T) {
updates := []containerizedengine.Update{
{Type: "updateType1", Version: "version1", Notes: "note1"},
{Type: "updateType2", Version: "version2", Notes: "note2"},
}
expectedJSONs := []map[string]interface{}{
{"Version": "version1", "Notes": "note1", "Type": "updateType1"},
{"Version": "version2", "Notes": "note2", "Type": "updateType2"},
}
out := &bytes.Buffer{}
err := UpdatesWrite(Context{Format: "{{json .}}", Output: out}, updates)
if err != nil {
t.Fatal(err)
}
for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") {
var m map[string]interface{}
if err := json.Unmarshal([]byte(line), &m); err != nil {
t.Fatal(err)
}
assert.Check(t, is.DeepEqual(expectedJSONs[i], m))
}
}
func TestUpdateContextWriteJSONField(t *testing.T) {
updates := []containerizedengine.Update{
{Type: "updateType1", Version: "version1"},
{Type: "updateType2", Version: "version2"},
}
out := &bytes.Buffer{}
err := UpdatesWrite(Context{Format: "{{json .Type}}", Output: out}, updates)
if err != nil {
t.Fatal(err)
}
for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") {
var s string
if err := json.Unmarshal([]byte(line), &s); err != nil {
t.Fatal(err)
}
assert.Check(t, is.Equal(updates[i].Type, s))
}
}

View File

@ -15,6 +15,7 @@ type fakeRegistryClient struct {
getManifestListFunc func(ctx context.Context, ref reference.Named) ([]manifesttypes.ImageManifest, error)
mountBlobFunc func(ctx context.Context, source reference.Canonical, target reference.Named) error
putManifestFunc func(ctx context.Context, source reference.Named, mf distribution.Manifest) (digest.Digest, error)
getTagsFunc func(ctx context.Context, ref reference.Named) ([]string, error)
}
func (c *fakeRegistryClient) GetManifest(ctx context.Context, ref reference.Named) (manifesttypes.ImageManifest, error) {
@ -45,4 +46,11 @@ func (c *fakeRegistryClient) PutManifest(ctx context.Context, ref reference.Name
return digest.Digest(""), nil
}
func (c *fakeRegistryClient) GetTags(ctx context.Context, ref reference.Named) ([]string, error) {
if c.getTagsFunc != nil {
return c.getTagsFunc(ctx, ref)
}
return nil, nil
}
var _ client.RegistryClient = &fakeRegistryClient{}

View File

@ -11,6 +11,7 @@ import (
"runtime"
"strings"
"github.com/docker/cli/cli/debug"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
registrytypes "github.com/docker/docker/api/types/registry"
@ -26,9 +27,10 @@ func ElectAuthServer(ctx context.Context, cli Cli) string {
// example a Linux client might be interacting with a Windows daemon, hence
// the default registry URL might be Windows specific.
serverAddress := registry.IndexServer
-if info, err := cli.Client().Info(ctx); err != nil {
+if info, err := cli.Client().Info(ctx); err != nil && debug.IsEnabled() {
+// Only report the warning if we're in debug mode to prevent nagging during engine initialization workflows
fmt.Fprintf(cli.Err(), "Warning: failed to get default registry endpoint from daemon (%v). Using system default: %s\n", err, serverAddress)
-} else if info.IndexServerAddress == "" {
+} else if info.IndexServerAddress == "" && debug.IsEnabled() {
fmt.Fprintf(cli.Err(), "Warning: Empty registry endpoint from daemon. Using system default: %s\n", serverAddress)
} else {
serverAddress = info.IndexServerAddress

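Note: after this change the two fallback warnings only surface when debug mode is on (the updated test below calls debug.Enable() for exactly this reason). A sketch of how a caller would opt back in, assuming the hypothetical wrapper name:

package loginexample // hypothetical

import (
	"context"

	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/cli/debug"
)

// electVerbose re-enables the now debug-gated warnings before electing the
// auth server; with debug off, ElectAuthServer silently falls back to the
// system default registry endpoint.
func electVerbose(ctx context.Context, cli command.Cli) string {
	debug.Enable() // what --debug does in the real CLI
	return command.ElectAuthServer(ctx, cli)
}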
View File

@ -125,6 +125,11 @@ func runLogin(dockerCli command.Cli, opts loginOptions) error { //nolint: gocycl
}
response, err = clnt.RegistryLogin(ctx, *authConfig)
if err != nil && client.IsErrConnectionFailed(err) {
// If the server isn't responding (yet), attempt to log in purely client-side
response, err = loginClientSide(ctx, *authConfig)
}
// If we (still) have an error, give up
if err != nil {
return err
}
@ -167,3 +172,17 @@ func loginWithCredStoreCreds(ctx context.Context, dockerCli command.Cli, authCon
}
return response, err
}
func loginClientSide(ctx context.Context, auth types.AuthConfig) (registrytypes.AuthenticateOKBody, error) {
svc, err := registry.NewService(registry.ServiceOptions{})
if err != nil {
return registrytypes.AuthenticateOKBody{}, err
}
status, token, err := svc.Auth(ctx, &auth, command.UserAgent())
return registrytypes.AuthenticateOKBody{
Status: status,
IdentityToken: token,
}, err
}

View File

@ -13,6 +13,7 @@ import (
// Prevents a circular import with "github.com/docker/cli/internal/test"
. "github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/debug"
"github.com/docker/cli/internal/test"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
@ -78,6 +79,8 @@ func TestElectAuthServer(t *testing.T) {
},
},
}
// Enable debug to see warnings we're checking for
debug.Enable()
for _, tc := range testCases {
cli := test.NewFakeCli(&fakeClient{infoFunc: tc.infoFunc})
server := ElectAuthServer(context.Background(), cli)

View File

@ -7,6 +7,7 @@ import (
"strings"
manifesttypes "github.com/docker/cli/cli/manifest/types"
"github.com/docker/cli/cli/trust"
"github.com/docker/distribution"
"github.com/docker/distribution/reference"
distributionclient "github.com/docker/distribution/registry/client"
@ -24,6 +25,7 @@ type RegistryClient interface {
GetManifestList(ctx context.Context, ref reference.Named) ([]manifesttypes.ImageManifest, error)
MountBlob(ctx context.Context, source reference.Canonical, target reference.Named) error
PutManifest(ctx context.Context, ref reference.Named, manifest distribution.Manifest) (digest.Digest, error)
GetTags(ctx context.Context, ref reference.Named) ([]string, error)
}
// NewRegistryClient returns a new RegistryClient with a resolver
@ -122,6 +124,19 @@ func (c *client) PutManifest(ctx context.Context, ref reference.Named, manifest
return dgst, errors.Wrapf(err, "failed to put manifest %s", ref)
}
func (c *client) GetTags(ctx context.Context, ref reference.Named) ([]string, error) {
repoEndpoint, err := newDefaultRepositoryEndpoint(ref, c.insecureRegistry)
if err != nil {
return nil, err
}
repo, err := c.getRepositoryForReference(ctx, ref, repoEndpoint)
if err != nil {
return nil, err
}
return repo.Tags(ctx).All(ctx)
}
func (c *client) getRepositoryForReference(ctx context.Context, ref reference.Named, repoEndpoint repositoryEndpoint) (distribution.Repository, error) {
httpTransport, err := c.getHTTPTransportForRepoEndpoint(ctx, repoEndpoint)
if err != nil {
@ -181,3 +196,16 @@ func getManifestOptionsFromReference(ref reference.Named) (digest.Digest, []dist
}
return "", nil, errors.Errorf("%s no tag or digest", ref)
}
// GetRegistryAuth returns the auth config given an input image
func GetRegistryAuth(ctx context.Context, resolver AuthConfigResolver, imageName string) (*types.AuthConfig, error) {
distributionRef, err := reference.ParseNormalizedNamed(imageName)
if err != nil {
return nil, fmt.Errorf("Failed to parse image name: %s: %s", imageName, err)
}
imgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, nil, resolver, distributionRef.String())
if err != nil {
return nil, fmt.Errorf("Failed to get imgRefAndAuth: %s", err)
}
return imgRefAndAuth.AuthConfig(), nil
}

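Note: a minimal sketch of calling the new RegistryClient.GetTags (the image name is illustrative; this is the primitive that lets `docker engine check` enumerate candidate versions from registry tags):

package tagsexample // hypothetical

import (
	"context"

	registryclient "github.com/docker/cli/cli/registry/client"
	"github.com/docker/distribution/reference"
)

// listTags normalizes an image name and lists its tags through the
// GetTags method added above.
func listTags(ctx context.Context, rc registryclient.RegistryClient) ([]string, error) {
	ref, err := reference.ParseNormalizedNamed("docker/engine-community")
	if err != nil {
		return nil, err
	}
	return rc.GetTags(ctx, ref)
}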
View File

@ -105,7 +105,7 @@ shellcheck: build_shell_validate_image ## run shellcheck validation
docker run -ti --rm $(ENVVARS) $(MOUNTS) $(VALIDATE_IMAGE_NAME) make shellcheck
.PHONY: test-e2e ## run e2e tests
-test-e2e: test-e2e-non-experimental test-e2e-experimental
+test-e2e: test-e2e-non-experimental test-e2e-experimental test-e2e-containerized
.PHONY: test-e2e-experimental
test-e2e-experimental: build_e2e_image
@ -115,6 +115,14 @@ test-e2e-experimental: build_e2e_image
test-e2e-non-experimental: build_e2e_image
docker run --rm -v /var/run/docker.sock:/var/run/docker.sock $(E2E_IMAGE_NAME)
.PHONY: test-e2e-containerized
test-e2e-containerized: build_e2e_image
docker run --rm --privileged \
-v /var/lib/docker \
-v /var/lib/containerd \
-v /lib/modules:/lib/modules \
$(E2E_IMAGE_NAME) /go/src/github.com/docker/cli/scripts/test/engine/entry
.PHONY: help
help: ## print this help
@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)

View File

@ -1,4 +1,7 @@
ARG GO_VERSION=1.10.3
FROM docker/containerd-shim-process:a4d1531 AS containerd-shim-process
# Use a Debian-based image as docker-compose requires glibc.
FROM golang:${GO_VERSION}
@ -6,8 +9,34 @@ RUN apt-get update && apt-get install -y \
build-essential \
curl \
openssl \
btrfs-tools \
libapparmor-dev \
libseccomp-dev \
iptables \
&& rm -rf /var/lib/apt/lists/*
# TODO - consider replacing with an official image and a multi-stage build to pluck the binaries out
#ARG CONTAINERD_VERSION=v1.1.2
#ARG CONTAINERD_VERSION=47a128d
#ARG CONTAINERD_VERSION=6c3e782f
ARG CONTAINERD_VERSION=65839a47a88b0a1c5dc34981f1741eccefc9f2b0
RUN git clone https://github.com/containerd/containerd.git /go/src/github.com/containerd/containerd && \
cd /go/src/github.com/containerd/containerd && \
git checkout ${CONTAINERD_VERSION} && \
make && \
make install
COPY e2eengine/config.toml /etc/containerd/config.toml
COPY --from=containerd-shim-process /bin/containerd-shim-process-v1 /bin/
# TODO - consider replacing with an official image and a multi-stage build to pluck the binaries out
ARG RUNC_VERSION=v1.0.0-rc5
RUN git clone https://github.com/opencontainers/runc.git /go/src/github.com/opencontainers/runc && \
cd /go/src/github.com/opencontainers/runc && \
git checkout ${RUNC_VERSION} && \
make && \
make install
ARG COMPOSE_VERSION=1.21.2
RUN curl -L https://github.com/docker/compose/releases/download/${COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose \
&& chmod +x /usr/local/bin/docker-compose

View File

@ -0,0 +1,42 @@
package check
import (
"os"
"testing"
"github.com/docker/cli/e2eengine"
"gotest.tools/icmd"
)
func TestDockerEngineOnContainerdAltRootConfig(t *testing.T) {
defer func() {
err := e2eengine.CleanupEngine(t)
if err != nil {
t.Errorf("Failed to cleanup engine: %s", err)
}
}()
t.Log("First engine init")
// First init
result := icmd.RunCmd(icmd.Command("docker", "engine", "init", "--config-file", "/tmp/etc/docker/daemon.json"),
func(c *icmd.Cmd) {
c.Env = append(c.Env, "DOCKER_CLI_EXPERIMENTAL=enabled")
})
result.Assert(t, icmd.Expected{
Out: "Success! The docker engine is now running.",
Err: "",
ExitCode: 0,
})
// Make sure update doesn't blow up with alternate config path
t.Log("perform update")
// Now update and succeed
targetVersion := os.Getenv("VERSION")
result = icmd.RunCmd(icmd.Command("docker", "engine", "update", "--version", targetVersion))
result.Assert(t, icmd.Expected{
Out: "Success! The docker engine is now running.",
Err: "",
ExitCode: 0,
})
}

14
e2eengine/config.toml Normal file
View File

@ -0,0 +1,14 @@
root = "/var/lib/containerd"
state = "/run/containerd"
oom_score = 0
[grpc]
address = "/run/containerd/containerd.sock"
uid = 0
gid = 0
[debug]
address = "/run/containerd/debug.sock"
uid = 0
gid = 0
level = "debug"

View File

@ -0,0 +1,85 @@
package multi
import (
"os"
"testing"
"github.com/docker/cli/e2eengine"
"gotest.tools/icmd"
)
func TestDockerEngineOnContainerdMultiTest(t *testing.T) {
defer func() {
err := e2eengine.CleanupEngine(t)
if err != nil {
t.Errorf("Failed to cleanup engine: %s", err)
}
}()
t.Log("Attempt engine init without experimental")
// First init
result := icmd.RunCmd(icmd.Command("docker", "engine", "init"),
func(c *icmd.Cmd) {
c.Env = append(c.Env, "DOCKER_CLI_EXPERIMENTAL=disabled")
})
result.Assert(t, icmd.Expected{
Out: "",
Err: "docker engine init is only supported",
ExitCode: 1,
})
t.Log("First engine init")
// First init
result = icmd.RunCmd(icmd.Command("docker", "engine", "init"),
func(c *icmd.Cmd) {
c.Env = append(c.Env, "DOCKER_CLI_EXPERIMENTAL=enabled")
})
result.Assert(t, icmd.Expected{
Out: "Success! The docker engine is now running.",
Err: "",
ExitCode: 0,
})
t.Log("checking for updates")
// Check for updates
result = icmd.RunCmd(icmd.Command("docker", "engine", "check", "--downgrades", "--pre-releases"))
result.Assert(t, icmd.Expected{
Out: "VERSION",
Err: "",
ExitCode: 0,
})
t.Log("attempt second init (should fail)")
// Attempt to init a second time and fail
result = icmd.RunCmd(icmd.Command("docker", "engine", "init"),
func(c *icmd.Cmd) {
c.Env = append(c.Env, "DOCKER_CLI_EXPERIMENTAL=enabled")
})
result.Assert(t, icmd.Expected{
Out: "",
Err: "engine already present",
ExitCode: 1,
})
t.Log("perform update")
// Now update and succeed
targetVersion := os.Getenv("VERSION")
result = icmd.RunCmd(icmd.Command("docker", "engine", "update", "--version", targetVersion))
result.Assert(t, icmd.Expected{
Out: "Success! The docker engine is now running.",
Err: "",
ExitCode: 0,
})
t.Log("remove engine")
result = icmd.RunCmd(icmd.Command("docker", "engine", "rm"),
func(c *icmd.Cmd) {
c.Env = append(c.Env, "DOCKER_CLI_EXPERIMENTAL=enabled")
})
result.Assert(t, icmd.Expected{
Out: "",
Err: "",
ExitCode: 0,
})
}
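
The first assertion pins the experimental gate: with DOCKER_CLI_EXPERIMENTAL=disabled the command must fail before touching containerd. A distilled sketch of such a gate, assuming a hypothetical requireExperimental helper rather than the CLI's actual command wiring (the error suffix is illustrative; the test only asserts the prefix):

package main

import (
	"errors"
	"os"
)

// requireExperimental mirrors the failure mode asserted above.
func requireExperimental() error {
	if os.Getenv("DOCKER_CLI_EXPERIMENTAL") != "enabled" {
		return errors.New("docker engine init is only supported on a CLI with experimental features enabled")
	}
	return nil
}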

e2eengine/utils.go
@@ -0,0 +1,39 @@
package e2eengine
import (
"context"
"strings"
"testing"
"github.com/docker/cli/internal/containerizedengine"
)
// CleanupEngine ensures the local engine has been removed between testcases
func CleanupEngine(t *testing.T) error {
t.Log("doing engine cleanup")
ctx := context.Background()
client, err := containerizedengine.NewClient("")
if err != nil {
return err
}
// See if the engine exists first
engine, err := client.GetEngine(ctx)
if err != nil {
if strings.Contains(err.Error(), "not present") {
t.Log("engine was not detected, no cleanup to perform")
// Nothing to do, it's not defined
return nil
}
t.Logf("failed to lookup engine: %s", err)
// Any other error is not good...
return err
}
// TODO Consider nuking the docker dir too so there's no cached content between test cases
err = client.RemoveEngine(ctx, engine)
if err != nil {
t.Logf("Failed to remove engine: %s", err)
}
return err
}
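
GetEngine returns ErrEngineNotPresent unwrapped (see the client code later in this diff), so the substring match on "not present" could instead be a sentinel comparison, which will not silently break if the message wording changes. A sketch of that variant:

package e2eengine

import (
	"context"
	"testing"

	"github.com/docker/cli/internal/containerizedengine"
)

func cleanupEngineBySentinel(t *testing.T) error {
	ctx := context.Background()
	client, err := containerizedengine.NewClient("")
	if err != nil {
		return err
	}
	engine, err := client.GetEngine(ctx)
	if err == containerizedengine.ErrEngineNotPresent {
		t.Log("engine was not detected, no cleanup to perform")
		return nil
	}
	if err != nil {
		return err
	}
	return client.RemoveEngine(ctx, engine)
}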

@@ -0,0 +1,348 @@
package containerizedengine
import (
"bytes"
"context"
"syscall"
"github.com/containerd/containerd"
containerdtypes "github.com/containerd/containerd/api/types"
"github.com/containerd/containerd/cio"
"github.com/containerd/containerd/containers"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/oci"
prototypes "github.com/gogo/protobuf/types"
"github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/opencontainers/runtime-spec/specs-go"
)
type (
fakeContainerdClient struct {
containersFunc func(ctx context.Context, filters ...string) ([]containerd.Container, error)
newContainerFunc func(ctx context.Context, id string, opts ...containerd.NewContainerOpts) (containerd.Container, error)
pullFunc func(ctx context.Context, ref string, opts ...containerd.RemoteOpt) (containerd.Image, error)
getImageFunc func(ctx context.Context, ref string) (containerd.Image, error)
contentStoreFunc func() content.Store
containerServiceFunc func() containers.Store
}
fakeContainer struct {
idFunc func() string
infoFunc func(context.Context) (containers.Container, error)
deleteFunc func(context.Context, ...containerd.DeleteOpts) error
newTaskFunc func(context.Context, cio.Creator, ...containerd.NewTaskOpts) (containerd.Task, error)
specFunc func(context.Context) (*oci.Spec, error)
taskFunc func(context.Context, cio.Attach) (containerd.Task, error)
imageFunc func(context.Context) (containerd.Image, error)
labelsFunc func(context.Context) (map[string]string, error)
setLabelsFunc func(context.Context, map[string]string) (map[string]string, error)
extensionsFunc func(context.Context) (map[string]prototypes.Any, error)
updateFunc func(context.Context, ...containerd.UpdateContainerOpts) error
}
fakeImage struct {
nameFunc func() string
targetFunc func() ocispec.Descriptor
unpackFunc func(context.Context, string) error
rootFSFunc func(ctx context.Context) ([]digest.Digest, error)
sizeFunc func(ctx context.Context) (int64, error)
configFunc func(ctx context.Context) (ocispec.Descriptor, error)
isUnpackedFunc func(context.Context, string) (bool, error)
contentStoreFunc func() content.Store
}
fakeTask struct {
idFunc func() string
pidFunc func() uint32
startFunc func(context.Context) error
deleteFunc func(context.Context, ...containerd.ProcessDeleteOpts) (*containerd.ExitStatus, error)
killFunc func(context.Context, syscall.Signal, ...containerd.KillOpts) error
waitFunc func(context.Context) (<-chan containerd.ExitStatus, error)
closeIOFunc func(context.Context, ...containerd.IOCloserOpts) error
resizeFunc func(ctx context.Context, w, h uint32) error
ioFunc func() cio.IO
statusFunc func(context.Context) (containerd.Status, error)
pauseFunc func(context.Context) error
resumeFunc func(context.Context) error
execFunc func(context.Context, string, *specs.Process, cio.Creator) (containerd.Process, error)
pidsFunc func(context.Context) ([]containerd.ProcessInfo, error)
checkpointFunc func(context.Context, ...containerd.CheckpointTaskOpts) (containerd.Image, error)
updateFunc func(context.Context, ...containerd.UpdateTaskOpts) error
loadProcessFunc func(context.Context, string, cio.Attach) (containerd.Process, error)
metricsFunc func(context.Context) (*containerdtypes.Metric, error)
}
testOutStream struct {
bytes.Buffer
}
)
func (w *fakeContainerdClient) Containers(ctx context.Context, filters ...string) ([]containerd.Container, error) {
if w.containersFunc != nil {
return w.containersFunc(ctx, filters...)
}
return []containerd.Container{}, nil
}
func (w *fakeContainerdClient) NewContainer(ctx context.Context, id string, opts ...containerd.NewContainerOpts) (containerd.Container, error) {
if w.newContainerFunc != nil {
return w.newContainerFunc(ctx, id, opts...)
}
return nil, nil
}
func (w *fakeContainerdClient) Pull(ctx context.Context, ref string, opts ...containerd.RemoteOpt) (containerd.Image, error) {
if w.pullFunc != nil {
return w.pullFunc(ctx, ref, opts...)
}
return nil, nil
}
func (w *fakeContainerdClient) GetImage(ctx context.Context, ref string) (containerd.Image, error) {
if w.getImageFunc != nil {
return w.getImageFunc(ctx, ref)
}
return nil, nil
}
func (w *fakeContainerdClient) ContentStore() content.Store {
if w.contentStoreFunc != nil {
return w.contentStoreFunc()
}
return nil
}
func (w *fakeContainerdClient) ContainerService() containers.Store {
if w.containerServiceFunc != nil {
return w.containerServiceFunc()
}
return nil
}
func (w *fakeContainerdClient) Close() error {
return nil
}
func (c *fakeContainer) ID() string {
if c.idFunc != nil {
return c.idFunc()
}
return ""
}
func (c *fakeContainer) Info(ctx context.Context) (containers.Container, error) {
if c.infoFunc != nil {
return c.infoFunc(ctx)
}
return containers.Container{}, nil
}
func (c *fakeContainer) Delete(ctx context.Context, opts ...containerd.DeleteOpts) error {
if c.deleteFunc != nil {
return c.deleteFunc(ctx, opts...)
}
return nil
}
func (c *fakeContainer) NewTask(ctx context.Context, ioc cio.Creator, opts ...containerd.NewTaskOpts) (containerd.Task, error) {
if c.newTaskFunc != nil {
return c.newTaskFunc(ctx, ioc, opts...)
}
return nil, nil
}
func (c *fakeContainer) Spec(ctx context.Context) (*oci.Spec, error) {
if c.specFunc != nil {
return c.specFunc(ctx)
}
return nil, nil
}
func (c *fakeContainer) Task(ctx context.Context, attach cio.Attach) (containerd.Task, error) {
if c.taskFunc != nil {
return c.taskFunc(ctx, attach)
}
return nil, nil
}
func (c *fakeContainer) Image(ctx context.Context) (containerd.Image, error) {
if c.imageFunc != nil {
return c.imageFunc(ctx)
}
return nil, nil
}
func (c *fakeContainer) Labels(ctx context.Context) (map[string]string, error) {
if c.labelsFunc != nil {
return c.labelsFunc(ctx)
}
return nil, nil
}
func (c *fakeContainer) SetLabels(ctx context.Context, labels map[string]string) (map[string]string, error) {
if c.setLabelsFunc != nil {
return c.setLabelsFunc(ctx, labels)
}
return nil, nil
}
func (c *fakeContainer) Extensions(ctx context.Context) (map[string]prototypes.Any, error) {
if c.extensionsFunc != nil {
return c.extensionsFunc(ctx)
}
return nil, nil
}
func (c *fakeContainer) Update(ctx context.Context, opts ...containerd.UpdateContainerOpts) error {
if c.updateFunc != nil {
return c.updateFunc(ctx, opts...)
}
return nil
}
func (i *fakeImage) Name() string {
if i.nameFunc != nil {
return i.nameFunc()
}
return ""
}
func (i *fakeImage) Target() ocispec.Descriptor {
if i.targetFunc != nil {
return i.targetFunc()
}
return ocispec.Descriptor{}
}
func (i *fakeImage) Unpack(ctx context.Context, name string) error {
if i.unpackFunc != nil {
return i.unpackFunc(ctx, name)
}
return nil
}
func (i *fakeImage) RootFS(ctx context.Context) ([]digest.Digest, error) {
if i.rootFSFunc != nil {
return i.rootFSFunc(ctx)
}
return nil, nil
}
func (i *fakeImage) Size(ctx context.Context) (int64, error) {
if i.sizeFunc != nil {
return i.sizeFunc(ctx)
}
return 0, nil
}
func (i *fakeImage) Config(ctx context.Context) (ocispec.Descriptor, error) {
if i.configFunc != nil {
return i.configFunc(ctx)
}
return ocispec.Descriptor{}, nil
}
func (i *fakeImage) IsUnpacked(ctx context.Context, name string) (bool, error) {
if i.isUnpackedFunc != nil {
return i.isUnpackedFunc(ctx, name)
}
return false, nil
}
func (i *fakeImage) ContentStore() content.Store {
if i.contentStoreFunc != nil {
return i.contentStoreFunc()
}
return nil
}
func (t *fakeTask) ID() string {
if t.idFunc != nil {
return t.idFunc()
}
return ""
}
func (t *fakeTask) Pid() uint32 {
if t.pidFunc != nil {
return t.pidFunc()
}
return 0
}
func (t *fakeTask) Start(ctx context.Context) error {
if t.startFunc != nil {
return t.startFunc(ctx)
}
return nil
}
func (t *fakeTask) Delete(ctx context.Context, opts ...containerd.ProcessDeleteOpts) (*containerd.ExitStatus, error) {
if t.deleteFunc != nil {
return t.deleteFunc(ctx, opts...)
}
return nil, nil
}
func (t *fakeTask) Kill(ctx context.Context, signal syscall.Signal, opts ...containerd.KillOpts) error {
if t.killFunc != nil {
return t.killFunc(ctx, signal, opts...)
}
return nil
}
func (t *fakeTask) Wait(ctx context.Context) (<-chan containerd.ExitStatus, error) {
if t.waitFunc != nil {
return t.waitFunc(ctx)
}
return nil, nil
}
func (t *fakeTask) CloseIO(ctx context.Context, opts ...containerd.IOCloserOpts) error {
if t.closeIOFunc != nil {
return t.closeIOFunc(ctx, opts...)
}
return nil
}
func (t *fakeTask) Resize(ctx context.Context, w, h uint32) error {
if t.resizeFunc != nil {
return t.resizeFunc(ctx, w, h)
}
return nil
}
func (t *fakeTask) IO() cio.IO {
if t.ioFunc != nil {
return t.ioFunc()
}
return nil
}
func (t *fakeTask) Status(ctx context.Context) (containerd.Status, error) {
if t.statusFunc != nil {
return t.statusFunc(ctx)
}
return containerd.Status{}, nil
}
func (t *fakeTask) Pause(ctx context.Context) error {
if t.pauseFunc != nil {
return t.pauseFunc(ctx)
}
return nil
}
func (t *fakeTask) Resume(ctx context.Context) error {
if t.resumeFunc != nil {
return t.resumeFunc(ctx)
}
return nil
}
func (t *fakeTask) Exec(ctx context.Context, cmd string, proc *specs.Process, ioc cio.Creator) (containerd.Process, error) {
if t.execFunc != nil {
return t.execFunc(ctx, cmd, proc, ioc)
}
return nil, nil
}
func (t *fakeTask) Pids(ctx context.Context) ([]containerd.ProcessInfo, error) {
if t.pidsFunc != nil {
return t.pidsFunc(ctx)
}
return nil, nil
}
func (t *fakeTask) Checkpoint(ctx context.Context, opts ...containerd.CheckpointTaskOpts) (containerd.Image, error) {
if t.checkpointFunc != nil {
return t.checkpointFunc(ctx, opts...)
}
return nil, nil
}
func (t *fakeTask) Update(ctx context.Context, opts ...containerd.UpdateTaskOpts) error {
if t.updateFunc != nil {
return t.updateFunc(ctx, opts...)
}
return nil
}
func (t *fakeTask) LoadProcess(ctx context.Context, name string, attach cio.Attach) (containerd.Process, error) {
if t.loadProcessFunc != nil {
return t.loadProcessFunc(ctx, name, attach)
}
return nil, nil
}
func (t *fakeTask) Metrics(ctx context.Context) (*containerdtypes.Metric, error) {
if t.metricsFunc != nil {
return t.metricsFunc(ctx)
}
return nil, nil
}
func (o *testOutStream) FD() uintptr {
return 0
}
func (o *testOutStream) IsTerminal() bool {
return false
}
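
All of these fakes follow one pattern: the interface is satisfied by a struct of optional function fields, each method delegating to its field when set and returning a benign zero value otherwise, so a test stubs only the behavior it cares about. The pattern in miniature, with a hypothetical two-method interface standing in for containerdClient and friends:

package main

import "context"

// Store is a stand-in interface for illustration only.
type Store interface {
	Get(ctx context.Context, key string) (string, error)
	Close() error
}

// fakeStore satisfies Store with optional function fields.
type fakeStore struct {
	getFunc   func(ctx context.Context, key string) (string, error)
	closeFunc func() error
}

func (f *fakeStore) Get(ctx context.Context, key string) (string, error) {
	if f.getFunc != nil {
		return f.getFunc(ctx, key)
	}
	return "", nil // zero-value default, like the fakes above
}

func (f *fakeStore) Close() error {
	if f.closeFunc != nil {
		return f.closeFunc()
	}
	return nil
}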

@@ -0,0 +1,77 @@
package containerizedengine
import (
"context"
"io"
"github.com/containerd/containerd"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/remotes/docker"
"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/jsonmessage"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
// NewClient returns a new containerizedengine client
// This client can be used to manage the lifecycle of
// dockerd running as a container on containerd.
func NewClient(sockPath string) (Client, error) {
if sockPath == "" {
sockPath = containerdSockPath
}
cclient, err := containerd.New(sockPath)
if err != nil {
return nil, err
}
return baseClient{
cclient: cclient,
}, nil
}
// Close will close the underlying clients
func (c baseClient) Close() error {
return c.cclient.Close()
}
func (c baseClient) pullWithAuth(ctx context.Context, imageName string, out OutStream,
authConfig *types.AuthConfig) (containerd.Image, error) {
resolver := docker.NewResolver(docker.ResolverOptions{
Credentials: func(string) (string, string, error) {
return authConfig.Username, authConfig.Password, nil
},
})
ongoing := newJobs(imageName)
pctx, stopProgress := context.WithCancel(ctx)
progress := make(chan struct{})
bufin, bufout := io.Pipe()
go func() {
showProgress(pctx, ongoing, c.cclient.ContentStore(), bufout)
}()
go func() {
jsonmessage.DisplayJSONMessagesToStream(bufin, out, nil)
close(progress)
}()
h := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
if desc.MediaType != images.MediaTypeDockerSchema1Manifest {
ongoing.add(desc)
}
return nil, nil
})
image, err := c.cclient.Pull(ctx, imageName,
containerd.WithResolver(resolver),
containerd.WithImageHandler(h),
containerd.WithPullUnpack)
stopProgress()
if err != nil {
return nil, err
}
<-progress
return image, nil
}
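
For orientation, here is roughly how a caller drives this client end to end. This is a sketch only: it ignores Go's internal-package visibility, stdoutStream is a hypothetical adapter (OutStream wants FD() where *os.File spells it Fd()), and the registry prefix and version tag are illustrative values:

package main

import (
	"context"
	"os"

	"github.com/docker/cli/internal/containerizedengine"
	"github.com/docker/docker/api/types"
)

// stdoutStream adapts os.Stdout to the package's OutStream interface.
type stdoutStream struct{ *os.File }

func (s stdoutStream) FD() uintptr      { return s.File.Fd() }
func (s stdoutStream) IsTerminal() bool { return false }

func main() {
	ctx := context.Background()
	client, err := containerizedengine.NewClient("") // "" selects the default containerd socket
	if err != nil {
		panic(err)
	}
	defer client.Close()

	opts := containerizedengine.EngineInitOptions{
		RegistryPrefix: "docker.io/docker", // illustrative
		EngineImage:    containerizedengine.CommunityEngineImage,
		EngineVersion:  "18.09.0", // illustrative
		ConfigFile:     "/etc/docker/daemon.json",
	}
	healthfn := func(context.Context) error { return nil } // a real caller pings the daemon
	if err := client.InitEngine(ctx, opts, stdoutStream{os.Stdout}, &types.AuthConfig{}, healthfn); err != nil {
		panic(err)
	}
}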

@@ -0,0 +1,43 @@
package containerizedengine
import (
"context"
"fmt"
"testing"
"github.com/containerd/containerd"
"github.com/docker/docker/api/types"
"gotest.tools/assert"
)
func TestPullWithAuthPullFail(t *testing.T) {
ctx := context.Background()
client := baseClient{
cclient: &fakeContainerdClient{
pullFunc: func(ctx context.Context, ref string, opts ...containerd.RemoteOpt) (containerd.Image, error) {
return nil, fmt.Errorf("pull failure")
},
},
}
imageName := "testnamegoeshere"
_, err := client.pullWithAuth(ctx, imageName, &testOutStream{}, &types.AuthConfig{})
assert.ErrorContains(t, err, "pull failure")
}
func TestPullWithAuthPullPass(t *testing.T) {
ctx := context.Background()
client := baseClient{
cclient: &fakeContainerdClient{
pullFunc: func(ctx context.Context, ref string, opts ...containerd.RemoteOpt) (containerd.Image, error) {
return nil, nil
},
},
}
imageName := "testnamegoeshere"
_, err := client.pullWithAuth(ctx, imageName, &testOutStream{}, &types.AuthConfig{})
assert.NilError(t, err)
}

@@ -0,0 +1,261 @@
package containerizedengine
import (
"context"
"fmt"
"io"
"strings"
"syscall"
"time"
"github.com/containerd/containerd"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/namespaces"
"github.com/containerd/containerd/runtime/restart"
"github.com/docker/cli/internal/pkg/containerized"
"github.com/docker/docker/api/types"
"github.com/pkg/errors"
)
// InitEngine is the main entrypoint for `docker engine init`
func (c baseClient) InitEngine(ctx context.Context, opts EngineInitOptions, out OutStream,
authConfig *types.AuthConfig, healthfn func(context.Context) error) error {
ctx = namespaces.WithNamespace(ctx, engineNamespace)
// Verify engine isn't already running
_, err := c.GetEngine(ctx)
if err == nil {
return ErrEngineAlreadyPresent
} else if err != ErrEngineNotPresent {
return err
}
imageName := fmt.Sprintf("%s/%s:%s", opts.RegistryPrefix, opts.EngineImage, opts.EngineVersion)
// Look for desired image
_, err = c.cclient.GetImage(ctx, imageName)
if err != nil {
if errdefs.IsNotFound(err) {
_, err = c.pullWithAuth(ctx, imageName, out, authConfig)
if err != nil {
return errors.Wrapf(err, "unable to pull image %s", imageName)
}
} else {
return errors.Wrapf(err, "unable to check for image %s", imageName)
}
}
// Spin up the engine
err = c.startEngineOnContainerd(ctx, imageName, opts.ConfigFile)
if err != nil {
return errors.Wrap(err, "failed to create docker daemon")
}
// Wait for the daemon to start, verify it's responsive
fmt.Fprintf(out, "Waiting for engine to start... ")
ctx, cancel := context.WithTimeout(ctx, engineWaitTimeout)
defer cancel()
if err := c.waitForEngine(ctx, out, healthfn); err != nil {
// TODO once we have the logging strategy sorted out
// this should likely gather the last few lines of logs to report
// why the daemon failed to initialize
return errors.Wrap(err, "failed to start docker daemon")
}
fmt.Fprintf(out, "Success! The docker engine is now running.\n")
return nil
}
// GetEngine will return the containerd container running the engine (or error)
func (c baseClient) GetEngine(ctx context.Context) (containerd.Container, error) {
ctx = namespaces.WithNamespace(ctx, engineNamespace)
containers, err := c.cclient.Containers(ctx, "id=="+engineContainerName)
if err != nil {
return nil, err
}
if len(containers) == 0 {
return nil, ErrEngineNotPresent
}
return containers[0], nil
}
// getEngineImage will return the current image used by the engine
func (c baseClient) getEngineImage(engine containerd.Container) (string, error) {
ctx := namespaces.WithNamespace(context.Background(), engineNamespace)
image, err := engine.Image(ctx)
if err != nil {
return "", err
}
return image.Name(), nil
}
// getEngineConfigFilePath will extract the config file location from the engine flags
func (c baseClient) getEngineConfigFilePath(ctx context.Context, engine containerd.Container) (string, error) {
spec, err := engine.Spec(ctx)
configFile := ""
if err != nil {
return configFile, err
}
for i := 0; i < len(spec.Process.Args); i++ {
arg := spec.Process.Args[i]
if strings.HasPrefix(arg, "--config-file") {
if strings.Contains(arg, "=") {
split := strings.SplitN(arg, "=", 2)
configFile = split[1]
} else {
if i+1 >= len(spec.Process.Args) {
return configFile, ErrMalformedConfigFileParam
}
configFile = spec.Process.Args[i+1]
}
}
}
if configFile == "" {
// TODO - any more diagnostics to offer?
return configFile, ErrEngineConfigLookupFailure
}
return configFile, nil
}
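
// The loop above accepts both "--config-file <path>" and "--config-file=<path>",
// with the last occurrence winning. The same scan, distilled into a standalone
// helper for clarity (a sketch, not part of this commit; behavior matches the
// loop for well-formed input):
func configFileFromArgs(args []string) (string, error) {
	configFile := ""
	for i := 0; i < len(args); i++ {
		if !strings.HasPrefix(args[i], "--config-file") {
			continue
		}
		if strings.Contains(args[i], "=") {
			// --config-file=/etc/docker/daemon.json
			configFile = strings.SplitN(args[i], "=", 2)[1]
		} else {
			// --config-file /etc/docker/daemon.json
			if i+1 >= len(args) {
				return configFile, ErrMalformedConfigFileParam
			}
			configFile = args[i+1]
		}
	}
	return configFile, nil
}
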
var (
engineWaitInterval = 500 * time.Millisecond
engineWaitTimeout = 60 * time.Second
)
// waitForEngine will wait for the engine to start
func (c baseClient) waitForEngine(ctx context.Context, out io.Writer, healthfn func(context.Context) error) error {
ticker := time.NewTicker(engineWaitInterval)
defer ticker.Stop()
defer func() {
fmt.Fprintf(out, "\n")
}()
err := c.waitForEngineContainer(ctx, ticker)
if err != nil {
return err
}
fmt.Fprintf(out, "waiting for engine to be responsive... ")
for {
select {
case <-ticker.C:
err = healthfn(ctx)
if err == nil {
fmt.Fprintf(out, "engine is online.")
return nil
}
case <-ctx.Done():
if err == nil {
err = ctx.Err() // no health check ran yet; report the context error
}
return errors.Wrap(err, "timeout waiting for engine to be responsive")
}
}
}
func (c baseClient) waitForEngineContainer(ctx context.Context, ticker *time.Ticker) error {
var ret error
for {
select {
case <-ticker.C:
engine, err := c.GetEngine(ctx)
if engine != nil {
return nil
}
ret = err
case <-ctx.Done():
if ret == nil {
ret = ctx.Err() // no lookup ran yet; report the context error
}
return errors.Wrap(ret, "timeout waiting for engine container to start")
}
}
}
// RemoveEngine gracefully unwinds the current engine
func (c baseClient) RemoveEngine(ctx context.Context, engine containerd.Container) error {
ctx = namespaces.WithNamespace(ctx, engineNamespace)
// Make sure the container isn't being restarted while we unwind it
stopLabel := map[string]string{}
stopLabel[restart.StatusLabel] = string(containerd.Stopped)
engine.SetLabels(ctx, stopLabel)
// Wind down the existing engine
task, err := engine.Task(ctx, nil)
if err != nil {
if !errdefs.IsNotFound(err) {
return err
}
} else {
status, err := task.Status(ctx)
if err != nil {
return err
}
if status.Status == containerd.Running {
// It's running, so kill it
err := task.Kill(ctx, syscall.SIGTERM, []containerd.KillOpts{}...)
if err != nil {
return errors.Wrap(err, "task kill error")
}
ch, err := task.Wait(ctx)
if err != nil {
return err
}
timeout := time.NewTimer(engineWaitTimeout)
select {
case <-timeout.C:
// TODO - consider a force flag in the future to allow a more aggressive
// kill of the engine via
// task.Kill(ctx, syscall.SIGKILL, containerd.WithKillAll)
return ErrEngineShutdownTimeout
case <-ch:
}
}
if _, err := task.Delete(ctx); err != nil {
return err
}
}
deleteOpts := []containerd.DeleteOpts{containerd.WithSnapshotCleanup}
err = engine.Delete(ctx, deleteOpts...)
if err != nil && errdefs.IsNotFound(err) {
return nil
}
return errors.Wrap(err, "failed to remove existing engine container")
}
// startEngineOnContainerd creates a new docker engine running on containerd
func (c baseClient) startEngineOnContainerd(ctx context.Context, imageName, configFile string) error {
ctx = namespaces.WithNamespace(ctx, engineNamespace)
image, err := c.cclient.GetImage(ctx, imageName)
if err != nil {
if errdefs.IsNotFound(err) {
return fmt.Errorf("engine image missing: %s", imageName)
}
return errors.Wrap(err, "failed to check for engine image")
}
// Make sure we have a valid config file
err = c.verifyDockerConfig(configFile)
if err != nil {
return err
}
engineSpec.Process.Args = append(engineSpec.Process.Args,
"--config-file", configFile,
)
cOpts := []containerd.NewContainerOpts{
containerized.WithNewSnapshot(image),
restart.WithStatus(containerd.Running),
restart.WithLogPath("/var/log/engine.log"), // TODO - better!
genSpec(),
containerd.WithRuntime("io.containerd.runtime.process.v1", nil),
}
_, err = c.cclient.NewContainer(
ctx,
engineContainerName,
cOpts...,
)
if err != nil {
return errors.Wrap(err, "failed to create engine container")
}
return nil
}
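
The healthfn passed to InitEngine and waitForEngine is supplied by the caller. One plausible stand-alone probe is GET /_ping over the daemon's unix socket; a stdlib-only sketch, with the socket path assumed rather than taken from this commit:

package main

import (
	"context"
	"fmt"
	"net"
	"net/http"
)

// pingDockerd issues GET /_ping against the daemon's unix socket.
func pingDockerd(ctx context.Context) error {
	client := &http.Client{
		Transport: &http.Transport{
			// Dial the unix socket regardless of the URL's host portion.
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				var d net.Dialer
				return d.DialContext(ctx, "unix", "/var/run/docker.sock")
			},
		},
	}
	req, err := http.NewRequest("GET", "http://localhost/_ping", nil)
	if err != nil {
		return err
	}
	resp, err := client.Do(req.WithContext(ctx))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status from /_ping: %d", resp.StatusCode)
	}
	return nil
}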

@@ -0,0 +1,537 @@
package containerizedengine
import (
"context"
"fmt"
"syscall"
"testing"
"time"
"github.com/containerd/containerd"
"github.com/containerd/containerd/cio"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/oci"
"github.com/docker/docker/api/types"
"github.com/opencontainers/runtime-spec/specs-go"
"gotest.tools/assert"
)
func healthfnHappy(ctx context.Context) error {
return nil
}
func healthfnError(ctx context.Context) error {
return fmt.Errorf("ping failure")
}
func TestInitGetEngineFail(t *testing.T) {
ctx := context.Background()
opts := EngineInitOptions{
EngineVersion: "engineversiongoeshere",
RegistryPrefix: "registryprefixgoeshere",
ConfigFile: "/tmp/configfilegoeshere",
EngineImage: CommunityEngineImage,
}
container := &fakeContainer{}
client := baseClient{
cclient: &fakeContainerdClient{
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
return []containerd.Container{container}, nil
},
},
}
err := client.InitEngine(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
assert.Assert(t, err == ErrEngineAlreadyPresent)
}
func TestInitCheckImageFail(t *testing.T) {
ctx := context.Background()
opts := EngineInitOptions{
EngineVersion: "engineversiongoeshere",
RegistryPrefix: "registryprefixgoeshere",
ConfigFile: "/tmp/configfilegoeshere",
EngineImage: CommunityEngineImage,
}
client := baseClient{
cclient: &fakeContainerdClient{
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
return []containerd.Container{}, nil
},
getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
return nil, fmt.Errorf("something went wrong")
},
},
}
err := client.InitEngine(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
assert.ErrorContains(t, err, "unable to check for image")
assert.ErrorContains(t, err, "something went wrong")
}
func TestInitPullFail(t *testing.T) {
ctx := context.Background()
opts := EngineInitOptions{
EngineVersion: "engineversiongoeshere",
RegistryPrefix: "registryprefixgoeshere",
ConfigFile: "/tmp/configfilegoeshere",
EngineImage: CommunityEngineImage,
}
client := baseClient{
cclient: &fakeContainerdClient{
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
return []containerd.Container{}, nil
},
getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
return nil, errdefs.ErrNotFound
},
pullFunc: func(ctx context.Context, ref string, opts ...containerd.RemoteOpt) (containerd.Image, error) {
return nil, fmt.Errorf("pull failure")
},
},
}
err := client.InitEngine(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
assert.ErrorContains(t, err, "unable to pull image")
assert.ErrorContains(t, err, "pull failure")
}
func TestInitStartFail(t *testing.T) {
ctx := context.Background()
opts := EngineInitOptions{
EngineVersion: "engineversiongoeshere",
RegistryPrefix: "registryprefixgoeshere",
ConfigFile: "/tmp/configfilegoeshere",
EngineImage: CommunityEngineImage,
}
client := baseClient{
cclient: &fakeContainerdClient{
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
return []containerd.Container{}, nil
},
getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
return nil, errdefs.ErrNotFound
},
pullFunc: func(ctx context.Context, ref string, opts ...containerd.RemoteOpt) (containerd.Image, error) {
return nil, nil
},
},
}
err := client.InitEngine(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
assert.ErrorContains(t, err, "failed to create docker daemon")
}
func TestGetEngineFail(t *testing.T) {
ctx := context.Background()
client := baseClient{
cclient: &fakeContainerdClient{
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
return nil, fmt.Errorf("container failure")
},
},
}
_, err := client.GetEngine(ctx)
assert.ErrorContains(t, err, "failure")
}
func TestGetEngineNotPresent(t *testing.T) {
ctx := context.Background()
client := baseClient{
cclient: &fakeContainerdClient{
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
return []containerd.Container{}, nil
},
},
}
_, err := client.GetEngine(ctx)
assert.Assert(t, err == ErrEngineNotPresent)
}
func TestGetEngineFound(t *testing.T) {
ctx := context.Background()
container := &fakeContainer{}
client := baseClient{
cclient: &fakeContainerdClient{
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
return []containerd.Container{container}, nil
},
},
}
c, err := client.GetEngine(ctx)
assert.NilError(t, err)
assert.Equal(t, c, container)
}
func TestGetEngineImageFail(t *testing.T) {
client := baseClient{}
container := &fakeContainer{
imageFunc: func(context.Context) (containerd.Image, error) {
return nil, fmt.Errorf("failure")
},
}
_, err := client.getEngineImage(container)
assert.ErrorContains(t, err, "failure")
}
func TestGetEngineImagePass(t *testing.T) {
client := baseClient{}
image := &fakeImage{
nameFunc: func() string {
return "imagenamehere"
},
}
container := &fakeContainer{
imageFunc: func(context.Context) (containerd.Image, error) {
return image, nil
},
}
name, err := client.getEngineImage(container)
assert.NilError(t, err)
assert.Equal(t, name, "imagenamehere")
}
func TestWaitForEngineNeverShowsUp(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
defer cancel()
engineWaitInterval = 1 * time.Millisecond
client := baseClient{
cclient: &fakeContainerdClient{
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
return []containerd.Container{}, nil
},
},
}
err := client.waitForEngine(ctx, &testOutStream{}, healthfnError)
assert.ErrorContains(t, err, "timeout waiting")
}
func TestWaitForEnginePingFail(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
defer cancel()
engineWaitInterval = 1 * time.Millisecond
container := &fakeContainer{}
client := baseClient{
cclient: &fakeContainerdClient{
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
return []containerd.Container{container}, nil
},
},
}
err := client.waitForEngine(ctx, &testOutStream{}, healthfnError)
assert.ErrorContains(t, err, "ping fail")
}
func TestWaitForEngineHealthy(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
defer cancel()
engineWaitInterval = 1 * time.Millisecond
container := &fakeContainer{}
client := baseClient{
cclient: &fakeContainerdClient{
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
return []containerd.Container{container}, nil
},
},
}
err := client.waitForEngine(ctx, &testOutStream{}, healthfnHappy)
assert.NilError(t, err)
}
func TestRemoveEngineBadTaskBadDelete(t *testing.T) {
ctx := context.Background()
client := baseClient{}
container := &fakeContainer{
deleteFunc: func(context.Context, ...containerd.DeleteOpts) error {
return fmt.Errorf("delete failure")
},
taskFunc: func(context.Context, cio.Attach) (containerd.Task, error) {
return nil, errdefs.ErrNotFound
},
}
err := client.RemoveEngine(ctx, container)
assert.ErrorContains(t, err, "failed to remove existing engine")
assert.ErrorContains(t, err, "delete failure")
}
func TestRemoveEngineTaskNoStatus(t *testing.T) {
ctx := context.Background()
client := baseClient{}
task := &fakeTask{
statusFunc: func(context.Context) (containerd.Status, error) {
return containerd.Status{}, fmt.Errorf("task status failure")
},
}
container := &fakeContainer{
taskFunc: func(context.Context, cio.Attach) (containerd.Task, error) {
return task, nil
},
}
err := client.RemoveEngine(ctx, container)
assert.ErrorContains(t, err, "task status failure")
}
func TestRemoveEngineTaskNotRunningDeleteFail(t *testing.T) {
ctx := context.Background()
client := baseClient{}
task := &fakeTask{
statusFunc: func(context.Context) (containerd.Status, error) {
return containerd.Status{Status: containerd.Unknown}, nil
},
deleteFunc: func(context.Context, ...containerd.ProcessDeleteOpts) (*containerd.ExitStatus, error) {
return nil, fmt.Errorf("task delete failure")
},
}
container := &fakeContainer{
taskFunc: func(context.Context, cio.Attach) (containerd.Task, error) {
return task, nil
},
}
err := client.RemoveEngine(ctx, container)
assert.ErrorContains(t, err, "task delete failure")
}
func TestRemoveEngineTaskRunningKillFail(t *testing.T) {
ctx := context.Background()
client := baseClient{}
task := &fakeTask{
statusFunc: func(context.Context) (containerd.Status, error) {
return containerd.Status{Status: containerd.Running}, nil
},
killFunc: func(context.Context, syscall.Signal, ...containerd.KillOpts) error {
return fmt.Errorf("task kill failure")
},
}
container := &fakeContainer{
taskFunc: func(context.Context, cio.Attach) (containerd.Task, error) {
return task, nil
},
}
err := client.RemoveEngine(ctx, container)
assert.ErrorContains(t, err, "task kill failure")
}
func TestRemoveEngineTaskRunningWaitFail(t *testing.T) {
ctx := context.Background()
client := baseClient{}
task := &fakeTask{
statusFunc: func(context.Context) (containerd.Status, error) {
return containerd.Status{Status: containerd.Running}, nil
},
waitFunc: func(context.Context) (<-chan containerd.ExitStatus, error) {
return nil, fmt.Errorf("task wait failure")
},
}
container := &fakeContainer{
taskFunc: func(context.Context, cio.Attach) (containerd.Task, error) {
return task, nil
},
}
err := client.RemoveEngine(ctx, container)
assert.ErrorContains(t, err, "task wait failure")
}
func TestRemoveEngineTaskRunningHappyPath(t *testing.T) {
ctx := context.Background()
client := baseClient{}
ch := make(chan containerd.ExitStatus, 1)
task := &fakeTask{
statusFunc: func(context.Context) (containerd.Status, error) {
return containerd.Status{Status: containerd.Running}, nil
},
waitFunc: func(context.Context) (<-chan containerd.ExitStatus, error) {
ch <- containerd.ExitStatus{}
return ch, nil
},
}
container := &fakeContainer{
taskFunc: func(context.Context, cio.Attach) (containerd.Task, error) {
return task, nil
},
}
err := client.RemoveEngine(ctx, container)
assert.NilError(t, err)
}
func TestRemoveEngineTaskKillTimeout(t *testing.T) {
ctx := context.Background()
ch := make(chan containerd.ExitStatus, 1)
client := baseClient{}
engineWaitTimeout = 10 * time.Millisecond
task := &fakeTask{
statusFunc: func(context.Context) (containerd.Status, error) {
return containerd.Status{Status: containerd.Running}, nil
},
waitFunc: func(context.Context) (<-chan containerd.ExitStatus, error) {
// ch <- containerd.ExitStatus{} // deliberately never signaled, to force the timeout
return ch, nil
},
}
container := &fakeContainer{
taskFunc: func(context.Context, cio.Attach) (containerd.Task, error) {
return task, nil
},
}
err := client.RemoveEngine(ctx, container)
assert.Assert(t, err == ErrEngineShutdownTimeout)
}
func TestStartEngineOnContainerdImageErr(t *testing.T) {
ctx := context.Background()
imageName := "testnamegoeshere"
configFile := "/tmp/configfilegoeshere"
client := baseClient{
cclient: &fakeContainerdClient{
getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
return nil, fmt.Errorf("some image lookup failure")
},
},
}
err := client.startEngineOnContainerd(ctx, imageName, configFile)
assert.ErrorContains(t, err, "some image lookup failure")
}
func TestStartEngineOnContainerdImageNotFound(t *testing.T) {
ctx := context.Background()
imageName := "testnamegoeshere"
configFile := "/tmp/configfilegoeshere"
client := baseClient{
cclient: &fakeContainerdClient{
getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
return nil, errdefs.ErrNotFound
},
},
}
err := client.startEngineOnContainerd(ctx, imageName, configFile)
assert.ErrorContains(t, err, "engine image missing")
}
func TestStartEngineOnContainerdHappy(t *testing.T) {
ctx := context.Background()
imageName := "testnamegoeshere"
configFile := "/tmp/configfilegoeshere"
ch := make(chan containerd.ExitStatus, 1)
streams := cio.Streams{}
task := &fakeTask{
statusFunc: func(context.Context) (containerd.Status, error) {
return containerd.Status{Status: containerd.Running}, nil
},
waitFunc: func(context.Context) (<-chan containerd.ExitStatus, error) {
ch <- containerd.ExitStatus{}
return ch, nil
},
}
container := &fakeContainer{
newTaskFunc: func(ctx context.Context, creator cio.Creator, opts ...containerd.NewTaskOpts) (containerd.Task, error) {
if streams.Stdout != nil {
streams.Stdout.Write([]byte("{}"))
}
return task, nil
},
}
client := baseClient{
cclient: &fakeContainerdClient{
getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
return nil, nil
},
newContainerFunc: func(ctx context.Context, id string, opts ...containerd.NewContainerOpts) (containerd.Container, error) {
return container, nil
},
},
}
err := client.startEngineOnContainerd(ctx, imageName, configFile)
assert.NilError(t, err)
}
func TestGetEngineConfigFilePathBadSpec(t *testing.T) {
ctx := context.Background()
client := baseClient{}
container := &fakeContainer{
specFunc: func(context.Context) (*oci.Spec, error) {
return nil, fmt.Errorf("spec error")
},
}
_, err := client.getEngineConfigFilePath(ctx, container)
assert.ErrorContains(t, err, "spec error")
}
func TestGetEngineConfigFilePathDistinct(t *testing.T) {
ctx := context.Background()
client := baseClient{}
container := &fakeContainer{
specFunc: func(context.Context) (*oci.Spec, error) {
return &oci.Spec{
Process: &specs.Process{
Args: []string{
"--another-flag",
"foo",
"--config-file",
"configpath",
},
},
}, nil
},
}
configFile, err := client.getEngineConfigFilePath(ctx, container)
assert.NilError(t, err)
assert.Equal(t, configFile, "configpath")
}
func TestGetEngineConfigFilePathEquals(t *testing.T) {
ctx := context.Background()
client := baseClient{}
container := &fakeContainer{
specFunc: func(context.Context) (*oci.Spec, error) {
return &oci.Spec{
Process: &specs.Process{
Args: []string{
"--another-flag=foo",
"--config-file=configpath",
},
},
}, nil
},
}
configFile, err := client.getEngineConfigFilePath(ctx, container)
assert.NilError(t, err)
assert.Equal(t, configFile, "configpath")
}
func TestGetEngineConfigFilePathMalformed1(t *testing.T) {
ctx := context.Background()
client := baseClient{}
container := &fakeContainer{
specFunc: func(context.Context) (*oci.Spec, error) {
return &oci.Spec{
Process: &specs.Process{
Args: []string{
"--another-flag",
"--config-file",
},
},
}, nil
},
}
_, err := client.getEngineConfigFilePath(ctx, container)
assert.Assert(t, err == ErrMalformedConfigFileParam)
}

@@ -0,0 +1,16 @@
// +build !windows
package containerizedengine
import (
"github.com/containerd/containerd"
"github.com/containerd/containerd/oci"
"github.com/docker/cli/internal/pkg/containerized"
)
func genSpec() containerd.NewContainerOpts {
return containerd.WithSpec(&engineSpec,
containerized.WithAllCapabilities,
oci.WithParentCgroupDevices,
)
}

@@ -0,0 +1,14 @@
// +build windows
package containerizedengine
import (
"github.com/containerd/containerd"
"github.com/docker/cli/internal/pkg/containerized"
)
func genSpec() containerd.NewContainerOpts {
return containerd.WithSpec(&engineSpec,
containerized.WithAllCapabilities,
)
}

@@ -0,0 +1,35 @@
package containerizedengine
import (
"os"
"path"
)
func (c baseClient) verifyDockerConfig(configFile string) error {
// TODO - in the future consider leveraging containerd and a host runtime
// to create the file. For now, just create it locally since we have to be
// local to talk to containerd
configDir := path.Dir(configFile)
err := os.MkdirAll(configDir, 0755) // directories need the execute bit to be traversable
if err != nil {
return err
}
fd, err := os.OpenFile(configFile, os.O_RDWR|os.O_CREATE, 0644) // plain file mode; the config is not executable
if err != nil {
return err
}
defer fd.Close()
info, err := fd.Stat()
if err != nil {
return err
}
if info.Size() == 0 {
_, err := fd.Write([]byte("{}"))
return err
}
return nil
}

@@ -0,0 +1,215 @@
package containerizedengine
import (
"context"
"encoding/json"
"fmt"
"io"
"strings"
"sync"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/remotes"
"github.com/docker/docker/pkg/jsonmessage"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
)
func showProgress(ctx context.Context, ongoing *jobs, cs content.Store, out io.WriteCloser) {
var (
ticker = time.NewTicker(100 * time.Millisecond)
start = time.Now()
enc = json.NewEncoder(out)
statuses = map[string]statusInfo{}
done bool
)
defer ticker.Stop()
outer:
for {
select {
case <-ticker.C:
resolved := "resolved"
if !ongoing.isResolved() {
resolved = "resolving"
}
statuses[ongoing.name] = statusInfo{
Ref: ongoing.name,
Status: resolved,
}
keys := []string{ongoing.name}
activeSeen := map[string]struct{}{}
if !done {
active, err := cs.ListStatuses(ctx, "")
if err != nil {
logrus.Debugf("active check failed: %s", err)
continue
}
// update status of active entries!
for _, active := range active {
statuses[active.Ref] = statusInfo{
Ref: active.Ref,
Status: "downloading",
Offset: active.Offset,
Total: active.Total,
StartedAt: active.StartedAt,
UpdatedAt: active.UpdatedAt,
}
activeSeen[active.Ref] = struct{}{}
}
}
keys, err := updateNonActive(ctx, ongoing, cs, statuses, keys, activeSeen, &done, start)
if err != nil {
continue outer
}
var ordered []statusInfo
for _, key := range keys {
ordered = append(ordered, statuses[key])
}
for _, si := range ordered {
jm := si.JSONMessage()
err := enc.Encode(jm)
if err != nil {
logrus.Debugf("failed to encode progress message: %s", err)
}
}
if done {
out.Close()
return
}
case <-ctx.Done():
done = true // allow ui to update once more
}
}
}
// updateNonActive returns the possibly-extended key list; the appends made
// here would otherwise be lost to the caller, since append can reallocate.
func updateNonActive(ctx context.Context, ongoing *jobs, cs content.Store, statuses map[string]statusInfo, keys []string, activeSeen map[string]struct{}, done *bool, start time.Time) ([]string, error) {
for _, j := range ongoing.jobs() {
key := remotes.MakeRefKey(ctx, j)
keys = append(keys, key)
if _, ok := activeSeen[key]; ok {
continue
}
status, ok := statuses[key]
if !*done && (!ok || status.Status == "downloading") {
info, err := cs.Info(ctx, j.Digest)
if err != nil {
if !errdefs.IsNotFound(err) {
logrus.Debugf("failed to get content info: %s", err)
return keys, err
}
statuses[key] = statusInfo{
Ref: key,
Status: "waiting",
}
} else if info.CreatedAt.After(start) {
statuses[key] = statusInfo{
Ref: key,
Status: "done",
Offset: info.Size,
Total: info.Size,
UpdatedAt: info.CreatedAt,
}
} else {
statuses[key] = statusInfo{
Ref: key,
Status: "exists",
}
}
} else if *done {
if ok {
if status.Status != "done" && status.Status != "exists" {
status.Status = "done"
statuses[key] = status
}
} else {
statuses[key] = statusInfo{
Ref: key,
Status: "done",
}
}
}
}
return keys, nil
}
type jobs struct {
name string
added map[digest.Digest]struct{}
descs []ocispec.Descriptor
mu sync.Mutex
resolved bool
}
func newJobs(name string) *jobs {
return &jobs{
name: name,
added: map[digest.Digest]struct{}{},
}
}
func (j *jobs) add(desc ocispec.Descriptor) {
j.mu.Lock()
defer j.mu.Unlock()
j.resolved = true
if _, ok := j.added[desc.Digest]; ok {
return
}
j.descs = append(j.descs, desc)
j.added[desc.Digest] = struct{}{}
}
func (j *jobs) jobs() []ocispec.Descriptor {
j.mu.Lock()
defer j.mu.Unlock()
var descs []ocispec.Descriptor
return append(descs, j.descs...)
}
func (j *jobs) isResolved() bool {
j.mu.Lock()
defer j.mu.Unlock()
return j.resolved
}
// statusInfo holds the status info for an upload or download
type statusInfo struct {
Ref string
Status string
Offset int64
Total int64
StartedAt time.Time
UpdatedAt time.Time
}
func (s statusInfo) JSONMessage() jsonmessage.JSONMessage {
// Shorten the ID to use up less width on the display
id := s.Ref
if strings.Contains(id, ":") {
split := strings.SplitN(id, ":", 2)
id = split[1]
}
id = fmt.Sprintf("%.12s", id)
return jsonmessage.JSONMessage{
ID: id,
Status: s.Status,
Progress: &jsonmessage.JSONProgress{
Current: s.Offset,
Total: s.Total,
},
}
}
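
The plumbing above hinges on an io.Pipe: showProgress JSON-encodes status updates into the write end while jsonmessage renders the read end, and closing the writer is what lets the renderer drain and exit. The same shape in a stdlib-only sketch:

package main

import (
	"encoding/json"
	"fmt"
	"io"
)

type msg struct {
	ID     string `json:"id"`
	Status string `json:"status"`
}

func main() {
	r, w := io.Pipe()
	done := make(chan struct{})

	// Consumer: drain decoded messages until the producer closes the pipe,
	// mirroring the jsonmessage.DisplayJSONMessagesToStream goroutine.
	go func() {
		defer close(done)
		dec := json.NewDecoder(r)
		for {
			var m msg
			if err := dec.Decode(&m); err != nil {
				return // io.EOF once w.Close() is called
			}
			fmt.Printf("%s: %s\n", m.ID, m.Status)
		}
	}()

	// Producer: emit a couple of status updates, then close the write end,
	// which lets the consumer exit (as showProgress does via out.Close()).
	enc := json.NewEncoder(w)
	enc.Encode(msg{ID: "deadbeef1234", Status: "downloading"})
	enc.Encode(msg{ID: "deadbeef1234", Status: "done"})
	w.Close()
	<-done
}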

@@ -0,0 +1,12 @@
// +build !windows
package containerizedengine
import (
"golang.org/x/sys/unix"
)
var (
// SIGKILL maps to unix.SIGKILL
SIGKILL = unix.SIGKILL
)

@@ -0,0 +1,12 @@
// +build windows
package containerizedengine
import (
"syscall"
)
var (
// SIGKILL maps to signal 0 on Windows; containerd's kill ignores signal values there
SIGKILL = syscall.Signal(0)
)
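
The pair of shims lets call sites pass one identifier on every platform; since containerd's Windows kill path ignores the signal value, 0 is a safe stand-in. A sketch of a platform-neutral call site:

package containerizedengine

import (
	"context"

	"github.com/containerd/containerd"
)

// forceKill issues a hard kill using whichever value the platform shim
// resolved SIGKILL to (unix.SIGKILL on Linux, signal 0 on Windows).
func forceKill(ctx context.Context, task containerd.Task) error {
	return task.Kill(ctx, SIGKILL, containerd.WithKillAll)
}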

@@ -0,0 +1,159 @@
package containerizedengine
import (
"context"
"errors"
"io"
"github.com/containerd/containerd"
"github.com/containerd/containerd/containers"
"github.com/containerd/containerd/content"
registryclient "github.com/docker/cli/cli/registry/client"
"github.com/docker/docker/api/types"
ver "github.com/hashicorp/go-version"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
const (
// CommunityEngineImage is the repo name for the community engine
CommunityEngineImage = "engine-community"
// EnterpriseEngineImage is the repo name for the enterprise engine
EnterpriseEngineImage = "engine-enterprise"
containerdSockPath = "/run/containerd/containerd.sock"
engineContainerName = "dockerd"
engineNamespace = "docker"
// Used to signal the containerd-proxy whether it should manage the engine
proxyLabel = "com.docker/containerd-proxy.scope"
)
var (
// ErrEngineAlreadyPresent returned when engine already present and should not be
ErrEngineAlreadyPresent = errors.New("engine already present, use the update command to change versions")
// ErrEngineNotPresent returned when the engine is not present and should be
ErrEngineNotPresent = errors.New("engine not present")
// ErrMalformedConfigFileParam returned if the engine config file parameter is malformed
ErrMalformedConfigFileParam = errors.New("malformed --config-file param on engine")
// ErrEngineConfigLookupFailure returned if unable to lookup existing engine configuration
ErrEngineConfigLookupFailure = errors.New("unable to lookup existing engine configuration")
// ErrEngineShutdownTimeout returned if the engine failed to shutdown in time
ErrEngineShutdownTimeout = errors.New("timeout waiting for engine to exit")
// ErrEngineImageMissingTag returned if the engine image is missing the version tag
ErrEngineImageMissingTag = errors.New("malformed engine image missing tag")
engineSpec = specs.Spec{
Root: &specs.Root{
Path: "rootfs",
},
Process: &specs.Process{
Cwd: "/",
Args: []string{
// In general, configuration should be driven by the config file, not these flags
// TODO - consider moving more of these to the config file, and make sure the defaults are set if not present.
"/sbin/dockerd",
"-s",
"overlay2",
"--containerd",
"/run/containerd/containerd.sock",
"--default-runtime",
"containerd",
"--add-runtime",
"containerd=runc",
},
User: specs.User{
UID: 0,
GID: 0,
},
Env: []string{
"PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin",
},
NoNewPrivileges: false,
},
}
)
// Client can be used to manage the lifecycle of
// dockerd running as a container on containerd.
type Client interface {
Close() error
ActivateEngine(ctx context.Context,
opts EngineInitOptions,
out OutStream,
authConfig *types.AuthConfig,
healthfn func(context.Context) error) error
InitEngine(ctx context.Context,
opts EngineInitOptions,
out OutStream,
authConfig *types.AuthConfig,
healthfn func(context.Context) error) error
DoUpdate(ctx context.Context,
opts EngineInitOptions,
out OutStream,
authConfig *types.AuthConfig,
healthfn func(context.Context) error) error
GetEngineVersions(ctx context.Context, registryClient registryclient.RegistryClient, currentVersion, imageName string) (AvailableVersions, error)
GetEngine(ctx context.Context) (containerd.Container, error)
RemoveEngine(ctx context.Context, engine containerd.Container) error
GetCurrentEngineVersion(ctx context.Context) (EngineInitOptions, error)
}
type baseClient struct {
cclient containerdClient
}
// EngineInitOptions contains the configuration settings
// used during initialization of a containerized docker engine
type EngineInitOptions struct {
RegistryPrefix string
EngineImage string
EngineVersion string
ConfigFile string
scope string
}
// containerdClient abstracts the containerd client to aid in testability
type containerdClient interface {
Containers(ctx context.Context, filters ...string) ([]containerd.Container, error)
NewContainer(ctx context.Context, id string, opts ...containerd.NewContainerOpts) (containerd.Container, error)
Pull(ctx context.Context, ref string, opts ...containerd.RemoteOpt) (containerd.Image, error)
GetImage(ctx context.Context, ref string) (containerd.Image, error)
Close() error
ContentStore() content.Store
ContainerService() containers.Store
}
// AvailableVersions groups the available versions which were discovered
type AvailableVersions struct {
Downgrades []DockerVersion
Patches []DockerVersion
Upgrades []DockerVersion
}
// DockerVersion wraps a semantic version to retain the original tag,
// since the Docker date-based versions don't strictly follow semantic
// versioning (leading zeros, etc.)
type DockerVersion struct {
ver.Version
Tag string
}
// Update stores available updates for rendering in a table
type Update struct {
Type string
Version string
Notes string
}
// OutStream is an output stream used to write normal program output.
type OutStream interface {
io.Writer
FD() uintptr
IsTerminal() bool
}
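
AvailableVersions buckets discovered tags relative to the running version, and DockerVersion keeps the raw tag because date-based tags such as 18.09.0 lose their leading zeros once parsed. A sketch of plausible bucketing logic, assuming (an editorial guess, not shown in this hunk) that a patch shares major.minor with the current version:

package main

import (
	ver "github.com/hashicorp/go-version"
)

// classify buckets candidate tags against the currently running version.
func classify(current string, tags []string) (downgrades, patches, upgrades []string) {
	cur, err := ver.NewVersion(current)
	if err != nil {
		return nil, nil, nil
	}
	seg := func(v *ver.Version, i int) int {
		if s := v.Segments(); i < len(s) {
			return s[i]
		}
		return 0
	}
	for _, tag := range tags {
		v, err := ver.NewVersion(tag)
		if err != nil {
			continue // skip tags that don't parse as versions
		}
		switch {
		case v.LessThan(cur):
			downgrades = append(downgrades, tag)
		case seg(v, 0) == seg(cur, 0) && seg(v, 1) == seg(cur, 1):
			patches = append(patches, tag) // same major.minor line (the current tag itself lands here too)
		default:
			upgrades = append(upgrades, tag)
		}
	}
	return downgrades, patches, upgrades
}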

@@ -0,0 +1,130 @@
package containerizedengine
import (
"context"
"fmt"
"path"
"strings"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/namespaces"
"github.com/docker/cli/internal/pkg/containerized"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
"github.com/pkg/errors"
)
// GetCurrentEngineVersion determines the current type of engine (image) and version
func (c baseClient) GetCurrentEngineVersion(ctx context.Context) (EngineInitOptions, error) {
ctx = namespaces.WithNamespace(ctx, engineNamespace)
ret := EngineInitOptions{}
currentEngine := CommunityEngineImage
engine, err := c.GetEngine(ctx)
if err != nil {
if err == ErrEngineNotPresent {
return ret, errors.Wrap(err, "failed to find existing engine")
}
return ret, err
}
imageName, err := c.getEngineImage(engine)
if err != nil {
return ret, err
}
distributionRef, err := reference.ParseNormalizedNamed(imageName)
if err != nil {
return ret, errors.Wrapf(err, "failed to parse image name: %s", imageName)
}
if strings.Contains(distributionRef.Name(), EnterpriseEngineImage) {
currentEngine = EnterpriseEngineImage
}
taggedRef, ok := distributionRef.(reference.NamedTagged)
if !ok {
return ret, ErrEngineImageMissingTag
}
ret.EngineImage = currentEngine
ret.EngineVersion = taggedRef.Tag()
ret.RegistryPrefix = reference.Domain(taggedRef) + "/" + path.Dir(reference.Path(taggedRef))
return ret, nil
}
// ActivateEngine will switch the image from the CE to EE image
func (c baseClient) ActivateEngine(ctx context.Context, opts EngineInitOptions, out OutStream,
authConfig *types.AuthConfig, healthfn func(context.Context) error) error {
// set the proxy scope to "ee" for activate flows
opts.scope = "ee"
ctx = namespaces.WithNamespace(ctx, engineNamespace)
// If version is unspecified, use the existing engine version
if opts.EngineVersion == "" {
currentOpts, err := c.GetCurrentEngineVersion(ctx)
if err != nil {
return err
}
opts.EngineVersion = currentOpts.EngineVersion
if currentOpts.EngineImage == EnterpriseEngineImage {
// This is a "no-op" activation so the only change would be the license - don't update the engine itself
return nil
}
}
return c.DoUpdate(ctx, opts, out, authConfig, healthfn)
}
// DoUpdate performs the underlying engine update
func (c baseClient) DoUpdate(ctx context.Context, opts EngineInitOptions, out OutStream,
authConfig *types.AuthConfig, healthfn func(context.Context) error) error {
ctx = namespaces.WithNamespace(ctx, engineNamespace)
if opts.EngineVersion == "" {
// TODO - Future enhancement: This could be improved to be
// smart about figuring out the latest patch rev for the
// current engine version and automatically apply it so users
// could stay in sync by simply having a scheduled
// `docker engine update`
return fmt.Errorf("please pick the version you want to update to")
}
imageName := fmt.Sprintf("%s/%s:%s", opts.RegistryPrefix, opts.EngineImage, opts.EngineVersion)
// Look for desired image
image, err := c.cclient.GetImage(ctx, imageName)
if err != nil {
if errdefs.IsNotFound(err) {
image, err = c.pullWithAuth(ctx, imageName, out, authConfig)
if err != nil {
return errors.Wrapf(err, "unable to pull image %s", imageName)
}
} else {
return errors.Wrapf(err, "unable to check for image %s", imageName)
}
}
// Gather information about the existing engine so we can recreate it
engine, err := c.GetEngine(ctx)
if err != nil {
if err == ErrEngineNotPresent {
return errors.Wrap(err, "unable to find existing engine - please use init")
}
return err
}
// TODO verify the image has changed and don't update if nothing has changed
err = containerized.AtomicImageUpdate(ctx, engine, image, func() error {
ctx, cancel := context.WithTimeout(ctx, engineWaitTimeout)
defer cancel()
return c.waitForEngine(ctx, out, healthfn)
})
if err == nil && opts.scope != "" {
var labels map[string]string
labels, err = engine.Labels(ctx)
if err != nil {
return err
}
labels[proxyLabel] = opts.scope
_, err = engine.SetLabels(ctx, labels)
}
return err
}

@@ -0,0 +1,318 @@
package containerizedengine
import (
"context"
"fmt"
"testing"
"github.com/containerd/containerd"
"github.com/containerd/containerd/cio"
"github.com/containerd/containerd/errdefs"
"github.com/docker/docker/api/types"
"gotest.tools/assert"
)
func TestGetCurrentEngineVersionHappy(t *testing.T) {
ctx := context.Background()
image := &fakeImage{
nameFunc: func() string {
return "acme.com/dockermirror/" + CommunityEngineImage + ":engineversion"
},
}
container := &fakeContainer{
imageFunc: func(context.Context) (containerd.Image, error) {
return image, nil
},
}
client := baseClient{
cclient: &fakeContainerdClient{
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
return []containerd.Container{container}, nil
},
},
}
opts, err := client.GetCurrentEngineVersion(ctx)
assert.NilError(t, err)
assert.Equal(t, opts.EngineImage, CommunityEngineImage)
assert.Equal(t, opts.RegistryPrefix, "acme.com/dockermirror")
assert.Equal(t, opts.EngineVersion, "engineversion")
}
func TestGetCurrentEngineVersionEnterpriseHappy(t *testing.T) {
ctx := context.Background()
image := &fakeImage{
nameFunc: func() string {
return "docker.io/docker/" + EnterpriseEngineImage + ":engineversion"
},
}
container := &fakeContainer{
imageFunc: func(context.Context) (containerd.Image, error) {
return image, nil
},
}
client := baseClient{
cclient: &fakeContainerdClient{
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
return []containerd.Container{container}, nil
},
},
}
opts, err := client.GetCurrentEngineVersion(ctx)
assert.NilError(t, err)
assert.Equal(t, opts.EngineImage, EnterpriseEngineImage)
assert.Equal(t, opts.EngineVersion, "engineversion")
assert.Equal(t, opts.RegistryPrefix, "docker.io/docker")
}
func TestGetCurrentEngineVersionNoEngine(t *testing.T) {
ctx := context.Background()
client := baseClient{
cclient: &fakeContainerdClient{
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
return []containerd.Container{}, nil
},
},
}
_, err := client.GetCurrentEngineVersion(ctx)
assert.ErrorContains(t, err, "failed to find existing engine")
}
func TestGetCurrentEngineVersionMiscEngineError(t *testing.T) {
ctx := context.Background()
expectedError := fmt.Errorf("some container lookup error")
client := baseClient{
cclient: &fakeContainerdClient{
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
return nil, expectedError
},
},
}
_, err := client.GetCurrentEngineVersion(ctx)
assert.Assert(t, err == expectedError)
}
func TestGetCurrentEngineVersionImageFailure(t *testing.T) {
ctx := context.Background()
container := &fakeContainer{
imageFunc: func(context.Context) (containerd.Image, error) {
return nil, fmt.Errorf("container image failure")
},
}
client := baseClient{
cclient: &fakeContainerdClient{
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
return []containerd.Container{container}, nil
},
},
}
_, err := client.GetCurrentEngineVersion(ctx)
assert.ErrorContains(t, err, "container image failure")
}
func TestGetCurrentEngineVersionMalformed(t *testing.T) {
ctx := context.Background()
image := &fakeImage{
nameFunc: func() string {
return "imagename"
},
}
container := &fakeContainer{
imageFunc: func(context.Context) (containerd.Image, error) {
return image, nil
},
}
client := baseClient{
cclient: &fakeContainerdClient{
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
return []containerd.Container{container}, nil
},
},
}
_, err := client.GetCurrentEngineVersion(ctx)
assert.Assert(t, err == ErrEngineImageMissingTag)
}
func TestActivateNoEngine(t *testing.T) {
ctx := context.Background()
client := baseClient{
cclient: &fakeContainerdClient{
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
return []containerd.Container{}, nil
},
},
}
opts := EngineInitOptions{
EngineVersion: "engineversiongoeshere",
RegistryPrefix: "registryprefixgoeshere",
ConfigFile: "/tmp/configfilegoeshere",
EngineImage: EnterpriseEngineImage,
}
err := client.ActivateEngine(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
assert.ErrorContains(t, err, "unable to find")
}
func TestActivateNoChange(t *testing.T) {
ctx := context.Background()
registryPrefix := "registryprefixgoeshere"
image := &fakeImage{
nameFunc: func() string {
return registryPrefix + "/" + EnterpriseEngineImage + ":engineversion"
},
}
container := &fakeContainer{
imageFunc: func(context.Context) (containerd.Image, error) {
return image, nil
},
taskFunc: func(context.Context, cio.Attach) (containerd.Task, error) {
return nil, errdefs.ErrNotFound
},
labelsFunc: func(context.Context) (map[string]string, error) {
return map[string]string{}, nil
},
}
client := baseClient{
cclient: &fakeContainerdClient{
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
return []containerd.Container{container}, nil
},
},
}
opts := EngineInitOptions{
EngineVersion: "engineversiongoeshere",
RegistryPrefix: "registryprefixgoeshere",
ConfigFile: "/tmp/configfilegoeshere",
EngineImage: EnterpriseEngineImage,
}
err := client.ActivateEngine(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
assert.NilError(t, err)
}
func TestActivateDoUpdateFail(t *testing.T) {
ctx := context.Background()
registryPrefix := "registryprefixgoeshere"
image := &fakeImage{
nameFunc: func() string {
return registryPrefix + "/ce-engine:engineversion"
},
}
container := &fakeContainer{
imageFunc: func(context.Context) (containerd.Image, error) {
return image, nil
},
}
client := baseClient{
cclient: &fakeContainerdClient{
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
return []containerd.Container{container}, nil
},
getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
return nil, fmt.Errorf("something went wrong")
},
},
}
opts := EngineInitOptions{
EngineVersion: "engineversiongoeshere",
RegistryPrefix: "registryprefixgoeshere",
ConfigFile: "/tmp/configfilegoeshere",
EngineImage: EnterpriseEngineImage,
}
err := client.ActivateEngine(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
assert.ErrorContains(t, err, "check for image")
assert.ErrorContains(t, err, "something went wrong")
}
func TestDoUpdateNoVersion(t *testing.T) {
ctx := context.Background()
opts := EngineInitOptions{
EngineVersion: "",
RegistryPrefix: "registryprefixgoeshere",
ConfigFile: "/tmp/configfilegoeshere",
EngineImage: EnterpriseEngineImage,
}
client := baseClient{}
err := client.DoUpdate(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
assert.ErrorContains(t, err, "please pick the version you")
}
func TestDoUpdateImageMiscError(t *testing.T) {
ctx := context.Background()
opts := EngineInitOptions{
EngineVersion: "engineversiongoeshere",
RegistryPrefix: "registryprefixgoeshere",
ConfigFile: "/tmp/configfilegoeshere",
EngineImage: "testnamegoeshere",
}
client := baseClient{
cclient: &fakeContainerdClient{
getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
return nil, fmt.Errorf("something went wrong")
},
},
}
err := client.DoUpdate(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
assert.ErrorContains(t, err, "check for image")
assert.ErrorContains(t, err, "something went wrong")
}
func TestDoUpdatePullFail(t *testing.T) {
ctx := context.Background()
opts := EngineInitOptions{
EngineVersion: "engineversiongoeshere",
RegistryPrefix: "registryprefixgoeshere",
ConfigFile: "/tmp/configfilegoeshere",
EngineImage: "testnamegoeshere",
}
client := baseClient{
cclient: &fakeContainerdClient{
getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
return nil, errdefs.ErrNotFound
},
pullFunc: func(ctx context.Context, ref string, opts ...containerd.RemoteOpt) (containerd.Image, error) {
return nil, fmt.Errorf("pull failure")
},
},
}
err := client.DoUpdate(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
assert.ErrorContains(t, err, "unable to pull")
assert.ErrorContains(t, err, "pull failure")
}
func TestDoUpdateEngineMissing(t *testing.T) {
ctx := context.Background()
opts := EngineInitOptions{
EngineVersion: "engineversiongoeshere",
RegistryPrefix: "registryprefixgoeshere",
ConfigFile: "/tmp/configfilegoeshere",
EngineImage: "testnamegoeshere",
}
image := &fakeImage{
nameFunc: func() string {
return "imagenamehere"
},
}
client := baseClient{
cclient: &fakeContainerdClient{
getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
return image, nil
},
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
return []containerd.Container{}, nil
},
},
}
err := client.DoUpdate(ctx, opts, &testOutStream{}, &types.AuthConfig{}, healthfnHappy)
assert.ErrorContains(t, err, "unable to find existing engine")
}

View File

@ -0,0 +1,72 @@
package containerizedengine
import (
"context"
"sort"
registryclient "github.com/docker/cli/cli/registry/client"
"github.com/docker/distribution/reference"
ver "github.com/hashicorp/go-version"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// GetEngineVersions reports the versions of the engine that are available
func (c baseClient) GetEngineVersions(ctx context.Context, registryClient registryclient.RegistryClient, currentVersion, imageName string) (AvailableVersions, error) {
imageRef, err := reference.ParseNormalizedNamed(imageName)
if err != nil {
return AvailableVersions{}, err
}
tags, err := registryClient.GetTags(ctx, imageRef)
if err != nil {
return AvailableVersions{}, err
}
return parseTags(tags, currentVersion)
}
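For illustration, a hedged sketch of driving the method above from code that already holds a registry client; the image name and current version are placeholders, and it assumes the package's Client interface exposes GetEngineVersions as implemented here (fmt import elided):

func printUpgrades(ctx context.Context, c Client, rc registryclient.RegistryClient) error {
	avail, err := c.GetEngineVersions(ctx, rc, "18.03.0", "docker/engine-community")
	if err != nil {
		return err
	}
	for _, v := range avail.Upgrades {
		fmt.Printf("could upgrade to %s\n", v.Tag) // Tag is the raw registry tag
	}
	return nil
}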
func parseTags(tags []string, currentVersion string) (AvailableVersions, error) {
var ret AvailableVersions
currentVer, err := ver.NewVersion(currentVersion)
if err != nil {
return ret, errors.Wrapf(err, "failed to parse existing version %s", currentVersion)
}
downgrades := []DockerVersion{}
patches := []DockerVersion{}
upgrades := []DockerVersion{}
currentSegments := currentVer.Segments()
for _, tag := range tags {
tmp, err := ver.NewVersion(tag)
if err != nil {
logrus.Debugf("Unable to parse %s: %s", tag, err)
continue
}
testVersion := DockerVersion{Version: *tmp, Tag: tag}
if testVersion.LessThan(currentVer) {
downgrades = append(downgrades, testVersion)
continue
}
testSegments := testVersion.Segments()
// the go-version library always provides at least 3 segments
if testSegments[0] == currentSegments[0] &&
testSegments[1] == currentSegments[1] {
patches = append(patches, testVersion)
} else {
upgrades = append(upgrades, testVersion)
}
}
sort.Slice(downgrades, func(i, j int) bool {
return downgrades[i].Version.LessThan(&downgrades[j].Version)
})
sort.Slice(patches, func(i, j int) bool {
return patches[i].Version.LessThan(&patches[j].Version)
})
sort.Slice(upgrades, func(i, j int) bool {
return upgrades[i].Version.LessThan(&upgrades[j].Version)
})
ret.Downgrades = downgrades
ret.Patches = patches
ret.Upgrades = upgrades
return ret, nil
}
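A worked example of the bucketing above (tag values are hypothetical): anything below the current version lands in Downgrades, anything sharing its major.minor in Patches, and the rest in Upgrades.

res, _ := parseTags([]string{"17.06.2", "18.03.1", "18.09.0"}, "18.03.0")
// res.Downgrades[0].Tag == "17.06.2" (below 18.03.0)
// res.Patches[0].Tag == "18.03.1" (same 18.03 series)
// res.Upgrades[0].Tag == "18.09.0" (newer series)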

View File

@ -0,0 +1,80 @@
package containerizedengine
import (
"context"
"testing"
"gotest.tools/assert"
)
func TestGetEngineVersionsBadImage(t *testing.T) {
ctx := context.Background()
client := baseClient{}
currentVersion := "currentversiongoeshere"
imageName := "this is an illegal image $%^&"
_, err := client.GetEngineVersions(ctx, nil, currentVersion, imageName)
assert.ErrorContains(t, err, "invalid reference format")
}
func TestParseTagsSimple(t *testing.T) {
tags := []string{"1.0.0", "1.1.2", "1.1.1", "1.2.2"}
currentVersion := "1.1.0"
res, err := parseTags(tags, currentVersion)
assert.NilError(t, err)
assert.Assert(t, len(res.Downgrades) == 1 && res.Downgrades[0].Tag == "1.0.0")
assert.Assert(t, len(res.Patches) == 2 && res.Patches[0].Tag == "1.1.1" && res.Patches[1].Tag == "1.1.2")
assert.Assert(t, len(res.Upgrades) == 1 && res.Upgrades[0].Tag == "1.2.2")
}
func TestParseConfirmMinSegments(t *testing.T) {
tags := []string{"1", "1.1.1", "2"}
currentVersion := "1.1"
res, err := parseTags(tags, currentVersion)
assert.NilError(t, err)
assert.Assert(t, len(res.Downgrades) == 1 && res.Downgrades[0].Tag == "1")
assert.Assert(t, len(res.Patches) == 1 && res.Patches[0].Tag == "1.1.1")
assert.Assert(t, len(res.Upgrades) == 1 && res.Upgrades[0].Tag == "2")
}
func TestParseTagsFilterPrerelease(t *testing.T) {
tags := []string{"1.0.0", "1.1.1", "1.2.2", "1.1.0-beta1"}
currentVersion := "1.1.0"
res, err := parseTags(tags, currentVersion)
assert.NilError(t, err)
assert.Assert(t, len(res.Downgrades) == 2 && res.Downgrades[0].Tag == "1.0.0")
assert.Assert(t, len(res.Patches) == 1 && res.Patches[0].Tag == "1.1.1")
assert.Assert(t, len(res.Upgrades) == 1 && res.Upgrades[0].Tag == "1.2.2")
}
func TestParseTagsBadTag(t *testing.T) {
tags := []string{"1.0.0", "1.1.1", "1.2.2", "notasemanticversion"}
currentVersion := "1.1.0"
res, err := parseTags(tags, currentVersion)
assert.NilError(t, err)
assert.Assert(t, len(res.Downgrades) == 1 && res.Downgrades[0].Tag == "1.0.0")
assert.Assert(t, len(res.Patches) == 1 && res.Patches[0].Tag == "1.1.1")
assert.Assert(t, len(res.Upgrades) == 1 && res.Upgrades[0].Tag == "1.2.2")
}
func TestParseBadCurrent(t *testing.T) {
tags := []string{"1.0.0", "1.1.2", "1.1.1", "1.2.2"}
currentVersion := "notasemanticversion"
_, err := parseTags(tags, currentVersion)
assert.ErrorContains(t, err, "failed to parse existing")
}
func TestParseBadCurrent2(t *testing.T) {
tags := []string{"1.0.0", "1.1.2", "1.1.1", "1.2.2"}
currentVersion := ""
_, err := parseTags(tags, currentVersion)
assert.ErrorContains(t, err, "failed to parse existing")
}

View File

@ -0,0 +1,104 @@
package licenseutils
import (
"context"
"github.com/docker/licensing"
"github.com/docker/licensing/model"
)
type (
fakeLicensingClient struct {
loginViaAuthFunc func(ctx context.Context, username, password string) (authToken string, err error)
getHubUserOrgsFunc func(ctx context.Context, authToken string) (orgs []model.Org, err error)
getHubUserByNameFunc func(ctx context.Context, username string) (user *model.User, err error)
verifyLicenseFunc func(ctx context.Context, license model.IssuedLicense) (res *model.CheckResponse, err error)
generateNewTrialSubscriptionFunc func(ctx context.Context, authToken, dockerID, email string) (subscriptionID string, err error)
listSubscriptionsFunc func(ctx context.Context, authToken, dockerID string) (response []*model.Subscription, err error)
listSubscriptionsDetailsFunc func(ctx context.Context, authToken, dockerID string) (response []*model.SubscriptionDetail, err error)
downloadLicenseFromHubFunc func(ctx context.Context, authToken, subscriptionID string) (license *model.IssuedLicense, err error)
parseLicenseFunc func(license []byte) (parsedLicense *model.IssuedLicense, err error)
storeLicenseFunc func(ctx context.Context, dclnt licensing.WrappedDockerClient, licenses *model.IssuedLicense, localRootDir string) error
loadLocalLicenseFunc func(ctx context.Context, dclnt licensing.WrappedDockerClient) (*model.Subscription, error)
}
)
func (c *fakeLicensingClient) LoginViaAuth(ctx context.Context, username, password string) (authToken string, err error) {
if c.loginViaAuthFunc != nil {
return c.loginViaAuthFunc(ctx, username, password)
}
return "", nil
}
func (c *fakeLicensingClient) GetHubUserOrgs(ctx context.Context, authToken string) (orgs []model.Org, err error) {
if c.getHubUserOrgsFunc != nil {
return c.getHubUserOrgsFunc(ctx, authToken)
}
return nil, nil
}
func (c *fakeLicensingClient) GetHubUserByName(ctx context.Context, username string) (user *model.User, err error) {
if c.getHubUserByNameFunc != nil {
return c.getHubUserByNameFunc(ctx, username)
}
return nil, nil
}
func (c *fakeLicensingClient) VerifyLicense(ctx context.Context, license model.IssuedLicense) (res *model.CheckResponse, err error) {
if c.verifyLicenseFunc != nil {
return c.verifyLicenseFunc(ctx, license)
}
return nil, nil
}
func (c *fakeLicensingClient) GenerateNewTrialSubscription(ctx context.Context, authToken, dockerID, email string) (subscriptionID string, err error) {
if c.generateNewTrialSubscriptionFunc != nil {
return c.generateNewTrialSubscriptionFunc(ctx, authToken, dockerID, email)
}
return "", nil
}
func (c *fakeLicensingClient) ListSubscriptions(ctx context.Context, authToken, dockerID string) (response []*model.Subscription, err error) {
if c.listSubscriptionsFunc != nil {
return c.listSubscriptionsFunc(ctx, authToken, dockerID)
}
return nil, nil
}
func (c *fakeLicensingClient) ListSubscriptionsDetails(ctx context.Context, authToken, dockerID string) (response []*model.SubscriptionDetail, err error) {
if c.listSubscriptionsDetailsFunc != nil {
return c.listSubscriptionsDetailsFunc(ctx, authToken, dockerID)
}
return nil, nil
}
func (c *fakeLicensingClient) DownloadLicenseFromHub(ctx context.Context, authToken, subscriptionID string) (license *model.IssuedLicense, err error) {
if c.downloadLicenseFromHubFunc != nil {
return c.downloadLicenseFromHubFunc(ctx, authToken, subscriptionID)
}
return nil, nil
}
func (c *fakeLicensingClient) ParseLicense(license []byte) (parsedLicense *model.IssuedLicense, err error) {
if c.parseLicenseFunc != nil {
return c.parseLicenseFunc(license)
}
return nil, nil
}
func (c *fakeLicensingClient) StoreLicense(ctx context.Context, dclnt licensing.WrappedDockerClient, licenses *model.IssuedLicense, localRootDir string) error {
if c.storeLicenseFunc != nil {
return c.storeLicenseFunc(ctx, dclnt, licenses, localRootDir)
}
return nil
}
func (c *fakeLicensingClient) LoadLocalLicense(ctx context.Context, dclnt licensing.WrappedDockerClient) (*model.Subscription, error) {
if c.loadLocalLicenseFunc != nil {
return c.loadLocalLicenseFunc(ctx, dclnt)
}
return nil, nil
}
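The nil-checked function fields above let each test stub only the calls it cares about, with every other method falling back to zero values; a minimal hedged sketch:

client := &fakeLicensingClient{
	loginViaAuthFunc: func(ctx context.Context, username, password string) (string, error) {
		return "fake-token", nil // canned response for this test
	},
}
token, _ := client.LoginViaAuth(context.Background(), "user", "secret") // "fake-token"
orgs, _ := client.GetHubUserOrgs(context.Background(), token)           // nil, nil fallback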

View File

@ -0,0 +1,25 @@
package licenseutils
import (
"github.com/docker/licensing/model"
)
var (
// licensingDefaultBaseURI is the default license server base URL
licensingDefaultBaseURI = "https://store.docker.com"
// licensingPublicKey is the official public license key for store.docker.com
// nolint: lll
licensingPublicKey = "LS0tLS1CRUdJTiBQVUJMSUMgS0VZLS0tLS0Ka2lkOiBKN0xEOjY3VlI6TDVIWjpVN0JBOjJPNEc6NEFMMzpPRjJOOkpIR0I6RUZUSDo1Q1ZROk1GRU86QUVJVAoKTUlJQ0lqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FnOEFNSUlDQ2dLQ0FnRUF5ZEl5K2xVN283UGNlWSs0K3MrQwpRNU9FZ0N5RjhDeEljUUlXdUs4NHBJaVpjaVk2NzMweUNZbndMU0tUbHcrVTZVQy9RUmVXUmlvTU5ORTVEczVUCllFWGJHRzZvbG0ycWRXYkJ3Y0NnKzJVVUgvT2NCOVd1UDZnUlBIcE1GTXN4RHpXd3ZheThKVXVIZ1lVTFVwbTEKSXYrbXE3bHA1blEvUnhyVDBLWlJBUVRZTEVNRWZHd20zaE1PL2dlTFBTK2hnS1B0SUhsa2c2L1djb3hUR29LUAo3OWQvd2FIWXhHTmw3V2hTbmVpQlN4YnBiUUFLazIxbGc3OThYYjd2WnlFQVRETXJSUjlNZUU2QWRqNUhKcFkzCkNveVJBUENtYUtHUkNLNHVvWlNvSXUwaEZWbEtVUHliYncwMDBHTyt3YTJLTjhVd2dJSW0waTVJMXVXOUdrcTQKempCeTV6aGdxdVVYYkc5YldQQU9ZcnE1UWE4MUR4R2NCbEp5SFlBcCtERFBFOVRHZzR6WW1YakpueFpxSEVkdQpHcWRldlo4WE1JMHVrZmtHSUkxNHdVT2lNSUlJclhsRWNCZi80Nkk4Z1FXRHp4eWNaZS9KR1grTEF1YXlYcnlyClVGZWhWTlVkWlVsOXdYTmFKQitrYUNxejVRd2FSOTNzR3crUVNmdEQwTnZMZTdDeU9IK0U2dmc2U3QvTmVUdmcKdjhZbmhDaVhJbFo4SE9mSXdOZTd0RUYvVWN6NU9iUHlrbTN0eWxyTlVqdDBWeUFtdHRhY1ZJMmlHaWhjVVBybQprNGxWSVo3VkQvTFNXK2k3eW9TdXJ0cHNQWGNlMnBLRElvMzBsSkdoTy8zS1VtbDJTVVpDcXpKMXlFbUtweXNICjVIRFc5Y3NJRkNBM2RlQWpmWlV2TjdVQ0F3RUFBUT09Ci0tLS0tRU5EIFBVQkxJQyBLRVktLS0tLQo="
)
type (
// LicenseDisplay stores license details for display
LicenseDisplay struct {
model.Subscription
Num int
Owner string
ComponentsString string
}
)

View File

@ -0,0 +1,189 @@
package licenseutils
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strings"
"github.com/docker/docker/api/types"
"github.com/docker/licensing"
"github.com/docker/licensing/model"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// HubUser wraps a licensing client and holds key information
// for a user to avoid multiple lookups
type HubUser struct {
client licensing.Client
token string
User model.User
Orgs []model.Org
}
// GetOrgByID finds the org by ID in the user's list of orgs
func (u HubUser) GetOrgByID(orgID string) (model.Org, error) {
for _, org := range u.Orgs {
if org.ID == orgID {
return org, nil
}
}
return model.Org{}, fmt.Errorf("org %s not found", orgID)
}
// Login to the license server and return a client that can be used to look up and download license files or generate new trial licenses
func Login(ctx context.Context, authConfig *types.AuthConfig) (HubUser, error) {
baseURI, err := url.Parse(licensingDefaultBaseURI)
if err != nil {
return HubUser{}, err
}
lclient, err := licensing.New(&licensing.Config{
BaseURI: *baseURI,
HTTPClient: &http.Client{},
PublicKey: licensingPublicKey,
})
if err != nil {
return HubUser{}, err
}
// For licensing we know they must have a valid login session
if authConfig.Username == "" {
return HubUser{}, fmt.Errorf("you must be logged in to access licenses. Please use 'docker login' then try again")
}
token, err := lclient.LoginViaAuth(ctx, authConfig.Username, authConfig.Password)
if err != nil {
return HubUser{}, err
}
user, err := lclient.GetHubUserByName(ctx, authConfig.Username)
if err != nil {
return HubUser{}, err
}
orgs, err := lclient.GetHubUserOrgs(ctx, token)
if err != nil {
return HubUser{}, err
}
return HubUser{
client: lclient,
token: token,
User: *user,
Orgs: orgs,
}, nil
}
// GetAvailableLicenses finds all available licenses for a given account and their orgs
func (u HubUser) GetAvailableLicenses(ctx context.Context) ([]LicenseDisplay, error) {
subs, err := u.client.ListSubscriptions(ctx, u.token, u.User.ID)
if err != nil {
return nil, err
}
for _, org := range u.Orgs {
orgSub, err := u.client.ListSubscriptions(ctx, u.token, org.ID)
if err != nil {
return nil, err
}
subs = append(subs, orgSub...)
}
// Convert the subscriptions to a more user-friendly type to render in the CLI
res := []LicenseDisplay{}
// Filter out expired licenses
i := 0
for _, s := range subs {
if s.State != "expired" && s.Expires != nil {
owner := ""
if s.DockerID == u.User.ID {
owner = u.User.Username
} else {
ownerOrg, err := u.GetOrgByID(s.DockerID)
if err == nil {
owner = ownerOrg.Orgname
} else {
owner = "unknown"
logrus.Debugf("Unable to lookup org ID %s: %s", s.DockerID, err)
}
}
comps := []string{}
for _, pc := range s.PricingComponents {
comps = append(comps, fmt.Sprintf("%s:%d", pc.Name, pc.Value))
}
res = append(res, LicenseDisplay{
Subscription: *s,
Num: i,
Owner: owner,
ComponentsString: strings.Join(comps, ","),
})
i++
}
}
return res, nil
}
// GenerateTrialLicense will generate a new trial license for the specified user or org
func (u HubUser) GenerateTrialLicense(ctx context.Context, targetID string) (*model.IssuedLicense, error) {
subID, err := u.client.GenerateNewTrialSubscription(ctx, u.token, targetID, u.User.Email)
if err != nil {
return nil, err
}
return u.client.DownloadLicenseFromHub(ctx, u.token, subID)
}
// GetIssuedLicense will download a license by ID
func (u HubUser) GetIssuedLicense(ctx context.Context, ID string) (*model.IssuedLicense, error) {
return u.client.DownloadLicenseFromHub(ctx, u.token, ID)
}
// LoadLocalIssuedLicense will load a local license file
func LoadLocalIssuedLicense(ctx context.Context, filename string) (*model.IssuedLicense, error) {
baseURI, err := url.Parse(licensingDefaultBaseURI)
if err != nil {
return nil, err
}
lclient, err := licensing.New(&licensing.Config{
BaseURI: *baseURI,
HTTPClient: &http.Client{},
PublicKey: licensingPublicKey,
})
if err != nil {
return nil, err
}
return doLoadLocalIssuedLicense(ctx, filename, lclient)
}
func doLoadLocalIssuedLicense(ctx context.Context, filename string, lclient licensing.Client) (*model.IssuedLicense, error) {
var license model.IssuedLicense
data, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
err = json.Unmarshal(data, &license)
if err != nil {
return nil, errors.Wrap(err, "malformed license file")
}
_, err = lclient.VerifyLicense(ctx, license)
if err != nil {
return nil, err
}
return &license, nil
}
// ApplyLicense will store a license on the local system
func ApplyLicense(ctx context.Context, dclient licensing.WrappedDockerClient, license *model.IssuedLicense) error {
info, err := dclient.Info(ctx)
if err != nil {
return err
}
return licensing.StoreLicense(ctx, dclient, license, info.DockerRootDir)
}
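Taken together, a hedged sketch of the flow these helpers support; error handling is trimmed, and picking licenses[0] plus the Subscription ID field are assumptions made for illustration:

func applyFirstLicense(ctx context.Context, dclient licensing.WrappedDockerClient, auth *types.AuthConfig) error {
	user, err := Login(ctx, auth)
	if err != nil {
		return err
	}
	licenses, err := user.GetAvailableLicenses(ctx)
	if err != nil {
		return err
	}
	if len(licenses) == 0 {
		return errors.New("no usable license found")
	}
	lic, err := user.GetIssuedLicense(ctx, licenses[0].ID) // ID field assumed for illustration
	if err != nil {
		return err
	}
	return ApplyLicense(ctx, dclient, lic)
}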

View File

@ -0,0 +1,234 @@
package licenseutils
import (
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/licensing/model"
"gotest.tools/assert"
)
func TestLoginNoAuth(t *testing.T) {
ctx := context.Background()
_, err := Login(ctx, &types.AuthConfig{})
assert.ErrorContains(t, err, "must be logged in")
}
func TestGetOrgByID(t *testing.T) {
orgs := []model.Org{
{ID: "id1"},
{ID: "id2"},
}
u := HubUser{
Orgs: orgs,
}
o, err := u.GetOrgByID("id1")
assert.NilError(t, err)
assert.Assert(t, o.ID == "id1")
o, err = u.GetOrgByID("id2")
assert.NilError(t, err)
assert.Assert(t, o.ID == "id2")
o, err = u.GetOrgByID("id3")
assert.ErrorContains(t, err, "not found")
}
func TestGetAvailableLicensesListFail(t *testing.T) {
ctx := context.Background()
user := HubUser{
client: &fakeLicensingClient{
listSubscriptionsFunc: func(ctx context.Context, authToken, dockerID string) (response []*model.Subscription, err error) {
return nil, fmt.Errorf("list subscriptions error")
},
},
}
_, err := user.GetAvailableLicenses(ctx)
assert.ErrorContains(t, err, "list subscriptions error")
}
func TestGetAvailableLicensesOrgFail(t *testing.T) {
ctx := context.Background()
user := HubUser{
Orgs: []model.Org{
{ID: "orgid"},
},
client: &fakeLicensingClient{
listSubscriptionsFunc: func(ctx context.Context, authToken, dockerID string) (response []*model.Subscription, err error) {
if dockerID == "orgid" {
return nil, fmt.Errorf("list subscriptions org error")
}
return nil, nil
},
},
}
_, err := user.GetAvailableLicenses(ctx)
assert.ErrorContains(t, err, "list subscriptions org error")
}
func TestGetAvailableLicensesHappy(t *testing.T) {
ctx := context.Background()
expiration := time.Now().Add(3600 * time.Second)
user := HubUser{
User: model.User{
ID: "userid",
Username: "username",
},
Orgs: []model.Org{
{
ID: "orgid",
Orgname: "orgname",
},
},
client: &fakeLicensingClient{
listSubscriptionsFunc: func(ctx context.Context, authToken, dockerID string) (response []*model.Subscription, err error) {
if dockerID == "orgid" {
return []*model.Subscription{
{
State: "expired",
Expires: &expiration,
},
{
State: "active",
DockerID: "orgid",
Expires: &expiration,
},
{
State: "active",
DockerID: "invalidid",
Expires: &expiration,
},
}, nil
} else if dockerID == "userid" {
return []*model.Subscription{
{
State: "expired",
},
{
State: "active",
DockerID: "userid",
Expires: &expiration,
PricingComponents: model.PricingComponents{
{
Name: "comp1",
Value: 1,
},
{
Name: "comp2",
Value: 2,
},
},
},
}, nil
}
return nil, nil
},
},
}
subs, err := user.GetAvailableLicenses(ctx)
assert.NilError(t, err)
assert.Assert(t, len(subs) == 3)
assert.Assert(t, subs[0].Owner == "username")
assert.Assert(t, subs[0].State == "active")
assert.Assert(t, subs[0].ComponentsString == "comp1:1,comp2:2")
assert.Assert(t, subs[1].Owner == "orgname")
assert.Assert(t, subs[1].State == "active")
assert.Assert(t, subs[2].Owner == "unknown")
assert.Assert(t, subs[2].State == "active")
}
func TestGenerateTrialFail(t *testing.T) {
ctx := context.Background()
user := HubUser{
client: &fakeLicensingClient{
generateNewTrialSubscriptionFunc: func(ctx context.Context, authToken, dockerID, email string) (subscriptionID string, err error) {
return "", fmt.Errorf("generate trial failure")
},
},
}
targetID := "targetidgoeshere"
_, err := user.GenerateTrialLicense(ctx, targetID)
assert.ErrorContains(t, err, "generate trial failure")
}
func TestGenerateTrialHappy(t *testing.T) {
ctx := context.Background()
user := HubUser{
client: &fakeLicensingClient{
generateNewTrialSubscriptionFunc: func(ctx context.Context, authToken, dockerID, email string) (subscriptionID string, err error) {
return "subid", nil
},
},
}
targetID := "targetidgoeshere"
_, err := user.GenerateTrialLicense(ctx, targetID)
assert.NilError(t, err)
}
func TestGetIssuedLicense(t *testing.T) {
ctx := context.Background()
user := HubUser{
client: &fakeLicensingClient{},
}
id := "idgoeshere"
_, err := user.GetIssuedLicense(ctx, id)
assert.NilError(t, err)
}
func TestLoadLocalIssuedLicenseNotExist(t *testing.T) {
ctx := context.Background()
tmpdir, err := ioutil.TempDir("", "licensing-test")
assert.NilError(t, err)
defer os.RemoveAll(tmpdir)
filename := filepath.Join(tmpdir, "subscription.lic")
_, err = LoadLocalIssuedLicense(ctx, filename)
assert.ErrorContains(t, err, "no such file")
}
func TestLoadLocalIssuedLicenseNotJson(t *testing.T) {
ctx := context.Background()
tmpdir, err := ioutil.TempDir("", "licensing-test")
assert.NilError(t, err)
defer os.RemoveAll(tmpdir)
filename := filepath.Join(tmpdir, "subscription.lic")
err = ioutil.WriteFile(filename, []byte("not json"), 0644)
assert.NilError(t, err)
_, err = LoadLocalIssuedLicense(ctx, filename)
assert.ErrorContains(t, err, "malformed license file")
}
func TestLoadLocalIssuedLicenseNoVerify(t *testing.T) {
lclient := &fakeLicensingClient{
verifyLicenseFunc: func(ctx context.Context, license model.IssuedLicense) (res *model.CheckResponse, err error) {
return nil, fmt.Errorf("verification failed")
},
}
ctx := context.Background()
tmpdir, err := ioutil.TempDir("", "licensing-test")
assert.NilError(t, err)
defer os.RemoveAll(tmpdir)
filename := filepath.Join(tmpdir, "subscription.lic")
err = ioutil.WriteFile(filename, []byte("{}"), 0644)
assert.NilError(t, err)
_, err = doLoadLocalIssuedLicense(ctx, filename, lclient)
assert.ErrorContains(t, err, "verification failed")
}
func TestLoadLocalIssuedLicenseHappy(t *testing.T) {
lclient := &fakeLicensingClient{}
ctx := context.Background()
tmpdir, err := ioutil.TempDir("", "licensing-test")
assert.NilError(t, err)
defer os.RemoveAll(tmpdir)
filename := filepath.Join(tmpdir, "subscription.lic")
err = ioutil.WriteFile(filename, []byte("{}"), 0644)
assert.NilError(t, err)
_, err = doLoadLocalIssuedLicense(ctx, filename, lclient)
assert.NilError(t, err)
}

View File

@ -0,0 +1,61 @@
package containerized
import (
"context"
"github.com/containerd/containerd/containers"
"github.com/containerd/containerd/oci"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
// WithAllCapabilities enables all capabilities required to run privileged containers
func WithAllCapabilities(_ context.Context, _ oci.Client, c *containers.Container, s *specs.Spec) error {
caps := []string{
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_DAC_READ_SEARCH",
"CAP_FOWNER",
"CAP_FSETID",
"CAP_KILL",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETPCAP",
"CAP_LINUX_IMMUTABLE",
"CAP_NET_BIND_SERVICE",
"CAP_NET_BROADCAST",
"CAP_NET_ADMIN",
"CAP_NET_RAW",
"CAP_IPC_LOCK",
"CAP_IPC_OWNER",
"CAP_SYS_MODULE",
"CAP_SYS_RAWIO",
"CAP_SYS_CHROOT",
"CAP_SYS_PTRACE",
"CAP_SYS_PACCT",
"CAP_SYS_ADMIN",
"CAP_SYS_BOOT",
"CAP_SYS_NICE",
"CAP_SYS_RESOURCE",
"CAP_SYS_TIME",
"CAP_SYS_TTY_CONFIG",
"CAP_MKNOD",
"CAP_LEASE",
"CAP_AUDIT_WRITE",
"CAP_AUDIT_CONTROL",
"CAP_SETFCAP",
"CAP_MAC_OVERRIDE",
"CAP_MAC_ADMIN",
"CAP_SYSLOG",
"CAP_WAKE_ALARM",
"CAP_BLOCK_SUSPEND",
"CAP_AUDIT_READ",
}
if s.Process.Capabilities == nil {
s.Process.Capabilities = &specs.LinuxCapabilities{}
}
s.Process.Capabilities.Bounding = caps
s.Process.Capabilities.Effective = caps
s.Process.Capabilities.Inheritable = caps
s.Process.Capabilities.Permitted = caps
return nil
}
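A hedged usage sketch: since WithAllCapabilities matches containerd's oci.SpecOpts signature, it can be handed to containerd.WithNewSpec when creating a privileged container; the client, image, and container ID below are placeholders:

ctr, err := client.NewContainer(ctx, "docker-engine",
	WithNewSnapshot(img), // snapshot opt defined in this same package
	containerd.WithNewSpec(oci.WithImageConfig(img), WithAllCapabilities),
)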

View File

@ -0,0 +1,21 @@
package containerized
import (
"context"
"testing"
"github.com/containerd/containerd/containers"
specs "github.com/opencontainers/runtime-spec/specs-go"
"gotest.tools/assert"
)
func TestWithAllCapabilities(t *testing.T) {
c := &containers.Container{}
s := &specs.Spec{
Process: &specs.Process{},
}
ctx := context.Background()
err := WithAllCapabilities(ctx, nil, c, s)
assert.NilError(t, err)
assert.Assert(t, len(s.Process.Capabilities.Bounding) > 0)
}

View File

@ -0,0 +1,74 @@
package containerized
import (
"context"
"github.com/containerd/containerd"
"github.com/containerd/containerd/errdefs"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// AtomicImageUpdate will perform an update of the given container with the new image
// and verify success via the provided healthcheckFn. If the healthcheck fails, the
// container will be reverted to the prior image
func AtomicImageUpdate(ctx context.Context, container containerd.Container, image containerd.Image, healthcheckFn func() error) error {
updateCompleted := false
err := pauseAndRun(ctx, container, func() error {
if err := container.Update(ctx, WithUpgrade(image)); err != nil {
return errors.Wrap(err, "failed to update to new image")
}
updateCompleted = true
task, err := container.Task(ctx, nil)
if err != nil {
if errdefs.IsNotFound(err) {
return nil
}
return errors.Wrap(err, "failed to lookup task")
}
return task.Kill(ctx, sigTERM)
})
if err != nil {
if updateCompleted {
logrus.WithError(err).Error("failed to update, rolling back")
return rollBack(ctx, container)
}
return err
}
if err := healthcheckFn(); err != nil {
logrus.WithError(err).Error("failed health check, rolling back")
return rollBack(ctx, container)
}
return nil
}
func rollBack(ctx context.Context, container containerd.Container) error {
return pauseAndRun(ctx, container, func() error {
if err := container.Update(ctx, WithRollback); err != nil {
return err
}
task, err := container.Task(ctx, nil)
if err != nil {
if errdefs.IsNotFound(err) {
return nil
}
return errors.Wrap(err, "failed to lookup task")
}
return task.Kill(ctx, sigTERM)
})
}
func pauseAndRun(ctx context.Context, container containerd.Container, fn func() error) error {
task, err := container.Task(ctx, nil)
if err != nil {
if errdefs.IsNotFound(err) {
return fn()
}
return errors.Wrap(err, "failed to lookup task")
}
if err := task.Pause(ctx); err != nil {
return errors.Wrap(err, "failed to pause task")
}
defer task.Resume(ctx)
return fn()
}
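A hedged sketch of wiring AtomicImageUpdate to a health probe; pingEngine is a hypothetical check (a real one might hit the engine's /_ping endpoint):

if err := AtomicImageUpdate(ctx, container, newImage, func() error {
	return pingEngine(ctx) // hypothetical health probe
}); err != nil {
	// by this point the container has already been rolled back to its prior image
	logrus.WithError(err).Error("engine update failed")
}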

View File

@ -0,0 +1,12 @@
// +build !windows
package containerized
import (
"golang.org/x/sys/unix"
)
var (
// sigTERM maps to unix.SIGTERM
sigTERM = unix.SIGTERM
)

View File

@ -0,0 +1,12 @@
// +build windows
package containerized
import (
"syscall"
)
var (
// sigTERM is a placeholder; containerd's kill ignores all signals on Windows
sigTERM = syscall.Signal(0)
)

View File

@ -0,0 +1,158 @@
package containerized
import (
"context"
"errors"
"fmt"
"strings"
"time"
"github.com/containerd/containerd"
"github.com/containerd/containerd/containers"
"github.com/containerd/containerd/diff/apply"
"github.com/containerd/containerd/mount"
"github.com/containerd/containerd/rootfs"
"github.com/containerd/containerd/snapshots"
"github.com/opencontainers/image-spec/identity"
)
const (
gcRoot = "containerd.io/gc.root"
timestampFormat = "01-02-2006-15:04:05"
previousRevision = "docker.com/revision.previous"
imageLabel = "docker.com/revision.image"
)
// ErrNoPreviousRevision is returned if the container has no previous revision
var ErrNoPreviousRevision = errors.New("no previous revision")
// WithNewSnapshot creates a new snapshot managed by the containerized package
func WithNewSnapshot(i containerd.Image) containerd.NewContainerOpts {
return func(ctx context.Context, client *containerd.Client, c *containers.Container) error {
if c.Snapshotter == "" {
c.Snapshotter = containerd.DefaultSnapshotter
}
r, err := create(ctx, client, i, c.ID, "")
if err != nil {
return err
}
c.SnapshotKey = r.Key
c.Image = i.Name()
return nil
}
}
// WithUpgrade upgrades an existing container's image to a new one
func WithUpgrade(i containerd.Image) containerd.UpdateContainerOpts {
return func(ctx context.Context, client *containerd.Client, c *containers.Container) error {
revision, err := save(ctx, client, i, c)
if err != nil {
return err
}
c.Image = i.Name()
c.SnapshotKey = revision.Key
return nil
}
}
// WithRollback rolls back to the previous container's revision
func WithRollback(ctx context.Context, client *containerd.Client, c *containers.Container) error {
prev, err := previous(ctx, client, c)
if err != nil {
return err
}
ss := client.SnapshotService(c.Snapshotter)
sInfo, err := ss.Stat(ctx, prev.Key)
if err != nil {
return err
}
snapshotImage, ok := sInfo.Labels[imageLabel]
if !ok {
return fmt.Errorf("snapshot %s does not have a service image label", prev.Key)
}
if snapshotImage == "" {
return fmt.Errorf("snapshot %s has an empty service image label", prev.Key)
}
c.Image = snapshotImage
c.SnapshotKey = prev.Key
return nil
}
func newRevision(id string) *revision {
now := time.Now()
return &revision{
Timestamp: now,
Key: fmt.Sprintf("boss.io.%s.%s", id, now.Format(timestampFormat)),
}
}
type revision struct {
Timestamp time.Time
Key string
mounts []mount.Mount
}
// nolint: interfacer
func create(ctx context.Context, client *containerd.Client, i containerd.Image, id string, previous string) (*revision, error) {
diffIDs, err := i.RootFS(ctx)
if err != nil {
return nil, err
}
var (
parent = identity.ChainID(diffIDs).String()
r = newRevision(id)
)
labels := map[string]string{
gcRoot: r.Timestamp.Format(time.RFC3339),
imageLabel: i.Name(),
}
if previous != "" {
labels[previousRevision] = previous
}
mounts, err := client.SnapshotService(containerd.DefaultSnapshotter).Prepare(ctx, r.Key, parent, snapshots.WithLabels(labels))
if err != nil {
return nil, err
}
r.mounts = mounts
return r, nil
}
func save(ctx context.Context, client *containerd.Client, updatedImage containerd.Image, c *containers.Container) (*revision, error) {
snapshot, err := create(ctx, client, updatedImage, c.ID, c.SnapshotKey)
if err != nil {
return nil, err
}
service := client.SnapshotService(c.Snapshotter)
// create a diff from the existing snapshot
diff, err := rootfs.CreateDiff(ctx, c.SnapshotKey, service, client.DiffService())
if err != nil {
return nil, err
}
applier := apply.NewFileSystemApplier(client.ContentStore())
if _, err := applier.Apply(ctx, diff, snapshot.mounts); err != nil {
return nil, err
}
return snapshot, nil
}
// nolint: interfacer
func previous(ctx context.Context, client *containerd.Client, c *containers.Container) (*revision, error) {
service := client.SnapshotService(c.Snapshotter)
sInfo, err := service.Stat(ctx, c.SnapshotKey)
if err != nil {
return nil, err
}
key := sInfo.Labels[previousRevision]
if key == "" {
return nil, ErrNoPreviousRevision
}
parts := strings.Split(key, ".")
timestamp, err := time.Parse(timestampFormat, parts[len(parts)-1])
if err != nil {
return nil, err
}
return &revision{
Timestamp: timestamp,
Key: key,
}, nil
}
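// Worked example (hypothetical id and clock): newRevision("engine") at
// 2018-08-20 13:39:49 produces Key "boss.io.engine.08-20-2018-13:39:49";
// previous() then recovers the timestamp from the text after the last ".",
// which is safe because timestampFormat itself contains no dots.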

View File

@ -12,6 +12,7 @@ import (
manifeststore "github.com/docker/cli/cli/manifest/store"
registryclient "github.com/docker/cli/cli/registry/client"
"github.com/docker/cli/cli/trust"
"github.com/docker/cli/internal/containerizedengine"
"github.com/docker/docker/client"
notaryclient "github.com/theupdateframework/notary/client"
)
@ -19,22 +20,24 @@ import (
// NotaryClientFuncType defines a function that returns a fake notary client
type NotaryClientFuncType func(imgRefAndAuth trust.ImageRefAndAuth, actions []string) (notaryclient.Repository, error)
type clientInfoFuncType func() command.ClientInfo
type containerizedEngineFuncType func(string) (containerizedengine.Client, error)
// FakeCli emulates the default DockerCli
type FakeCli struct {
command.DockerCli
client client.APIClient
configfile *configfile.ConfigFile
out *command.OutStream
outBuffer *bytes.Buffer
err *bytes.Buffer
in *command.InStream
server command.ServerInfo
clientInfoFunc clientInfoFuncType
notaryClientFunc NotaryClientFuncType
manifestStore manifeststore.Store
registryClient registryclient.RegistryClient
contentTrust bool
client client.APIClient
configfile *configfile.ConfigFile
out *command.OutStream
outBuffer *bytes.Buffer
err *bytes.Buffer
in *command.InStream
server command.ServerInfo
clientInfoFunc clientInfoFuncType
notaryClientFunc NotaryClientFuncType
manifestStore manifeststore.Store
registryClient registryclient.RegistryClient
contentTrust bool
containerizedEngineClientFunc containerizedEngineFuncType
}
// NewFakeCli returns a fake for the command.Cli interface
@ -167,3 +170,16 @@ func (c *FakeCli) ContentTrustEnabled() bool {
func EnableContentTrust(c *FakeCli) {
c.contentTrust = true
}
// NewContainerizedEngineClient returns a containerized engine client
func (c *FakeCli) NewContainerizedEngineClient(sockPath string) (containerizedengine.Client, error) {
if c.containerizedEngineClientFunc != nil {
return c.containerizedEngineClientFunc(sockPath)
}
return nil, fmt.Errorf("no containerized engine client available unless defined")
}
// SetContainerizedEngineClient on the fake cli
func (c *FakeCli) SetContainerizedEngineClient(containerizedEngineClientFunc containerizedEngineFuncType) {
c.containerizedEngineClientFunc = containerizedEngineClientFunc
}
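A hedged sketch of a test swapping in a stub engine client; fakeEngineClient is a hypothetical containerizedengine.Client implementation and apiClient any client.APIClient fake:

cli := NewFakeCli(apiClient)
cli.SetContainerizedEngineClient(func(sockPath string) (containerizedengine.Client, error) {
	return &fakeEngineClient{}, nil // hypothetical stub
})
engineClient, err := cli.NewContainerizedEngineClient("/run/containerd/containerd.sock")
// engineClient is the stub; err is nil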

5
scripts/test/engine/entry Executable file
View File

@ -0,0 +1,5 @@
#!/usr/bin/env bash
set -eu -o pipefail
# TODO fetch images?
./scripts/test/engine/wrapper

107
scripts/test/engine/run Executable file
View File

@ -0,0 +1,107 @@
#!/usr/bin/env bash
# Run engine-specific integration tests against the latest containerd-in-docker
set -eu -o pipefail
function container_ip {
local cid=$1
local network=$2
docker inspect \
-f "{{.NetworkSettings.Networks.${network}.IPAddress}}" "$cid"
}
function fetch_images {
## TODO - not yet implemented
./scripts/test/engine/load-image fetch-only
}
function setup {
### start containerd and log to a file
echo "Starting containerd in the background"
containerd &> /tmp/containerd.err &
echo "Waiting for containerd to be responsive"
# shellcheck disable=SC2034
for i in $(seq 1 60); do
if ctr namespace ls > /dev/null; then
break
fi
sleep 1
done
ctr namespace ls > /dev/null
echo "containerd is ready"
# TODO Once https://github.com/moby/moby/pull/33355 or equivalent
# is merged, then this can be optimized to preload the image
# saved during the build phase
}
function cleanup {
#### if testexit is non-zero dump the containerd logs with a banner
if [ "${testexit}" -ne 0 ] ; then
echo "FAIL: dumping containerd logs"
echo ""
cat /tmp/containerd.err
if [ -f /var/log/engine.log ] ; then
echo ""
echo "FAIL: dumping engine log"
echo ""
cat /var/log/engine.log
else
echo ""
echo "FAIL: engine log missing"
echo ""
fi
echo "FAIL: remaining namespaces"
ctr namespace ls || /bin/true
echo "FAIL: remaining containers"
ctr --namespace docker container ls || /bin/true
echo "FAIL: remaining tasks"
ctr --namespace docker task ls || /bin/true
echo "FAIL: remaining snapshots"
ctr --namespace docker snapshots ls || /bin/true
echo "FAIL: remaining images"
ctr --namespace docker image ls || /bin/true
fi
}
function runtests {
# shellcheck disable=SC2086
env -i \
GOPATH="$GOPATH" \
PATH="$PWD/build/:${PATH}" \
VERSION=${VERSION} \
"$(which go)" test -p 1 -parallel 1 -v ./e2eengine/... ${TESTFLAGS-}
}
cmd=${1-}
case "$cmd" in
setup)
setup
exit
;;
cleanup)
cleanup
exit
;;
fetch-images)
fetch_images
exit
;;
test)
runtests
;;
run|"")
testexit=0
runtests || testexit=$?
cleanup
exit $testexit
;;
shell)
$SHELL
;;
*)
echo "Unknown command: $cmd"
echo "Usage: "
echo " $0 [setup | cleanup | test | run]"
exit 1
;;
esac

18
scripts/test/engine/wrapper Executable file
View File

@ -0,0 +1,18 @@
#!/usr/bin/env bash
# Setup, run and teardown engine test suite in containers.
set -eu -o pipefail
./scripts/test/engine/run setup
testexit=0
test_cmd="test"
if [[ -n "${TEST_DEBUG-}" ]]; then
test_cmd="shell"
fi
./scripts/test/engine/run "$test_cmd" || testexit="$?"
export testexit
./scripts/test/engine/run cleanup
exit "$testexit"

View File

@ -5,6 +5,7 @@ set -eu -o pipefail
# reduces the runtime from 200s down to 23s
go test -i "$@"
echo "mode: atomic" > coverage.txt
for pkg in "$@"; do
./scripts/test/unit \
-cover \
@ -13,7 +14,7 @@ for pkg in "$@"; do
"${pkg}"
if test -f profile.out; then
cat profile.out >> coverage.txt
grep -v "^mode:" < profile.out >> coverage.txt || true
rm profile.out
fi
done

View File

@ -1,12 +1,16 @@
github.com/agl/ed25519 5312a61534124124185d41f09206b9fef1d88403
github.com/asaskevich/govalidator f9ffefc3facfbe0caee3fea233cbb6e8208f4541
github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109
github.com/beorn7/perks 3a771d992973f24aa725d07868b467d1ddfceafb
github.com/containerd/console 5d1b48d6114b8c9666f0c8b916f871af97b0a761
github.com/containerd/containerd a88b6319614de846458750ff882723479ca7b1a1
github.com/containerd/console cb7008ab3d8359b78c5f464cb7cf160107ad5925
github.com/containerd/containerd 0ffb948
github.com/containerd/continuity d8fb8589b0e8e85b8c8bbaa8840226d0dfeb7371
github.com/containerd/fifo 3d5202a
github.com/containerd/typeurl f694355
github.com/coreos/etcd v3.3.9
github.com/cpuguy83/go-md2man v1.0.8
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76 # v1.1.0
github.com/dgrijalva/jwt-go a2c85815a77d0f951e33ba4db5ae93629a1530af
github.com/docker/distribution 83389a148052d74ac602f5f1d62f86ff2f3c4aa5
github.com/docker/docker a7ff19d69a90dfe152abd146221c8b9b46a0903d
github.com/docker/docker-credential-helpers 5241b46610f2491efdf9d1c85f1ddf5b02f6d962
@ -17,9 +21,12 @@ github.com/docker/go-connections 7395e3f8aa162843a74ed6d48e79627d9792ac55 # v0.4
github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
github.com/docker/go-metrics d466d4f6fd960e01820085bd7e1a24426ee7ef18
github.com/docker/go-units 47565b4f722fb6ceae66b95f853feed578a4a51c # v0.3.3
github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a
github.com/docker/licensing 369e530
github.com/docker/swarmkit edd5641391926a50bc5f7040e20b7efc05003c26
github.com/flynn-archive/go-shlex 3f9db97f856818214da2e1057f8ad84803971cff
github.com/ghodss/yaml 0ca9ea5df5451ffdf184b4428c902747c2c11cd7 # v1.0.0
github.com/gogo/googleapis 08a7655d27152912db7aaf4f983275eaf8d128ef
github.com/gogo/protobuf v1.1.1
github.com/golang/glog 23def4e6c14b4da8ac2ed8007337bc5eb5007998
github.com/golang/protobuf v1.1.0
@ -34,11 +41,13 @@ github.com/gregjones/httpcache 9cad4c3443a7200dd6400aef47183728de563a38
github.com/grpc-ecosystem/grpc-gateway 1a03ca3bad1e1ebadaedd3abb76bc58d4ac8143b
github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746
github.com/hashicorp/golang-lru 0fb14efe8c47ae851c0034ed7a448854d3d34cf3
github.com/hashicorp/go-version 23480c0
github.com/imdario/mergo v0.3.6
github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 # v1.0
github.com/json-iterator/go ab8a2e0c74be9d3be70b3184d9acc634935ded82 # 1.1.4
github.com/mattn/go-shellwords v1.0.3
github.com/matttproud/golang_protobuf_extensions v1.0.1
github.com/Microsoft/hcsshim v0.6.11
github.com/Microsoft/go-winio v0.4.9
github.com/miekg/pkcs11 287d9350987cc9334667882061e202e96cdfb4d0
github.com/mitchellh/mapstructure f15292f7a699fcc1a38a80977f80a046874ba8ac
@ -50,6 +59,7 @@ github.com/Nvveen/Gotty a8b993ba6abdb0e0c12b0125c603323a71c7790c https://github.
github.com/opencontainers/go-digest v1.0.0-rc1
github.com/opencontainers/image-spec v1.0.1
github.com/opencontainers/runc ad0f5255060d36872be04de22f8731f38ef2d7b1
github.com/opencontainers/runtime-spec v1.0.1
github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7
github.com/peterbourgon/diskv 5f041e8faa004a95c88a202771f4cc3e991971e6 # v2.0.1
github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9
@ -58,10 +68,12 @@ github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
github.com/prometheus/common ebdfc6da46522d58825777cf1f90490a5b1ef1d8
github.com/prometheus/procfs abf152e5f3e97f2fafac028d2cc06c1feb87ffa5
github.com/russross/blackfriday 1d6b8e9301e720b08a8938b8c25c018285885438
github.com/satori/go.uuid d41af8bb6a7704f00bc3b7cba9355ae6a5a80048
github.com/shurcooL/sanitized_anchor_name 10ef21a441db47d8b13ebcc5fd2310f636973c77
github.com/sirupsen/logrus v1.0.6
github.com/spf13/cobra v0.0.3
github.com/spf13/pflag v1.0.1
github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852
github.com/theupdateframework/notary v0.6.1
github.com/tonistiigi/fsutil b19464cd1b6a00773b4f2eb7acf9c30426f9df42
github.com/tonistiigi/units 6950e57a87eaf136bbe44ef2ec8e75b9e3569de2
@ -85,4 +97,3 @@ k8s.io/client-go kubernetes-1.11.0
k8s.io/kube-openapi d8ea2fe547a448256204cfc68dfee7b26c720acb
k8s.io/kubernetes v1.11.0
vbom.ml/util 256737ac55c46798123f754ab7d2c784e2c71783

21
vendor/github.com/Microsoft/hcsshim/LICENSE generated vendored Normal file
View File

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2015 Microsoft
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

33
vendor/github.com/Microsoft/hcsshim/README.md generated vendored Normal file
View File

@ -0,0 +1,33 @@
# hcsshim
This package supports launching Windows Server containers from Go. It is
primarily used in the [Docker Engine](https://github.com/docker/docker) project,
but it can be freely used by other projects as well.
## Contributing
---------------
This project welcomes contributions and suggestions. Most contributions require you to agree to a
Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
the rights to use your contribution. For details, visit https://cla.microsoft.com.
When you submit a pull request, a CLA-bot will automatically determine whether you need to provide
a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions
provided by the bot. You will only need to do this once across all repos using our CLA.
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
## Reporting Security Issues
Security issues and bugs should be reported privately, via email, to the Microsoft Security
Response Center (MSRC) at [secure@microsoft.com](mailto:secure@microsoft.com). You should
receive a response within 24 hours. If for some reason you do not, please follow up via
email to ensure we received your original message. Further information, including the
[MSRC PGP](https://technet.microsoft.com/en-us/security/dn606155) key, can be found in
the [Security TechCenter](https://technet.microsoft.com/en-us/security/default).
-------------------------------------------
Copyright (c) 2018 Microsoft Corp. All rights reserved.

28
vendor/github.com/Microsoft/hcsshim/activatelayer.go generated vendored Normal file
View File

@ -0,0 +1,28 @@
package hcsshim
import "github.com/sirupsen/logrus"
// ActivateLayer will find the layer with the given id and mount its filesystem.
// For a read/write layer, the mounted filesystem will appear as a volume on the
// host, while a read-only layer is generally expected to be a no-op.
// An activated layer must later be deactivated via DeactivateLayer.
func ActivateLayer(info DriverInfo, id string) error {
title := "hcsshim::ActivateLayer "
logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id)
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return err
}
err = activateLayer(&infop, id)
if err != nil {
err = makeErrorf(err, title, "id=%s flavour=%d", id, info.Flavour)
logrus.Error(err)
return err
}
logrus.Debugf(title+" - succeeded id=%s flavour=%d", id, info.Flavour)
return nil
}

171
vendor/github.com/Microsoft/hcsshim/baselayer.go generated vendored Normal file
View File

@ -0,0 +1,171 @@
package hcsshim
import (
"errors"
"os"
"path/filepath"
"syscall"
"github.com/Microsoft/go-winio"
)
type baseLayerWriter struct {
root *os.File
f *os.File
bw *winio.BackupFileWriter
err error
hasUtilityVM bool
dirInfo []dirInfo
}
type dirInfo struct {
path string
fileInfo winio.FileBasicInfo
}
// reapplyDirectoryTimes reapplies directory modification, creation, etc. times
// after processing of the directory tree has completed. The times are expected
// to be ordered such that parent directories come before child directories.
func reapplyDirectoryTimes(root *os.File, dis []dirInfo) error {
for i := range dis {
di := &dis[len(dis)-i-1] // reverse order: process child directories first
f, err := openRelative(di.path, root, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, _FILE_OPEN, _FILE_DIRECTORY_FILE)
if err != nil {
return err
}
err = winio.SetFileBasicInfo(f, &di.fileInfo)
f.Close()
if err != nil {
return err
}
}
return nil
}
func (w *baseLayerWriter) closeCurrentFile() error {
if w.f != nil {
err := w.bw.Close()
err2 := w.f.Close()
w.f = nil
w.bw = nil
if err != nil {
return err
}
if err2 != nil {
return err2
}
}
return nil
}
func (w *baseLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) (err error) {
defer func() {
if err != nil {
w.err = err
}
}()
err = w.closeCurrentFile()
if err != nil {
return err
}
if filepath.ToSlash(name) == `UtilityVM/Files` {
w.hasUtilityVM = true
}
var f *os.File
defer func() {
if f != nil {
f.Close()
}
}()
extraFlags := uint32(0)
if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
extraFlags |= _FILE_DIRECTORY_FILE
if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT == 0 {
w.dirInfo = append(w.dirInfo, dirInfo{name, *fileInfo})
}
}
mode := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | winio.WRITE_DAC | winio.WRITE_OWNER | winio.ACCESS_SYSTEM_SECURITY)
f, err = openRelative(name, w.root, mode, syscall.FILE_SHARE_READ, _FILE_CREATE, extraFlags)
if err != nil {
return makeError(err, "Failed to openRelative", name)
}
err = winio.SetFileBasicInfo(f, fileInfo)
if err != nil {
return makeError(err, "Failed to SetFileBasicInfo", name)
}
w.f = f
w.bw = winio.NewBackupFileWriter(f, true)
f = nil
return nil
}
func (w *baseLayerWriter) AddLink(name string, target string) (err error) {
defer func() {
if err != nil {
w.err = err
}
}()
err = w.closeCurrentFile()
if err != nil {
return err
}
return linkRelative(target, w.root, name, w.root)
}
func (w *baseLayerWriter) Remove(name string) error {
return errors.New("base layer cannot have tombstones")
}
func (w *baseLayerWriter) Write(b []byte) (int, error) {
n, err := w.bw.Write(b)
if err != nil {
w.err = err
}
return n, err
}
func (w *baseLayerWriter) Close() error {
defer func() {
w.root.Close()
w.root = nil
}()
err := w.closeCurrentFile()
if err != nil {
return err
}
if w.err == nil {
// Restore the file times of all the directories, since they may have
// been modified by creating child directories.
err = reapplyDirectoryTimes(w.root, w.dirInfo)
if err != nil {
return err
}
err = ProcessBaseLayer(w.root.Name())
if err != nil {
return err
}
if w.hasUtilityVM {
err := ensureNotReparsePointRelative("UtilityVM", w.root)
if err != nil {
return err
}
err = ProcessUtilityVMImage(filepath.Join(w.root.Name(), "UtilityVM"))
if err != nil {
return err
}
}
}
return w.err
}

79
vendor/github.com/Microsoft/hcsshim/callback.go generated vendored Normal file
View File

@ -0,0 +1,79 @@
package hcsshim
import (
"sync"
"syscall"
)
var (
nextCallback uintptr
callbackMap = map[uintptr]*notifcationWatcherContext{}
callbackMapLock = sync.RWMutex{}
notificationWatcherCallback = syscall.NewCallback(notificationWatcher)
// Notifications for HCS_SYSTEM handles
hcsNotificationSystemExited hcsNotification = 0x00000001
hcsNotificationSystemCreateCompleted hcsNotification = 0x00000002
hcsNotificationSystemStartCompleted hcsNotification = 0x00000003
hcsNotificationSystemPauseCompleted hcsNotification = 0x00000004
hcsNotificationSystemResumeCompleted hcsNotification = 0x00000005
// Notifications for HCS_PROCESS handles
hcsNotificationProcessExited hcsNotification = 0x00010000
// Common notifications
hcsNotificationInvalid hcsNotification = 0x00000000
hcsNotificationServiceDisconnect hcsNotification = 0x01000000
)
type hcsNotification uint32
type notificationChannel chan error
type notifcationWatcherContext struct {
channels notificationChannels
handle hcsCallback
}
type notificationChannels map[hcsNotification]notificationChannel
func newChannels() notificationChannels {
channels := make(notificationChannels)
channels[hcsNotificationSystemExited] = make(notificationChannel, 1)
channels[hcsNotificationSystemCreateCompleted] = make(notificationChannel, 1)
channels[hcsNotificationSystemStartCompleted] = make(notificationChannel, 1)
channels[hcsNotificationSystemPauseCompleted] = make(notificationChannel, 1)
channels[hcsNotificationSystemResumeCompleted] = make(notificationChannel, 1)
channels[hcsNotificationProcessExited] = make(notificationChannel, 1)
channels[hcsNotificationServiceDisconnect] = make(notificationChannel, 1)
return channels
}
func closeChannels(channels notificationChannels) {
close(channels[hcsNotificationSystemExited])
close(channels[hcsNotificationSystemCreateCompleted])
close(channels[hcsNotificationSystemStartCompleted])
close(channels[hcsNotificationSystemPauseCompleted])
close(channels[hcsNotificationSystemResumeCompleted])
close(channels[hcsNotificationProcessExited])
close(channels[hcsNotificationServiceDisconnect])
}
func notificationWatcher(notificationType hcsNotification, callbackNumber uintptr, notificationStatus uintptr, notificationData *uint16) uintptr {
var result error
if int32(notificationStatus) < 0 {
result = syscall.Errno(win32FromHresult(notificationStatus))
}
callbackMapLock.RLock()
context := callbackMap[callbackNumber]
callbackMapLock.RUnlock()
if context == nil {
return 0
}
context.channels[notificationType] <- result
return 0
}

7
vendor/github.com/Microsoft/hcsshim/cgo.go generated vendored Normal file
View File

@ -0,0 +1,7 @@
package hcsshim
import "C"
// This import is needed to make the library compile with CGO enabled.
// HCSSHIM requires CGO because callbacks from HCS come back on a C thread,
// which is not supported without CGO. See https://github.com/golang/go/issues/10973

800
vendor/github.com/Microsoft/hcsshim/container.go generated vendored Normal file
View File

@ -0,0 +1,800 @@
package hcsshim
import (
"encoding/json"
"fmt"
"os"
"sync"
"syscall"
"time"
"github.com/sirupsen/logrus"
)
var (
defaultTimeout = time.Minute * 4
)
const (
pendingUpdatesQuery = `{ "PropertyTypes" : ["PendingUpdates"]}`
statisticsQuery = `{ "PropertyTypes" : ["Statistics"]}`
processListQuery = `{ "PropertyTypes" : ["ProcessList"]}`
mappedVirtualDiskQuery = `{ "PropertyTypes" : ["MappedVirtualDisk"]}`
)
type container struct {
handleLock sync.RWMutex
handle hcsSystem
id string
callbackNumber uintptr
}
// ContainerProperties holds the properties for a container and the processes running in that container
type ContainerProperties struct {
ID string `json:"Id"`
Name string
SystemType string
Owner string
SiloGUID string `json:"SiloGuid,omitempty"`
RuntimeID string `json:"RuntimeId,omitempty"`
IsRuntimeTemplate bool `json:",omitempty"`
RuntimeImagePath string `json:",omitempty"`
Stopped bool `json:",omitempty"`
ExitType string `json:",omitempty"`
AreUpdatesPending bool `json:",omitempty"`
ObRoot string `json:",omitempty"`
Statistics Statistics `json:",omitempty"`
ProcessList []ProcessListItem `json:",omitempty"`
MappedVirtualDiskControllers map[int]MappedVirtualDiskController `json:",omitempty"`
}
// MemoryStats holds the memory statistics for a container
type MemoryStats struct {
UsageCommitBytes uint64 `json:"MemoryUsageCommitBytes,omitempty"`
UsageCommitPeakBytes uint64 `json:"MemoryUsageCommitPeakBytes,omitempty"`
UsagePrivateWorkingSetBytes uint64 `json:"MemoryUsagePrivateWorkingSetBytes,omitempty"`
}
// ProcessorStats holds the processor statistics for a container
type ProcessorStats struct {
TotalRuntime100ns uint64 `json:",omitempty"`
RuntimeUser100ns uint64 `json:",omitempty"`
RuntimeKernel100ns uint64 `json:",omitempty"`
}
// StorageStats holds the storage statistics for a container
type StorageStats struct {
ReadCountNormalized uint64 `json:",omitempty"`
ReadSizeBytes uint64 `json:",omitempty"`
WriteCountNormalized uint64 `json:",omitempty"`
WriteSizeBytes uint64 `json:",omitempty"`
}
// NetworkStats holds the network statistics for a container
type NetworkStats struct {
BytesReceived uint64 `json:",omitempty"`
BytesSent uint64 `json:",omitempty"`
PacketsReceived uint64 `json:",omitempty"`
PacketsSent uint64 `json:",omitempty"`
DroppedPacketsIncoming uint64 `json:",omitempty"`
DroppedPacketsOutgoing uint64 `json:",omitempty"`
EndpointId string `json:",omitempty"`
InstanceId string `json:",omitempty"`
}
// Statistics is the structure returned by a statistics call on a container
type Statistics struct {
Timestamp time.Time `json:",omitempty"`
ContainerStartTime time.Time `json:",omitempty"`
Uptime100ns uint64 `json:",omitempty"`
Memory MemoryStats `json:",omitempty"`
Processor ProcessorStats `json:",omitempty"`
Storage StorageStats `json:",omitempty"`
Network []NetworkStats `json:",omitempty"`
}
// ProcessList is the structure of an item returned by a ProcessList call on a container
type ProcessListItem struct {
CreateTimestamp time.Time `json:",omitempty"`
ImageName string `json:",omitempty"`
KernelTime100ns uint64 `json:",omitempty"`
MemoryCommitBytes uint64 `json:",omitempty"`
MemoryWorkingSetPrivateBytes uint64 `json:",omitempty"`
MemoryWorkingSetSharedBytes uint64 `json:",omitempty"`
ProcessId uint32 `json:",omitempty"`
UserTime100ns uint64 `json:",omitempty"`
}
// MappedVirtualDiskController is the structure of an item returned by a MappedVirtualDiskList call on a container
type MappedVirtualDiskController struct {
MappedVirtualDisks map[int]MappedVirtualDisk `json:",omitempty"`
}
// RequestType is the type of request supported by ModifySystem
type RequestType string
// ResourceType is the type of resource supported by ModifySystem
type ResourceType string
// RequestType const
const (
Add RequestType = "Add"
Remove RequestType = "Remove"
Network ResourceType = "Network"
)
// ResourceModificationRequestResponse is the structure used to send request to the container to modify the system
// Supported resource types are Network and Request Types are Add/Remove
type ResourceModificationRequestResponse struct {
Resource ResourceType `json:"ResourceType"`
Data interface{} `json:"Settings"`
Request RequestType `json:"RequestType,omitempty"`
}
// createContainerAdditionalJSON is read from the environment at initialisation
// time. It allows an environment variable to define additional JSON which
// is merged in the CreateContainer call to HCS.
var createContainerAdditionalJSON string
func init() {
createContainerAdditionalJSON = os.Getenv("HCSSHIM_CREATECONTAINER_ADDITIONALJSON")
}
// CreateContainer creates a new container with the given configuration but does not start it.
func CreateContainer(id string, c *ContainerConfig) (Container, error) {
return createContainerWithJSON(id, c, "")
}
// CreateContainerWithJSON creates a new container with the given configuration but does not start it.
// It is identical to CreateContainer except that optional additional JSON can be merged before passing to HCS.
func CreateContainerWithJSON(id string, c *ContainerConfig, additionalJSON string) (Container, error) {
return createContainerWithJSON(id, c, additionalJSON)
}
func createContainerWithJSON(id string, c *ContainerConfig, additionalJSON string) (Container, error) {
operation := "CreateContainer"
title := "HCSShim::" + operation
container := &container{
id: id,
}
configurationb, err := json.Marshal(c)
if err != nil {
return nil, err
}
configuration := string(configurationb)
logrus.Debugf(title+" id=%s config=%s", id, configuration)
// Merge any additional JSON. Priority is given to what is passed in explicitly,
// falling back to what's set in the environment.
if additionalJSON == "" && createContainerAdditionalJSON != "" {
additionalJSON = createContainerAdditionalJSON
}
if additionalJSON != "" {
configurationMap := map[string]interface{}{}
if err := json.Unmarshal([]byte(configuration), &configurationMap); err != nil {
return nil, fmt.Errorf("failed to unmarshal %s: %s", configuration, err)
}
additionalMap := map[string]interface{}{}
if err := json.Unmarshal([]byte(additionalJSON), &additionalMap); err != nil {
return nil, fmt.Errorf("failed to unmarshal %s: %s", additionalJSON, err)
}
mergedMap := mergeMaps(additionalMap, configurationMap)
mergedJSON, err := json.Marshal(mergedMap)
if err != nil {
return nil, fmt.Errorf("failed to marshal merged configuration map %+v: %s", mergedMap, err)
}
configuration = string(mergedJSON)
logrus.Debugf(title+" id=%s merged config=%s", id, configuration)
}
var (
resultp *uint16
identity syscall.Handle
)
createError := hcsCreateComputeSystem(id, configuration, identity, &container.handle, &resultp)
if createError == nil || IsPending(createError) {
if err := container.registerCallback(); err != nil {
// Terminate the container if it still exists. We're okay to ignore a failure here.
container.Terminate()
return nil, makeContainerError(container, operation, "", err)
}
}
err = processAsyncHcsResult(createError, resultp, container.callbackNumber, hcsNotificationSystemCreateCompleted, &defaultTimeout)
if err != nil {
if err == ErrTimeout {
// Terminate the container if it still exists. We're okay to ignore a failure here.
container.Terminate()
}
return nil, makeContainerError(container, operation, configuration, err)
}
logrus.Debugf(title+" succeeded id=%s handle=%d", id, container.handle)
return container, nil
}
// mergeMaps recursively merges map `ToMap` into map `fromMap` and returns the result.
// Where a key exists in both maps, the value already present in fromMap wins; keys
// present only in ToMap are added to fromMap.
// From http://stackoverflow.com/questions/40491438/merging-two-json-strings-in-golang
func mergeMaps(fromMap, ToMap interface{}) interface{} {
switch fromMap := fromMap.(type) {
case map[string]interface{}:
ToMap, ok := ToMap.(map[string]interface{})
if !ok {
return fromMap
}
for keyToMap, valueToMap := range ToMap {
if valueFromMap, ok := fromMap[keyToMap]; ok {
fromMap[keyToMap] = mergeMaps(valueFromMap, valueToMap)
} else {
fromMap[keyToMap] = valueToMap
}
}
case nil:
// merge(nil, map[string]interface{...}) -> map[string]interface{...}
ToMap, ok := ToMap.(map[string]interface{})
if ok {
return ToMap
}
}
return fromMap
}
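The merge semantics above are easy to misread, so here is a minimal, self-contained sketch (illustrative only, not part of the vendored code) that mirrors the same logic on two small JSON documents. Keys present in both documents keep the value from the first argument, which is why explicitly supplied additional JSON overrides the base configuration:

package main

import (
	"encoding/json"
	"fmt"
)

// merge is a simplified copy of the mergeMaps logic above: values already in
// `from` win; values present only in `to` are copied into `from`.
func merge(from, to interface{}) interface{} {
	switch f := from.(type) {
	case map[string]interface{}:
		t, ok := to.(map[string]interface{})
		if !ok {
			return f
		}
		for k, v := range t {
			if fv, ok := f[k]; ok {
				f[k] = merge(fv, v)
			} else {
				f[k] = v
			}
		}
	case nil:
		if t, ok := to.(map[string]interface{}); ok {
			return t
		}
	}
	return from
}

func main() {
	var extra, base map[string]interface{}
	json.Unmarshal([]byte(`{"Owner":"override","Extra":{"X":1}}`), &extra)
	json.Unmarshal([]byte(`{"Owner":"base","Name":"demo","Extra":{"Y":2}}`), &base)
	out, _ := json.Marshal(merge(extra, base))
	fmt.Println(string(out)) // {"Extra":{"X":1,"Y":2},"Name":"demo","Owner":"override"}
}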
// OpenContainer opens an existing container by ID.
func OpenContainer(id string) (Container, error) {
operation := "OpenContainer"
title := "HCSShim::" + operation
logrus.Debugf(title+" id=%s", id)
container := &container{
id: id,
}
var (
handle hcsSystem
resultp *uint16
)
err := hcsOpenComputeSystem(id, &handle, &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return nil, makeContainerError(container, operation, "", err)
}
container.handle = handle
if err := container.registerCallback(); err != nil {
return nil, makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s handle=%d", id, handle)
return container, nil
}
// GetContainers gets a list of the containers on the system that match the query
func GetContainers(q ComputeSystemQuery) ([]ContainerProperties, error) {
operation := "GetContainers"
title := "HCSShim::" + operation
queryb, err := json.Marshal(q)
if err != nil {
return nil, err
}
query := string(queryb)
logrus.Debugf(title+" query=%s", query)
var (
resultp *uint16
computeSystemsp *uint16
)
err = hcsEnumerateComputeSystems(query, &computeSystemsp, &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return nil, err
}
if computeSystemsp == nil {
return nil, ErrUnexpectedValue
}
computeSystemsRaw := convertAndFreeCoTaskMemBytes(computeSystemsp)
computeSystems := []ContainerProperties{}
if err := json.Unmarshal(computeSystemsRaw, &computeSystems); err != nil {
return nil, err
}
logrus.Debugf(title + " succeeded")
return computeSystems, nil
}
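A minimal usage sketch (not part of this diff), assuming the zero-value ComputeSystemQuery enumerates every compute system on the host, printing a few of the ContainerProperties fields defined above:

package example

import (
	"fmt"

	"github.com/Microsoft/hcsshim"
)

func listContainers() error {
	// Empty query: enumerate everything HCS knows about.
	containers, err := hcsshim.GetContainers(hcsshim.ComputeSystemQuery{})
	if err != nil {
		return err
	}
	for _, c := range containers {
		// ID, SystemType and Stopped come straight from the JSON document HCS returns.
		fmt.Printf("%s\t%s\tstopped=%v\n", c.ID, c.SystemType, c.Stopped)
	}
	return nil
}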
// Start synchronously starts the container.
func (container *container) Start() error {
container.handleLock.RLock()
defer container.handleLock.RUnlock()
operation := "Start"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
if container.handle == 0 {
return makeContainerError(container, operation, "", ErrAlreadyClosed)
}
var resultp *uint16
err := hcsStartComputeSystem(container.handle, "", &resultp)
err = processAsyncHcsResult(err, resultp, container.callbackNumber, hcsNotificationSystemStartCompleted, &defaultTimeout)
if err != nil {
return makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return nil
}
// Shutdown requests a container shutdown. If IsPending() on the returned error is
// true, the container may not actually be shut down until Wait() succeeds.
func (container *container) Shutdown() error {
container.handleLock.RLock()
defer container.handleLock.RUnlock()
operation := "Shutdown"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
if container.handle == 0 {
return makeContainerError(container, operation, "", ErrAlreadyClosed)
}
var resultp *uint16
err := hcsShutdownComputeSystem(container.handle, "", &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return nil
}
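The contract described in that comment is worth spelling out: a pending error means the request was accepted, and Wait is how completion is observed. A short sketch, for illustration only:

package example

import "github.com/Microsoft/hcsshim"

// stop requests a shutdown and blocks until the container has actually exited.
func stop(c hcsshim.Container) error {
	if err := c.Shutdown(); err != nil && !hcsshim.IsPending(err) {
		return err
	}
	// Either the shutdown completed synchronously or it is pending; Wait
	// returns once the system-exited notification arrives.
	return c.Wait()
}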
// Terminate requests a container terminate. If IsPending() on the returned error is
// true, the container may not actually be terminated until Wait() succeeds.
func (container *container) Terminate() error {
container.handleLock.RLock()
defer container.handleLock.RUnlock()
operation := "Terminate"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
if container.handle == 0 {
return makeContainerError(container, operation, "", ErrAlreadyClosed)
}
var resultp *uint16
err := hcsTerminateComputeSystem(container.handle, "", &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return nil
}
// Wait synchronously waits for the container to shutdown or terminate.
func (container *container) Wait() error {
operation := "Wait"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
err := waitForNotification(container.callbackNumber, hcsNotificationSystemExited, nil)
if err != nil {
return makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return nil
}
// WaitTimeout synchronously waits for the container to terminate or the duration to elapse.
// If the timeout expires, IsTimeout(err) == true
func (container *container) WaitTimeout(timeout time.Duration) error {
operation := "WaitTimeout"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
err := waitForNotification(container.callbackNumber, hcsNotificationSystemExited, &timeout)
if err != nil {
return makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return nil
}
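A sketch of the timeout contract; escalating to Terminate after the grace period is a common pattern, though nothing in this code mandates it:

package example

import (
	"time"

	"github.com/Microsoft/hcsshim"
)

// waitOrKill gives the container a grace period before forcing termination.
func waitOrKill(c hcsshim.Container, grace time.Duration) error {
	err := c.WaitTimeout(grace)
	if hcsshim.IsTimeout(err) {
		// Still running after the grace period; Terminate may itself
		// complete asynchronously (see Terminate above).
		if terr := c.Terminate(); terr != nil && !hcsshim.IsPending(terr) {
			return terr
		}
		return c.Wait()
	}
	return err
}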
func (container *container) properties(query string) (*ContainerProperties, error) {
var (
resultp *uint16
propertiesp *uint16
)
err := hcsGetComputeSystemProperties(container.handle, query, &propertiesp, &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return nil, err
}
if propertiesp == nil {
return nil, ErrUnexpectedValue
}
propertiesRaw := convertAndFreeCoTaskMemBytes(propertiesp)
properties := &ContainerProperties{}
if err := json.Unmarshal(propertiesRaw, properties); err != nil {
return nil, err
}
return properties, nil
}
// HasPendingUpdates returns true if the container has updates pending to install
func (container *container) HasPendingUpdates() (bool, error) {
container.handleLock.RLock()
defer container.handleLock.RUnlock()
operation := "HasPendingUpdates"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
if container.handle == 0 {
return false, makeContainerError(container, operation, "", ErrAlreadyClosed)
}
properties, err := container.properties(pendingUpdatesQuery)
if err != nil {
return false, makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return properties.AreUpdatesPending, nil
}
// Statistics returns statistics for the container
func (container *container) Statistics() (Statistics, error) {
container.handleLock.RLock()
defer container.handleLock.RUnlock()
operation := "Statistics"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
if container.handle == 0 {
return Statistics{}, makeContainerError(container, operation, "", ErrAlreadyClosed)
}
properties, err := container.properties(statisticsQuery)
if err != nil {
return Statistics{}, makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return properties.Statistics, nil
}
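A sketch of consuming the statistics document (for illustration); note that Uptime100ns counts 100-nanosecond intervals:

package example

import (
	"fmt"
	"time"

	"github.com/Microsoft/hcsshim"
)

func printStats(c hcsshim.Container) error {
	stats, err := c.Statistics()
	if err != nil {
		return err
	}
	// Convert 100ns units into a time.Duration (nanoseconds).
	uptime := time.Duration(stats.Uptime100ns) * 100
	fmt.Printf("uptime=%s commit=%dB private-ws=%dB\n",
		uptime, stats.Memory.UsageCommitBytes, stats.Memory.UsagePrivateWorkingSetBytes)
	return nil
}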
// ProcessList returns an array of ProcessListItems for the container
func (container *container) ProcessList() ([]ProcessListItem, error) {
container.handleLock.RLock()
defer container.handleLock.RUnlock()
operation := "ProcessList"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
if container.handle == 0 {
return nil, makeContainerError(container, operation, "", ErrAlreadyClosed)
}
properties, err := container.properties(processListQuery)
if err != nil {
return nil, makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return properties.ProcessList, nil
}
// MappedVirtualDisks returns a map of the controllers and the disks mapped
// to a container.
//
// Example of JSON returned by the query.
//{
// "Id":"1126e8d7d279c707a666972a15976371d365eaf622c02cea2c442b84f6f550a3_svm",
// "SystemType":"Container",
// "RuntimeOsType":"Linux",
// "RuntimeId":"00000000-0000-0000-0000-000000000000",
// "State":"Running",
// "MappedVirtualDiskControllers":{
// "0":{
// "MappedVirtualDisks":{
// "2":{
// "HostPath":"C:\\lcow\\lcow\\scratch\\1126e8d7d279c707a666972a15976371d365eaf622c02cea2c442b84f6f550a3.vhdx",
// "ContainerPath":"/mnt/gcs/LinuxServiceVM/scratch",
// "Lun":2,
// "CreateInUtilityVM":true
// },
// "3":{
// "HostPath":"C:\\lcow\\lcow\\1126e8d7d279c707a666972a15976371d365eaf622c02cea2c442b84f6f550a3\\sandbox.vhdx",
// "Lun":3,
// "CreateInUtilityVM":true,
// "AttachOnly":true
// }
// }
// }
// }
//}
func (container *container) MappedVirtualDisks() (map[int]MappedVirtualDiskController, error) {
container.handleLock.RLock()
defer container.handleLock.RUnlock()
operation := "MappedVirtualDiskList"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
if container.handle == 0 {
return nil, makeContainerError(container, operation, "", ErrAlreadyClosed)
}
properties, err := container.properties(mappedVirtualDiskQuery)
if err != nil {
return nil, makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return properties.MappedVirtualDiskControllers, nil
}
// Pause pauses the execution of the container. This feature is not enabled in TP5.
func (container *container) Pause() error {
container.handleLock.RLock()
defer container.handleLock.RUnlock()
operation := "Pause"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
if container.handle == 0 {
return makeContainerError(container, operation, "", ErrAlreadyClosed)
}
var resultp *uint16
err := hcsPauseComputeSystem(container.handle, "", &resultp)
err = processAsyncHcsResult(err, resultp, container.callbackNumber, hcsNotificationSystemPauseCompleted, &defaultTimeout)
if err != nil {
return makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return nil
}
// Resume resumes the execution of the container. This feature is not enabled in TP5.
func (container *container) Resume() error {
container.handleLock.RLock()
defer container.handleLock.RUnlock()
operation := "Resume"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
if container.handle == 0 {
return makeContainerError(container, operation, "", ErrAlreadyClosed)
}
var resultp *uint16
err := hcsResumeComputeSystem(container.handle, "", &resultp)
err = processAsyncHcsResult(err, resultp, container.callbackNumber, hcsNotificationSystemResumeCompleted, &defaultTimeout)
if err != nil {
return makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return nil
}
// CreateProcess launches a new process within the container.
func (container *container) CreateProcess(c *ProcessConfig) (Process, error) {
container.handleLock.RLock()
defer container.handleLock.RUnlock()
operation := "CreateProcess"
title := "HCSShim::Container::" + operation
var (
processInfo hcsProcessInformation
processHandle hcsProcess
resultp *uint16
)
if container.handle == 0 {
return nil, makeContainerError(container, operation, "", ErrAlreadyClosed)
}
// If we are not emulating a console, ignore any console size passed to us
if !c.EmulateConsole {
c.ConsoleSize[0] = 0
c.ConsoleSize[1] = 0
}
configurationb, err := json.Marshal(c)
if err != nil {
return nil, makeContainerError(container, operation, "", err)
}
configuration := string(configurationb)
logrus.Debugf(title+" id=%s config=%s", container.id, configuration)
err = hcsCreateProcess(container.handle, configuration, &processInfo, &processHandle, &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return nil, makeContainerError(container, operation, configuration, err)
}
process := &process{
handle: processHandle,
processID: int(processInfo.ProcessId),
container: container,
cachedPipes: &cachedPipes{
stdIn: processInfo.StdInput,
stdOut: processInfo.StdOutput,
stdErr: processInfo.StdError,
},
}
if err := process.registerCallback(); err != nil {
return nil, makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s processid=%d", container.id, process.processID)
return process, nil
}
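A hedged sketch of launching a process. Only EmulateConsole and ConsoleSize appear in this hunk; the CommandLine and CreateStdOutPipe fields, and the Wait/ExitCode/Close methods on Process, are assumptions about the rest of the package:

package example

import "github.com/Microsoft/hcsshim"

// run launches a process in the container, blocks until it exits, and
// returns its exit code. Field and method names beyond this hunk are assumed.
func run(c hcsshim.Container) (int, error) {
	p, err := c.CreateProcess(&hcsshim.ProcessConfig{
		CommandLine:      `cmd /c echo hello`,
		CreateStdOutPipe: true,
	})
	if err != nil {
		return 0, err
	}
	defer p.Close()
	if err := p.Wait(); err != nil {
		return 0, err
	}
	return p.ExitCode()
}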
// OpenProcess gets an interface to an existing process within the container.
func (container *container) OpenProcess(pid int) (Process, error) {
container.handleLock.RLock()
defer container.handleLock.RUnlock()
operation := "OpenProcess"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s, processid=%d", container.id, pid)
var (
processHandle hcsProcess
resultp *uint16
)
if container.handle == 0 {
return nil, makeContainerError(container, operation, "", ErrAlreadyClosed)
}
err := hcsOpenProcess(container.handle, uint32(pid), &processHandle, &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return nil, makeContainerError(container, operation, "", err)
}
process := &process{
handle: processHandle,
processID: pid,
container: container,
}
if err := process.registerCallback(); err != nil {
return nil, makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s processid=%s", container.id, process.processID)
return process, nil
}
// Close cleans up any state associated with the container but does not terminate or wait for it.
func (container *container) Close() error {
container.handleLock.Lock()
defer container.handleLock.Unlock()
operation := "Close"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
// Don't double free this
if container.handle == 0 {
return nil
}
if err := container.unregisterCallback(); err != nil {
return makeContainerError(container, operation, "", err)
}
if err := hcsCloseComputeSystem(container.handle); err != nil {
return makeContainerError(container, operation, "", err)
}
container.handle = 0
logrus.Debugf(title+" succeeded id=%s", container.id)
return nil
}
func (container *container) registerCallback() error {
context := &notifcationWatcherContext{
channels: newChannels(),
}
callbackMapLock.Lock()
callbackNumber := nextCallback
nextCallback++
callbackMap[callbackNumber] = context
callbackMapLock.Unlock()
var callbackHandle hcsCallback
err := hcsRegisterComputeSystemCallback(container.handle, notificationWatcherCallback, callbackNumber, &callbackHandle)
if err != nil {
return err
}
context.handle = callbackHandle
container.callbackNumber = callbackNumber
return nil
}
func (container *container) unregisterCallback() error {
callbackNumber := container.callbackNumber
callbackMapLock.RLock()
context := callbackMap[callbackNumber]
callbackMapLock.RUnlock()
if context == nil {
return nil
}
handle := context.handle
if handle == 0 {
return nil
}
// hcsUnregisterComputeSystemCallback has its own synchronization
// to wait for all callbacks to complete. We must NOT hold the callbackMapLock.
err := hcsUnregisterComputeSystemCallback(handle)
if err != nil {
return err
}
closeChannels(context.channels)
callbackMapLock.Lock()
callbackMap[callbackNumber] = nil
callbackMapLock.Unlock()
handle = 0
return nil
}
// Modify sends a request to HCS to modify the system
func (container *container) Modify(config *ResourceModificationRequestResponse) error {
container.handleLock.RLock()
defer container.handleLock.RUnlock()
operation := "Modify"
title := "HCSShim::Container::" + operation
if container.handle == 0 {
return makeContainerError(container, operation, "", ErrAlreadyClosed)
}
requestJSON, err := json.Marshal(config)
if err != nil {
return err
}
requestString := string(requestJSON)
logrus.Debugf(title+" id=%s request=%s", container.id, requestString)
var resultp *uint16
err = hcsModifyComputeSystem(container.handle, requestString, &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return nil
}

27
vendor/github.com/Microsoft/hcsshim/createlayer.go generated vendored Normal file
View File

@ -0,0 +1,27 @@
package hcsshim
import "github.com/sirupsen/logrus"
// CreateLayer creates a new, empty, read-only layer on the filesystem based on
// the parent layer provided.
func CreateLayer(info DriverInfo, id, parent string) error {
title := "hcsshim::CreateLayer "
logrus.Debugf(title+"Flavour %d ID %s parent %s", info.Flavour, id, parent)
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return err
}
err = createLayer(&infop, id, parent)
if err != nil {
err = makeErrorf(err, title, "id=%s parent=%s flavour=%d", id, parent, info.Flavour)
logrus.Error(err)
return err
}
logrus.Debugf(title+" - succeeded id=%s parent=%s flavour=%d", id, parent, info.Flavour)
return nil
}

View File

@ -0,0 +1,35 @@
package hcsshim
import "github.com/sirupsen/logrus"
// CreateSandboxLayer creates and populates a new read-write layer for use by a container.
// This requires both the id of the direct parent layer, as well as the full list
// of paths to all parent layers up to the base (and including the direct parent
// whose id was provided).
func CreateSandboxLayer(info DriverInfo, layerId, parentId string, parentLayerPaths []string) error {
title := "hcsshim::CreateSandboxLayer "
logrus.Debugf(title+"layerId %s parentId %s", layerId, parentId)
// Generate layer descriptors
layers, err := layerPathsToDescriptors(parentLayerPaths)
if err != nil {
return err
}
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return err
}
err = createSandboxLayer(&infop, layerId, parentId, layers)
if err != nil {
err = makeErrorf(err, title, "layerId=%s parentId=%s", layerId, parentId)
logrus.Error(err)
return err
}
logrus.Debugf(title+"- succeeded layerId=%s parentId=%s", layerId, parentId)
return nil
}

26
vendor/github.com/Microsoft/hcsshim/deactivatelayer.go generated vendored Normal file
View File

@ -0,0 +1,26 @@
package hcsshim
import "github.com/sirupsen/logrus"
// DeactivateLayer will dismount a layer that was mounted via ActivateLayer.
func DeactivateLayer(info DriverInfo, id string) error {
title := "hcsshim::DeactivateLayer "
logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id)
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return err
}
err = deactivateLayer(&infop, id)
if err != nil {
err = makeErrorf(err, title, "id=%s flavour=%d", id, info.Flavour)
logrus.Error(err)
return err
}
logrus.Debugf(title+"succeeded flavour=%d id=%s", info.Flavour, id)
return nil
}

27
vendor/github.com/Microsoft/hcsshim/destroylayer.go generated vendored Normal file
View File

@ -0,0 +1,27 @@
package hcsshim
import "github.com/sirupsen/logrus"
// DestroyLayer will remove the on-disk files representing the layer with the given
// id, including that layer's containing folder, if any.
func DestroyLayer(info DriverInfo, id string) error {
title := "hcsshim::DestroyLayer "
logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id)
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return err
}
err = destroyLayer(&infop, id)
if err != nil {
err = makeErrorf(err, title, "id=%s flavour=%d", id, info.Flavour)
logrus.Error(err)
return err
}
logrus.Debugf(title+"succeeded flavour=%d id=%s", info.Flavour, id)
return nil
}

261
vendor/github.com/Microsoft/hcsshim/errors.go generated vendored Normal file
View File

@ -0,0 +1,261 @@
package hcsshim
import (
"errors"
"fmt"
"syscall"
)
var (
// ErrComputeSystemDoesNotExist is an error encountered when the container being operated on no longer exists
ErrComputeSystemDoesNotExist = syscall.Errno(0xc037010e)
// ErrElementNotFound is an error encountered when the object being referenced does not exist
ErrElementNotFound = syscall.Errno(0x490)
// ErrNotSupported is an error encountered when hcs doesn't support the request
ErrNotSupported = syscall.Errno(0x32)
// ErrInvalidData is an error encountered when the request being sent to hcs is invalid/unsupported
// decimal -2147024883 / hex 0x8007000d
ErrInvalidData = syscall.Errno(0xd)
// ErrHandleClose is an error encountered when the handle generating the notification being waited on has been closed
ErrHandleClose = errors.New("hcsshim: the handle generating this notification has been closed")
// ErrAlreadyClosed is an error encountered when using a handle that has been closed by the Close method
ErrAlreadyClosed = errors.New("hcsshim: the handle has already been closed")
// ErrInvalidNotificationType is an error encountered when an invalid notification type is used
ErrInvalidNotificationType = errors.New("hcsshim: invalid notification type")
// ErrInvalidProcessState is an error encountered when the process is not in a valid state for the requested operation
ErrInvalidProcessState = errors.New("the process is in an invalid state for the attempted operation")
// ErrTimeout is an error encountered when waiting on a notification times out
ErrTimeout = errors.New("hcsshim: timeout waiting for notification")
// ErrUnexpectedContainerExit is the error encountered when a container exits while waiting for
// a different expected notification
ErrUnexpectedContainerExit = errors.New("unexpected container exit")
// ErrUnexpectedProcessAbort is the error encountered when communication with the compute service
// is lost while waiting for a notification
ErrUnexpectedProcessAbort = errors.New("lost communication with compute service")
// ErrUnexpectedValue is an error encountered when hcs returns an invalid value
ErrUnexpectedValue = errors.New("unexpected value returned from hcs")
// ErrVmcomputeAlreadyStopped is an error encountered when a shutdown or terminate request is made on a stopped container
ErrVmcomputeAlreadyStopped = syscall.Errno(0xc0370110)
// ErrVmcomputeOperationPending is an error encountered when the operation is being completed asynchronously
ErrVmcomputeOperationPending = syscall.Errno(0xC0370103)
// ErrVmcomputeOperationInvalidState is an error encountered when the compute system is not in a valid state for the requested operation
ErrVmcomputeOperationInvalidState = syscall.Errno(0xc0370105)
// ErrProcNotFound is an error encountered when the process cannot be found
ErrProcNotFound = syscall.Errno(0x7f)
// ErrVmcomputeOperationAccessIsDenied is an error which can be encountered when enumerating compute systems in RS1/RS2
// builds when the underlying silo might be in the process of terminating. HCS was fixed in RS3.
ErrVmcomputeOperationAccessIsDenied = syscall.Errno(0x5)
// ErrVmcomputeInvalidJSON is an error encountered when the compute system does not support/understand the messages sent by management
ErrVmcomputeInvalidJSON = syscall.Errno(0xc037010d)
// ErrVmcomputeUnknownMessage is an error encountered when the guest compute system doesn't support the message
ErrVmcomputeUnknownMessage = syscall.Errno(0xc037010b)
// ErrPlatformNotSupported is an error encountered when the requested operation is not supported on the platform
ErrPlatformNotSupported = errors.New("unsupported platform request")
)
type EndpointNotFoundError struct {
EndpointName string
}
func (e EndpointNotFoundError) Error() string {
return fmt.Sprintf("Endpoint %s not found", e.EndpointName)
}
type NetworkNotFoundError struct {
NetworkName string
}
func (e NetworkNotFoundError) Error() string {
return fmt.Sprintf("Network %s not found", e.NetworkName)
}
// ProcessError is an error encountered in HCS during an operation on a Process object
type ProcessError struct {
Process *process
Operation string
ExtraInfo string
Err error
}
// ContainerError is an error encountered in HCS during an operation on a Container object
type ContainerError struct {
Container *container
Operation string
ExtraInfo string
Err error
}
func (e *ContainerError) Error() string {
if e == nil {
return "<nil>"
}
if e.Container == nil {
return "unexpected nil container for error: " + e.Err.Error()
}
s := "container " + e.Container.id
if e.Operation != "" {
s += " encountered an error during " + e.Operation
}
switch e.Err.(type) {
case nil:
break
case syscall.Errno:
s += fmt.Sprintf(": failure in a Windows system call: %s (0x%x)", e.Err, win32FromError(e.Err))
default:
s += fmt.Sprintf(": %s", e.Err.Error())
}
if e.ExtraInfo != "" {
s += " extra info: " + e.ExtraInfo
}
return s
}
func makeContainerError(container *container, operation string, extraInfo string, err error) error {
// Don't double wrap errors
if _, ok := err.(*ContainerError); ok {
return err
}
containerError := &ContainerError{Container: container, Operation: operation, ExtraInfo: extraInfo, Err: err}
return containerError
}
func (e *ProcessError) Error() string {
if e == nil {
return "<nil>"
}
if e.Process == nil {
return "Unexpected nil process for error: " + e.Err.Error()
}
s := fmt.Sprintf("process %d", e.Process.processID)
if e.Process.container != nil {
s += " in container " + e.Process.container.id
}
if e.Operation != "" {
s += " encountered an error during " + e.Operation
}
switch e.Err.(type) {
case nil:
break
case syscall.Errno:
s += fmt.Sprintf(": failure in a Windows system call: %s (0x%x)", e.Err, win32FromError(e.Err))
default:
s += fmt.Sprintf(": %s", e.Err.Error())
}
return s
}
func makeProcessError(process *process, operation string, extraInfo string, err error) error {
// Don't double wrap errors
if _, ok := err.(*ProcessError); ok {
return err
}
processError := &ProcessError{Process: process, Operation: operation, ExtraInfo: extraInfo, Err: err}
return processError
}
// IsNotExist checks if an error is caused by the Container or Process not existing.
// Note: Currently, ErrElementNotFound can mean that a Process has either
// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist
// will currently return true when the error is ErrElementNotFound or ErrProcNotFound.
func IsNotExist(err error) bool {
err = getInnerError(err)
if _, ok := err.(EndpointNotFoundError); ok {
return true
}
if _, ok := err.(NetworkNotFoundError); ok {
return true
}
return err == ErrComputeSystemDoesNotExist ||
err == ErrElementNotFound ||
err == ErrProcNotFound
}
// IsAlreadyClosed checks if an error is caused by the Container or Process having been
// already closed by a call to the Close() method.
func IsAlreadyClosed(err error) bool {
err = getInnerError(err)
return err == ErrAlreadyClosed
}
// IsPending returns a boolean indicating whether the error is that
// the requested operation is being completed in the background.
func IsPending(err error) bool {
err = getInnerError(err)
return err == ErrVmcomputeOperationPending
}
// IsTimeout returns a boolean indicating whether the error is caused by
// a timeout waiting for the operation to complete.
func IsTimeout(err error) bool {
err = getInnerError(err)
return err == ErrTimeout
}
// IsAlreadyStopped returns a boolean indicating whether the error is caused by
// a Container or Process being already stopped.
// Note: Currently, ErrElementNotFound can mean that a Process has either
// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist
// will currently return true when the error is ErrElementNotFound or ErrProcNotFound.
func IsAlreadyStopped(err error) bool {
err = getInnerError(err)
return err == ErrVmcomputeAlreadyStopped ||
err == ErrElementNotFound ||
err == ErrProcNotFound
}
// IsNotSupported returns a boolean indicating whether the error is caused by
// unsupported platform requests
// Note: Currently an unsupported platform request can surface as any of
// ErrVmcomputeInvalidJSON, ErrInvalidData, ErrNotSupported or
// ErrVmcomputeUnknownMessage returned from the platform
func IsNotSupported(err error) bool {
err = getInnerError(err)
// If the platform doesn't recognize or support the request, one of the errors below is seen
return err == ErrVmcomputeInvalidJSON ||
err == ErrInvalidData ||
err == ErrNotSupported ||
err == ErrVmcomputeUnknownMessage
}
func getInnerError(err error) error {
switch pe := err.(type) {
case nil:
return nil
case *ContainerError:
err = pe.Err
case *ProcessError:
err = pe.Err
}
return err
}
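These classifiers let callers degrade gracefully rather than string-matching error text. A sketch around Pause, which some platforms reject (illustrative only):

package example

import "github.com/Microsoft/hcsshim"

// tryPause pauses the container where the platform supports it and reports
// whether the pause actually happened.
func tryPause(c hcsshim.Container) (bool, error) {
	err := c.Pause()
	switch {
	case err == nil:
		return true, nil
	case hcsshim.IsNotSupported(err):
		// The platform rejected the request; treat pause as a no-op.
		return false, nil
	case hcsshim.IsAlreadyStopped(err) || hcsshim.IsNotExist(err):
		// Nothing left to pause.
		return false, nil
	default:
		return false, err
	}
}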

View File

@ -0,0 +1,26 @@
package hcsshim
import "github.com/sirupsen/logrus"
// ExpandSandboxSize expands the size of a layer to at least size bytes.
func ExpandSandboxSize(info DriverInfo, layerId string, size uint64) error {
title := "hcsshim::ExpandSandboxSize "
logrus.Debugf(title+"layerId=%s size=%d", layerId, size)
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return err
}
err = expandSandboxSize(&infop, layerId, size)
if err != nil {
err = makeErrorf(err, title, "layerId=%s size=%d", layerId, size)
logrus.Error(err)
return err
}
logrus.Debugf(title+"- succeeded layerId=%s size=%d", layerId, size)
return nil
}

156
vendor/github.com/Microsoft/hcsshim/exportlayer.go generated vendored Normal file
View File

@ -0,0 +1,156 @@
package hcsshim
import (
"io"
"io/ioutil"
"os"
"syscall"
"github.com/Microsoft/go-winio"
"github.com/sirupsen/logrus"
)
// ExportLayer will create a folder at exportFolderPath and fill that folder with
// the transport format version of the layer identified by layerId. This transport
// format includes any metadata required for later importing the layer (using
// ImportLayer), and requires the full list of parent layer paths in order to
// perform the export.
func ExportLayer(info DriverInfo, layerId string, exportFolderPath string, parentLayerPaths []string) error {
title := "hcsshim::ExportLayer "
logrus.Debugf(title+"flavour %d layerId %s folder %s", info.Flavour, layerId, exportFolderPath)
// Generate layer descriptors
layers, err := layerPathsToDescriptors(parentLayerPaths)
if err != nil {
return err
}
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return err
}
err = exportLayer(&infop, layerId, exportFolderPath, layers)
if err != nil {
err = makeErrorf(err, title, "layerId=%s flavour=%d folder=%s", layerId, info.Flavour, exportFolderPath)
logrus.Error(err)
return err
}
logrus.Debugf(title+"succeeded flavour=%d layerId=%s folder=%s", info.Flavour, layerId, exportFolderPath)
return nil
}
type LayerReader interface {
Next() (string, int64, *winio.FileBasicInfo, error)
Read(b []byte) (int, error)
Close() error
}
// FilterLayerReader provides an interface for extracting the contents of an on-disk layer.
type FilterLayerReader struct {
context uintptr
}
// Next reads the next available file from a layer, ensuring that parent directories are always read
// before child files and directories.
//
// Next returns the file's relative path, size, and basic file metadata. Read() should be used to
// extract a Win32 backup stream with the remainder of the metadata and the data.
func (r *FilterLayerReader) Next() (string, int64, *winio.FileBasicInfo, error) {
var fileNamep *uint16
fileInfo := &winio.FileBasicInfo{}
var deleted uint32
var fileSize int64
err := exportLayerNext(r.context, &fileNamep, fileInfo, &fileSize, &deleted)
if err != nil {
if err == syscall.ERROR_NO_MORE_FILES {
err = io.EOF
} else {
err = makeError(err, "ExportLayerNext", "")
}
return "", 0, nil, err
}
fileName := convertAndFreeCoTaskMemString(fileNamep)
if deleted != 0 {
fileInfo = nil
}
if fileName[0] == '\\' {
fileName = fileName[1:]
}
return fileName, fileSize, fileInfo, nil
}
// Read reads from the current file's Win32 backup stream.
func (r *FilterLayerReader) Read(b []byte) (int, error) {
var bytesRead uint32
err := exportLayerRead(r.context, b, &bytesRead)
if err != nil {
return 0, makeError(err, "ExportLayerRead", "")
}
if bytesRead == 0 {
return 0, io.EOF
}
return int(bytesRead), nil
}
// Close frees resources associated with the layer reader. It will return an
// error if there was an error while reading the layer or if the layer was not
// completely read.
func (r *FilterLayerReader) Close() (err error) {
if r.context != 0 {
err = exportLayerEnd(r.context)
if err != nil {
err = makeError(err, "ExportLayerEnd", "")
}
r.context = 0
}
return
}
// NewLayerReader returns a new layer reader for reading the contents of an on-disk layer.
// The caller must have taken the SeBackupPrivilege privilege
// to call this and any methods on the resulting LayerReader.
func NewLayerReader(info DriverInfo, layerID string, parentLayerPaths []string) (LayerReader, error) {
if procExportLayerBegin.Find() != nil {
// The new layer reader is not available on this Windows build. Fall back to the
// legacy export code path.
path, err := ioutil.TempDir("", "hcs")
if err != nil {
return nil, err
}
err = ExportLayer(info, layerID, path, parentLayerPaths)
if err != nil {
os.RemoveAll(path)
return nil, err
}
return &legacyLayerReaderWrapper{newLegacyLayerReader(path)}, nil
}
layers, err := layerPathsToDescriptors(parentLayerPaths)
if err != nil {
return nil, err
}
infop, err := convertDriverInfo(info)
if err != nil {
return nil, err
}
r := &FilterLayerReader{}
err = exportLayerBegin(&infop, layerID, layers, &r.context)
if err != nil {
return nil, makeError(err, "ExportLayerBegin", "")
}
return r, err
}
type legacyLayerReaderWrapper struct {
*legacyLayerReader
}
func (r *legacyLayerReaderWrapper) Close() error {
err := r.legacyLayerReader.Close()
os.RemoveAll(r.root)
return err
}
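Read returns io.EOF at the end of each file's backup stream, while Next returns io.EOF when the layer itself is exhausted, so consuming a layer nests the two. A sketch, assuming the DriverInfo value is prepared by the caller:

package example

import (
	"fmt"
	"io"
	"io/ioutil"

	"github.com/Microsoft/hcsshim"
)

// walkLayer visits every file in a layer, discarding the backup streams.
func walkLayer(info hcsshim.DriverInfo, layerID string, parents []string) error {
	r, err := hcsshim.NewLayerReader(info, layerID, parents)
	if err != nil {
		return err
	}
	// Close also reports an incompletely read layer; real code should check it.
	defer r.Close()
	for {
		name, size, _, err := r.Next()
		if err == io.EOF {
			break // no more files in the layer
		}
		if err != nil {
			return err
		}
		fmt.Printf("%s (%d bytes)\n", name, size)
		// Read drains the current file's Win32 backup stream until io.EOF.
		if _, err := io.Copy(ioutil.Discard, r); err != nil {
			return err
		}
	}
	return nil
}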

View File

@ -0,0 +1,55 @@
package hcsshim
import (
"syscall"
"github.com/sirupsen/logrus"
)
// GetLayerMountPath will look for a mounted layer with the given id and return
// the path at which that layer can be accessed. This path may be a volume path
// if the layer is a mounted read-write layer, otherwise it is expected to be the
// folder path at which the layer is stored.
func GetLayerMountPath(info DriverInfo, id string) (string, error) {
title := "hcsshim::GetLayerMountPath "
logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id)
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return "", err
}
var mountPathLength uintptr
mountPathLength = 0
// Call the procedure itself.
logrus.Debugf("Calling proc (1)")
err = getLayerMountPath(&infop, id, &mountPathLength, nil)
if err != nil {
err = makeErrorf(err, title, "(first call) id=%s flavour=%d", id, info.Flavour)
logrus.Error(err)
return "", err
}
// Allocate a mount path of the returned length.
if mountPathLength == 0 {
return "", nil
}
mountPathp := make([]uint16, mountPathLength)
mountPathp[0] = 0
// Call the procedure again
logrus.Debugf("Calling proc (2)")
err = getLayerMountPath(&infop, id, &mountPathLength, &mountPathp[0])
if err != nil {
err = makeErrorf(err, title, "(second call) id=%s flavour=%d", id, info.Flavour)
logrus.Error(err)
return "", err
}
path := syscall.UTF16ToString(mountPathp[0:])
logrus.Debugf(title+"succeeded flavour=%d id=%s path=%s", info.Flavour, id, path)
return path, nil
}

View File

@ -0,0 +1,22 @@
package hcsshim
import "github.com/sirupsen/logrus"
// GetSharedBaseImages will enumerate the images stored in the common central
// image store and return descriptive info about those images for the purpose
// of registering them with the graphdriver, graph, and tagstore.
func GetSharedBaseImages() (imageData string, err error) {
title := "hcsshim::GetSharedBaseImages "
logrus.Debugf("Calling proc")
var buffer *uint16
err = getBaseImages(&buffer)
if err != nil {
err = makeError(err, title, "")
logrus.Error(err)
return
}
imageData = convertAndFreeCoTaskMemString(buffer)
logrus.Debugf(title+" - succeeded output=%s", imageData)
return
}

19
vendor/github.com/Microsoft/hcsshim/guid.go generated vendored Normal file
View File

@ -0,0 +1,19 @@
package hcsshim
import (
"crypto/sha1"
"fmt"
)
type GUID [16]byte
func NewGUID(source string) *GUID {
h := sha1.Sum([]byte(source))
var g GUID
copy(g[0:], h[0:16])
return &g
}
func (g *GUID) ToString() string {
return fmt.Sprintf("%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x-%02x", g[3], g[2], g[1], g[0], g[5], g[4], g[7], g[6], g[8:10], g[10:])
}
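NewGUID hashes its input with SHA-1, so the mapping from name to GUID is deterministic: the same source string always yields the same GUID, which is how callers derive stable identifiers from names. For illustration:

package example

import (
	"fmt"

	"github.com/Microsoft/hcsshim"
)

func demoGUID() {
	a := hcsshim.NewGUID("my-layer")
	b := hcsshim.NewGUID("my-layer")
	// Deterministic: both calls print the identical GUID string.
	fmt.Println(a.ToString(), b.ToString())
}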

166
vendor/github.com/Microsoft/hcsshim/hcsshim.go generated vendored Normal file
View File

@ -0,0 +1,166 @@
// Shim for the Host Compute Service (HCS) to manage Windows Server
// containers and Hyper-V containers.
package hcsshim
import (
"fmt"
"syscall"
"unsafe"
"github.com/sirupsen/logrus"
)
//go:generate go run mksyscall_windows.go -output zhcsshim.go hcsshim.go safeopen.go
//sys coTaskMemFree(buffer unsafe.Pointer) = ole32.CoTaskMemFree
//sys SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) = iphlpapi.SetCurrentThreadCompartmentId
//sys activateLayer(info *driverInfo, id string) (hr error) = vmcompute.ActivateLayer?
//sys copyLayer(info *driverInfo, srcId string, dstId string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.CopyLayer?
//sys createLayer(info *driverInfo, id string, parent string) (hr error) = vmcompute.CreateLayer?
//sys createSandboxLayer(info *driverInfo, id string, parent string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.CreateSandboxLayer?
//sys expandSandboxSize(info *driverInfo, id string, size uint64) (hr error) = vmcompute.ExpandSandboxSize?
//sys deactivateLayer(info *driverInfo, id string) (hr error) = vmcompute.DeactivateLayer?
//sys destroyLayer(info *driverInfo, id string) (hr error) = vmcompute.DestroyLayer?
//sys exportLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.ExportLayer?
//sys getLayerMountPath(info *driverInfo, id string, length *uintptr, buffer *uint16) (hr error) = vmcompute.GetLayerMountPath?
//sys getBaseImages(buffer **uint16) (hr error) = vmcompute.GetBaseImages?
//sys importLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.ImportLayer?
//sys layerExists(info *driverInfo, id string, exists *uint32) (hr error) = vmcompute.LayerExists?
//sys nameToGuid(name string, guid *GUID) (hr error) = vmcompute.NameToGuid?
//sys prepareLayer(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.PrepareLayer?
//sys unprepareLayer(info *driverInfo, id string) (hr error) = vmcompute.UnprepareLayer?
//sys processBaseImage(path string) (hr error) = vmcompute.ProcessBaseImage?
//sys processUtilityImage(path string) (hr error) = vmcompute.ProcessUtilityImage?
//sys importLayerBegin(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) = vmcompute.ImportLayerBegin?
//sys importLayerNext(context uintptr, fileName string, fileInfo *winio.FileBasicInfo) (hr error) = vmcompute.ImportLayerNext?
//sys importLayerWrite(context uintptr, buffer []byte) (hr error) = vmcompute.ImportLayerWrite?
//sys importLayerEnd(context uintptr) (hr error) = vmcompute.ImportLayerEnd?
//sys exportLayerBegin(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) = vmcompute.ExportLayerBegin?
//sys exportLayerNext(context uintptr, fileName **uint16, fileInfo *winio.FileBasicInfo, fileSize *int64, deleted *uint32) (hr error) = vmcompute.ExportLayerNext?
//sys exportLayerRead(context uintptr, buffer []byte, bytesRead *uint32) (hr error) = vmcompute.ExportLayerRead?
//sys exportLayerEnd(context uintptr) (hr error) = vmcompute.ExportLayerEnd?
//sys hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) = vmcompute.HcsEnumerateComputeSystems?
//sys hcsCreateComputeSystem(id string, configuration string, identity syscall.Handle, computeSystem *hcsSystem, result **uint16) (hr error) = vmcompute.HcsCreateComputeSystem?
//sys hcsOpenComputeSystem(id string, computeSystem *hcsSystem, result **uint16) (hr error) = vmcompute.HcsOpenComputeSystem?
//sys hcsCloseComputeSystem(computeSystem hcsSystem) (hr error) = vmcompute.HcsCloseComputeSystem?
//sys hcsStartComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsStartComputeSystem?
//sys hcsShutdownComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsShutdownComputeSystem?
//sys hcsTerminateComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsTerminateComputeSystem?
//sys hcsPauseComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsPauseComputeSystem?
//sys hcsResumeComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsResumeComputeSystem?
//sys hcsGetComputeSystemProperties(computeSystem hcsSystem, propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetComputeSystemProperties?
//sys hcsModifyComputeSystem(computeSystem hcsSystem, configuration string, result **uint16) (hr error) = vmcompute.HcsModifyComputeSystem?
//sys hcsRegisterComputeSystemCallback(computeSystem hcsSystem, callback uintptr, context uintptr, callbackHandle *hcsCallback) (hr error) = vmcompute.HcsRegisterComputeSystemCallback?
//sys hcsUnregisterComputeSystemCallback(callbackHandle hcsCallback) (hr error) = vmcompute.HcsUnregisterComputeSystemCallback?
//sys hcsCreateProcess(computeSystem hcsSystem, processParameters string, processInformation *hcsProcessInformation, process *hcsProcess, result **uint16) (hr error) = vmcompute.HcsCreateProcess?
//sys hcsOpenProcess(computeSystem hcsSystem, pid uint32, process *hcsProcess, result **uint16) (hr error) = vmcompute.HcsOpenProcess?
//sys hcsCloseProcess(process hcsProcess) (hr error) = vmcompute.HcsCloseProcess?
//sys hcsTerminateProcess(process hcsProcess, result **uint16) (hr error) = vmcompute.HcsTerminateProcess?
//sys hcsGetProcessInfo(process hcsProcess, processInformation *hcsProcessInformation, result **uint16) (hr error) = vmcompute.HcsGetProcessInfo?
//sys hcsGetProcessProperties(process hcsProcess, processProperties **uint16, result **uint16) (hr error) = vmcompute.HcsGetProcessProperties?
//sys hcsModifyProcess(process hcsProcess, settings string, result **uint16) (hr error) = vmcompute.HcsModifyProcess?
//sys hcsGetServiceProperties(propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetServiceProperties?
//sys hcsRegisterProcessCallback(process hcsProcess, callback uintptr, context uintptr, callbackHandle *hcsCallback) (hr error) = vmcompute.HcsRegisterProcessCallback?
//sys hcsUnregisterProcessCallback(callbackHandle hcsCallback) (hr error) = vmcompute.HcsUnregisterProcessCallback?
//sys hcsModifyServiceSettings(settings string, result **uint16) (hr error) = vmcompute.HcsModifyServiceSettings?
//sys _hnsCall(method string, path string, object string, response **uint16) (hr error) = vmcompute.HNSCall?
const (
// Specific user-visible exit codes
WaitErrExecFailed = 32767
ERROR_GEN_FAILURE = syscall.Errno(31)
ERROR_SHUTDOWN_IN_PROGRESS = syscall.Errno(1115)
WSAEINVAL = syscall.Errno(10022)
// Timeout on wait calls
TimeoutInfinite = 0xFFFFFFFF
)
type HcsError struct {
title string
rest string
Err error
}
type hcsSystem syscall.Handle
type hcsProcess syscall.Handle
type hcsCallback syscall.Handle
type hcsProcessInformation struct {
ProcessId uint32
Reserved uint32
StdInput syscall.Handle
StdOutput syscall.Handle
StdError syscall.Handle
}
func makeError(err error, title, rest string) error {
// Pass through DLL errors directly since they do not originate from HCS.
if _, ok := err.(*syscall.DLLError); ok {
return err
}
return &HcsError{title, rest, err}
}
func makeErrorf(err error, title, format string, a ...interface{}) error {
return makeError(err, title, fmt.Sprintf(format, a...))
}
func win32FromError(err error) uint32 {
if herr, ok := err.(*HcsError); ok {
return win32FromError(herr.Err)
}
if code, ok := err.(syscall.Errno); ok {
return uint32(code)
}
return uint32(ERROR_GEN_FAILURE)
}
func win32FromHresult(hr uintptr) uintptr {
if hr&0x1fff0000 == 0x00070000 {
return hr & 0xffff
}
return hr
}
func (e *HcsError) Error() string {
s := e.title
if len(s) > 0 && s[len(s)-1] != ' ' {
s += " "
}
s += fmt.Sprintf("failed in Win32: %s (0x%x)", e.Err, win32FromError(e.Err))
if e.rest != "" {
if e.rest[0] != ' ' {
s += " "
}
s += e.rest
}
return s
}
func convertAndFreeCoTaskMemString(buffer *uint16) string {
str := syscall.UTF16ToString((*[1 << 30]uint16)(unsafe.Pointer(buffer))[:])
coTaskMemFree(unsafe.Pointer(buffer))
return str
}
func convertAndFreeCoTaskMemBytes(buffer *uint16) []byte {
return []byte(convertAndFreeCoTaskMemString(buffer))
}
func processHcsResult(err error, resultp *uint16) error {
if resultp != nil {
result := convertAndFreeCoTaskMemString(resultp)
logrus.Debugf("Result: %s", result)
}
return err
}

323
vendor/github.com/Microsoft/hcsshim/hnsendpoint.go generated vendored Normal file
View File

@ -0,0 +1,323 @@
package hcsshim
import (
"encoding/json"
"net"
"github.com/sirupsen/logrus"
)
// HNSEndpoint represents a network endpoint in HNS
type HNSEndpoint struct {
Id string `json:"ID,omitempty"`
Name string `json:",omitempty"`
VirtualNetwork string `json:",omitempty"`
VirtualNetworkName string `json:",omitempty"`
Policies []json.RawMessage `json:",omitempty"`
MacAddress string `json:",omitempty"`
IPAddress net.IP `json:",omitempty"`
DNSSuffix string `json:",omitempty"`
DNSServerList string `json:",omitempty"`
GatewayAddress string `json:",omitempty"`
EnableInternalDNS bool `json:",omitempty"`
DisableICC bool `json:",omitempty"`
PrefixLength uint8 `json:",omitempty"`
IsRemoteEndpoint bool `json:",omitempty"`
}
// SystemType represents the type of the system on which actions are done
type SystemType string
// SystemType const
const (
ContainerType SystemType = "Container"
VirtualMachineType SystemType = "VirtualMachine"
HostType SystemType = "Host"
)
// EndpointAttachDetachRequest is the structure used to send request to the container to modify the system
// Supported resource types are Network and Request Types are Add/Remove
type EndpointAttachDetachRequest struct {
ContainerID string `json:"ContainerId,omitempty"`
SystemType SystemType `json:"SystemType"`
CompartmentID uint16 `json:"CompartmentId,omitempty"`
VirtualNICName string `json:"VirtualNicName,omitempty"`
}
// EndpointResquestResponse is the object used to get the endpoint request response
type EndpointResquestResponse struct {
Success bool
Error string
}
// HNSEndpointRequest makes an HNS call to modify/query a network endpoint
func HNSEndpointRequest(method, path, request string) (*HNSEndpoint, error) {
endpoint := &HNSEndpoint{}
err := hnsCall(method, "/endpoints/"+path, request, &endpoint)
if err != nil {
return nil, err
}
return endpoint, nil
}
// HNSListEndpointRequest makes an HNS call to query the list of available endpoints
func HNSListEndpointRequest() ([]HNSEndpoint, error) {
var endpoint []HNSEndpoint
err := hnsCall("GET", "/endpoints/", "", &endpoint)
if err != nil {
return nil, err
}
return endpoint, nil
}
// HotAttachEndpoint makes an HCS call to attach the endpoint to the container
func HotAttachEndpoint(containerID string, endpointID string) error {
return modifyNetworkEndpoint(containerID, endpointID, Add)
}
// HotDetachEndpoint makes an HCS call to detach the endpoint from the container
func HotDetachEndpoint(containerID string, endpointID string) error {
return modifyNetworkEndpoint(containerID, endpointID, Remove)
}
// modifyContainer modifies the container matching the given id by sending a request to HCS
func modifyContainer(id string, request *ResourceModificationRequestResponse) error {
container, err := OpenContainer(id)
if err != nil {
if IsNotExist(err) {
return ErrComputeSystemDoesNotExist
}
return getInnerError(err)
}
defer container.Close()
err = container.Modify(request)
if err != nil {
if IsNotSupported(err) {
return ErrPlatformNotSupported
}
return getInnerError(err)
}
return nil
}
func modifyNetworkEndpoint(containerID string, endpointID string, request RequestType) error {
requestMessage := &ResourceModificationRequestResponse{
Resource: Network,
Request: request,
Data: endpointID,
}
err := modifyContainer(containerID, requestMessage)
if err != nil {
return err
}
return nil
}
// GetHNSEndpointByID gets the Endpoint by ID
func GetHNSEndpointByID(endpointID string) (*HNSEndpoint, error) {
return HNSEndpointRequest("GET", endpointID, "")
}
// GetHNSEndpointByName gets the endpoint filtered by Name
func GetHNSEndpointByName(endpointName string) (*HNSEndpoint, error) {
hnsResponse, err := HNSListEndpointRequest()
if err != nil {
return nil, err
}
for _, hnsEndpoint := range hnsResponse {
if hnsEndpoint.Name == endpointName {
return &hnsEndpoint, nil
}
}
return nil, EndpointNotFoundError{EndpointName: endpointName}
}
// Create Endpoint by sending EndpointRequest to HNS. TODO: Create a separate HNS interface to place all these methods
func (endpoint *HNSEndpoint) Create() (*HNSEndpoint, error) {
operation := "Create"
title := "HCSShim::HNSEndpoint::" + operation
logrus.Debugf(title+" id=%s", endpoint.Id)
jsonString, err := json.Marshal(endpoint)
if err != nil {
return nil, err
}
return HNSEndpointRequest("POST", "", string(jsonString))
}
// Delete Endpoint by sending EndpointRequest to HNS
func (endpoint *HNSEndpoint) Delete() (*HNSEndpoint, error) {
operation := "Delete"
title := "HCSShim::HNSEndpoint::" + operation
logrus.Debugf(title+" id=%s", endpoint.Id)
return HNSEndpointRequest("DELETE", endpoint.Id, "")
}
// Update Endpoint
func (endpoint *HNSEndpoint) Update() (*HNSEndpoint, error) {
operation := "Update"
title := "HCSShim::HNSEndpoint::" + operation
logrus.Debugf(title+" id=%s", endpoint.Id)
jsonString, err := json.Marshal(endpoint)
if err != nil {
return nil, err
}
err = hnsCall("POST", "/endpoints/"+endpoint.Id, string(jsonString), &endpoint)
return endpoint, err
}
// ContainerHotAttach attaches an endpoint to a running container
func (endpoint *HNSEndpoint) ContainerHotAttach(containerID string) error {
operation := "ContainerHotAttach"
title := "HCSShim::HNSEndpoint::" + operation
logrus.Debugf(title+" id=%s, containerId=%s", endpoint.Id, containerID)
return modifyNetworkEndpoint(containerID, endpoint.Id, Add)
}
// ContainerHotDetach detaches an endpoint from a running container
func (endpoint *HNSEndpoint) ContainerHotDetach(containerID string) error {
operation := "ContainerHotDetach"
title := "HCSShim::HNSEndpoint::" + operation
logrus.Debugf(title+" id=%s, containerId=%s", endpoint.Id, containerID)
return modifyNetworkEndpoint(containerID, endpoint.Id, Remove)
}
// ApplyACLPolicy applies a set of ACL Policies on the Endpoint
func (endpoint *HNSEndpoint) ApplyACLPolicy(policies ...*ACLPolicy) error {
operation := "ApplyACLPolicy"
title := "HCSShim::HNSEndpoint::" + operation
logrus.Debugf(title+" id=%s", endpoint.Id)
for _, policy := range policies {
if policy == nil {
continue
}
jsonString, err := json.Marshal(policy)
if err != nil {
return err
}
endpoint.Policies = append(endpoint.Policies, jsonString)
}
_, err := endpoint.Update()
return err
}
// ContainerAttach attaches an endpoint to container
func (endpoint *HNSEndpoint) ContainerAttach(containerID string, compartmentID uint16) error {
operation := "ContainerAttach"
title := "HCSShim::HNSEndpoint::" + operation
logrus.Debugf(title+" id=%s", endpoint.Id)
requestMessage := &EndpointAttachDetachRequest{
ContainerID: containerID,
CompartmentID: compartmentID,
SystemType: ContainerType,
}
response := &EndpointResquestResponse{}
jsonString, err := json.Marshal(requestMessage)
if err != nil {
return err
}
return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response)
}
// ContainerDetach detaches an endpoint from container
func (endpoint *HNSEndpoint) ContainerDetach(containerID string) error {
operation := "ContainerDetach"
title := "HCSShim::HNSEndpoint::" + operation
logrus.Debugf(title+" id=%s", endpoint.Id)
requestMessage := &EndpointAttachDetachRequest{
ContainerID: containerID,
SystemType: ContainerType,
}
response := &EndpointResquestResponse{}
jsonString, err := json.Marshal(requestMessage)
if err != nil {
return err
}
return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response)
}
// HostAttach attaches a NIC on the host
func (endpoint *HNSEndpoint) HostAttach(compartmentID uint16) error {
operation := "HostAttach"
title := "HCSShim::HNSEndpoint::" + operation
logrus.Debugf(title+" id=%s", endpoint.Id)
requestMessage := &EndpointAttachDetachRequest{
CompartmentID: compartmentID,
SystemType: HostType,
}
response := &EndpointResquestResponse{}
jsonString, err := json.Marshal(requestMessage)
if err != nil {
return err
}
return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response)
}
// HostDetach detaches a NIC on the host
func (endpoint *HNSEndpoint) HostDetach() error {
operation := "HostDetach"
title := "HCSShim::HNSEndpoint::" + operation
logrus.Debugf(title+" id=%s", endpoint.Id)
requestMessage := &EndpointAttachDetachRequest{
SystemType: HostType,
}
response := &EndpointResquestResponse{}
jsonString, err := json.Marshal(requestMessage)
if err != nil {
return err
}
return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response)
}
// VirtualMachineNICAttach attaches an endpoint to a virtual machine
func (endpoint *HNSEndpoint) VirtualMachineNICAttach(virtualMachineNICName string) error {
operation := "VirtualMachineNicAttach"
title := "HCSShim::HNSEndpoint::" + operation
logrus.Debugf(title+" id=%s", endpoint.Id)
requestMessage := &EndpointAttachDetachRequest{
VirtualNICName: virtualMachineNICName,
SystemType: VirtualMachineType,
}
response := &EndpointResquestResponse{}
jsonString, err := json.Marshal(requestMessage)
if err != nil {
return err
}
return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response)
}
// VirtualMachineNICDetach detaches an endpoint from a virtual machine
func (endpoint *HNSEndpoint) VirtualMachineNICDetach() error {
operation := "VirtualMachineNicDetach"
title := "HCSShim::HNSEndpoint::" + operation
logrus.Debugf(title+" id=%s", endpoint.Id)
requestMessage := &EndpointAttachDetachRequest{
SystemType: VirtualMachineType,
}
response := &EndpointResquestResponse{}
jsonString, err := json.Marshal(requestMessage)
if err != nil {
return err
}
return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response)
}
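
All of these attach/detach helpers follow the same pattern: build an EndpointAttachDetachRequest, POST it to the endpoint's /attach or /detach URI, and decode the response. A minimal sketch (Windows-only) of hot-attaching an endpoint to a running container and detaching it again; GetHNSEndpointByName is assumed from the endpoint-lookup half of this file (not shown in this hunk), and the container ID is a placeholder:

```go
package main

import (
	"log"

	"github.com/Microsoft/hcsshim"
)

func main() {
	const containerID = "<container-id>" // hypothetical running container

	// Look up an existing endpoint by name (helper assumed from hnsendpoint.go).
	endpoint, err := hcsshim.GetHNSEndpointByName("demo-endpoint")
	if err != nil {
		log.Fatalf("endpoint lookup failed: %v", err)
	}
	// Hot-attach to the running container, then detach when done.
	if err := endpoint.ContainerHotAttach(containerID); err != nil {
		log.Fatalf("attach failed: %v", err)
	}
	defer endpoint.ContainerHotDetach(containerID)
}
```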

40
vendor/github.com/Microsoft/hcsshim/hnsfuncs.go generated vendored Normal file
View File

@ -0,0 +1,40 @@
package hcsshim
import (
"encoding/json"
"fmt"
"github.com/sirupsen/logrus"
)
func hnsCall(method, path, request string, returnResponse interface{}) error {
var responseBuffer *uint16
logrus.Debugf("[%s]=>[%s] Request : %s", method, path, request)
err := _hnsCall(method, path, request, &responseBuffer)
if err != nil {
return makeError(err, "hnsCall ", "")
}
response := convertAndFreeCoTaskMemString(responseBuffer)
hnsresponse := &hnsResponse{}
if err = json.Unmarshal([]byte(response), &hnsresponse); err != nil {
return err
}
if !hnsresponse.Success {
return fmt.Errorf("HNS failed with error : %s", hnsresponse.Error)
}
if len(hnsresponse.Output) == 0 {
return nil
}
logrus.Debugf("Network Response : %s", hnsresponse.Output)
err = json.Unmarshal(hnsresponse.Output, returnResponse)
if err != nil {
return err
}
return nil
}
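
Every HNS helper in this vendored package funnels through hnsCall: marshal the request, invoke the _hnsCall syscall wrapper, then unmarshal the JSON Output field into the caller's type. hnsCall itself is unexported, so a hedged sketch of the same round trip goes through one of the exported wrappers defined in hnsnetwork.go below:

```go
package main

import (
	"fmt"
	"log"

	"github.com/Microsoft/hcsshim"
)

func main() {
	// GET /networks/ with an empty request body; hnsCall decodes the
	// response's Output field into the []HNSNetwork slice.
	networks, err := hcsshim.HNSListNetworkRequest("GET", "", "")
	if err != nil {
		log.Fatal(err)
	}
	for _, n := range networks {
		fmt.Printf("%s (%s) type=%s\n", n.Name, n.Id, n.Type)
	}
}
```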

141
vendor/github.com/Microsoft/hcsshim/hnsnetwork.go generated vendored Normal file
View File

@ -0,0 +1,141 @@
package hcsshim
import (
"encoding/json"
"net"
"github.com/sirupsen/logrus"
)
// Subnet is associated with a network and represents a list
// of subnets available to the network
type Subnet struct {
AddressPrefix string `json:",omitempty"`
GatewayAddress string `json:",omitempty"`
Policies []json.RawMessage `json:",omitempty"`
}
// MacPool is associated with a network and represents a list
// of MAC addresses available to the network
type MacPool struct {
StartMacAddress string `json:",omitempty"`
EndMacAddress string `json:",omitempty"`
}
// HNSNetwork represents a network in HNS
type HNSNetwork struct {
Id string `json:"ID,omitempty"`
Name string `json:",omitempty"`
Type string `json:",omitempty"`
NetworkAdapterName string `json:",omitempty"`
SourceMac string `json:",omitempty"`
Policies []json.RawMessage `json:",omitempty"`
MacPools []MacPool `json:",omitempty"`
Subnets []Subnet `json:",omitempty"`
DNSSuffix string `json:",omitempty"`
DNSServerList string `json:",omitempty"`
DNSServerCompartment uint32 `json:",omitempty"`
ManagementIP string `json:",omitempty"`
AutomaticDNS bool `json:",omitempty"`
}
type hnsNetworkResponse struct {
Success bool
Error string
Output HNSNetwork
}
type hnsResponse struct {
Success bool
Error string
Output json.RawMessage
}
// HNSNetworkRequest makes a call into HNS to update/query a single network
func HNSNetworkRequest(method, path, request string) (*HNSNetwork, error) {
var network HNSNetwork
err := hnsCall(method, "/networks/"+path, request, &network)
if err != nil {
return nil, err
}
return &network, nil
}
// HNSListNetworkRequest makes a HNS call to query the list of available networks
func HNSListNetworkRequest(method, path, request string) ([]HNSNetwork, error) {
var network []HNSNetwork
err := hnsCall(method, "/networks/"+path, request, &network)
if err != nil {
return nil, err
}
return network, nil
}
// GetHNSNetworkByID returns the network matching the given ID
func GetHNSNetworkByID(networkID string) (*HNSNetwork, error) {
return HNSNetworkRequest("GET", networkID, "")
}
// GetHNSNetworkByName returns the network matching the given name
func GetHNSNetworkByName(networkName string) (*HNSNetwork, error) {
hnsnetworks, err := HNSListNetworkRequest("GET", "", "")
if err != nil {
return nil, err
}
for _, hnsnetwork := range hnsnetworks {
if hnsnetwork.Name == networkName {
return &hnsnetwork, nil
}
}
return nil, NetworkNotFoundError{NetworkName: networkName}
}
// Create creates the network by sending a NetworkRequest to HNS.
func (network *HNSNetwork) Create() (*HNSNetwork, error) {
operation := "Create"
title := "HCSShim::HNSNetwork::" + operation
logrus.Debugf(title+" id=%s", network.Id)
jsonString, err := json.Marshal(network)
if err != nil {
return nil, err
}
return HNSNetworkRequest("POST", "", string(jsonString))
}
// Delete deletes the network by sending a NetworkRequest to HNS
func (network *HNSNetwork) Delete() (*HNSNetwork, error) {
operation := "Delete"
title := "HCSShim::HNSNetwork::" + operation
logrus.Debugf(title+" id=%s", network.Id)
return HNSNetworkRequest("DELETE", network.Id, "")
}
// NewEndpoint returns an HNSEndpoint bound to this network; call Create on the result to instantiate it in HNS.
func (network *HNSNetwork) NewEndpoint(ipAddress net.IP, macAddress net.HardwareAddr) *HNSEndpoint {
return &HNSEndpoint{
VirtualNetwork: network.Id,
IPAddress: ipAddress,
MacAddress: macAddress.String(), // textual MAC form; string(macAddress) would store the raw bytes
}
}
// CreateEndpoint creates an endpoint on this network.
func (network *HNSNetwork) CreateEndpoint(endpoint *HNSEndpoint) (*HNSEndpoint, error) {
operation := "CreateEndpoint"
title := "HCSShim::HNSNetwork::" + operation
logrus.Debugf(title+" id=%s, endpointId=%s", network.Id, endpoint.Id)
endpoint.VirtualNetwork = network.Id
return endpoint.Create()
}
// CreateRemoteEndpoint creates a remote endpoint on this network.
func (network *HNSNetwork) CreateRemoteEndpoint(endpoint *HNSEndpoint) (*HNSEndpoint, error) {
operation := "CreateRemoteEndpoint"
title := "HCSShim::HNSNetwork::" + operation
logrus.Debugf(title+" id=%s", network.Id)
endpoint.IsRemoteEndpoint = true
return network.CreateEndpoint(endpoint)
}
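
A minimal sketch of the create path, assuming a Windows host with HNS and illustrative NAT values: build an HNSNetwork, Create it, then create an endpoint on it. All names and prefixes here are placeholders, not values from this PR:

```go
package main

import (
	"log"

	"github.com/Microsoft/hcsshim"
)

func main() {
	network := &hcsshim.HNSNetwork{
		Name: "demo-nat", // illustrative values
		Type: "NAT",
		Subnets: []hcsshim.Subnet{{
			AddressPrefix:  "172.20.0.0/16",
			GatewayAddress: "172.20.0.1",
		}},
	}
	created, err := network.Create()
	if err != nil {
		log.Fatal(err)
	}
	defer created.Delete()

	// CreateEndpoint stamps the network ID into the endpoint before POSTing it.
	endpoint, err := created.CreateEndpoint(&hcsshim.HNSEndpoint{Name: "demo-ep"})
	if err != nil {
		log.Fatal(err)
	}
	defer endpoint.Delete()
}
```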

94
vendor/github.com/Microsoft/hcsshim/hnspolicy.go generated vendored Normal file
View File

@ -0,0 +1,94 @@
package hcsshim
// PolicyType is the type of policy request supported in ModifySystem
type PolicyType string
// PolicyType constants
const (
Nat PolicyType = "NAT"
ACL PolicyType = "ACL"
PA PolicyType = "PA"
VLAN PolicyType = "VLAN"
VSID PolicyType = "VSID"
VNet PolicyType = "VNET"
L2Driver PolicyType = "L2Driver"
Isolation PolicyType = "Isolation"
QOS PolicyType = "QOS"
OutboundNat PolicyType = "OutBoundNAT"
ExternalLoadBalancer PolicyType = "ELB"
Route PolicyType = "ROUTE"
)
type NatPolicy struct {
Type PolicyType `json:"Type"`
Protocol string
InternalPort uint16
ExternalPort uint16
}
type QosPolicy struct {
Type PolicyType `json:"Type"`
MaximumOutgoingBandwidthInBytes uint64
}
type IsolationPolicy struct {
Type PolicyType `json:"Type"`
VLAN uint
VSID uint
InDefaultIsolation bool
}
type VlanPolicy struct {
Type PolicyType `json:"Type"`
VLAN uint
}
type VsidPolicy struct {
Type PolicyType `json:"Type"`
VSID uint
}
type PaPolicy struct {
Type PolicyType `json:"Type"`
PA string `json:"PA"`
}
type OutboundNatPolicy struct {
Policy
VIP string `json:"VIP,omitempty"`
Exceptions []string `json:"ExceptionList,omitempty"`
}
type ActionType string
type DirectionType string
type RuleType string
const (
Allow ActionType = "Allow"
Block ActionType = "Block"
In DirectionType = "In"
Out DirectionType = "Out"
Host RuleType = "Host"
Switch RuleType = "Switch"
)
type ACLPolicy struct {
Type PolicyType `json:"Type"`
Protocol uint16
InternalPort uint16
Action ActionType
Direction DirectionType
LocalAddresses string
RemoteAddresses string
LocalPort uint16
RemotePort uint16
RuleType RuleType `json:"RuleType,omitempty"`
Priority uint16
ServiceName string
}
type Policy struct {
Type PolicyType `json:"Type"`
}
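
These policy structs are plain JSON carriers; the Type discriminator tells HNS which schema the rest of the document follows. A hedged sketch pairing an ACLPolicy with the ApplyACLPolicy helper shown at the top of this diff; the endpoint lookup helper is assumed from hnsendpoint.go, and the port, action, and priority are illustrative:

```go
package main

import (
	"log"

	"github.com/Microsoft/hcsshim"
)

func main() {
	endpoint, err := hcsshim.GetHNSEndpointByName("demo-endpoint") // assumed helper
	if err != nil {
		log.Fatal(err)
	}
	// Block inbound TCP 445 on this endpoint at the switch layer.
	acl := &hcsshim.ACLPolicy{
		Type:      hcsshim.ACL,
		Protocol:  6, // TCP
		LocalPort: 445,
		Action:    hcsshim.Block,
		Direction: hcsshim.In,
		RuleType:  hcsshim.Switch,
		Priority:  200,
	}
	if err := endpoint.ApplyACLPolicy(acl); err != nil {
		log.Fatal(err)
	}
}
```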

200
vendor/github.com/Microsoft/hcsshim/hnspolicylist.go generated vendored Normal file
View File

@ -0,0 +1,200 @@
package hcsshim
import (
"encoding/json"
"github.com/sirupsen/logrus"
)
// RoutePolicy is a structure defining schema for Route based Policy
type RoutePolicy struct {
Policy
DestinationPrefix string `json:"DestinationPrefix,omitempty"`
NextHop string `json:"NextHop,omitempty"`
EncapEnabled bool `json:"NeedEncap,omitempty"`
}
// ELBPolicy is a structure defining schema for ELB LoadBalancing based Policy
type ELBPolicy struct {
LBPolicy
SourceVIP string `json:"SourceVIP,omitempty"`
VIPs []string `json:"VIPs,omitempty"`
ILB bool `json:"ILB,omitempty"`
}
// LBPolicy is a structure defining schema for LoadBalancing based Policy
type LBPolicy struct {
Policy
Protocol uint16 `json:"Protocol,omitempty"`
InternalPort uint16
ExternalPort uint16
}
// PolicyList is a structure defining schema for Policy list request
type PolicyList struct {
ID string `json:"ID,omitempty"`
EndpointReferences []string `json:"References,omitempty"`
Policies []json.RawMessage `json:"Policies,omitempty"`
}
// HNSPolicyListRequest makes a call into HNS to update/query a single policy list
func HNSPolicyListRequest(method, path, request string) (*PolicyList, error) {
var policy PolicyList
err := hnsCall(method, "/policylists/"+path, request, &policy)
if err != nil {
return nil, err
}
return &policy, nil
}
// HNSListPolicyListRequest gets all the policy lists
func HNSListPolicyListRequest() ([]PolicyList, error) {
var plist []PolicyList
err := hnsCall("GET", "/policylists/", "", &plist)
if err != nil {
return nil, err
}
return plist, nil
}
// PolicyListRequest makes an HNS call to modify/query a network policy list
func PolicyListRequest(method, path, request string) (*PolicyList, error) {
policylist := &PolicyList{}
err := hnsCall(method, "/policylists/"+path, request, &policylist)
if err != nil {
return nil, err
}
return policylist, nil
}
// GetPolicyListByID gets the policy list by ID
func GetPolicyListByID(policyListID string) (*PolicyList, error) {
return PolicyListRequest("GET", policyListID, "")
}
// Create creates the PolicyList by sending a PolicyListRequest to HNS.
func (policylist *PolicyList) Create() (*PolicyList, error) {
operation := "Create"
title := "HCSShim::PolicyList::" + operation
logrus.Debugf(title+" id=%s", policylist.ID)
jsonString, err := json.Marshal(policylist)
if err != nil {
return nil, err
}
return PolicyListRequest("POST", "", string(jsonString))
}
// Delete deletes PolicyList
func (policylist *PolicyList) Delete() (*PolicyList, error) {
operation := "Delete"
title := "HCSShim::PolicyList::" + operation
logrus.Debugf(title+" id=%s", policylist.ID)
return PolicyListRequest("DELETE", policylist.ID, "")
}
// AddEndpoint adds an endpoint to a Policy List
func (policylist *PolicyList) AddEndpoint(endpoint *HNSEndpoint) (*PolicyList, error) {
operation := "AddEndpoint"
title := "HCSShim::PolicyList::" + operation
logrus.Debugf(title+" id=%s, endpointId:%s", policylist.ID, endpoint.Id)
_, err := policylist.Delete()
if err != nil {
return nil, err
}
// Add Endpoint to the Existing List
policylist.EndpointReferences = append(policylist.EndpointReferences, "/endpoints/"+endpoint.Id)
return policylist.Create()
}
// RemoveEndpoint removes an endpoint from the Policy List
func (policylist *PolicyList) RemoveEndpoint(endpoint *HNSEndpoint) (*PolicyList, error) {
operation := "RemoveEndpoint"
title := "HCSShim::PolicyList::" + operation
logrus.Debugf(title+" id=%s, endpointId:%s", policylist.ID, endpoint.Id)
_, err := policylist.Delete()
if err != nil {
return nil, err
}
elementToRemove := "/endpoints/" + endpoint.Id
var references []string
for _, endpointReference := range policylist.EndpointReferences {
if endpointReference == elementToRemove {
continue
}
references = append(references, endpointReference)
}
policylist.EndpointReferences = references
return policylist.Create()
}
// AddLoadBalancer adds a load balancer policy list for the specified endpoints
func AddLoadBalancer(endpoints []HNSEndpoint, isILB bool, sourceVIP, vip string, protocol uint16, internalPort uint16, externalPort uint16) (*PolicyList, error) {
operation := "AddLoadBalancer"
title := "HCSShim::PolicyList::" + operation
logrus.Debugf(title+" endpointId=%v, isILB=%v, sourceVIP=%s, vip=%s, protocol=%v, internalPort=%v, externalPort=%v", endpoints, isILB, sourceVIP, vip, protocol, internalPort, externalPort)
policylist := &PolicyList{}
elbPolicy := &ELBPolicy{
SourceVIP: sourceVIP,
ILB: isILB,
}
if len(vip) > 0 {
elbPolicy.VIPs = []string{vip}
}
elbPolicy.Type = ExternalLoadBalancer
elbPolicy.Protocol = protocol
elbPolicy.InternalPort = internalPort
elbPolicy.ExternalPort = externalPort
for _, endpoint := range endpoints {
policylist.EndpointReferences = append(policylist.EndpointReferences, "/endpoints/"+endpoint.Id)
}
jsonString, err := json.Marshal(elbPolicy)
if err != nil {
return nil, err
}
policylist.Policies = append(policylist.Policies, jsonString)
return policylist.Create()
}
// AddRoute adds a route policy list for the specified endpoints
func AddRoute(endpoints []HNSEndpoint, destinationPrefix string, nextHop string, encapEnabled bool) (*PolicyList, error) {
operation := "AddRoute"
title := "HCSShim::PolicyList::" + operation
logrus.Debugf(title+" destinationPrefix:%s", destinationPrefix)
policylist := &PolicyList{}
rPolicy := &RoutePolicy{
DestinationPrefix: destinationPrefix,
NextHop: nextHop,
EncapEnabled: encapEnabled,
}
rPolicy.Type = Route
for _, endpoint := range endpoints {
policylist.EndpointReferences = append(policylist.EndpointReferences, "/endpoints/"+endpoint.Id)
}
jsonString, err := json.Marshal(rPolicy)
if err != nil {
return nil, err
}
policylist.Policies = append(policylist.Policies, jsonString)
return policylist.Create()
}
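
AddLoadBalancer and AddRoute both follow the PolicyList recipe above: marshal one policy document, reference every endpoint as /endpoints/&lt;id&gt;, and POST the list. A minimal usage sketch with an assumed endpoint lookup and an illustrative VIP and port mapping:

```go
package main

import (
	"log"

	"github.com/Microsoft/hcsshim"
)

func main() {
	ep, err := hcsshim.GetHNSEndpointByName("backend-ep") // assumed helper
	if err != nil {
		log.Fatal(err)
	}
	// External load balancer: VIP 10.0.0.10, TCP external port 80 -> internal 8080.
	pl, err := hcsshim.AddLoadBalancer([]hcsshim.HNSEndpoint{*ep}, false, "", "10.0.0.10", 6, 8080, 80)
	if err != nil {
		log.Fatal(err)
	}
	defer pl.Delete()
}
```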

222
vendor/github.com/Microsoft/hcsshim/importlayer.go generated vendored Normal file
View File

@ -0,0 +1,222 @@
package hcsshim
import (
"errors"
"io/ioutil"
"os"
"path/filepath"
"github.com/Microsoft/go-winio"
"github.com/sirupsen/logrus"
)
// ImportLayer will take the contents of the folder at importFolderPath and import
// that into a layer with the id layerId. Note that in order to correctly populate
// the layer and interpret the transport format, all parent layers must already
// be present on the system at the paths provided in parentLayerPaths.
func ImportLayer(info DriverInfo, layerID string, importFolderPath string, parentLayerPaths []string) error {
title := "hcsshim::ImportLayer "
logrus.Debugf(title+"flavour %d layerId %s folder %s", info.Flavour, layerID, importFolderPath)
// Generate layer descriptors
layers, err := layerPathsToDescriptors(parentLayerPaths)
if err != nil {
return err
}
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return err
}
err = importLayer(&infop, layerID, importFolderPath, layers)
if err != nil {
err = makeErrorf(err, title, "layerId=%s flavour=%d folder=%s", layerID, info.Flavour, importFolderPath)
logrus.Error(err)
return err
}
logrus.Debugf(title+"succeeded flavour=%d layerId=%s folder=%s", info.Flavour, layerID, importFolderPath)
return nil
}
// LayerWriter is an interface that supports writing a new container image layer.
type LayerWriter interface {
// Add adds a file to the layer with given metadata.
Add(name string, fileInfo *winio.FileBasicInfo) error
// AddLink adds a hard link to the layer. The target must already have been added.
AddLink(name string, target string) error
// Remove removes a file that was present in a parent layer from the layer.
Remove(name string) error
// Write writes data to the current file. The data must be in the format of a Win32
// backup stream.
Write(b []byte) (int, error)
// Close finishes the layer writing process and releases any resources.
Close() error
}
// FilterLayerWriter provides an interface to write the contents of a layer to the file system.
type FilterLayerWriter struct {
context uintptr
}
// Add adds a file or directory to the layer. The file's parent directory must have already been added.
//
// name contains the file's relative path. fileInfo contains file times and file attributes; the rest
// of the file metadata and the file data must be written as a Win32 backup stream to the Write() method.
// winio.BackupStreamWriter can be used to facilitate this.
func (w *FilterLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) error {
if name[0] != '\\' {
name = `\` + name
}
err := importLayerNext(w.context, name, fileInfo)
if err != nil {
return makeError(err, "ImportLayerNext", "")
}
return nil
}
// AddLink adds a hard link to the layer. The target of the link must have already been added.
func (w *FilterLayerWriter) AddLink(name string, target string) error {
return errors.New("hard links not yet supported")
}
// Remove removes a file from the layer. The file must have been present in the parent layer.
//
// name contains the file's relative path.
func (w *FilterLayerWriter) Remove(name string) error {
if name[0] != '\\' {
name = `\` + name
}
err := importLayerNext(w.context, name, nil)
if err != nil {
return makeError(err, "ImportLayerNext", "")
}
return nil
}
// Write writes more backup stream data to the current file.
func (w *FilterLayerWriter) Write(b []byte) (int, error) {
err := importLayerWrite(w.context, b)
if err != nil {
err = makeError(err, "ImportLayerWrite", "")
return 0, err
}
return len(b), err
}
// Close completes the layer write operation. The error must be checked to ensure that the
// operation was successful.
func (w *FilterLayerWriter) Close() (err error) {
if w.context != 0 {
err = importLayerEnd(w.context)
if err != nil {
err = makeError(err, "ImportLayerEnd", "")
}
w.context = 0
}
return
}
type legacyLayerWriterWrapper struct {
*legacyLayerWriter
info DriverInfo
layerID string
path string
parentLayerPaths []string
}
func (r *legacyLayerWriterWrapper) Close() error {
defer os.RemoveAll(r.root.Name())
defer r.legacyLayerWriter.CloseRoots()
err := r.legacyLayerWriter.Close()
if err != nil {
return err
}
info := r.info
info.HomeDir = ""
if err = ImportLayer(info, r.destRoot.Name(), r.path, r.parentLayerPaths); err != nil {
return err
}
for _, name := range r.Tombstones {
if err = removeRelative(name, r.destRoot); err != nil && !os.IsNotExist(err) {
return err
}
}
// Add any hard links that were collected.
for _, lnk := range r.PendingLinks {
if err = removeRelative(lnk.Path, r.destRoot); err != nil && !os.IsNotExist(err) {
return err
}
if err = linkRelative(lnk.Target, lnk.TargetRoot, lnk.Path, r.destRoot); err != nil {
return err
}
}
// Prepare the utility VM for use if one is present in the layer.
if r.HasUtilityVM {
err := ensureNotReparsePointRelative("UtilityVM", r.destRoot)
if err != nil {
return err
}
err = ProcessUtilityVMImage(filepath.Join(r.destRoot.Name(), "UtilityVM"))
if err != nil {
return err
}
}
return nil
}
// NewLayerWriter returns a new layer writer for creating a layer on disk.
// The caller must have taken the SeBackupPrivilege and SeRestorePrivilege privileges
// to call this and any methods on the resulting LayerWriter.
func NewLayerWriter(info DriverInfo, layerID string, parentLayerPaths []string) (LayerWriter, error) {
if len(parentLayerPaths) == 0 {
// This is a base layer. It gets imported differently.
f, err := openRoot(filepath.Join(info.HomeDir, layerID))
if err != nil {
return nil, err
}
return &baseLayerWriter{
root: f,
}, nil
}
if procImportLayerBegin.Find() != nil {
// The new layer reader is not available on this Windows build. Fall back to the
// legacy export code path.
path, err := ioutil.TempDir("", "hcs")
if err != nil {
return nil, err
}
w, err := newLegacyLayerWriter(path, parentLayerPaths, filepath.Join(info.HomeDir, layerID))
if err != nil {
return nil, err
}
return &legacyLayerWriterWrapper{
legacyLayerWriter: w,
info: info,
layerID: layerID,
path: path,
parentLayerPaths: parentLayerPaths,
}, nil
}
layers, err := layerPathsToDescriptors(parentLayerPaths)
if err != nil {
return nil, err
}
infop, err := convertDriverInfo(info)
if err != nil {
return nil, err
}
w := &FilterLayerWriter{}
err = importLayerBegin(&infop, layerID, layers, &w.context)
if err != nil {
return nil, makeError(err, "ImportLayerStart", "")
}
return w, nil
}
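
A hedged sketch of driving NewLayerWriter, assuming the caller enables the backup/restore privileges the doc comment requires (winio.RunWithPrivileges and the privilege names are from the go-winio dependency already imported above); the home directory and layer IDs are placeholders:

```go
package main

import (
	"log"

	"github.com/Microsoft/go-winio"
	"github.com/Microsoft/hcsshim"
)

func main() {
	// NewLayerWriter requires these privileges for itself and the returned writer.
	privs := []string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}
	err := winio.RunWithPrivileges(privs, func() error {
		info := hcsshim.DriverInfo{HomeDir: `C:\layers`, Flavour: 1} // 1 = FilterDriver
		w, err := hcsshim.NewLayerWriter(info, "<layer-id>", []string{`C:\layers\<parent-id>`})
		if err != nil {
			return err
		}
		// Stream entries in via w.Add/w.Write (Win32 backup stream format), then:
		return w.Close()
	})
	if err != nil {
		log.Fatal(err)
	}
}
```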

188
vendor/github.com/Microsoft/hcsshim/interface.go generated vendored Normal file
View File

@ -0,0 +1,188 @@
package hcsshim
import (
"encoding/json"
"io"
"time"
)
// ProcessConfig is used as both the input of Container.CreateProcess
// and to convert the parameters to JSON for passing onto the HCS
type ProcessConfig struct {
ApplicationName string `json:",omitempty"`
CommandLine string `json:",omitempty"`
CommandArgs []string `json:",omitempty"` // Used by Linux Containers on Windows
User string `json:",omitempty"`
WorkingDirectory string `json:",omitempty"`
Environment map[string]string `json:",omitempty"`
EmulateConsole bool `json:",omitempty"`
CreateStdInPipe bool `json:",omitempty"`
CreateStdOutPipe bool `json:",omitempty"`
CreateStdErrPipe bool `json:",omitempty"`
ConsoleSize [2]uint `json:",omitempty"`
CreateInUtilityVm bool `json:",omitempty"` // Used by Linux Containers on Windows
OCISpecification *json.RawMessage `json:",omitempty"` // Used by Linux Containers on Windows
}
type Layer struct {
ID string
Path string
}
type MappedDir struct {
HostPath string
ContainerPath string
ReadOnly bool
BandwidthMaximum uint64
IOPSMaximum uint64
CreateInUtilityVM bool
}
type MappedPipe struct {
HostPath string
ContainerPipeName string
}
type HvRuntime struct {
ImagePath string `json:",omitempty"`
SkipTemplate bool `json:",omitempty"`
LinuxInitrdFile string `json:",omitempty"` // File under ImagePath on host containing an initrd image for starting a Linux utility VM
LinuxKernelFile string `json:",omitempty"` // File under ImagePath on host containing a kernel for starting a Linux utility VM
LinuxBootParameters string `json:",omitempty"` // Additional boot parameters for starting a Linux Utility VM in initrd mode
BootSource string `json:",omitempty"` // "Vhd" for Linux Utility VM booting from VHD
WritableBootSource bool `json:",omitempty"` // Linux Utility VM booting from VHD
}
type MappedVirtualDisk struct {
HostPath string `json:",omitempty"` // Path to VHD on the host
ContainerPath string // Platform-specific mount point path in the container
CreateInUtilityVM bool `json:",omitempty"`
ReadOnly bool `json:",omitempty"`
Cache string `json:",omitempty"` // "" (Unspecified); "Disabled"; "Enabled"; "Private"; "PrivateAllowSharing"
AttachOnly bool `json:",omitempty"`
}
// ContainerConfig is used as both the input of CreateContainer
// and to convert the parameters to JSON for passing onto the HCS
type ContainerConfig struct {
SystemType string // HCS requires this to be hard-coded to "Container"
Name string // Name of the container. We use the docker ID.
Owner string `json:",omitempty"` // The management platform that created this container
VolumePath string `json:",omitempty"` // Windows volume path for scratch space. Used by Windows Server Containers only. Format \\?\\Volume{GUID}
IgnoreFlushesDuringBoot bool `json:",omitempty"` // Optimization hint for container startup in Windows
LayerFolderPath string `json:",omitempty"` // Where the layer folders are located. Used by Windows Server Containers only. Format %root%\windowsfilter\containerID
Layers []Layer // List of storage layers. Required for Windows Server and Hyper-V Containers. Format ID=GUID;Path=%root%\windowsfilter\layerID
Credentials string `json:",omitempty"` // Credentials information
ProcessorCount uint32 `json:",omitempty"` // Number of processors to assign to the container.
ProcessorWeight uint64 `json:",omitempty"` // CPU shares (relative weight to other containers with cpu shares). Range is from 1 to 10000. A value of 0 results in default shares.
ProcessorMaximum int64 `json:",omitempty"` // Specifies the portion of processor cycles that this container can use as a percentage times 100. Range is from 1 to 10000. A value of 0 results in no limit.
StorageIOPSMaximum uint64 `json:",omitempty"` // Maximum Storage IOPS
StorageBandwidthMaximum uint64 `json:",omitempty"` // Maximum Storage Bandwidth in bytes per second
StorageSandboxSize uint64 `json:",omitempty"` // Size in bytes that the container system drive should be expanded to if smaller
MemoryMaximumInMB int64 `json:",omitempty"` // Maximum memory available to the container in Megabytes
HostName string `json:",omitempty"` // Hostname
MappedDirectories []MappedDir `json:",omitempty"` // List of mapped directories (volumes/mounts)
MappedPipes []MappedPipe `json:",omitempty"` // List of mapped Windows named pipes
HvPartition bool // True if it is a Hyper-V Container
NetworkSharedContainerName string `json:",omitempty"` // Name (ID) of the container that we will share the network stack with.
EndpointList []string `json:",omitempty"` // List of networking endpoints to be attached to container
HvRuntime *HvRuntime `json:",omitempty"` // Hyper-V container settings. Used by Hyper-V containers only. Format ImagePath=%root%\BaseLayerID\UtilityVM
Servicing bool `json:",omitempty"` // True if this container is for servicing
AllowUnqualifiedDNSQuery bool `json:",omitempty"` // True to allow unqualified DNS name resolution
DNSSearchList string `json:",omitempty"` // Comma-separated list of DNS suffixes to use for name resolution
ContainerType string `json:",omitempty"` // "Linux" for Linux containers on Windows. Omitted otherwise.
TerminateOnLastHandleClosed bool `json:",omitempty"` // Should HCS terminate the container once all handles have been closed
MappedVirtualDisks []MappedVirtualDisk `json:",omitempty"` // Array of virtual disks to mount at start
}
type ComputeSystemQuery struct {
IDs []string `json:"Ids,omitempty"`
Types []string `json:",omitempty"`
Names []string `json:",omitempty"`
Owners []string `json:",omitempty"`
}
// Container represents a created (but not necessarily running) container.
type Container interface {
// Start synchronously starts the container.
Start() error
// Shutdown requests a container shutdown, but it may not actually be shut down until Wait() succeeds.
Shutdown() error
// Terminate requests that the container terminate, but it may not actually be terminated until Wait() succeeds.
Terminate() error
// Wait synchronously waits for the container to shut down or terminate.
Wait() error
// WaitTimeout synchronously waits for the container to terminate or the duration to elapse. It
// returns false if timeout occurs.
WaitTimeout(time.Duration) error
// Pause pauses the execution of a container.
Pause() error
// Resume resumes the execution of a container.
Resume() error
// HasPendingUpdates returns true if the container has updates pending to install.
HasPendingUpdates() (bool, error)
// Statistics returns statistics for a container.
Statistics() (Statistics, error)
// ProcessList returns details for the processes in a container.
ProcessList() ([]ProcessListItem, error)
// MappedVirtualDisks returns virtual disks mapped to a utility VM, indexed by controller
MappedVirtualDisks() (map[int]MappedVirtualDiskController, error)
// CreateProcess launches a new process within the container.
CreateProcess(c *ProcessConfig) (Process, error)
// OpenProcess gets an interface to an existing process within the container.
OpenProcess(pid int) (Process, error)
// Close cleans up any state associated with the container but does not terminate or wait for it.
Close() error
// Modify the System
Modify(config *ResourceModificationRequestResponse) error
}
// Process represents a running or exited process.
type Process interface {
// Pid returns the process ID of the process within the container.
Pid() int
// Kill signals the process to terminate but does not wait for it to finish terminating.
Kill() error
// Wait waits for the process to exit.
Wait() error
// WaitTimeout waits for the process to exit or the duration to elapse. It returns
// false if timeout occurs.
WaitTimeout(time.Duration) error
// ExitCode returns the exit code of the process. The process must have
// already terminated.
ExitCode() (int, error)
// ResizeConsole resizes the console of the process.
ResizeConsole(width, height uint16) error
// Stdio returns the stdin, stdout, and stderr pipes, respectively. Closing
// these pipes does not close the underlying pipes; it should be possible to
// call this multiple times to get multiple interfaces.
Stdio() (io.WriteCloser, io.ReadCloser, io.ReadCloser, error)
// CloseStdin closes the write side of the stdin pipe so that the process is
// notified on the read side that there is no more data in stdin.
CloseStdin() error
// Close cleans up any state associated with the process but does not kill
// or wait on it.
Close() error
}
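
A hedged sketch of the lifecycle these two interfaces describe, assuming hcsshim.CreateContainer (defined elsewhere in this package) as the constructor; the layer configuration is elided with placeholders and error handling is trimmed to the essentials:

```go
package main

import (
	"log"

	"github.com/Microsoft/hcsshim"
)

func main() {
	cfg := &hcsshim.ContainerConfig{
		SystemType: "Container", // hard-coded, per the comment above
		Name:       "demo",
		// VolumePath, LayerFolderPath, Layers: placeholder configuration elided
	}
	c, err := hcsshim.CreateContainer("demo", cfg) // constructor assumed from this package
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
	if err := c.Start(); err != nil {
		log.Fatal(err)
	}
	p, err := c.CreateProcess(&hcsshim.ProcessConfig{
		CommandLine:      `cmd /c echo hello`,
		WorkingDirectory: `C:\`,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer p.Close()
	if err := p.Wait(); err != nil {
		log.Fatal(err)
	}
	code, _ := p.ExitCode()
	log.Printf("exit code: %d", code)
	_ = c.Shutdown()
}
```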

30
vendor/github.com/Microsoft/hcsshim/layerexists.go generated vendored Normal file
View File

@ -0,0 +1,30 @@
package hcsshim
import "github.com/sirupsen/logrus"
// LayerExists will return true if a layer with the given id exists and is known
// to the system.
func LayerExists(info DriverInfo, id string) (bool, error) {
title := "hcsshim::LayerExists "
logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id)
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return false, err
}
// Call the procedure itself.
var exists uint32
err = layerExists(&infop, id, &exists)
if err != nil {
err = makeErrorf(err, title, "id=%s flavour=%d", id, info.Flavour)
logrus.Error(err)
return false, err
}
logrus.Debugf(title+"succeeded flavour=%d id=%s exists=%d", info.Flavour, id, exists)
return exists != 0, nil
}
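
DriverInfo carries the graph driver flavour and home directory into every storage call. A minimal sketch of LayerExists with placeholder values; flavour 1 corresponds to FilterDriver in the enum documented in layerutils.go below:

```go
package main

import (
	"log"

	"github.com/Microsoft/hcsshim"
)

func main() {
	info := hcsshim.DriverInfo{
		HomeDir: `C:\ProgramData\docker\windowsfilter`, // placeholder home dir
		Flavour: 1,                                     // FilterDriver
	}
	exists, err := hcsshim.LayerExists(info, "<layer-id>")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("layer exists: %v", exists)
}
```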

111
vendor/github.com/Microsoft/hcsshim/layerutils.go generated vendored Normal file
View File

@ -0,0 +1,111 @@
package hcsshim
// This file contains utility functions to support storage (graph) related
// functionality.
import (
"path/filepath"
"syscall"
"github.com/sirupsen/logrus"
)
/* To pass into syscall, we need a struct matching the following:
enum GraphDriverType
{
DiffDriver,
FilterDriver
};
struct DriverInfo {
GraphDriverType Flavour;
LPCWSTR HomeDir;
};
*/
type DriverInfo struct {
Flavour int
HomeDir string
}
type driverInfo struct {
Flavour int
HomeDirp *uint16
}
func convertDriverInfo(info DriverInfo) (driverInfo, error) {
homedirp, err := syscall.UTF16PtrFromString(info.HomeDir)
if err != nil {
logrus.Debugf("Failed conversion of home to pointer for driver info: %s", err.Error())
return driverInfo{}, err
}
return driverInfo{
Flavour: info.Flavour,
HomeDirp: homedirp,
}, nil
}
/* To pass into syscall, we need a struct matching the following:
typedef struct _WC_LAYER_DESCRIPTOR {
//
// The ID of the layer
//
GUID LayerId;
//
// Additional flags
//
union {
struct {
ULONG Reserved : 31;
ULONG Dirty : 1; // Created from sandbox as a result of snapshot
};
ULONG Value;
} Flags;
//
// Path to the layer root directory, null-terminated
//
PCWSTR Path;
} WC_LAYER_DESCRIPTOR, *PWC_LAYER_DESCRIPTOR;
*/
type WC_LAYER_DESCRIPTOR struct {
LayerId GUID
Flags uint32
Pathp *uint16
}
func layerPathsToDescriptors(parentLayerPaths []string) ([]WC_LAYER_DESCRIPTOR, error) {
// Array of descriptors that gets constructed.
var layers []WC_LAYER_DESCRIPTOR
for i := 0; i < len(parentLayerPaths); i++ {
// Create a layer descriptor, using the folder name
// as the source for a GUID LayerId
_, folderName := filepath.Split(parentLayerPaths[i])
g, err := NameToGuid(folderName)
if err != nil {
logrus.Debugf("Failed to convert name to guid %s", err)
return nil, err
}
p, err := syscall.UTF16PtrFromString(parentLayerPaths[i])
if err != nil {
logrus.Debugf("Failed conversion of parentLayerPath to pointer %s", err)
return nil, err
}
layers = append(layers, WC_LAYER_DESCRIPTOR{
LayerId: g,
Flags: 0,
Pathp: p,
})
}
return layers, nil
}

827
vendor/github.com/Microsoft/hcsshim/legacy.go generated vendored Normal file
View File

@ -0,0 +1,827 @@
package hcsshim
import (
"bufio"
"encoding/binary"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"syscall"
"github.com/Microsoft/go-winio"
)
var errorIterationCanceled = errors.New("")
var mutatedUtilityVMFiles = map[string]bool{
`EFI\Microsoft\Boot\BCD`: true,
`EFI\Microsoft\Boot\BCD.LOG`: true,
`EFI\Microsoft\Boot\BCD.LOG1`: true,
`EFI\Microsoft\Boot\BCD.LOG2`: true,
}
const (
filesPath = `Files`
hivesPath = `Hives`
utilityVMPath = `UtilityVM`
utilityVMFilesPath = `UtilityVM\Files`
)
func openFileOrDir(path string, mode uint32, createDisposition uint32) (file *os.File, err error) {
return winio.OpenForBackup(path, mode, syscall.FILE_SHARE_READ, createDisposition)
}
func makeLongAbsPath(path string) (string, error) {
if strings.HasPrefix(path, `\\?\`) || strings.HasPrefix(path, `\\.\`) {
return path, nil
}
if !filepath.IsAbs(path) {
absPath, err := filepath.Abs(path)
if err != nil {
return "", err
}
path = absPath
}
if strings.HasPrefix(path, `\\`) {
return `\\?\UNC\` + path[2:], nil
}
return `\\?\` + path, nil
}
func hasPathPrefix(p, prefix string) bool {
return strings.HasPrefix(p, prefix) && len(p) > len(prefix) && p[len(prefix)] == '\\'
}
type fileEntry struct {
path string
fi os.FileInfo
err error
}
type legacyLayerReader struct {
root string
result chan *fileEntry
proceed chan bool
currentFile *os.File
backupReader *winio.BackupFileReader
}
// newLegacyLayerReader returns a new LayerReader that can read the Windows
// container layer transport format from disk.
func newLegacyLayerReader(root string) *legacyLayerReader {
r := &legacyLayerReader{
root: root,
result: make(chan *fileEntry),
proceed: make(chan bool),
}
go r.walk()
return r
}
func readTombstones(path string) (map[string]([]string), error) {
tf, err := os.Open(filepath.Join(path, "tombstones.txt"))
if err != nil {
return nil, err
}
defer tf.Close()
s := bufio.NewScanner(tf)
if !s.Scan() || s.Text() != "\xef\xbb\xbfVersion 1.0" {
return nil, errors.New("Invalid tombstones file")
}
ts := make(map[string]([]string))
for s.Scan() {
t := filepath.Join(filesPath, s.Text()[1:]) // skip leading `\`
dir := filepath.Dir(t)
ts[dir] = append(ts[dir], t)
}
if err = s.Err(); err != nil {
return nil, err
}
return ts, nil
}
func (r *legacyLayerReader) walkUntilCancelled() error {
root, err := makeLongAbsPath(r.root)
if err != nil {
return err
}
r.root = root
ts, err := readTombstones(r.root)
if err != nil {
return err
}
err = filepath.Walk(r.root, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
// Indirect fix for https://github.com/moby/moby/issues/32838#issuecomment-343610048.
// Handle failure from what may be a golang bug in the conversion of
// UTF16 to UTF8 in files which are left in the recycle bin. os.Lstat,
// which is called by filepath.Walk, will fail when a filename contains
// unicode characters. Skipping the recycle bin entirely is harmless, so do so regardless.
if strings.EqualFold(path, filepath.Join(r.root, `Files\$Recycle.Bin`)) && info.IsDir() {
return filepath.SkipDir
}
if path == r.root || path == filepath.Join(r.root, "tombstones.txt") || strings.HasSuffix(path, ".$wcidirs$") {
return nil
}
r.result <- &fileEntry{path, info, nil}
if !<-r.proceed {
return errorIterationCanceled
}
// List all the tombstones.
if info.IsDir() {
relPath, err := filepath.Rel(r.root, path)
if err != nil {
return err
}
if dts, ok := ts[relPath]; ok {
for _, t := range dts {
r.result <- &fileEntry{filepath.Join(r.root, t), nil, nil}
if !<-r.proceed {
return errorIterationCanceled
}
}
}
}
return nil
})
if err == errorIterationCanceled {
return nil
}
if err == nil {
return io.EOF
}
return err
}
func (r *legacyLayerReader) walk() {
defer close(r.result)
if !<-r.proceed {
return
}
err := r.walkUntilCancelled()
if err != nil {
for {
r.result <- &fileEntry{err: err}
if !<-r.proceed {
return
}
}
}
}
func (r *legacyLayerReader) reset() {
if r.backupReader != nil {
r.backupReader.Close()
r.backupReader = nil
}
if r.currentFile != nil {
r.currentFile.Close()
r.currentFile = nil
}
}
func findBackupStreamSize(r io.Reader) (int64, error) {
br := winio.NewBackupStreamReader(r)
for {
hdr, err := br.Next()
if err != nil {
if err == io.EOF {
err = nil
}
return 0, err
}
if hdr.Id == winio.BackupData {
return hdr.Size, nil
}
}
}
func (r *legacyLayerReader) Next() (path string, size int64, fileInfo *winio.FileBasicInfo, err error) {
r.reset()
r.proceed <- true
fe := <-r.result
if fe == nil {
err = errors.New("LegacyLayerReader closed")
return
}
if fe.err != nil {
err = fe.err
return
}
path, err = filepath.Rel(r.root, fe.path)
if err != nil {
return
}
if fe.fi == nil {
// This is a tombstone. Return a nil fileInfo.
return
}
if fe.fi.IsDir() && hasPathPrefix(path, filesPath) {
fe.path += ".$wcidirs$"
}
f, err := openFileOrDir(fe.path, syscall.GENERIC_READ, syscall.OPEN_EXISTING)
if err != nil {
return
}
defer func() {
if f != nil {
f.Close()
}
}()
fileInfo, err = winio.GetFileBasicInfo(f)
if err != nil {
return
}
if !hasPathPrefix(path, filesPath) {
size = fe.fi.Size()
r.backupReader = winio.NewBackupFileReader(f, false)
if path == hivesPath || path == filesPath {
// The Hives directory has a non-deterministic file time because of the
// nature of the import process. Use the times from System_Delta.
var g *os.File
g, err = os.Open(filepath.Join(r.root, hivesPath, `System_Delta`))
if err != nil {
return
}
attr := fileInfo.FileAttributes
fileInfo, err = winio.GetFileBasicInfo(g)
g.Close()
if err != nil {
return
}
fileInfo.FileAttributes = attr
}
// The creation time and access time get reset for files outside of the Files path.
fileInfo.CreationTime = fileInfo.LastWriteTime
fileInfo.LastAccessTime = fileInfo.LastWriteTime
} else {
// The file attributes are written before the backup stream.
var attr uint32
err = binary.Read(f, binary.LittleEndian, &attr)
if err != nil {
return
}
fileInfo.FileAttributes = uintptr(attr)
beginning := int64(4)
// Find the accurate file size.
if !fe.fi.IsDir() {
size, err = findBackupStreamSize(f)
if err != nil {
err = &os.PathError{Op: "findBackupStreamSize", Path: fe.path, Err: err}
return
}
}
// Return back to the beginning of the backup stream.
_, err = f.Seek(beginning, 0)
if err != nil {
return
}
}
r.currentFile = f
f = nil
return
}
func (r *legacyLayerReader) Read(b []byte) (int, error) {
if r.backupReader == nil {
if r.currentFile == nil {
return 0, io.EOF
}
return r.currentFile.Read(b)
}
return r.backupReader.Read(b)
}
func (r *legacyLayerReader) Seek(offset int64, whence int) (int64, error) {
if r.backupReader == nil {
if r.currentFile == nil {
return 0, errors.New("no current file")
}
return r.currentFile.Seek(offset, whence)
}
return 0, errors.New("seek not supported on this stream")
}
func (r *legacyLayerReader) Close() error {
r.proceed <- false
<-r.result
r.reset()
return nil
}
type pendingLink struct {
Path, Target string
TargetRoot *os.File
}
type pendingDir struct {
Path string
Root *os.File
}
type legacyLayerWriter struct {
root *os.File
destRoot *os.File
parentRoots []*os.File
currentFile *os.File
currentFileName string
currentFileRoot *os.File
backupWriter *winio.BackupFileWriter
Tombstones []string
HasUtilityVM bool
uvmDi []dirInfo
addedFiles map[string]bool
PendingLinks []pendingLink
pendingDirs []pendingDir
currentIsDir bool
}
// newLegacyLayerWriter returns a LayerWriter that can write the container layer
// transport format to disk.
func newLegacyLayerWriter(root string, parentRoots []string, destRoot string) (w *legacyLayerWriter, err error) {
w = &legacyLayerWriter{
addedFiles: make(map[string]bool),
}
defer func() {
if err != nil {
w.CloseRoots()
w = nil
}
}()
w.root, err = openRoot(root)
if err != nil {
return
}
w.destRoot, err = openRoot(destRoot)
if err != nil {
return
}
for _, r := range parentRoots {
f, err := openRoot(r)
if err != nil {
return w, err
}
w.parentRoots = append(w.parentRoots, f)
}
return
}
func (w *legacyLayerWriter) CloseRoots() {
if w.root != nil {
w.root.Close()
w.root = nil
}
if w.destRoot != nil {
w.destRoot.Close()
w.destRoot = nil
}
for i := range w.parentRoots {
w.parentRoots[i].Close()
}
w.parentRoots = nil
}
func (w *legacyLayerWriter) initUtilityVM() error {
if !w.HasUtilityVM {
err := mkdirRelative(utilityVMPath, w.destRoot)
if err != nil {
return err
}
// Server 2016 does not support multiple layers for the utility VM, so
// clone the utility VM from the parent layer into this layer. Use hard
// links to avoid unnecessary copying, since most of the files are
// immutable.
err = cloneTree(w.parentRoots[0], w.destRoot, utilityVMFilesPath, mutatedUtilityVMFiles)
if err != nil {
return fmt.Errorf("cloning the parent utility VM image failed: %s", err)
}
w.HasUtilityVM = true
}
return nil
}
func (w *legacyLayerWriter) reset() error {
if w.currentIsDir {
r := w.currentFile
br := winio.NewBackupStreamReader(r)
// Seek to the beginning of the backup stream, skipping the fileattrs
if _, err := r.Seek(4, io.SeekStart); err != nil {
return err
}
for {
bhdr, err := br.Next()
if err == io.EOF {
// end of backupstream data
break
}
if err != nil {
return err
}
switch bhdr.Id {
case winio.BackupReparseData:
// The current file is a `.$wcidirs$` metadata file that
// describes a directory reparse point. Delete the placeholder
// directory to prevent future files being added into the
// destination of the reparse point during the ImportLayer call
if err := removeRelative(w.currentFileName, w.currentFileRoot); err != nil {
return err
}
w.pendingDirs = append(w.pendingDirs, pendingDir{Path: w.currentFileName, Root: w.currentFileRoot})
default:
// ignore all other stream types, as we only care about directory reparse points
}
}
w.currentIsDir = false
}
if w.backupWriter != nil {
w.backupWriter.Close()
w.backupWriter = nil
}
if w.currentFile != nil {
w.currentFile.Close()
w.currentFile = nil
w.currentFileName = ""
w.currentFileRoot = nil
}
return nil
}
// copyFileWithMetadata copies a file using the backup/restore APIs in order to preserve metadata
func copyFileWithMetadata(srcRoot, destRoot *os.File, subPath string, isDir bool) (fileInfo *winio.FileBasicInfo, err error) {
src, err := openRelative(
subPath,
srcRoot,
syscall.GENERIC_READ|winio.ACCESS_SYSTEM_SECURITY,
syscall.FILE_SHARE_READ,
_FILE_OPEN,
_FILE_OPEN_REPARSE_POINT)
if err != nil {
return nil, err
}
defer src.Close()
srcr := winio.NewBackupFileReader(src, true)
defer srcr.Close()
fileInfo, err = winio.GetFileBasicInfo(src)
if err != nil {
return nil, err
}
extraFlags := uint32(0)
if isDir {
extraFlags |= _FILE_DIRECTORY_FILE
}
dest, err := openRelative(
subPath,
destRoot,
syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY,
syscall.FILE_SHARE_READ,
_FILE_CREATE,
extraFlags)
if err != nil {
return nil, err
}
defer dest.Close()
err = winio.SetFileBasicInfo(dest, fileInfo)
if err != nil {
return nil, err
}
destw := winio.NewBackupFileWriter(dest, true)
defer func() {
cerr := destw.Close()
if err == nil {
err = cerr
}
}()
_, err = io.Copy(destw, srcr)
if err != nil {
return nil, err
}
return fileInfo, nil
}
// cloneTree clones a directory tree using hard links. It skips hard links for
// the file names in the provided map and just copies those files.
func cloneTree(srcRoot *os.File, destRoot *os.File, subPath string, mutatedFiles map[string]bool) error {
var di []dirInfo
err := ensureNotReparsePointRelative(subPath, srcRoot)
if err != nil {
return err
}
err = filepath.Walk(filepath.Join(srcRoot.Name(), subPath), func(srcFilePath string, info os.FileInfo, err error) error {
if err != nil {
return err
}
relPath, err := filepath.Rel(srcRoot.Name(), srcFilePath)
if err != nil {
return err
}
fileAttributes := info.Sys().(*syscall.Win32FileAttributeData).FileAttributes
// Directories, reparse points, and files that will be mutated during
// utility VM import must be copied. All other files can be hard linked.
isReparsePoint := fileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT != 0
// In go1.9, FileInfo.IsDir() returns false if the directory is also a symlink.
// See: https://github.com/golang/go/commit/1989921aef60c83e6f9127a8448fb5ede10e9acc
// Fixes the problem by checking syscall.FILE_ATTRIBUTE_DIRECTORY directly
isDir := fileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0
if isDir || isReparsePoint || mutatedFiles[relPath] {
fi, err := copyFileWithMetadata(srcRoot, destRoot, relPath, isDir)
if err != nil {
return err
}
if isDir && !isReparsePoint {
di = append(di, dirInfo{path: relPath, fileInfo: *fi})
}
} else {
err = linkRelative(relPath, srcRoot, relPath, destRoot)
if err != nil {
return err
}
}
// Don't recurse on reparse points in go1.8 and older. Filepath.Walk
// handles this in go1.9 and newer.
if isDir && isReparsePoint && shouldSkipDirectoryReparse {
return filepath.SkipDir
}
return nil
})
if err != nil {
return err
}
return reapplyDirectoryTimes(destRoot, di)
}
func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) error {
if err := w.reset(); err != nil {
return err
}
if name == utilityVMPath {
return w.initUtilityVM()
}
name = filepath.Clean(name)
if hasPathPrefix(name, utilityVMPath) {
if !w.HasUtilityVM {
return errors.New("missing UtilityVM directory")
}
if !hasPathPrefix(name, utilityVMFilesPath) && name != utilityVMFilesPath {
return errors.New("invalid UtilityVM layer")
}
createDisposition := uint32(_FILE_OPEN)
if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 {
st, err := lstatRelative(name, w.destRoot)
if err != nil && !os.IsNotExist(err) {
return err
}
if st != nil {
// Delete the existing file/directory if it is not the same type as this directory.
existingAttr := st.Sys().(*syscall.Win32FileAttributeData).FileAttributes
if (uint32(fileInfo.FileAttributes)^existingAttr)&(syscall.FILE_ATTRIBUTE_DIRECTORY|syscall.FILE_ATTRIBUTE_REPARSE_POINT) != 0 {
if err = removeAllRelative(name, w.destRoot); err != nil {
return err
}
st = nil
}
}
if st == nil {
if err = mkdirRelative(name, w.destRoot); err != nil {
return err
}
}
if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT == 0 {
w.uvmDi = append(w.uvmDi, dirInfo{path: name, fileInfo: *fileInfo})
}
} else {
// Overwrite any existing hard link.
err := removeRelative(name, w.destRoot)
if err != nil && !os.IsNotExist(err) {
return err
}
createDisposition = _FILE_CREATE
}
f, err := openRelative(
name,
w.destRoot,
syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY,
syscall.FILE_SHARE_READ,
createDisposition,
_FILE_OPEN_REPARSE_POINT,
)
if err != nil {
return err
}
defer func() {
if f != nil {
f.Close()
removeRelative(name, w.destRoot)
}
}()
err = winio.SetFileBasicInfo(f, fileInfo)
if err != nil {
return err
}
w.backupWriter = winio.NewBackupFileWriter(f, true)
w.currentFile = f
w.currentFileName = name
w.currentFileRoot = w.destRoot
w.addedFiles[name] = true
f = nil
return nil
}
fname := name
if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 {
err := mkdirRelative(name, w.root)
if err != nil {
return err
}
fname += ".$wcidirs$"
w.currentIsDir = true
}
f, err := openRelative(fname, w.root, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, _FILE_CREATE, 0)
if err != nil {
return err
}
defer func() {
if f != nil {
f.Close()
removeRelative(fname, w.root)
}
}()
strippedFi := *fileInfo
strippedFi.FileAttributes = 0
err = winio.SetFileBasicInfo(f, &strippedFi)
if err != nil {
return err
}
if hasPathPrefix(name, hivesPath) {
w.backupWriter = winio.NewBackupFileWriter(f, false)
} else {
// The file attributes are written before the stream.
err = binary.Write(f, binary.LittleEndian, uint32(fileInfo.FileAttributes))
if err != nil {
return err
}
}
w.currentFile = f
w.currentFileName = name
w.currentFileRoot = w.root
w.addedFiles[name] = true
f = nil
return nil
}
func (w *legacyLayerWriter) AddLink(name string, target string) error {
if err := w.reset(); err != nil {
return err
}
target = filepath.Clean(target)
var roots []*os.File
if hasPathPrefix(target, filesPath) {
// Look for cross-layer hard link targets in the parent layers, since
// nothing is in the destination path yet.
roots = w.parentRoots
} else if hasPathPrefix(target, utilityVMFilesPath) {
// Since the utility VM is fully cloned into the destination path
// already, look for cross-layer hard link targets directly in the
// destination path.
roots = []*os.File{w.destRoot}
}
if roots == nil || (!hasPathPrefix(name, filesPath) && !hasPathPrefix(name, utilityVMFilesPath)) {
return errors.New("invalid hard link in layer")
}
// First try to find the target of the link among previously added files. If that
// fails, search in the parent layers.
var selectedRoot *os.File
if _, ok := w.addedFiles[target]; ok {
selectedRoot = w.destRoot
} else {
for _, r := range roots {
if _, err := lstatRelative(target, r); err != nil {
if !os.IsNotExist(err) {
return err
}
} else {
selectedRoot = r
break
}
}
if selectedRoot == nil {
return fmt.Errorf("failed to find link target for '%s' -> '%s'", name, target)
}
}
// The link can't be written until after the ImportLayer call.
w.PendingLinks = append(w.PendingLinks, pendingLink{
Path: name,
Target: target,
TargetRoot: selectedRoot,
})
w.addedFiles[name] = true
return nil
}
func (w *legacyLayerWriter) Remove(name string) error {
name = filepath.Clean(name)
if hasPathPrefix(name, filesPath) {
w.Tombstones = append(w.Tombstones, name)
} else if hasPathPrefix(name, utilityVMFilesPath) {
err := w.initUtilityVM()
if err != nil {
return err
}
// Make sure the path exists; os.RemoveAll will not fail if the file is
// already gone, and this needs to be a fatal error for diagnostics
// purposes.
if _, err := lstatRelative(name, w.destRoot); err != nil {
return err
}
err = removeAllRelative(name, w.destRoot)
if err != nil {
return err
}
} else {
return fmt.Errorf("invalid tombstone %s", name)
}
return nil
}
func (w *legacyLayerWriter) Write(b []byte) (int, error) {
if w.backupWriter == nil {
if w.currentFile == nil {
return 0, errors.New("closed")
}
return w.currentFile.Write(b)
}
return w.backupWriter.Write(b)
}
func (w *legacyLayerWriter) Close() error {
if err := w.reset(); err != nil {
return err
}
if err := removeRelative("tombstones.txt", w.root); err != nil && !os.IsNotExist(err) {
return err
}
for _, pd := range w.pendingDirs {
err := mkdirRelative(pd.Path, pd.Root)
if err != nil {
return err
}
}
if w.HasUtilityVM {
err := reapplyDirectoryTimes(w.destRoot, w.uvmDi)
if err != nil {
return err
}
}
return nil
}

7
vendor/github.com/Microsoft/hcsshim/legacy18.go generated vendored Normal file
View File

@ -0,0 +1,7 @@
// +build !go1.9
package hcsshim
// Due to a bug in go1.8 and before, directory reparse points need to be skipped
// during filepath.Walk. This is fixed in go1.9
var shouldSkipDirectoryReparse = true

7
vendor/github.com/Microsoft/hcsshim/legacy19.go generated vendored Normal file
View File

@ -0,0 +1,7 @@
// +build go1.9
package hcsshim
// Due to a bug in go1.8 and before, directory reparse points need to be skipped
// during filepath.Walk. This is fixed in go1.9
var shouldSkipDirectoryReparse = false

20
vendor/github.com/Microsoft/hcsshim/nametoguid.go generated vendored Normal file
View File

@ -0,0 +1,20 @@
package hcsshim
import "github.com/sirupsen/logrus"
// NameToGuid converts the given string into a GUID using the algorithm in the
// Host Compute Service, ensuring GUIDs generated with the same string are common
// across all clients.
func NameToGuid(name string) (id GUID, err error) {
title := "hcsshim::NameToGuid "
logrus.Debugf(title+"Name %s", name)
err = nameToGuid(name, &id)
if err != nil {
err = makeErrorf(err, title, "name=%s", name)
logrus.Error(err)
return
}
return
}
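
Because the mapping is deterministic, NameToGuid lets every client derive the same layer GUID from the same folder name. A minimal sketch:

```go
package main

import (
	"fmt"
	"log"

	"github.com/Microsoft/hcsshim"
)

func main() {
	g, err := hcsshim.NameToGuid("layerFolderName")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(g.ToString()) // same input name -> same GUID on every client
}
```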

46
vendor/github.com/Microsoft/hcsshim/preparelayer.go generated vendored Normal file
View File

@ -0,0 +1,46 @@
package hcsshim
import (
"sync"
"github.com/sirupsen/logrus"
)
var prepareLayerLock sync.Mutex
// PrepareLayer finds a mounted read-write layer matching layerId and enables
// the filesystem filter for use on that layer. This requires the paths to all
// parent layers, and is necessary in order to view or interact with the layer
// as an actual filesystem (reading and writing files, creating directories, etc).
// Disabling the filter must be done via UnprepareLayer.
func PrepareLayer(info DriverInfo, layerId string, parentLayerPaths []string) error {
title := "hcsshim::PrepareLayer "
logrus.Debugf(title+"flavour %d layerId %s", info.Flavour, layerId)
// Generate layer descriptors
layers, err := layerPathsToDescriptors(parentLayerPaths)
if err != nil {
return err
}
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return err
}
// This lock is a temporary workaround for a Windows bug. Only allowing one
// call to prepareLayer at a time vastly reduces the chance of a timeout.
prepareLayerLock.Lock()
defer prepareLayerLock.Unlock()
err = prepareLayer(&infop, layerId, layers)
if err != nil {
err = makeErrorf(err, title, "layerId=%s flavour=%d", layerId, info.Flavour)
logrus.Error(err)
return err
}
logrus.Debugf(title+"succeeded flavour=%d layerId=%s", info.Flavour, layerId)
return nil
}
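
PrepareLayer pairs with UnprepareLayer (defined elsewhere in this package). A hedged sketch of mounting a scratch layer for filesystem access, with placeholder paths and flavour 1 standing in for FilterDriver:

```go
package main

import (
	"log"

	"github.com/Microsoft/hcsshim"
)

func main() {
	info := hcsshim.DriverInfo{HomeDir: `C:\layers`, Flavour: 1} // 1 = FilterDriver
	parents := []string{`C:\layers\<parent-id>`}                 // placeholder parent layers
	if err := hcsshim.PrepareLayer(info, "<layer-id>", parents); err != nil {
		log.Fatal(err)
	}
	// ... read and write files inside the mounted layer ...
	if err := hcsshim.UnprepareLayer(info, "<layer-id>"); err != nil {
		log.Fatal(err)
	}
}
```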

Some files were not shown because too many files have changed in this diff.