mirror of https://github.com/docker/cli.git
Merge pull request #2207 from thaJeztah/remove_engine_activate
Remove "docker engine" subcommands
commit ebca141311
@@ -22,9 +22,7 @@ import (
     "github.com/docker/cli/cli/streams"
     "github.com/docker/cli/cli/trust"
     "github.com/docker/cli/cli/version"
-    "github.com/docker/cli/internal/containerizedengine"
     dopts "github.com/docker/cli/opts"
-    clitypes "github.com/docker/cli/types"
     "github.com/docker/docker/api"
     "github.com/docker/docker/api/types"
     registrytypes "github.com/docker/docker/api/types/registry"
@@ -61,7 +59,6 @@ type Cli interface {
     ManifestStore() manifeststore.Store
     RegistryClient(bool) registryclient.RegistryClient
     ContentTrustEnabled() bool
-    NewContainerizedEngineClient(sockPath string) (clitypes.ContainerizedClient, error)
     ContextStore() store.Store
     CurrentContext() string
     StackOrchestrator(flagValue string) (Orchestrator, error)
@@ -71,19 +68,18 @@ type Cli interface {
 // DockerCli is an instance the docker command line client.
 // Instances of the client can be returned from NewDockerCli.
 type DockerCli struct {
     configFile *configfile.ConfigFile
     in *streams.In
     out *streams.Out
     err io.Writer
     client client.APIClient
     serverInfo ServerInfo
     clientInfo *ClientInfo
     contentTrust bool
-    newContainerizeClient func(string) (clitypes.ContainerizedClient, error)
     contextStore store.Store
     currentContext string
     dockerEndpoint docker.Endpoint
     contextStoreConfig store.Config
 }

 // DefaultVersion returns api.defaultVersion or DOCKER_API_VERSION if specified.
@@ -407,11 +403,6 @@ func (cli *DockerCli) NotaryClient(imgRefAndAuth trust.ImageRefAndAuth, actions
     return trust.GetNotaryRepository(cli.In(), cli.Out(), UserAgent(), imgRefAndAuth.RepoInfo(), imgRefAndAuth.AuthConfig(), actions...)
 }

-// NewContainerizedEngineClient returns a containerized engine client
-func (cli *DockerCli) NewContainerizedEngineClient(sockPath string) (clitypes.ContainerizedClient, error) {
-    return cli.newContainerizeClient(sockPath)
-}
-
 // ContextStore returns the ContextStore
 func (cli *DockerCli) ContextStore() store.Store {
     return cli.contextStore
@@ -471,13 +462,12 @@ type ClientInfo struct {
 }

 // NewDockerCli returns a DockerCli instance with all operators applied on it.
-// It applies by default the standard streams, the content trust from
-// environment and the default containerized client constructor operations.
+// It applies by default the standard streams, and the content trust from
+// environment.
 func NewDockerCli(ops ...DockerCliOption) (*DockerCli, error) {
     cli := &DockerCli{}
     defaultOps := []DockerCliOption{
         WithContentTrustFromEnv(),
-        WithContainerizedClient(containerizedengine.NewClient),
     }
     cli.contextStoreConfig = DefaultContextStoreConfig()
     ops = append(defaultOps, ops...)
@@ -9,7 +9,6 @@ import (
     "github.com/docker/cli/cli/context/docker"
     "github.com/docker/cli/cli/context/store"
     "github.com/docker/cli/cli/streams"
-    clitypes "github.com/docker/cli/types"
     "github.com/docker/docker/pkg/term"
 )

@@ -83,14 +82,6 @@ func WithContentTrust(enabled bool) DockerCliOption {
     }
 }

-// WithContainerizedClient sets the containerized client constructor on a cli.
-func WithContainerizedClient(containerizedFn func(string) (clitypes.ContainerizedClient, error)) DockerCliOption {
-    return func(cli *DockerCli) error {
-        cli.newContainerizeClient = containerizedFn
-        return nil
-    }
-}
-
 // WithContextEndpointType add support for an additional typed endpoint in the context store
 // Plugins should use this to store additional endpoints configuration in the context store
 func WithContextEndpointType(endpointName string, endpointType store.TypeGetter) DockerCliOption {
@@ -14,7 +14,6 @@ import (
     cliconfig "github.com/docker/cli/cli/config"
     "github.com/docker/cli/cli/config/configfile"
     "github.com/docker/cli/cli/flags"
-    clitypes "github.com/docker/cli/types"
     "github.com/docker/docker/api"
     "github.com/docker/docker/api/types"
     "github.com/docker/docker/client"
@@ -281,7 +280,6 @@ func TestNewDockerCliAndOperators(t *testing.T) {
     // Test default operations and also overriding default ones
     cli, err := NewDockerCli(
         WithContentTrust(true),
-        WithContainerizedClient(func(string) (clitypes.ContainerizedClient, error) { return nil, nil }),
     )
     assert.NilError(t, err)
     // Check streams are initialized
@@ -289,9 +287,6 @@ func TestNewDockerCliAndOperators(t *testing.T) {
     assert.Check(t, cli.Out() != nil)
     assert.Check(t, cli.Err() != nil)
     assert.Equal(t, cli.ContentTrustEnabled(), true)
-    client, err := cli.NewContainerizedEngineClient("")
-    assert.NilError(t, err)
-    assert.Equal(t, client, nil)

     // Apply can modify a dockerCli after construction
     inbuf := bytes.NewBuffer([]byte("input"))
@@ -2,7 +2,6 @@ package commands

 import (
     "os"
-    "runtime"

     "github.com/docker/cli/cli/command"
     "github.com/docker/cli/cli/command/builder"
@@ -10,7 +9,6 @@ import (
     "github.com/docker/cli/cli/command/config"
     "github.com/docker/cli/cli/command/container"
     "github.com/docker/cli/cli/command/context"
-    "github.com/docker/cli/cli/command/engine"
     "github.com/docker/cli/cli/command/image"
     "github.com/docker/cli/cli/command/manifest"
     "github.com/docker/cli/cli/command/network"
@@ -125,10 +123,6 @@ func AddCommands(cmd *cobra.Command, dockerCli command.Cli) {
         hide(image.NewSaveCommand(dockerCli)),
         hide(image.NewTagCommand(dockerCli)),
     )
-    if runtime.GOOS == "linux" {
-        // engine
-        cmd.AddCommand(engine.NewEngineCommand(dockerCli))
-    }
 }

 func hide(cmd *cobra.Command) *cobra.Command {
@@ -1,209 +0,0 @@
package engine

import (
    "context"
    "fmt"
    "strings"

    "github.com/docker/cli/cli/command"
    "github.com/docker/cli/cli/command/formatter"
    "github.com/docker/cli/internal/licenseutils"
    clitypes "github.com/docker/cli/types"
    "github.com/docker/docker/api/types"
    "github.com/docker/licensing/model"
    "github.com/pkg/errors"
    "github.com/spf13/cobra"
)

type activateOptions struct {
    licenseFile string
    version string
    registryPrefix string
    format string
    image string
    quiet bool
    displayOnly bool
    sockPath string
    licenseLoginFunc func(ctx context.Context, authConfig *types.AuthConfig) (licenseutils.HubUser, error)
}

// newActivateCommand creates a new `docker engine activate` command
func newActivateCommand(dockerCli command.Cli) *cobra.Command {
    var options activateOptions
    options.licenseLoginFunc = licenseutils.Login

    cmd := &cobra.Command{
        Use: "activate [OPTIONS]",
        Short: "Activate Enterprise Edition",
        Long: `Activate Enterprise Edition.

With this command you may apply an existing Docker enterprise license, or
interactively download one from Docker. In the interactive exchange, you can
sign up for a new trial, or download an existing license. If you are
currently running a Community Edition engine, the daemon will be updated to
the Enterprise Edition Docker engine with additional capabilities and long
term support.

For more information about different Docker Enterprise license types visit
https://www.docker.com/licenses

For non-interactive scriptable deployments, download your license from
https://hub.docker.com/ then specify the file with the '--license' flag.
`,
        RunE: func(cmd *cobra.Command, args []string) error {
            return runActivate(dockerCli, options)
        },
    }

    flags := cmd.Flags()

    flags.StringVar(&options.licenseFile, "license", "", "License File")
    flags.StringVar(&options.version, "version", "", "Specify engine version (default is to use currently running version)")
    flags.StringVar(&options.registryPrefix, "registry-prefix", clitypes.RegistryPrefix, "Override the default location where engine images are pulled")
    flags.StringVar(&options.image, "engine-image", "", "Specify engine image")
    flags.StringVar(&options.format, "format", "", "Pretty-print licenses using a Go template")
    flags.BoolVar(&options.displayOnly, "display-only", false, "only display license information and exit")
    flags.BoolVar(&options.quiet, "quiet", false, "Only display available licenses by ID")
    flags.StringVar(&options.sockPath, "containerd", "", "override default location of containerd endpoint")

    return cmd
}

func runActivate(cli command.Cli, options activateOptions) error {
    if !isRoot() {
        return errors.New("this command must be run as a privileged user")
    }
    ctx := context.Background()
    client, err := cli.NewContainerizedEngineClient(options.sockPath)
    if err != nil {
        return errors.Wrap(err, "unable to access local containerd")
    }
    defer client.Close()

    authConfig, err := getRegistryAuth(cli, options.registryPrefix)
    if err != nil {
        return err
    }

    var license *model.IssuedLicense

    // Lookup on hub if no license provided via params
    if options.licenseFile == "" {
        if license, err = getLicenses(ctx, authConfig, cli, options); err != nil {
            return err
        }
        if options.displayOnly {
            return nil
        }
    } else {
        if license, err = licenseutils.LoadLocalIssuedLicense(ctx, options.licenseFile); err != nil {
            return err
        }
    }
    summary, err := licenseutils.GetLicenseSummary(ctx, *license)
    if err != nil {
        return err
    }
    fmt.Fprintf(cli.Out(), "License: %s\n", summary)
    if options.displayOnly {
        return nil
    }
    dclient := cli.Client()
    if err = licenseutils.ApplyLicense(ctx, dclient, license); err != nil {
        return err
    }

    // Short circuit if the user didn't specify a version and we're already running enterprise
    if options.version == "" {
        serverVersion, err := dclient.ServerVersion(ctx)
        if err != nil {
            return err
        }
        if strings.Contains(strings.ToLower(serverVersion.Platform.Name), "enterprise") {
            fmt.Fprintln(cli.Out(), "Successfully activated engine license on existing enterprise engine.")
            return nil
        }
        options.version = serverVersion.Version
    }

    opts := clitypes.EngineInitOptions{
        RegistryPrefix: options.registryPrefix,
        EngineImage: options.image,
        EngineVersion: options.version,
    }

    if err := client.ActivateEngine(ctx, opts, cli.Out(), authConfig); err != nil {
        return err
    }
    fmt.Fprintln(cli.Out(), `Successfully activated engine.
Restart docker with 'systemctl restart docker' to complete the activation.`)
    return nil
}

func getLicenses(ctx context.Context, authConfig *types.AuthConfig, cli command.Cli, options activateOptions) (*model.IssuedLicense, error) {
    user, err := options.licenseLoginFunc(ctx, authConfig)
    if err != nil {
        return nil, err
    }
    fmt.Fprintf(cli.Out(), "Looking for existing licenses for %s...\n", user.User.Username)
    subs, err := user.GetAvailableLicenses(ctx)
    if err != nil {
        return nil, err
    }
    if len(subs) == 0 {
        return doTrialFlow(ctx, cli, user)
    }

    format := options.format
    if len(format) == 0 {
        format = formatter.TableFormatKey
    }

    updatesCtx := formatter.Context{
        Output: cli.Out(),
        Format: NewSubscriptionsFormat(format, options.quiet),
        Trunc: false,
    }
    if err := SubscriptionsWrite(updatesCtx, subs); err != nil {
        return nil, err
    }
    if options.displayOnly {
        return nil, nil
    }
    fmt.Fprintf(cli.Out(), "Please pick a license by number: ")
    var num int
    if _, err := fmt.Fscan(cli.In(), &num); err != nil {
        return nil, errors.Wrap(err, "failed to read user input")
    }
    if num < 0 || num >= len(subs) {
        return nil, fmt.Errorf("invalid choice")
    }
    return user.GetIssuedLicense(ctx, subs[num].ID)
}

func doTrialFlow(ctx context.Context, cli command.Cli, user licenseutils.HubUser) (*model.IssuedLicense, error) {
    if !command.PromptForConfirmation(cli.In(), cli.Out(),
        "No existing licenses found, would you like to set up a new Enterprise Basic Trial license?") {
        return nil, fmt.Errorf("you must have an existing enterprise license or generate a new trial to use the Enterprise Docker Engine")
    }
    targetID := user.User.ID
    // If the user is a member of any organizations, allow trials generated against them
    if len(user.Orgs) > 0 {
        fmt.Fprintf(cli.Out(), "%d\t%s\n", 0, user.User.Username)
        for i, org := range user.Orgs {
            fmt.Fprintf(cli.Out(), "%d\t%s\n", i+1, org.Orgname)
        }
        fmt.Fprintf(cli.Out(), "Please choose an account to generate the trial in:")
        var num int
        if _, err := fmt.Fscan(cli.In(), &num); err != nil {
            return nil, errors.Wrap(err, "failed to read user input")
        }
        if num < 0 || num > len(user.Orgs) {
            return nil, fmt.Errorf("invalid choice")
        }
        if num > 0 {
            targetID = user.Orgs[num-1].ID
        }
    }
    return user.GenerateTrialLicense(ctx, targetID)
}
@@ -1,148 +0,0 @@
package engine

import (
    "context"
    "fmt"
    "os"
    "testing"
    "time"

    "github.com/docker/cli/internal/licenseutils"
    "github.com/docker/cli/internal/test"
    clitypes "github.com/docker/cli/types"
    "github.com/docker/docker/api/types"
    "github.com/docker/docker/client"
    "github.com/docker/licensing"
    "github.com/docker/licensing/model"
    "gotest.tools/assert"
    "gotest.tools/fs"
    "gotest.tools/golden"
)

const (
    // nolint: lll
expiredLicense = `{"key_id":"irlYm3b9fdD8hMUXjazF39im7VQSSbAm9tfHK8cKUxJt","private_key":"aH5tTRDAVJpCRS2CRetTQVXIKgWUPfoCHODhDvNPvAbz","authorization":"ewogICAicGF5bG9hZCI6ICJleUpsZUhCcGNtRjBhVzl1SWpvaU1qQXhPQzB3TXkweE9GUXdOem93TURvd01Gb2lMQ0owYjJ0bGJpSTZJbkZtTVMxMlVtRmtialp5YjFaMldXdHJlVXN4VFdKMGNGUmpXR1ozVjA4MVRWZFFTM2cwUnpJd2NIYzlJaXdpYldGNFJXNW5hVzVsY3lJNk1Td2ljMk5oYm01cGJtZEZibUZpYkdWa0lqcDBjblZsTENKc2FXTmxibk5sVkhsd1pTSTZJazltWm14cGJtVWlMQ0owYVdWeUlqb2lVSEp2WkhWamRHbHZiaUo5IiwKICAgInNpZ25hdHVyZXMiOiBbCiAgICAgIHsKICAgICAgICAgImhlYWRlciI6IHsKICAgICAgICAgICAgImp3ayI6IHsKICAgICAgICAgICAgICAgImUiOiAiQVFBQiIsCiAgICAgICAgICAgICAgICJrZXlJRCI6ICJKN0xEOjY3VlI6TDVIWjpVN0JBOjJPNEc6NEFMMzpPRjJOOkpIR0I6RUZUSDo1Q1ZROk1GRU86QUVJVCIsCiAgICAgICAgICAgICAgICJraWQiOiAiSjdMRDo2N1ZSOkw1SFo6VTdCQToyTzRHOjRBTDM6T0YyTjpKSEdCOkVGVEg6NUNWUTpNRkVPOkFFSVQiLAogICAgICAgICAgICAgICAia3R5IjogIlJTQSIsCiAgICAgICAgICAgICAgICJuIjogInlkSXktbFU3bzdQY2VZLTQtcy1DUTVPRWdDeUY4Q3hJY1FJV3VLODRwSWlaY2lZNjczMHlDWW53TFNLVGx3LVU2VUNfUVJlV1Jpb01OTkU1RHM1VFlFWGJHRzZvbG0ycWRXYkJ3Y0NnLTJVVUhfT2NCOVd1UDZnUlBIcE1GTXN4RHpXd3ZheThKVXVIZ1lVTFVwbTFJdi1tcTdscDVuUV9SeHJUMEtaUkFRVFlMRU1FZkd3bTNoTU9fZ2VMUFMtaGdLUHRJSGxrZzZfV2NveFRHb0tQNzlkX3dhSFl4R05sN1doU25laUJTeGJwYlFBS2syMWxnNzk4WGI3dlp5RUFURE1yUlI5TWVFNkFkajVISnBZM0NveVJBUENtYUtHUkNLNHVvWlNvSXUwaEZWbEtVUHliYncwMDBHTy13YTJLTjhVd2dJSW0waTVJMXVXOUdrcTR6akJ5NXpoZ3F1VVhiRzliV1BBT1lycTVRYTgxRHhHY0JsSnlIWUFwLUREUEU5VEdnNHpZbVhqSm54WnFIRWR1R3FkZXZaOFhNSTB1a2ZrR0lJMTR3VU9pTUlJSXJYbEVjQmZfNDZJOGdRV0R6eHljWmVfSkdYLUxBdWF5WHJ5clVGZWhWTlVkWlVsOXdYTmFKQi1rYUNxejVRd2FSOTNzR3ctUVNmdEQwTnZMZTdDeU9ILUU2dmc2U3RfTmVUdmd2OFluaENpWElsWjhIT2ZJd05lN3RFRl9VY3o1T2JQeWttM3R5bHJOVWp0MFZ5QW10dGFjVkkyaUdpaGNVUHJtazRsVklaN1ZEX0xTVy1pN3lvU3VydHBzUFhjZTJwS0RJbzMwbEpHaE9fM0tVbWwyU1VaQ3F6SjF5RW1LcHlzSDVIRFc5Y3NJRkNBM2RlQWpmWlV2TjdVIgogICAgICAgICAgICB9LAogICAgICAgICAgICAiYWxnIjogIlJTMjU2IgogICAgICAgICB9LAogICAgICAgICAic2lnbmF0dXJlIjogIm5saTZIdzRrbW5KcTBSUmRXaGVfbkhZS2VJLVpKenM1U0d5SUpDakh1dWtnVzhBYklpVzFZYWJJR2NqWUt0QTY4dWN6T1hyUXZreGxWQXJLSlgzMDJzN0RpbzcxTlNPRzJVcnhsSjlibDFpd0F3a3ZyTEQ2T0p5MGxGLVg4WnRabXhPVmNQZmwzcmJwZFQ0dnlnWTdNcU1QRXdmb0IxTmlWZDYyZ1cxU2NSREZZcWw3R0FVaFVKNkp4QU15VzVaOXl5YVE0NV8wd0RMUk5mRjA5YWNXeVowTjRxVS1hZjhrUTZUUWZUX05ERzNCR3pRb2V3cHlEajRiMFBHb0diOFhLdDlwekpFdEdxM3lQM25VMFFBbk90a2gwTnZac1l1UFcyUnhDT3lRNEYzVlR3UkF2eF9HSTZrMVRpYmlKNnByUWluUy16Sjh6RE8zUjBuakE3OFBwNXcxcVpaUE9BdmtzZFNSYzJDcVMtcWhpTmF5YUhOVHpVNnpyOXlOZHR2S0o1QjNST0FmNUtjYXNiWURjTnVpeXBUNk90LUtqQ2I1dmYtWVpnc2FRNzJBdFBhSU4yeUpNREZHbmEwM0hpSjMxcTJRUlp5eTZrd3RYaGtwcDhTdEdIcHYxSWRaV09SVWttb0g5SFBzSGk4SExRLTZlM0tEY2x1RUQyMTNpZnljaVhtN0YzdHdaTTNHeDd1UXR1SldHaUlTZ2Z0QW9lVjZfUmI2VThkMmZxNzZuWHYxak5nckRRcE5waEZFd2tCdGRtZHZ2THByZVVYX3BWangza1AxN3pWbXFKNmNOOWkwWUc4WHg2VmRzcUxsRXUxQ2Rhd3Q0eko1M3VHMFlKTjRnUDZwc25yUS1uM0U1aFdlMDJ3d3dBZ3F3bGlPdmd4V1RTeXJyLXY2eDI0IiwKICAgICAgICAgInByb3RlY3RlZCI6ICJleUptYjNKdFlYUk1aVzVuZEdnaU9qRTNNeXdpWm05eWJXRjBWR0ZwYkNJNkltWlJJaXdpZEdsdFpTSTZJakl3TVRjdE1EVXRNRFZVTWpFNk5UYzZNek5hSW4wIgogICAgICB9CiAgIF0KfQ=="}`
)

func TestActivateNoContainerd(t *testing.T) {
    testCli.SetContainerizedEngineClient(
        func(string) (clitypes.ContainerizedClient, error) {
            return nil, fmt.Errorf("some error")
        },
    )
    isRoot = func() bool { return true }
    cmd := newActivateCommand(testCli)
    cmd.Flags().Set("license", "invalidpath")
    cmd.SilenceUsage = true
    cmd.SilenceErrors = true
    err := cmd.Execute()
    assert.ErrorContains(t, err, "unable to access local containerd")
}

func TestActivateBadLicense(t *testing.T) {
    isRoot = func() bool { return true }
    c := test.NewFakeCli(&verClient{client.Client{}, types.Version{}, nil, types.Info{}, nil})
    c.SetContainerizedEngineClient(
        func(string) (clitypes.ContainerizedClient, error) {
            return &fakeContainerizedEngineClient{}, nil
        },
    )
    cmd := newActivateCommand(c)
    cmd.SilenceUsage = true
    cmd.SilenceErrors = true
    cmd.Flags().Set("license", "invalidpath")
    err := cmd.Execute()
    assert.Assert(t, os.IsNotExist(err))
}

func TestActivateExpiredLicenseDryRun(t *testing.T) {
    dir := fs.NewDir(t, "license", fs.WithFile("docker.lic", expiredLicense, fs.WithMode(0644)))
    defer dir.Remove()
    filename := dir.Join("docker.lic")
    isRoot = func() bool { return true }
    c := test.NewFakeCli(&verClient{client.Client{}, types.Version{}, nil, types.Info{}, nil})
    c.SetContainerizedEngineClient(
        func(string) (clitypes.ContainerizedClient, error) {
            return &fakeContainerizedEngineClient{}, nil
        },
    )
    cmd := newActivateCommand(c)
    cmd.SilenceUsage = true
    cmd.SilenceErrors = true
    cmd.Flags().Set("license", filename)
    cmd.Flags().Set("display-only", "true")
    c.OutBuffer().Reset()
    err := cmd.Execute()
    assert.NilError(t, err)
    golden.Assert(t, c.OutBuffer().String(), "expired-license-display-only.golden")
}

type mockLicenseClient struct{}

func (c mockLicenseClient) LoginViaAuth(ctx context.Context, username, password string) (authToken string, err error) {
    return "", fmt.Errorf("not implemented")
}

func (c mockLicenseClient) GetHubUserOrgs(ctx context.Context, authToken string) (orgs []model.Org, err error) {
    return nil, fmt.Errorf("not implemented")
}
func (c mockLicenseClient) GetHubUserByName(ctx context.Context, username string) (user *model.User, err error) {
    return nil, fmt.Errorf("not implemented")
}
func (c mockLicenseClient) VerifyLicense(ctx context.Context, license model.IssuedLicense) (res *model.CheckResponse, err error) {
    return nil, fmt.Errorf("not implemented")
}
func (c mockLicenseClient) GenerateNewTrialSubscription(ctx context.Context, authToken, dockerID string) (subscriptionID string, err error) {
    return "", fmt.Errorf("not implemented")
}
func (c mockLicenseClient) ListSubscriptions(ctx context.Context, authToken, dockerID string) (response []*model.Subscription, err error) {
    expires := time.Date(2010, time.January, 1, 0, 0, 0, 0, time.UTC)
    return []*model.Subscription{
        {
            State: "active",
            Expires: &expires,
        },
    }, nil
}
func (c mockLicenseClient) ListSubscriptionsDetails(ctx context.Context, authToken, dockerID string) (response []*model.SubscriptionDetail, err error) {
    return nil, fmt.Errorf("not implemented")
}
func (c mockLicenseClient) DownloadLicenseFromHub(ctx context.Context, authToken, subscriptionID string) (license *model.IssuedLicense, err error) {
    return nil, fmt.Errorf("not implemented")
}
func (c mockLicenseClient) ParseLicense(license []byte) (parsedLicense *model.IssuedLicense, err error) {
    return nil, fmt.Errorf("not implemented")
}
func (c mockLicenseClient) StoreLicense(ctx context.Context, dclnt licensing.WrappedDockerClient, licenses *model.IssuedLicense, localRootDir string) error {
    return fmt.Errorf("not implemented")
}
func (c mockLicenseClient) LoadLocalLicense(ctx context.Context, dclnt licensing.WrappedDockerClient) (*model.Subscription, error) {
    return nil, fmt.Errorf("not implemented")
}
func (c mockLicenseClient) SummarizeLicense(res *model.CheckResponse) *model.Subscription {
    return nil
}
func TestActivateDisplayOnlyHub(t *testing.T) {
    isRoot = func() bool { return true }
    c := test.NewFakeCli(&verClient{client.Client{}, types.Version{}, nil, types.Info{}, nil})
    c.SetContainerizedEngineClient(
        func(string) (clitypes.ContainerizedClient, error) {
            return &fakeContainerizedEngineClient{}, nil
        },
    )

    hubUser := licenseutils.HubUser{
        Client: mockLicenseClient{},
    }
    options := activateOptions{
        licenseLoginFunc: func(ctx context.Context, authConfig *types.AuthConfig) (licenseutils.HubUser, error) {
            return hubUser, nil
        },
        displayOnly: true,
    }
    c.OutBuffer().Reset()
    err := runActivate(c, options)

    assert.NilError(t, err)
    golden.Assert(t, c.OutBuffer().String(), "expired-hub-license-display-only.golden")
}
@@ -1,13 +0,0 @@
// +build !windows

package engine

import (
    "golang.org/x/sys/unix"
)

var (
    isRoot = func() bool {
        return unix.Geteuid() == 0
    }
)
@@ -1,9 +0,0 @@
// +build windows

package engine

var (
    isRoot = func() bool {
        return true
    }
)
@@ -1,34 +0,0 @@
package engine

import (
    "context"

    "github.com/docker/cli/cli/command"
    "github.com/docker/cli/cli/trust"
    clitypes "github.com/docker/cli/types"
    "github.com/docker/distribution/reference"
    "github.com/docker/docker/api/types"
    registrytypes "github.com/docker/docker/api/types/registry"
    "github.com/pkg/errors"
)

func getRegistryAuth(cli command.Cli, registryPrefix string) (*types.AuthConfig, error) {
    if registryPrefix == "" {
        registryPrefix = clitypes.RegistryPrefix
    }
    distributionRef, err := reference.ParseNormalizedNamed(registryPrefix)
    if err != nil {
        return nil, errors.Wrapf(err, "failed to parse image name: %s", registryPrefix)
    }
    imgRefAndAuth, err := trust.GetImageReferencesAndAuth(context.Background(), nil, authResolver(cli), distributionRef.String())
    if err != nil {
        return nil, errors.Wrap(err, "failed to get imgRefAndAuth")
    }
    return imgRefAndAuth.AuthConfig(), nil
}

func authResolver(cli command.Cli) func(ctx context.Context, index *registrytypes.IndexInfo) types.AuthConfig {
    return func(ctx context.Context, index *registrytypes.IndexInfo) types.AuthConfig {
        return command.ResolveAuthConfig(ctx, cli, index)
    }
}
@@ -1,125 +0,0 @@
package engine

import (
    "context"
    "fmt"

    "github.com/docker/cli/cli"
    "github.com/docker/cli/cli/command"
    "github.com/docker/cli/cli/command/formatter"
    "github.com/docker/cli/internal/versions"
    clitypes "github.com/docker/cli/types"
    "github.com/pkg/errors"
    "github.com/spf13/cobra"
)

type checkOptions struct {
    registryPrefix string
    preReleases bool
    engineImage string
    downgrades bool
    upgrades bool
    format string
    quiet bool
    sockPath string
}

func newCheckForUpdatesCommand(dockerCli command.Cli) *cobra.Command {
    var options checkOptions

    cmd := &cobra.Command{
        Use: "check [OPTIONS]",
        Short: "Check for available engine updates",
        Args: cli.NoArgs,
        RunE: func(cmd *cobra.Command, args []string) error {
            return runCheck(dockerCli, options)
        },
    }
    flags := cmd.Flags()
    flags.StringVar(&options.registryPrefix, "registry-prefix", clitypes.RegistryPrefix, "Override the existing location where engine images are pulled")
    flags.BoolVar(&options.downgrades, "downgrades", false, "Report downgrades (default omits older versions)")
    flags.BoolVar(&options.preReleases, "pre-releases", false, "Include pre-release versions")
    flags.StringVar(&options.engineImage, "engine-image", "", "Specify engine image (default uses the same image as currently running)")
    flags.BoolVar(&options.upgrades, "upgrades", true, "Report available upgrades")
    flags.StringVar(&options.format, "format", "", "Pretty-print updates using a Go template")
    flags.BoolVarP(&options.quiet, "quiet", "q", false, "Only display available versions")
    flags.StringVar(&options.sockPath, "containerd", "", "override default location of containerd endpoint")

    return cmd
}

func runCheck(dockerCli command.Cli, options checkOptions) error {
    if !isRoot() {
        return errors.New("this command must be run as a privileged user")
    }
    ctx := context.Background()
    client := dockerCli.Client()
    serverVersion, err := client.ServerVersion(ctx)
    if err != nil {
        return err
    }

    availVersions, err := versions.GetEngineVersions(ctx, dockerCli.RegistryClient(false), options.registryPrefix, options.engineImage, serverVersion.Version)
    if err != nil {
        return err
    }

    availUpdates := []clitypes.Update{
        {Type: "current", Version: serverVersion.Version},
    }
    if len(availVersions.Patches) > 0 {
        availUpdates = append(availUpdates,
            processVersions(
                serverVersion.Version,
                "patch",
                options.preReleases,
                availVersions.Patches)...)
    }
    if options.upgrades {
        availUpdates = append(availUpdates,
            processVersions(
                serverVersion.Version,
                "upgrade",
                options.preReleases,
                availVersions.Upgrades)...)
    }
    if options.downgrades {
        availUpdates = append(availUpdates,
            processVersions(
                serverVersion.Version,
                "downgrade",
                options.preReleases,
                availVersions.Downgrades)...)
    }

    format := options.format
    if len(format) == 0 {
        format = formatter.TableFormatKey
    }

    updatesCtx := formatter.Context{
        Output: dockerCli.Out(),
        Format: NewUpdatesFormat(format, options.quiet),
        Trunc: false,
    }
    return UpdatesWrite(updatesCtx, availUpdates)
}

func processVersions(currentVersion, verType string,
    includePrerelease bool,
    availVersions []clitypes.DockerVersion) []clitypes.Update {
    availUpdates := []clitypes.Update{}
    for _, ver := range availVersions {
        if !includePrerelease && ver.Prerelease() != "" {
            continue
        }
        if ver.Tag != currentVersion {
            availUpdates = append(availUpdates, clitypes.Update{
                Type: verType,
                Version: ver.Tag,
                Notes: fmt.Sprintf("%s/%s", clitypes.ReleaseNotePrefix, ver.Tag),
            })
        }
    }
    return availUpdates
}
@@ -1,114 +0,0 @@
package engine

import (
    "context"
    "fmt"
    "testing"

    manifesttypes "github.com/docker/cli/cli/manifest/types"
    "github.com/docker/cli/internal/test"
    "github.com/docker/distribution"
    "github.com/docker/distribution/reference"
    "github.com/docker/docker/api/types"
    "github.com/docker/docker/client"
    "github.com/opencontainers/go-digest"
    "gotest.tools/assert"
    "gotest.tools/golden"
)

var (
    testCli = test.NewFakeCli(&client.Client{})
)

type verClient struct {
    client.Client
    ver types.Version
    verErr error
    info types.Info
    infoErr error
}

func (c *verClient) ServerVersion(ctx context.Context) (types.Version, error) {
    return c.ver, c.verErr
}

func (c *verClient) Info(ctx context.Context) (types.Info, error) {
    return c.info, c.infoErr
}

type testRegistryClient struct {
    tags []string
}

func (c testRegistryClient) GetManifest(ctx context.Context, ref reference.Named) (manifesttypes.ImageManifest, error) {
    return manifesttypes.ImageManifest{}, nil
}
func (c testRegistryClient) GetManifestList(ctx context.Context, ref reference.Named) ([]manifesttypes.ImageManifest, error) {
    return nil, nil
}
func (c testRegistryClient) MountBlob(ctx context.Context, source reference.Canonical, target reference.Named) error {
    return nil
}

func (c testRegistryClient) PutManifest(ctx context.Context, ref reference.Named, manifest distribution.Manifest) (digest.Digest, error) {
    return "", nil
}
func (c testRegistryClient) GetTags(ctx context.Context, ref reference.Named) ([]string, error) {
    return c.tags, nil
}

func TestCheckForUpdatesNoCurrentVersion(t *testing.T) {
    isRoot = func() bool { return true }
    c := test.NewFakeCli(&verClient{client.Client{}, types.Version{}, nil, types.Info{}, nil})
    c.SetRegistryClient(testRegistryClient{})
    cmd := newCheckForUpdatesCommand(c)
    cmd.SilenceUsage = true
    cmd.SilenceErrors = true
    err := cmd.Execute()
    assert.ErrorContains(t, err, "no such file or directory")
}

func TestCheckForUpdatesGetEngineVersionsHappy(t *testing.T) {
    c := test.NewFakeCli(&verClient{client.Client{}, types.Version{Version: "1.1.0"}, nil, types.Info{ServerVersion: "1.1.0"}, nil})
    c.SetRegistryClient(testRegistryClient{[]string{
        "1.0.1", "1.0.2", "1.0.3-beta1",
        "1.1.1", "1.1.2", "1.1.3-beta1",
        "1.2.0", "2.0.0", "2.1.0-beta1",
    }})

    isRoot = func() bool { return true }
    cmd := newCheckForUpdatesCommand(c)
    cmd.Flags().Set("pre-releases", "true")
    cmd.Flags().Set("downgrades", "true")
    cmd.Flags().Set("engine-image", "engine-community")
    cmd.SilenceUsage = true
    cmd.SilenceErrors = true
    err := cmd.Execute()
    assert.NilError(t, err)
    golden.Assert(t, c.OutBuffer().String(), "check-all.golden")

    c.OutBuffer().Reset()
    cmd.Flags().Set("pre-releases", "false")
    cmd.Flags().Set("downgrades", "true")
    err = cmd.Execute()
    assert.NilError(t, err)
    fmt.Println(c.OutBuffer().String())
    golden.Assert(t, c.OutBuffer().String(), "check-no-prerelease.golden")

    c.OutBuffer().Reset()
    cmd.Flags().Set("pre-releases", "false")
    cmd.Flags().Set("downgrades", "false")
    err = cmd.Execute()
    assert.NilError(t, err)
    fmt.Println(c.OutBuffer().String())
    golden.Assert(t, c.OutBuffer().String(), "check-no-downgrades.golden")

    c.OutBuffer().Reset()
    cmd.Flags().Set("pre-releases", "false")
    cmd.Flags().Set("downgrades", "false")
    cmd.Flags().Set("upgrades", "false")
    err = cmd.Execute()
    assert.NilError(t, err)
    fmt.Println(c.OutBuffer().String())
    golden.Assert(t, c.OutBuffer().String(), "check-patches-only.golden")
}
@@ -1,101 +0,0 @@
package engine

import (
    "context"

    "github.com/containerd/containerd"
    registryclient "github.com/docker/cli/cli/registry/client"
    clitypes "github.com/docker/cli/types"
    "github.com/docker/docker/api/types"
)

type (
    fakeContainerizedEngineClient struct {
        closeFunc func() error
        activateEngineFunc func(ctx context.Context,
            opts clitypes.EngineInitOptions,
            out clitypes.OutStream,
            authConfig *types.AuthConfig) error
        initEngineFunc func(ctx context.Context,
            opts clitypes.EngineInitOptions,
            out clitypes.OutStream,
            authConfig *types.AuthConfig,
            healthfn func(context.Context) error) error
        doUpdateFunc func(ctx context.Context,
            opts clitypes.EngineInitOptions,
            out clitypes.OutStream,
            authConfig *types.AuthConfig) error
        getEngineVersionsFunc func(ctx context.Context,
            registryClient registryclient.RegistryClient,
            currentVersion,
            imageName string) (clitypes.AvailableVersions, error)

        getEngineFunc func(ctx context.Context) (containerd.Container, error)
        removeEngineFunc func(ctx context.Context) error
        getCurrentEngineVersionFunc func(ctx context.Context) (clitypes.EngineInitOptions, error)
    }
)

func (w *fakeContainerizedEngineClient) Close() error {
    if w.closeFunc != nil {
        return w.closeFunc()
    }
    return nil
}

func (w *fakeContainerizedEngineClient) ActivateEngine(ctx context.Context,
    opts clitypes.EngineInitOptions,
    out clitypes.OutStream,
    authConfig *types.AuthConfig) error {
    if w.activateEngineFunc != nil {
        return w.activateEngineFunc(ctx, opts, out, authConfig)
    }
    return nil
}
func (w *fakeContainerizedEngineClient) InitEngine(ctx context.Context,
    opts clitypes.EngineInitOptions,
    out clitypes.OutStream,
    authConfig *types.AuthConfig,
    healthfn func(context.Context) error) error {
    if w.initEngineFunc != nil {
        return w.initEngineFunc(ctx, opts, out, authConfig, healthfn)
    }
    return nil
}
func (w *fakeContainerizedEngineClient) DoUpdate(ctx context.Context,
    opts clitypes.EngineInitOptions,
    out clitypes.OutStream,
    authConfig *types.AuthConfig) error {
    if w.doUpdateFunc != nil {
        return w.doUpdateFunc(ctx, opts, out, authConfig)
    }
    return nil
}
func (w *fakeContainerizedEngineClient) GetEngineVersions(ctx context.Context,
    registryClient registryclient.RegistryClient,
    currentVersion, imageName string) (clitypes.AvailableVersions, error) {

    if w.getEngineVersionsFunc != nil {
        return w.getEngineVersionsFunc(ctx, registryClient, currentVersion, imageName)
    }
    return clitypes.AvailableVersions{}, nil
}

func (w *fakeContainerizedEngineClient) GetEngine(ctx context.Context) (containerd.Container, error) {
    if w.getEngineFunc != nil {
        return w.getEngineFunc(ctx)
    }
    return nil, nil
}
func (w *fakeContainerizedEngineClient) RemoveEngine(ctx context.Context) error {
    if w.removeEngineFunc != nil {
        return w.removeEngineFunc(ctx)
    }
    return nil
}
func (w *fakeContainerizedEngineClient) GetCurrentEngineVersion(ctx context.Context) (clitypes.EngineInitOptions, error) {
    if w.getCurrentEngineVersionFunc != nil {
        return w.getCurrentEngineVersionFunc(ctx)
    }
    return clitypes.EngineInitOptions{}, nil
}
@@ -1,23 +0,0 @@
package engine

import (
    "github.com/docker/cli/cli"
    "github.com/docker/cli/cli/command"
    "github.com/spf13/cobra"
)

// NewEngineCommand returns a cobra command for `engine` subcommands
func NewEngineCommand(dockerCli command.Cli) *cobra.Command {
    cmd := &cobra.Command{
        Use: "engine COMMAND",
        Short: "Manage the docker engine",
        Args: cli.NoArgs,
        RunE: command.ShowHelp(dockerCli.Err()),
    }
    cmd.AddCommand(
        newActivateCommand(dockerCli),
        newCheckForUpdatesCommand(dockerCli),
        newUpdateCommand(dockerCli),
    )
    return cmd
}
@@ -1,14 +0,0 @@
package engine

import (
    "testing"

    "gotest.tools/assert"
)

func TestNewEngineCommand(t *testing.T) {
    cmd := NewEngineCommand(testCli)

    subcommands := cmd.Commands()
    assert.Assert(t, len(subcommands) == 3)
}
@@ -1,10 +0,0 @@
package engine

import (
    clitypes "github.com/docker/cli/types"
)

type extendedEngineInitOptions struct {
    clitypes.EngineInitOptions
    sockPath string
}
@@ -1,155 +0,0 @@
package engine

import (
    "time"

    "github.com/docker/cli/cli/command/formatter"
    "github.com/docker/cli/internal/licenseutils"
    "github.com/docker/licensing/model"
)

const (
    defaultSubscriptionsTableFormat = "table {{.Num}}\t{{.Owner}}\t{{.ProductID}}\t{{.Expires}}\t{{.ComponentsString}}"
    defaultSubscriptionsQuietFormat = "{{.Num}}:{{.Summary}}"

    numHeader = "NUM"
    ownerHeader = "OWNER"
    licenseNameHeader = "NAME"
    idHeader = "ID"
    dockerIDHeader = "DOCKER ID"
    productIDHeader = "PRODUCT ID"
    productRatePlanHeader = "PRODUCT RATE PLAN"
    productRatePlanIDHeader = "PRODUCT RATE PLAN ID"
    startHeader = "START"
    expiresHeader = "EXPIRES"
    stateHeader = "STATE"
    eusaHeader = "EUSA"
    pricingComponentsHeader = "PRICING COMPONENTS"
)

// NewSubscriptionsFormat returns a Format for rendering using a license Context
func NewSubscriptionsFormat(source string, quiet bool) formatter.Format {
    switch source {
    case formatter.TableFormatKey:
        if quiet {
            return defaultSubscriptionsQuietFormat
        }
        return defaultSubscriptionsTableFormat
    case formatter.RawFormatKey:
        if quiet {
            return `license: {{.ID}}`
        }
        return `license: {{.ID}}\nname: {{.Name}}\nowner: {{.Owner}}\ncomponents: {{.ComponentsString}}\n`
    }
    return formatter.Format(source)
}

// SubscriptionsWrite writes the context
func SubscriptionsWrite(ctx formatter.Context, subs []licenseutils.LicenseDisplay) error {
    render := func(format func(subContext formatter.SubContext) error) error {
        for _, sub := range subs {
            licenseCtx := &licenseContext{trunc: ctx.Trunc, l: sub}
            if err := format(licenseCtx); err != nil {
                return err
            }
        }
        return nil
    }
    licenseCtx := licenseContext{}
    licenseCtx.Header = map[string]string{
        "Num": numHeader,
        "Owner": ownerHeader,
        "Name": licenseNameHeader,
        "ID": idHeader,
        "DockerID": dockerIDHeader,
        "ProductID": productIDHeader,
        "ProductRatePlan": productRatePlanHeader,
        "ProductRatePlanID": productRatePlanIDHeader,
        "Start": startHeader,
        "Expires": expiresHeader,
        "State": stateHeader,
        "Eusa": eusaHeader,
        "ComponentsString": pricingComponentsHeader,
    }
    return ctx.Write(&licenseCtx, render)
}

type licenseContext struct {
    formatter.HeaderContext
    trunc bool
    l licenseutils.LicenseDisplay
}

func (c *licenseContext) MarshalJSON() ([]byte, error) {
    return formatter.MarshalJSON(c)
}

func (c *licenseContext) Num() int {
    return c.l.Num
}

func (c *licenseContext) Owner() string {
    return c.l.Owner
}

func (c *licenseContext) ComponentsString() string {
    return c.l.ComponentsString
}

func (c *licenseContext) Summary() string {
    return c.l.String()
}

func (c *licenseContext) Name() string {
    return c.l.Name
}

func (c *licenseContext) ID() string {
    return c.l.ID
}

func (c *licenseContext) DockerID() string {
    return c.l.DockerID
}

func (c *licenseContext) ProductID() string {
    return c.l.ProductID
}

func (c *licenseContext) ProductRatePlan() string {
    return c.l.ProductRatePlan
}

func (c *licenseContext) ProductRatePlanID() string {
    return c.l.ProductRatePlanID
}

func (c *licenseContext) Start() *time.Time {
    return c.l.Start
}

func (c *licenseContext) Expires() *time.Time {
    return c.l.Expires
}

func (c *licenseContext) State() string {
    return c.l.State
}

func (c *licenseContext) Eusa() *model.EusaState {
    return c.l.Eusa
}

func (c *licenseContext) PricingComponents() []model.SubscriptionPricingComponent {
    // Dereference the pricing component pointers in the pricing components
    // so it can be rendered properly with the template formatter

    var ret []model.SubscriptionPricingComponent
    for _, spc := range c.l.PricingComponents {
        if spc == nil {
            continue
        }
        ret = append(ret, *spc)
    }
    return ret
}
@@ -1,257 +0,0 @@
package engine

import (
    "bytes"
    "encoding/json"
    "strings"
    "testing"
    "time"

    "github.com/docker/cli/cli/command/formatter"
    "github.com/docker/cli/internal/licenseutils"
    "github.com/docker/licensing/model"
    "gotest.tools/assert"
    is "gotest.tools/assert/cmp"
)

func TestSubscriptionContextWrite(t *testing.T) {
    cases := []struct {
        context formatter.Context
        expected string
    }{
        // Errors
        {
            formatter.Context{Format: "{{InvalidFunction}}"},
            `Template parsing error: template: :1: function "InvalidFunction" not defined
`,
        },
        {
            formatter.Context{Format: "{{nil}}"},
            `Template parsing error: template: :1:2: executing "" at <nil>: nil is not a command
`,
        },
        // Table format
        {
            formatter.Context{Format: NewSubscriptionsFormat("table", false)},
            `NUM OWNER PRODUCT ID EXPIRES PRICING COMPONENTS
1 owner1 productid1 2020-01-01 10:00:00 +0000 UTC compstring
2 owner2 productid2 2020-01-01 10:00:00 +0000 UTC compstring
`,
        },
        {
            formatter.Context{Format: NewSubscriptionsFormat("table", true)},
            `1:License Name: name1 Components: 10 nodes Expiration date: 2020-01-01
2:License Name: name2 Components: 20 nodes Expiration date: 2020-01-01
`,
        },
        {
            formatter.Context{Format: NewSubscriptionsFormat("table {{.Owner}}", false)},
            `OWNER
owner1
owner2
`,
        },
        {
            formatter.Context{Format: NewSubscriptionsFormat("table {{.Owner}}", true)},
            `OWNER
owner1
owner2
`,
        },
        // Raw Format
        {
            formatter.Context{Format: NewSubscriptionsFormat("raw", false)},
            `license: id1
name: name1
owner: owner1
components: compstring

license: id2
name: name2
owner: owner2
components: compstring

`,
        },
        {
            formatter.Context{Format: NewSubscriptionsFormat("raw", true)},
            `license: id1
license: id2
`,
        },
        // Custom Format
        {
            formatter.Context{Format: NewSubscriptionsFormat("{{.Owner}}", false)},
            `owner1
owner2
`,
        },
    }

    expiration, _ := time.Parse(time.RFC822, "01 Jan 20 10:00 UTC")

    for _, testcase := range cases {
        subscriptions := []licenseutils.LicenseDisplay{
            {
                Num: 1,
                Owner: "owner1",
                Subscription: model.Subscription{
                    ID: "id1",
                    Name: "name1",
                    ProductID: "productid1",
                    Expires: &expiration,
                    PricingComponents: model.PricingComponents{
                        &model.SubscriptionPricingComponent{
                            Name: "nodes",
                            Value: 10,
                        },
                    },
                },
                ComponentsString: "compstring",
            },
            {
                Num: 2,
                Owner: "owner2",
                Subscription: model.Subscription{
                    ID: "id2",
                    Name: "name2",
                    ProductID: "productid2",
                    Expires: &expiration,
                    PricingComponents: model.PricingComponents{
                        &model.SubscriptionPricingComponent{
                            Name: "nodes",
                            Value: 20,
                        },
                    },
                },
                ComponentsString: "compstring",
            },
        }
        out := &bytes.Buffer{}
        testcase.context.Output = out
        err := SubscriptionsWrite(testcase.context, subscriptions)
        if err != nil {
            assert.Error(t, err, testcase.expected)
        } else {
            assert.Check(t, is.Equal(testcase.expected, out.String()))
        }
    }
}

func TestSubscriptionContextWriteJSON(t *testing.T) {
    expiration, _ := time.Parse(time.RFC822, "01 Jan 20 10:00 UTC")
    subscriptions := []licenseutils.LicenseDisplay{
        {
            Num: 1,
            Owner: "owner1",
            Subscription: model.Subscription{
                ID: "id1",
                Name: "name1",
                ProductID: "productid1",
                Expires: &expiration,
                PricingComponents: model.PricingComponents{
                    &model.SubscriptionPricingComponent{
                        Name: "nodes",
                        Value: 10,
                    },
                },
            },
            ComponentsString: "compstring",
        },
        {
            Num: 2,
            Owner: "owner2",
            Subscription: model.Subscription{
                ID: "id2",
                Name: "name2",
                ProductID: "productid2",
                Expires: &expiration,
                PricingComponents: model.PricingComponents{
                    &model.SubscriptionPricingComponent{
                        Name: "nodes",
                        Value: 20,
                    },
                },
            },
            ComponentsString: "compstring",
        },
    }
    expectedJSONs := []map[string]interface{}{
        {
            "Owner": "owner1",
            "ComponentsString": "compstring",
            "Expires": "2020-01-01T10:00:00Z",
            "DockerID": "",
            "Eusa": nil,
            "ID": "id1",
            "Start": nil,
            "Name": "name1",
            "Num": float64(1),
            "PricingComponents": []interface{}{
                map[string]interface{}{
                    "name": "nodes",
                    "value": float64(10),
                },
            },
            "ProductID": "productid1",
            "ProductRatePlan": "",
            "ProductRatePlanID": "",
            "State": "",
            "Summary": "License Name: name1\tComponents: 10 nodes\tExpiration date: 2020-01-01",
        },
        {
            "Owner": "owner2",
            "ComponentsString": "compstring",
            "Expires": "2020-01-01T10:00:00Z",
            "DockerID": "",
            "Eusa": nil,
            "ID": "id2",
            "Start": nil,
            "Name": "name2",
            "Num": float64(2),
            "PricingComponents": []interface{}{
                map[string]interface{}{
                    "name": "nodes",
                    "value": float64(20),
                },
            },
            "ProductID": "productid2",
            "ProductRatePlan": "",
            "ProductRatePlanID": "",
            "State": "",
            "Summary": "License Name: name2\tComponents: 20 nodes\tExpiration date: 2020-01-01",
        },
    }

    out := &bytes.Buffer{}
    err := SubscriptionsWrite(formatter.Context{Format: "{{json .}}", Output: out}, subscriptions)
    if err != nil {
        t.Fatal(err)
    }
    for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") {
        var m map[string]interface{}
        if err := json.Unmarshal([]byte(line), &m); err != nil {
            t.Fatal(err)
        }
        assert.Check(t, is.DeepEqual(expectedJSONs[i], m))
    }
}

func TestSubscriptionContextWriteJSONField(t *testing.T) {
    subscriptions := []licenseutils.LicenseDisplay{
        {Num: 1, Owner: "owner1"},
        {Num: 2, Owner: "owner2"},
    }
    out := &bytes.Buffer{}
    err := SubscriptionsWrite(formatter.Context{Format: "{{json .Owner}}", Output: out}, subscriptions)
    if err != nil {
        t.Fatal(err)
    }
    for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") {
        var s string
        if err := json.Unmarshal([]byte(line), &s); err != nil {
            t.Fatal(err)
        }
        assert.Check(t, is.Equal(subscriptions[i].Owner, s))
    }
}
@ -1,11 +0,0 @@
TYPE        VERSION        NOTES
current     1.1.0
patch       1.1.1          https://docs.docker.com/releasenotes/1.1.1
patch       1.1.2          https://docs.docker.com/releasenotes/1.1.2
patch       1.1.3-beta1    https://docs.docker.com/releasenotes/1.1.3-beta1
upgrade     1.2.0          https://docs.docker.com/releasenotes/1.2.0
upgrade     2.0.0          https://docs.docker.com/releasenotes/2.0.0
upgrade     2.1.0-beta1    https://docs.docker.com/releasenotes/2.1.0-beta1
downgrade   1.0.1          https://docs.docker.com/releasenotes/1.0.1
downgrade   1.0.2          https://docs.docker.com/releasenotes/1.0.2
downgrade   1.0.3-beta1    https://docs.docker.com/releasenotes/1.0.3-beta1

@ -1,6 +0,0 @@
TYPE        VERSION        NOTES
current     1.1.0
patch       1.1.1          https://docs.docker.com/releasenotes/1.1.1
patch       1.1.2          https://docs.docker.com/releasenotes/1.1.2
upgrade     1.2.0          https://docs.docker.com/releasenotes/1.2.0
upgrade     2.0.0          https://docs.docker.com/releasenotes/2.0.0

@ -1,8 +0,0 @@
TYPE        VERSION        NOTES
current     1.1.0
patch       1.1.1          https://docs.docker.com/releasenotes/1.1.1
patch       1.1.2          https://docs.docker.com/releasenotes/1.1.2
upgrade     1.2.0          https://docs.docker.com/releasenotes/1.2.0
upgrade     2.0.0          https://docs.docker.com/releasenotes/2.0.0
downgrade   1.0.1          https://docs.docker.com/releasenotes/1.0.1
downgrade   1.0.2          https://docs.docker.com/releasenotes/1.0.2

@ -1,4 +0,0 @@
TYPE        VERSION        NOTES
current     1.1.0
patch       1.1.1          https://docs.docker.com/releasenotes/1.1.1
patch       1.1.2          https://docs.docker.com/releasenotes/1.1.2

@ -1,3 +0,0 @@
Looking for existing licenses for ...
NUM   OWNER   PRODUCT ID   EXPIRES                         PRICING COMPONENTS
0                          2010-01-01 00:00:00 +0000 UTC

@ -1 +0,0 @@
License: Components: 1 Nodes Expiration date: 2018-03-18 Expired! You will no longer receive updates. Please renew at https://docker.com/licensing

@ -1,55 +0,0 @@
package engine

import (
	"context"
	"fmt"

	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli/command"
	clitypes "github.com/docker/cli/types"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
)

func newUpdateCommand(dockerCli command.Cli) *cobra.Command {
	var options extendedEngineInitOptions

	cmd := &cobra.Command{
		Use:   "update [OPTIONS]",
		Short: "Update a local engine",
		Args:  cli.NoArgs,
		RunE: func(cmd *cobra.Command, args []string) error {
			return runUpdate(dockerCli, options)
		},
	}
	flags := cmd.Flags()

	flags.StringVar(&options.EngineVersion, "version", "", "Specify engine version")
	flags.StringVar(&options.EngineImage, "engine-image", "", "Specify engine image (default uses the same image as currently running)")
	flags.StringVar(&options.RegistryPrefix, "registry-prefix", clitypes.RegistryPrefix, "Override the current location where engine images are pulled")
	flags.StringVar(&options.sockPath, "containerd", "", "override default location of containerd endpoint")

	return cmd
}

func runUpdate(dockerCli command.Cli, options extendedEngineInitOptions) error {
	if !isRoot() {
		return errors.New("this command must be run as a privileged user")
	}
	ctx := context.Background()
	client, err := dockerCli.NewContainerizedEngineClient(options.sockPath)
	if err != nil {
		return errors.Wrap(err, "unable to access local containerd")
	}
	defer client.Close()
	authConfig, err := getRegistryAuth(dockerCli, options.RegistryPrefix)
	if err != nil {
		return err
	}
	if err := client.DoUpdate(ctx, options.EngineInitOptions, dockerCli.Out(), authConfig); err != nil {
		return err
	}
	fmt.Fprintln(dockerCli.Out(), `Successfully updated engine.
Restart docker with 'systemctl restart docker' to complete the update.`)
	return nil
}

@ -1,40 +0,0 @@
package engine

import (
	"fmt"
	"testing"

	"github.com/docker/cli/internal/test"
	clitypes "github.com/docker/cli/types"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"gotest.tools/assert"
)

func TestUpdateNoContainerd(t *testing.T) {
	testCli.SetContainerizedEngineClient(
		func(string) (clitypes.ContainerizedClient, error) {
			return nil, fmt.Errorf("some error")
		},
	)
	cmd := newUpdateCommand(testCli)
	cmd.SilenceUsage = true
	cmd.SilenceErrors = true
	err := cmd.Execute()
	assert.ErrorContains(t, err, "unable to access local containerd")
}

func TestUpdateHappy(t *testing.T) {
	c := test.NewFakeCli(&verClient{client.Client{}, types.Version{Version: "1.1.0"}, nil, types.Info{ServerVersion: "1.1.0"}, nil})
	c.SetContainerizedEngineClient(
		func(string) (clitypes.ContainerizedClient, error) {
			return &fakeContainerizedEngineClient{}, nil
		},
	)
	cmd := newUpdateCommand(c)
	cmd.Flags().Set("registry-prefix", clitypes.RegistryPrefix)
	cmd.Flags().Set("version", "someversion")
	cmd.Flags().Set("engine-image", "someimage")
	err := cmd.Execute()
	assert.NilError(t, err)
}

@ -1,74 +0,0 @@
package engine

import (
	"github.com/docker/cli/cli/command/formatter"
	clitypes "github.com/docker/cli/types"
)

const (
	defaultUpdatesTableFormat = "table {{.Type}}\t{{.Version}}\t{{.Notes}}"
	defaultUpdatesQuietFormat = "{{.Version}}"

	updatesTypeHeader = "TYPE"
	versionHeader     = "VERSION"
	notesHeader       = "NOTES"
)

// NewUpdatesFormat returns a Format for rendering using a updates context
func NewUpdatesFormat(source string, quiet bool) formatter.Format {
	switch source {
	case formatter.TableFormatKey:
		if quiet {
			return defaultUpdatesQuietFormat
		}
		return defaultUpdatesTableFormat
	case formatter.RawFormatKey:
		if quiet {
			return `update_version: {{.Version}}`
		}
		return `update_version: {{.Version}}\ntype: {{.Type}}\nnotes: {{.Notes}}\n`
	}
	return formatter.Format(source)
}

// UpdatesWrite writes the context
func UpdatesWrite(ctx formatter.Context, availableUpdates []clitypes.Update) error {
	render := func(format func(subContext formatter.SubContext) error) error {
		for _, update := range availableUpdates {
			updatesCtx := &updateContext{trunc: ctx.Trunc, u: update}
			if err := format(updatesCtx); err != nil {
				return err
			}
		}
		return nil
	}
	updatesCtx := updateContext{}
	updatesCtx.Header = map[string]string{
		"Type":    updatesTypeHeader,
		"Version": versionHeader,
		"Notes":   notesHeader,
	}
	return ctx.Write(&updatesCtx, render)
}

type updateContext struct {
	formatter.HeaderContext
	trunc bool
	u     clitypes.Update
}

func (c *updateContext) MarshalJSON() ([]byte, error) {
	return formatter.MarshalJSON(c)
}

func (c *updateContext) Type() string {
	return c.u.Type
}

func (c *updateContext) Version() string {
	return c.u.Version
}

func (c *updateContext) Notes() string {
	return c.u.Notes
}

@ -1,144 +0,0 @@
package engine

import (
	"bytes"
	"encoding/json"
	"strings"
	"testing"

	"github.com/docker/cli/cli/command/formatter"
	clitypes "github.com/docker/cli/types"
	"gotest.tools/assert"
	is "gotest.tools/assert/cmp"
)

func TestUpdateContextWrite(t *testing.T) {
	cases := []struct {
		context  formatter.Context
		expected string
	}{
		// Errors
		{
			formatter.Context{Format: "{{InvalidFunction}}"},
			`Template parsing error: template: :1: function "InvalidFunction" not defined
`,
		},
		{
			formatter.Context{Format: "{{nil}}"},
			`Template parsing error: template: :1:2: executing "" at <nil>: nil is not a command
`,
		},
		// Table format
		{
			formatter.Context{Format: NewUpdatesFormat("table", false)},
			`TYPE                VERSION             NOTES
updateType1         version1            description 1
updateType2         version2            description 2
`,
		},
		{
			formatter.Context{Format: NewUpdatesFormat("table", true)},
			`version1
version2
`,
		},
		{
			formatter.Context{Format: NewUpdatesFormat("table {{.Version}}", false)},
			`VERSION
version1
version2
`,
		},
		{
			formatter.Context{Format: NewUpdatesFormat("table {{.Version}}", true)},
			`VERSION
version1
version2
`,
		},
		// Raw Format
		{
			formatter.Context{Format: NewUpdatesFormat("raw", false)},
			`update_version: version1
type: updateType1
notes: description 1

update_version: version2
type: updateType2
notes: description 2

`,
		},
		{
			formatter.Context{Format: NewUpdatesFormat("raw", true)},
			`update_version: version1
update_version: version2
`,
		},
		// Custom Format
		{
			formatter.Context{Format: NewUpdatesFormat("{{.Version}}", false)},
			`version1
version2
`,
		},
	}

	for _, testcase := range cases {
		updates := []clitypes.Update{
			{Type: "updateType1", Version: "version1", Notes: "description 1"},
			{Type: "updateType2", Version: "version2", Notes: "description 2"},
		}
		out := &bytes.Buffer{}
		testcase.context.Output = out
		err := UpdatesWrite(testcase.context, updates)
		if err != nil {
			assert.Error(t, err, testcase.expected)
		} else {
			assert.Check(t, is.Equal(testcase.expected, out.String()))
		}
	}
}

func TestUpdateContextWriteJSON(t *testing.T) {
	updates := []clitypes.Update{
		{Type: "updateType1", Version: "version1", Notes: "note1"},
		{Type: "updateType2", Version: "version2", Notes: "note2"},
	}
	expectedJSONs := []map[string]interface{}{
		{"Version": "version1", "Notes": "note1", "Type": "updateType1"},
		{"Version": "version2", "Notes": "note2", "Type": "updateType2"},
	}

	out := &bytes.Buffer{}
	err := UpdatesWrite(formatter.Context{Format: "{{json .}}", Output: out}, updates)
	if err != nil {
		t.Fatal(err)
	}
	for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") {
		var m map[string]interface{}
		if err := json.Unmarshal([]byte(line), &m); err != nil {
			t.Fatal(err)
		}
		assert.Check(t, is.DeepEqual(expectedJSONs[i], m))
	}
}

func TestUpdateContextWriteJSONField(t *testing.T) {
	updates := []clitypes.Update{
		{Type: "updateType1", Version: "version1"},
		{Type: "updateType2", Version: "version2"},
	}
	out := &bytes.Buffer{}
	err := UpdatesWrite(formatter.Context{Format: "{{json .Type}}", Output: out}, updates)
	if err != nil {
		t.Fatal(err)
	}
	for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") {
		var s string
		if err := json.Unmarshal([]byte(line), &s); err != nil {
			t.Fatal(err)
		}
		assert.Check(t, is.Equal(updates[i].Type, s))
	}
}

@ -2700,67 +2700,6 @@ _docker_diff() {
}

_docker_engine() {
	local subcommands="
		activate
		check
		update
	"
	__docker_subcommands "$subcommands" && return

	case "$cur" in
		-*)
			COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
			;;
		*)
			COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) )
			;;
	esac
}

_docker_engine_activate() {
	case "$prev" in
		--containerd|--engine-image|--format|--license|--registry-prefix|--version)
			return
			;;
	esac

	case "$cur" in
		-*)
			COMPREPLY=( $( compgen -W "--containerd --display-only --engine-image --format --help --license --quiet --registry-prefix --version" -- "$cur" ) )
			;;
	esac
}

_docker_engine_check() {
	case "$prev" in
		--containerd|--engine-image|--format|--registry-prefix)
			return
			;;
	esac

	case "$cur" in
		-*)
			COMPREPLY=( $( compgen -W "--containerd --downgrades --engine-image --format --help --pre-releases --quiet -q --registry-prefix --upgrades" -- "$cur" ) )
			;;
	esac
}

_docker_engine_update() {
	case "$prev" in
		--containerd|--engine-image|--registry-prefix|--version)
			return
			;;
	esac

	case "$cur" in
		-*)
			COMPREPLY=( $( compgen -W "--containerd --engine-image --help --registry-prefix --version" -- "$cur" ) )
			;;
	esac
}

_docker_events() {
	_docker_system_events
}

@ -5456,7 +5395,6 @@ _docker() {
	config
	container
	context
	engine
	image
	network
	node

@ -2,6 +2,11 @@
title: "Use the Docker command line"
description: "Docker's CLI command description and usage"
keywords: "Docker, Docker documentation, CLI, command line"
redirect_from:
- /engine/reference/commandline/engine/
- /engine/reference/commandline/engine_activate/
- /engine/reference/commandline/engine_check/
- /engine/reference/commandline/engine_update/
---

<!-- This file is maintained within the docker/cli GitHub

@ -1,234 +0,0 @@
package containerizedengine

import (
	"context"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/cio"
	"github.com/containerd/containerd/containers"
	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/oci"
	prototypes "github.com/gogo/protobuf/types"
	"github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

type (
	fakeContainerdClient struct {
		containersFunc       func(ctx context.Context, filters ...string) ([]containerd.Container, error)
		newContainerFunc     func(ctx context.Context, id string, opts ...containerd.NewContainerOpts) (containerd.Container, error)
		pullFunc             func(ctx context.Context, ref string, opts ...containerd.RemoteOpt) (containerd.Image, error)
		getImageFunc         func(ctx context.Context, ref string) (containerd.Image, error)
		contentStoreFunc     func() content.Store
		containerServiceFunc func() containers.Store
		installFunc          func(context.Context, containerd.Image, ...containerd.InstallOpts) error
		versionFunc          func(ctx context.Context) (containerd.Version, error)
	}
	fakeContainer struct {
		idFunc         func() string
		infoFunc       func(context.Context) (containers.Container, error)
		deleteFunc     func(context.Context, ...containerd.DeleteOpts) error
		newTaskFunc    func(context.Context, cio.Creator, ...containerd.NewTaskOpts) (containerd.Task, error)
		specFunc       func(context.Context) (*oci.Spec, error)
		taskFunc       func(context.Context, cio.Attach) (containerd.Task, error)
		imageFunc      func(context.Context) (containerd.Image, error)
		labelsFunc     func(context.Context) (map[string]string, error)
		setLabelsFunc  func(context.Context, map[string]string) (map[string]string, error)
		extensionsFunc func(context.Context) (map[string]prototypes.Any, error)
		updateFunc     func(context.Context, ...containerd.UpdateContainerOpts) error
		checkpointFunc func(context.Context, string, ...containerd.CheckpointOpts) (containerd.Image, error)
	}
	fakeImage struct {
		nameFunc         func() string
		targetFunc       func() ocispec.Descriptor
		labelFunc        func() map[string]string
		unpackFunc       func(context.Context, string) error
		rootFSFunc       func(ctx context.Context) ([]digest.Digest, error)
		sizeFunc         func(ctx context.Context) (int64, error)
		configFunc       func(ctx context.Context) (ocispec.Descriptor, error)
		isUnpackedFunc   func(context.Context, string) (bool, error)
		contentStoreFunc func() content.Store
	}
)

func (w *fakeContainerdClient) Containers(ctx context.Context, filters ...string) ([]containerd.Container, error) {
	if w.containersFunc != nil {
		return w.containersFunc(ctx, filters...)
	}
	return []containerd.Container{}, nil
}
func (w *fakeContainerdClient) NewContainer(ctx context.Context, id string, opts ...containerd.NewContainerOpts) (containerd.Container, error) {
	if w.newContainerFunc != nil {
		return w.newContainerFunc(ctx, id, opts...)
	}
	return nil, nil
}
func (w *fakeContainerdClient) Pull(ctx context.Context, ref string, opts ...containerd.RemoteOpt) (containerd.Image, error) {
	if w.pullFunc != nil {
		return w.pullFunc(ctx, ref, opts...)
	}
	return nil, nil
}
func (w *fakeContainerdClient) GetImage(ctx context.Context, ref string) (containerd.Image, error) {
	if w.getImageFunc != nil {
		return w.getImageFunc(ctx, ref)
	}
	return nil, nil
}
func (w *fakeContainerdClient) ContentStore() content.Store {
	if w.contentStoreFunc != nil {
		return w.contentStoreFunc()
	}
	return nil
}
func (w *fakeContainerdClient) ContainerService() containers.Store {
	if w.containerServiceFunc != nil {
		return w.containerServiceFunc()
	}
	return nil
}
func (w *fakeContainerdClient) Close() error {
	return nil
}
func (w *fakeContainerdClient) Install(ctx context.Context, image containerd.Image, args ...containerd.InstallOpts) error {
	if w.installFunc != nil {
		return w.installFunc(ctx, image, args...)
	}
	return nil
}
func (w *fakeContainerdClient) Version(ctx context.Context) (containerd.Version, error) {
	if w.versionFunc != nil {
		return w.versionFunc(ctx)
	}
	return containerd.Version{}, nil
}

func (c *fakeContainer) ID() string {
	if c.idFunc != nil {
		return c.idFunc()
	}
	return ""
}
func (c *fakeContainer) Info(ctx context.Context) (containers.Container, error) {
	if c.infoFunc != nil {
		return c.infoFunc(ctx)
	}
	return containers.Container{}, nil
}
func (c *fakeContainer) Delete(ctx context.Context, opts ...containerd.DeleteOpts) error {
	if c.deleteFunc != nil {
		return c.deleteFunc(ctx, opts...)
	}
	return nil
}
func (c *fakeContainer) NewTask(ctx context.Context, ioc cio.Creator, opts ...containerd.NewTaskOpts) (containerd.Task, error) {
	if c.newTaskFunc != nil {
		return c.newTaskFunc(ctx, ioc, opts...)
	}
	return nil, nil
}
func (c *fakeContainer) Spec(ctx context.Context) (*oci.Spec, error) {
	if c.specFunc != nil {
		return c.specFunc(ctx)
	}
	return nil, nil
}
func (c *fakeContainer) Task(ctx context.Context, attach cio.Attach) (containerd.Task, error) {
	if c.taskFunc != nil {
		return c.taskFunc(ctx, attach)
	}
	return nil, nil
}
func (c *fakeContainer) Image(ctx context.Context) (containerd.Image, error) {
	if c.imageFunc != nil {
		return c.imageFunc(ctx)
	}
	return nil, nil
}
func (c *fakeContainer) Labels(ctx context.Context) (map[string]string, error) {
	if c.labelsFunc != nil {
		return c.labelsFunc(ctx)
	}
	return nil, nil
}
func (c *fakeContainer) SetLabels(ctx context.Context, labels map[string]string) (map[string]string, error) {
	if c.setLabelsFunc != nil {
		return c.setLabelsFunc(ctx, labels)
	}
	return nil, nil
}
func (c *fakeContainer) Extensions(ctx context.Context) (map[string]prototypes.Any, error) {
	if c.extensionsFunc != nil {
		return c.extensionsFunc(ctx)
	}
	return nil, nil
}
func (c *fakeContainer) Update(ctx context.Context, opts ...containerd.UpdateContainerOpts) error {
	if c.updateFunc != nil {
		return c.updateFunc(ctx, opts...)
	}
	return nil
}

func (c *fakeContainer) Checkpoint(ctx context.Context, ref string, opts ...containerd.CheckpointOpts) (containerd.Image, error) {
	if c.checkpointFunc != nil {
		return c.checkpointFunc(ctx, ref, opts...)
	}
	return nil, nil
}

func (i *fakeImage) Name() string {
	if i.nameFunc != nil {
		return i.nameFunc()
	}
	return ""
}
func (i *fakeImage) Target() ocispec.Descriptor {
	if i.targetFunc != nil {
		return i.targetFunc()
	}
	return ocispec.Descriptor{}
}
func (i *fakeImage) Labels() map[string]string {
	if i.labelFunc != nil {
		return i.labelFunc()
	}
	return nil
}

func (i *fakeImage) Unpack(ctx context.Context, name string) error {
	if i.unpackFunc != nil {
		return i.unpackFunc(ctx, name)
	}
	return nil
}
func (i *fakeImage) RootFS(ctx context.Context) ([]digest.Digest, error) {
	if i.rootFSFunc != nil {
		return i.rootFSFunc(ctx)
	}
	return nil, nil
}
func (i *fakeImage) Size(ctx context.Context) (int64, error) {
	if i.sizeFunc != nil {
		return i.sizeFunc(ctx)
	}
	return 0, nil
}
func (i *fakeImage) Config(ctx context.Context) (ocispec.Descriptor, error) {
	if i.configFunc != nil {
		return i.configFunc(ctx)
	}
	return ocispec.Descriptor{}, nil
}
func (i *fakeImage) IsUnpacked(ctx context.Context, name string) (bool, error) {
	if i.isUnpackedFunc != nil {
		return i.isUnpackedFunc(ctx, name)
	}
	return false, nil
}
func (i *fakeImage) ContentStore() content.Store {
	if i.contentStoreFunc != nil {
		return i.contentStoreFunc()
	}
	return nil
}

@ -1,78 +0,0 @@
package containerizedengine

import (
	"context"
	"io"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/remotes/docker"
	clitypes "github.com/docker/cli/types"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/pkg/jsonmessage"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// NewClient returns a new containerizedengine client
// This client can be used to manage the lifecycle of
// dockerd running as a container on containerd.
func NewClient(sockPath string) (clitypes.ContainerizedClient, error) {
	if sockPath == "" {
		sockPath = containerdSockPath
	}
	cclient, err := containerd.New(sockPath)
	if err != nil {
		return nil, err
	}
	return &baseClient{
		cclient: cclient,
	}, nil
}

// Close will close the underlying clients
func (c *baseClient) Close() error {
	return c.cclient.Close()
}

func (c *baseClient) pullWithAuth(ctx context.Context, imageName string, out clitypes.OutStream,
	authConfig *types.AuthConfig) (containerd.Image, error) {

	resolver := docker.NewResolver(docker.ResolverOptions{
		Credentials: func(string) (string, string, error) {
			return authConfig.Username, authConfig.Password, nil
		},
	})

	ongoing := newJobs(imageName)
	pctx, stopProgress := context.WithCancel(ctx)
	progress := make(chan struct{})
	bufin, bufout := io.Pipe()

	go func() {
		showProgress(pctx, ongoing, c.cclient.ContentStore(), bufout)
	}()

	go func() {
		jsonmessage.DisplayJSONMessagesToStream(bufin, out, nil)
		close(progress)
	}()

	h := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
		if desc.MediaType != images.MediaTypeDockerSchema1Manifest {
			ongoing.add(desc)
		}
		return nil, nil
	})

	image, err := c.cclient.Pull(ctx, imageName,
		containerd.WithResolver(resolver),
		containerd.WithImageHandler(h),
		containerd.WithPullUnpack)
	stopProgress()

	if err != nil {
		return nil, err
	}
	<-progress
	return image, nil
}

@ -1,45 +0,0 @@
package containerizedengine

import (
	"bytes"
	"context"
	"fmt"
	"testing"

	"github.com/containerd/containerd"
	"github.com/docker/cli/cli/streams"
	"github.com/docker/docker/api/types"
	"gotest.tools/assert"
)

func TestPullWithAuthPullFail(t *testing.T) {
	ctx := context.Background()
	client := baseClient{
		cclient: &fakeContainerdClient{
			pullFunc: func(ctx context.Context, ref string, opts ...containerd.RemoteOpt) (containerd.Image, error) {
				return nil, fmt.Errorf("pull failure")
			},
		},
	}
	imageName := "testnamegoeshere"

	_, err := client.pullWithAuth(ctx, imageName, streams.NewOut(&bytes.Buffer{}), &types.AuthConfig{})
	assert.ErrorContains(t, err, "pull failure")
}

func TestPullWithAuthPullPass(t *testing.T) {
	ctx := context.Background()
	client := baseClient{
		cclient: &fakeContainerdClient{
			pullFunc: func(ctx context.Context, ref string, opts ...containerd.RemoteOpt) (containerd.Image, error) {
				return nil, nil
			},
		},
	}
	imageName := "testnamegoeshere"

	_, err := client.pullWithAuth(ctx, imageName, streams.NewOut(&bytes.Buffer{}), &types.AuthConfig{})
	assert.NilError(t, err)
}

@ -1,214 +0,0 @@
package containerizedengine

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"strings"
	"sync"
	"time"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/remotes"
	"github.com/docker/docker/pkg/jsonmessage"
	digest "github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/sirupsen/logrus"
)

func showProgress(ctx context.Context, ongoing *jobs, cs content.Store, out io.WriteCloser) {
	var (
		ticker   = time.NewTicker(100 * time.Millisecond)
		start    = time.Now()
		enc      = json.NewEncoder(out)
		statuses = map[string]statusInfo{}
		done     bool
	)
	defer ticker.Stop()

outer:
	for {
		select {
		case <-ticker.C:

			resolved := "resolved"
			if !ongoing.isResolved() {
				resolved = "resolving"
			}
			statuses[ongoing.name] = statusInfo{
				Ref:    ongoing.name,
				Status: resolved,
			}
			keys := []string{ongoing.name}

			activeSeen := map[string]struct{}{}
			if !done {
				active, err := cs.ListStatuses(ctx, "")
				if err != nil {
					logrus.Debugf("active check failed: %s", err)
					continue
				}
				// update status of active entries!
				for _, active := range active {
					statuses[active.Ref] = statusInfo{
						Ref:       active.Ref,
						Status:    "downloading",
						Offset:    active.Offset,
						Total:     active.Total,
						StartedAt: active.StartedAt,
						UpdatedAt: active.UpdatedAt,
					}
					activeSeen[active.Ref] = struct{}{}
				}
			}

			err := updateNonActive(ctx, ongoing, cs, statuses, &keys, activeSeen, &done, start)
			if err != nil {
				continue outer
			}

			var ordered []statusInfo
			for _, key := range keys {
				ordered = append(ordered, statuses[key])
			}

			for _, si := range ordered {
				jm := si.JSONMessage()
				err := enc.Encode(jm)
				if err != nil {
					logrus.Debugf("failed to encode progress message: %s", err)
				}
			}

			if done {
				out.Close()
				return
			}
		case <-ctx.Done():
			done = true // allow ui to update once more
		}
	}
}

func updateNonActive(ctx context.Context, ongoing *jobs, cs content.Store, statuses map[string]statusInfo, keys *[]string, activeSeen map[string]struct{}, done *bool, start time.Time) error {
	for _, j := range ongoing.jobs() {
		key := remotes.MakeRefKey(ctx, j)
		*keys = append(*keys, key)
		if _, ok := activeSeen[key]; ok {
			continue
		}

		status, ok := statuses[key]
		if !*done && (!ok || status.Status == "downloading") {
			info, err := cs.Info(ctx, j.Digest)
			if err != nil {
				if !errdefs.IsNotFound(err) {
					logrus.Debugf("failed to get content info: %s", err)
					return err
				}
				statuses[key] = statusInfo{
					Ref:    key,
					Status: "waiting",
				}
			} else if info.CreatedAt.After(start) {
				statuses[key] = statusInfo{
					Ref:       key,
					Status:    "done",
					Offset:    info.Size,
					Total:     info.Size,
					UpdatedAt: info.CreatedAt,
				}
			} else {
				statuses[key] = statusInfo{
					Ref:    key,
					Status: "exists",
				}
			}
		} else if *done {
			if ok {
				if status.Status != "done" && status.Status != "exists" {
					status.Status = "done"
					statuses[key] = status
				}
			} else {
				statuses[key] = statusInfo{
					Ref:    key,
					Status: "done",
				}
			}
		}
	}
	return nil
}

type jobs struct {
	name     string
	added    map[digest.Digest]struct{}
	descs    []ocispec.Descriptor
	mu       sync.Mutex
	resolved bool
}

func newJobs(name string) *jobs {
	return &jobs{
		name:  name,
		added: map[digest.Digest]struct{}{},
	}
}

func (j *jobs) add(desc ocispec.Descriptor) {
	j.mu.Lock()
	defer j.mu.Unlock()
	j.resolved = true

	if _, ok := j.added[desc.Digest]; ok {
		return
	}
	j.descs = append(j.descs, desc)
	j.added[desc.Digest] = struct{}{}
}

func (j *jobs) jobs() []ocispec.Descriptor {
	j.mu.Lock()
	defer j.mu.Unlock()

	var descs []ocispec.Descriptor
	return append(descs, j.descs...)
}

func (j *jobs) isResolved() bool {
	j.mu.Lock()
	defer j.mu.Unlock()
	return j.resolved
}

// statusInfo holds the status info for an upload or download
type statusInfo struct {
	Ref       string
	Status    string
	Offset    int64
	Total     int64
	StartedAt time.Time
	UpdatedAt time.Time
}

func (s statusInfo) JSONMessage() jsonmessage.JSONMessage {
	// Shorten the ID to use up less width on the display
	id := s.Ref
	if strings.Contains(id, ":") {
		split := strings.SplitN(id, ":", 2)
		id = split[1]
	}
	id = fmt.Sprintf("%.12s", id)

	return jsonmessage.JSONMessage{
		ID:     id,
		Status: s.Status,
		Progress: &jsonmessage.JSONProgress{
			Current: s.Offset,
			Total:   s.Total,
		},
	}
}

@ -1,49 +0,0 @@
package containerizedengine

import (
	"context"
	"errors"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/containers"
	"github.com/containerd/containerd/content"
)

const (
	containerdSockPath = "/run/containerd/containerd.sock"
	engineNamespace    = "com.docker"
)

var (
	// ErrEngineAlreadyPresent returned when engine already present and should not be
	ErrEngineAlreadyPresent = errors.New("engine already present, use the update command to change versions")

	// ErrEngineNotPresent returned when the engine is not present and should be
	ErrEngineNotPresent = errors.New("engine not present")

	// ErrMalformedConfigFileParam returned if the engine config file parameter is malformed
	ErrMalformedConfigFileParam = errors.New("malformed --config-file param on engine")

	// ErrEngineConfigLookupFailure returned if unable to lookup existing engine configuration
	ErrEngineConfigLookupFailure = errors.New("unable to lookup existing engine configuration")

	// ErrEngineShutdownTimeout returned if the engine failed to shutdown in time
	ErrEngineShutdownTimeout = errors.New("timeout waiting for engine to exit")
)

type baseClient struct {
	cclient containerdClient
}

// containerdClient abstracts the containerd client to aid in testability
type containerdClient interface {
	Containers(ctx context.Context, filters ...string) ([]containerd.Container, error)
	NewContainer(ctx context.Context, id string, opts ...containerd.NewContainerOpts) (containerd.Container, error)
	Pull(ctx context.Context, ref string, opts ...containerd.RemoteOpt) (containerd.Image, error)
	GetImage(ctx context.Context, ref string) (containerd.Image, error)
	Close() error
	ContentStore() content.Store
	ContainerService() containers.Store
	Install(context.Context, containerd.Image, ...containerd.InstallOpts) error
	Version(ctx context.Context) (containerd.Version, error)
}

@ -1,183 +0,0 @@
package containerizedengine

import (
	"context"
	"encoding/json"
	"fmt"
	"strings"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/namespaces"
	"github.com/docker/cli/internal/versions"
	clitypes "github.com/docker/cli/types"
	"github.com/docker/distribution/reference"
	"github.com/docker/docker/api/types"
	ver "github.com/hashicorp/go-version"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
)

// ActivateEngine will switch the image from the CE to EE image
func (c *baseClient) ActivateEngine(ctx context.Context, opts clitypes.EngineInitOptions, out clitypes.OutStream,
	authConfig *types.AuthConfig) error {

	// If the user didn't specify an image, determine the correct enterprise image to use
	if opts.EngineImage == "" {
		localMetadata, err := versions.GetCurrentRuntimeMetadata(opts.RuntimeMetadataDir)
		if err != nil {
			return errors.Wrap(err, "unable to determine the installed engine version. Specify which engine image to update with --engine-image")
		}

		engineImage := localMetadata.EngineImage
		if engineImage == clitypes.EnterpriseEngineImage || engineImage == clitypes.CommunityEngineImage {
			opts.EngineImage = clitypes.EnterpriseEngineImage
		} else {
			// Chop off the standard prefix and retain any trailing OS specific image details
			// e.g., engine-community-dm -> engine-enterprise-dm
			engineImage = strings.TrimPrefix(engineImage, clitypes.EnterpriseEngineImage)
			engineImage = strings.TrimPrefix(engineImage, clitypes.CommunityEngineImage)
			opts.EngineImage = clitypes.EnterpriseEngineImage + engineImage
		}
	}

	ctx = namespaces.WithNamespace(ctx, engineNamespace)
	return c.DoUpdate(ctx, opts, out, authConfig)
}

// DoUpdate performs the underlying engine update
func (c *baseClient) DoUpdate(ctx context.Context, opts clitypes.EngineInitOptions, out clitypes.OutStream,
	authConfig *types.AuthConfig) error {

	ctx = namespaces.WithNamespace(ctx, engineNamespace)
	if opts.EngineVersion == "" {
		// TODO - Future enhancement: This could be improved to be
		// smart about figuring out the latest patch rev for the
		// current engine version and automatically apply it so users
		// could stay in sync by simply having a scheduled
		// `docker engine update`
		return fmt.Errorf("pick the version you want to update to with --version")
	}
	var localMetadata *clitypes.RuntimeMetadata
	if opts.EngineImage == "" {
		var err error
		localMetadata, err = versions.GetCurrentRuntimeMetadata(opts.RuntimeMetadataDir)
		if err != nil {
			return errors.Wrap(err, "unable to determine the installed engine version. Specify which engine image to update with --engine-image set to 'engine-community' or 'engine-enterprise'")
		}
		opts.EngineImage = localMetadata.EngineImage
	}

	imageName := fmt.Sprintf("%s/%s:%s", opts.RegistryPrefix, opts.EngineImage, opts.EngineVersion)

	// Look for desired image
	image, err := c.cclient.GetImage(ctx, imageName)
	if err != nil {
		if errdefs.IsNotFound(err) {
			image, err = c.pullWithAuth(ctx, imageName, out, authConfig)
			if err != nil {
				return errors.Wrapf(err, "unable to pull image %s", imageName)
			}
		} else {
			return errors.Wrapf(err, "unable to check for image %s", imageName)
		}
	}

	// Make sure we're safe to proceed
	newMetadata, err := c.PreflightCheck(ctx, image)
	if err != nil {
		return err
	}
	if localMetadata != nil {
		if localMetadata.Platform != newMetadata.Platform {
			fmt.Fprintf(out, "\nNotice: you have switched to \"%s\". Refer to %s for update instructions.\n\n", newMetadata.Platform, getReleaseNotesURL(imageName))
		}
	}

	if err := c.cclient.Install(ctx, image, containerd.WithInstallReplace, containerd.WithInstallPath("/usr")); err != nil {
		return err
	}

	return versions.WriteRuntimeMetadata(opts.RuntimeMetadataDir, newMetadata)
}

// PreflightCheck verifies the specified image is compatible with the local system before proceeding to update/activate
// If things look good, the RuntimeMetadata for the new image is returned and can be written out to the host
func (c *baseClient) PreflightCheck(ctx context.Context, image containerd.Image) (*clitypes.RuntimeMetadata, error) {
	var metadata clitypes.RuntimeMetadata
	ic, err := image.Config(ctx)
	if err != nil {
		return nil, err
	}
	var (
		ociimage v1.Image
		config   v1.ImageConfig
	)
	switch ic.MediaType {
	case v1.MediaTypeImageConfig, images.MediaTypeDockerSchema2Config:
		p, err := content.ReadBlob(ctx, image.ContentStore(), ic)
		if err != nil {
			return nil, err
		}

		if err := json.Unmarshal(p, &ociimage); err != nil {
			return nil, err
		}
		config = ociimage.Config
	default:
		return nil, fmt.Errorf("unknown image %s config media type %s", image.Name(), ic.MediaType)
	}

	metadataString, ok := config.Labels["com.docker."+clitypes.RuntimeMetadataName]
	if !ok {
		return nil, fmt.Errorf("image %s does not contain runtime metadata label %s", image.Name(), clitypes.RuntimeMetadataName)
	}
	err = json.Unmarshal([]byte(metadataString), &metadata)
	if err != nil {
		return nil, errors.Wrapf(err, "malformed runtime metadata file in %s", image.Name())
	}

	// Current CLI only supports host install runtime
	if metadata.Runtime != "host_install" {
		return nil, fmt.Errorf("unsupported daemon image: %s\nConsult the release notes at %s for upgrade instructions", metadata.Runtime, getReleaseNotesURL(image.Name()))
	}

	// Verify local containerd is new enough
	localVersion, err := c.cclient.Version(ctx)
	if err != nil {
		return nil, err
	}
	if metadata.ContainerdMinVersion != "" {
		lv, err := ver.NewVersion(localVersion.Version)
		if err != nil {
			return nil, err
		}
		mv, err := ver.NewVersion(metadata.ContainerdMinVersion)
		if err != nil {
			return nil, err
		}
		if lv.LessThan(mv) {
			return nil, fmt.Errorf("local containerd is too old: %s - this engine version requires %s or newer.\nConsult the release notes at %s for upgrade instructions",
				localVersion.Version, metadata.ContainerdMinVersion, getReleaseNotesURL(image.Name()))
		}
	} // If omitted on metadata, no hard dependency on containerd version beyond 18.09 baseline

	// All checks look OK, proceed with update
	return &metadata, nil
}

// getReleaseNotesURL returns a release notes url
// If the image name does not contain a version tag, the base release notes URL is returned
func getReleaseNotesURL(imageName string) string {
	versionTag := ""
	distributionRef, err := reference.ParseNormalizedNamed(imageName)
	if err == nil {
		taggedRef, ok := distributionRef.(reference.NamedTagged)
		if ok {
			versionTag = taggedRef.Tag()
		}
	}
	return fmt.Sprintf("%s/%s", clitypes.ReleaseNotePrefix, versionTag)
}

@ -1,300 +0,0 @@
package containerizedengine

import (
	"bytes"
	"context"
	"fmt"
	"io/ioutil"
	"os"
	"testing"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/cio"
	"github.com/containerd/containerd/errdefs"
	"github.com/docker/cli/cli/streams"
	"github.com/docker/cli/internal/versions"
	clitypes "github.com/docker/cli/types"
	"github.com/docker/docker/api/types"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"gotest.tools/assert"
)

func TestActivateImagePermutations(t *testing.T) {
	ctx := context.Background()
	lookedup := "not called yet"
	expectedError := fmt.Errorf("expected error")
	client := baseClient{
		cclient: &fakeContainerdClient{
			getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
				lookedup = ref
				return nil, expectedError
			},
		},
	}
	tmpdir, err := ioutil.TempDir("", "enginedir")
	assert.NilError(t, err)
	defer os.RemoveAll(tmpdir)
	metadata := clitypes.RuntimeMetadata{EngineImage: clitypes.EnterpriseEngineImage}
	err = versions.WriteRuntimeMetadata(tmpdir, &metadata)
	assert.NilError(t, err)

	opts := clitypes.EngineInitOptions{
		EngineVersion:      "engineversiongoeshere",
		RegistryPrefix:     "registryprefixgoeshere",
		ConfigFile:         "/tmp/configfilegoeshere",
		RuntimeMetadataDir: tmpdir,
	}

	err = client.ActivateEngine(ctx, opts, streams.NewOut(&bytes.Buffer{}), &types.AuthConfig{})
	assert.ErrorContains(t, err, expectedError.Error())
	assert.Equal(t, lookedup, fmt.Sprintf("%s/%s:%s", opts.RegistryPrefix, clitypes.EnterpriseEngineImage, opts.EngineVersion))

	metadata = clitypes.RuntimeMetadata{EngineImage: clitypes.CommunityEngineImage}
	err = versions.WriteRuntimeMetadata(tmpdir, &metadata)
	assert.NilError(t, err)
	err = client.ActivateEngine(ctx, opts, streams.NewOut(&bytes.Buffer{}), &types.AuthConfig{})
	assert.ErrorContains(t, err, expectedError.Error())
	assert.Equal(t, lookedup, fmt.Sprintf("%s/%s:%s", opts.RegistryPrefix, clitypes.EnterpriseEngineImage, opts.EngineVersion))

	metadata = clitypes.RuntimeMetadata{EngineImage: clitypes.CommunityEngineImage + "-dm"}
	err = versions.WriteRuntimeMetadata(tmpdir, &metadata)
	assert.NilError(t, err)
	err = client.ActivateEngine(ctx, opts, streams.NewOut(&bytes.Buffer{}), &types.AuthConfig{})
	assert.ErrorContains(t, err, expectedError.Error())
	assert.Equal(t, lookedup, fmt.Sprintf("%s/%s:%s", opts.RegistryPrefix, clitypes.EnterpriseEngineImage+"-dm", opts.EngineVersion))
}

func TestActivateConfigFailure(t *testing.T) {
	ctx := context.Background()
	registryPrefix := "registryprefixgoeshere"
	image := &fakeImage{
		nameFunc: func() string {
			return registryPrefix + "/" + clitypes.EnterpriseEngineImage + ":engineversion"
		},
		configFunc: func(ctx context.Context) (ocispec.Descriptor, error) {
			return ocispec.Descriptor{}, fmt.Errorf("config lookup failure")
		},
	}
	container := &fakeContainer{
		imageFunc: func(context.Context) (containerd.Image, error) {
			return image, nil
		},
		taskFunc: func(context.Context, cio.Attach) (containerd.Task, error) {
			return nil, errdefs.ErrNotFound
		},
		labelsFunc: func(context.Context) (map[string]string, error) {
			return map[string]string{}, nil
		},
	}
	client := baseClient{
		cclient: &fakeContainerdClient{
			containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
				return []containerd.Container{container}, nil
			},
			getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
				return image, nil
			},
		},
	}
	tmpdir, err := ioutil.TempDir("", "engindir")
	assert.NilError(t, err)
	defer os.RemoveAll(tmpdir)
	metadata := clitypes.RuntimeMetadata{EngineImage: clitypes.CommunityEngineImage}
	err = versions.WriteRuntimeMetadata(tmpdir, &metadata)
	assert.NilError(t, err)
	opts := clitypes.EngineInitOptions{
		EngineVersion:      "engineversiongoeshere",
		RegistryPrefix:     "registryprefixgoeshere",
		ConfigFile:         "/tmp/configfilegoeshere",
		EngineImage:        clitypes.EnterpriseEngineImage,
		RuntimeMetadataDir: tmpdir,
	}

	err = client.ActivateEngine(ctx, opts, streams.NewOut(&bytes.Buffer{}), &types.AuthConfig{})
	assert.ErrorContains(t, err, "config lookup failure")
}

func TestActivateDoUpdateFail(t *testing.T) {
	ctx := context.Background()
	registryPrefix := "registryprefixgoeshere"
	image := &fakeImage{
		nameFunc: func() string {
			return registryPrefix + "/ce-engine:engineversion"
		},
	}
	container := &fakeContainer{
		imageFunc: func(context.Context) (containerd.Image, error) {
			return image, nil
		},
	}
	client := baseClient{
		cclient: &fakeContainerdClient{
			containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
				return []containerd.Container{container}, nil
			},
			getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
				return nil, fmt.Errorf("something went wrong")
			},
		},
	}
	tmpdir, err := ioutil.TempDir("", "enginedir")
	assert.NilError(t, err)
	defer os.RemoveAll(tmpdir)
	metadata := clitypes.RuntimeMetadata{EngineImage: clitypes.CommunityEngineImage}
	err = versions.WriteRuntimeMetadata(tmpdir, &metadata)
	assert.NilError(t, err)
	opts := clitypes.EngineInitOptions{
		EngineVersion:      "engineversiongoeshere",
		RegistryPrefix:     "registryprefixgoeshere",
		ConfigFile:         "/tmp/configfilegoeshere",
		EngineImage:        clitypes.EnterpriseEngineImage,
		RuntimeMetadataDir: tmpdir,
	}

	err = client.ActivateEngine(ctx, opts, streams.NewOut(&bytes.Buffer{}), &types.AuthConfig{})
	assert.ErrorContains(t, err, "check for image")
	assert.ErrorContains(t, err, "something went wrong")
}

func TestDoUpdateNoVersion(t *testing.T) {
	tmpdir, err := ioutil.TempDir("", "enginedir")
	assert.NilError(t, err)
	defer os.RemoveAll(tmpdir)
	metadata := clitypes.RuntimeMetadata{EngineImage: clitypes.EnterpriseEngineImage}
	err = versions.WriteRuntimeMetadata(tmpdir, &metadata)
	assert.NilError(t, err)
	ctx := context.Background()
	opts := clitypes.EngineInitOptions{
		EngineVersion:      "",
		RegistryPrefix:     "registryprefixgoeshere",
		ConfigFile:         "/tmp/configfilegoeshere",
		EngineImage:        clitypes.EnterpriseEngineImage,
		RuntimeMetadataDir: tmpdir,
	}

	client := baseClient{}
	err = client.DoUpdate(ctx, opts, streams.NewOut(&bytes.Buffer{}), &types.AuthConfig{})
	assert.ErrorContains(t, err, "pick the version you")
}

func TestDoUpdateImageMiscError(t *testing.T) {
	ctx := context.Background()
	tmpdir, err := ioutil.TempDir("", "enginedir")
	assert.NilError(t, err)
	defer os.RemoveAll(tmpdir)
	metadata := clitypes.RuntimeMetadata{EngineImage: clitypes.EnterpriseEngineImage}
	err = versions.WriteRuntimeMetadata(tmpdir, &metadata)
	assert.NilError(t, err)
	opts := clitypes.EngineInitOptions{
		EngineVersion:      "engineversiongoeshere",
		RegistryPrefix:     "registryprefixgoeshere",
		ConfigFile:         "/tmp/configfilegoeshere",
		EngineImage:        "testnamegoeshere",
		RuntimeMetadataDir: tmpdir,
	}
	client := baseClient{
		cclient: &fakeContainerdClient{
			getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
				return nil, fmt.Errorf("something went wrong")
			},
		},
	}

	err = client.DoUpdate(ctx, opts, streams.NewOut(&bytes.Buffer{}), &types.AuthConfig{})
	assert.ErrorContains(t, err, "check for image")
	assert.ErrorContains(t, err, "something went wrong")
}

func TestDoUpdatePullFail(t *testing.T) {
	ctx := context.Background()
	tmpdir, err := ioutil.TempDir("", "enginedir")
	assert.NilError(t, err)
	defer os.RemoveAll(tmpdir)
	metadata := clitypes.RuntimeMetadata{EngineImage: clitypes.EnterpriseEngineImage}
	err = versions.WriteRuntimeMetadata(tmpdir, &metadata)
	assert.NilError(t, err)
	opts := clitypes.EngineInitOptions{
		EngineVersion:      "engineversiongoeshere",
		RegistryPrefix:     "registryprefixgoeshere",
		ConfigFile:         "/tmp/configfilegoeshere",
		EngineImage:        "testnamegoeshere",
		RuntimeMetadataDir: tmpdir,
	}
	client := baseClient{
		cclient: &fakeContainerdClient{
			getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
				return nil, errdefs.ErrNotFound
			},
			pullFunc: func(ctx context.Context, ref string, opts ...containerd.RemoteOpt) (containerd.Image, error) {
|
|
||||||
return nil, fmt.Errorf("pull failure")
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
err = client.DoUpdate(ctx, opts, streams.NewOut(&bytes.Buffer{}), &types.AuthConfig{})
|
|
||||||
assert.ErrorContains(t, err, "unable to pull")
|
|
||||||
assert.ErrorContains(t, err, "pull failure")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestActivateDoUpdateVerifyImageName(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
registryPrefix := "registryprefixgoeshere"
|
|
||||||
image := &fakeImage{
|
|
||||||
nameFunc: func() string {
|
|
||||||
return registryPrefix + "/ce-engine:engineversion"
|
|
||||||
},
|
|
||||||
}
|
|
||||||
container := &fakeContainer{
|
|
||||||
imageFunc: func(context.Context) (containerd.Image, error) {
|
|
||||||
return image, nil
|
|
||||||
},
|
|
||||||
}
|
|
||||||
requestedImage := "unset"
|
|
||||||
client := baseClient{
|
|
||||||
cclient: &fakeContainerdClient{
|
|
||||||
containersFunc: func(ctx context.Context, filters ...string) ([]containerd.Container, error) {
|
|
||||||
return []containerd.Container{container}, nil
|
|
||||||
},
|
|
||||||
getImageFunc: func(ctx context.Context, ref string) (containerd.Image, error) {
|
|
||||||
requestedImage = ref
|
|
||||||
return nil, fmt.Errorf("something went wrong")
|
|
||||||
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
tmpdir, err := ioutil.TempDir("", "enginedir")
|
|
||||||
assert.NilError(t, err)
|
|
||||||
defer os.RemoveAll(tmpdir)
|
|
||||||
metadata := clitypes.RuntimeMetadata{EngineImage: clitypes.EnterpriseEngineImage}
|
|
||||||
err = versions.WriteRuntimeMetadata(tmpdir, &metadata)
|
|
||||||
assert.NilError(t, err)
|
|
||||||
|
|
||||||
opts := clitypes.EngineInitOptions{
|
|
||||||
EngineVersion: "engineversiongoeshere",
|
|
||||||
RegistryPrefix: "registryprefixgoeshere",
|
|
||||||
EngineImage: "testnamegoeshere",
|
|
||||||
ConfigFile: "/tmp/configfilegoeshere",
|
|
||||||
RuntimeMetadataDir: tmpdir,
|
|
||||||
}
|
|
||||||
|
|
||||||
err = client.ActivateEngine(ctx, opts, streams.NewOut(&bytes.Buffer{}), &types.AuthConfig{})
|
|
||||||
assert.ErrorContains(t, err, "check for image")
|
|
||||||
assert.ErrorContains(t, err, "something went wrong")
|
|
||||||
expectedImage := fmt.Sprintf("%s/%s:%s", opts.RegistryPrefix, opts.EngineImage, opts.EngineVersion)
|
|
||||||
assert.Assert(t, requestedImage == expectedImage, "%s != %s", requestedImage, expectedImage)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGetReleaseNotesURL(t *testing.T) {
|
|
||||||
imageName := "bogus image name #$%&@!"
|
|
||||||
url := getReleaseNotesURL(imageName)
|
|
||||||
assert.Equal(t, url, clitypes.ReleaseNotePrefix+"/")
|
|
||||||
imageName = "foo.bar/valid/repowithouttag"
|
|
||||||
url = getReleaseNotesURL(imageName)
|
|
||||||
assert.Equal(t, url, clitypes.ReleaseNotePrefix+"/")
|
|
||||||
imageName = "foo.bar/valid/repowithouttag:tag123"
|
|
||||||
url = getReleaseNotesURL(imageName)
|
|
||||||
assert.Equal(t, url, clitypes.ReleaseNotePrefix+"/tag123")
|
|
||||||
}
|
|
|
@ -1,111 +0,0 @@
|
||||||
package licenseutils
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
|
|
||||||
"github.com/docker/licensing"
|
|
||||||
"github.com/docker/licensing/model"
|
|
||||||
)
|
|
||||||
|
|
||||||
type (
|
|
||||||
fakeLicensingClient struct {
|
|
||||||
loginViaAuthFunc func(ctx context.Context, username, password string) (authToken string, err error)
|
|
||||||
getHubUserOrgsFunc func(ctx context.Context, authToken string) (orgs []model.Org, err error)
|
|
||||||
getHubUserByNameFunc func(ctx context.Context, username string) (user *model.User, err error)
|
|
||||||
verifyLicenseFunc func(ctx context.Context, license model.IssuedLicense) (res *model.CheckResponse, err error)
|
|
||||||
generateNewTrialSubscriptionFunc func(ctx context.Context, authToken, dockerID string) (subscriptionID string, err error)
|
|
||||||
listSubscriptionsFunc func(ctx context.Context, authToken, dockerID string) (response []*model.Subscription, err error)
|
|
||||||
listSubscriptionsDetailsFunc func(ctx context.Context, authToken, dockerID string) (response []*model.SubscriptionDetail, err error)
|
|
||||||
downloadLicenseFromHubFunc func(ctx context.Context, authToken, subscriptionID string) (license *model.IssuedLicense, err error)
|
|
||||||
parseLicenseFunc func(license []byte) (parsedLicense *model.IssuedLicense, err error)
|
|
||||||
storeLicenseFunc func(ctx context.Context, dclnt licensing.WrappedDockerClient, licenses *model.IssuedLicense, localRootDir string) error
|
|
||||||
loadLocalLicenseFunc func(ctx context.Context, dclnt licensing.WrappedDockerClient) (*model.Subscription, error)
|
|
||||||
summarizeLicenseFunc func(*model.CheckResponse) *model.Subscription
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
func (c *fakeLicensingClient) LoginViaAuth(ctx context.Context, username, password string) (authToken string, err error) {
|
|
||||||
if c.loginViaAuthFunc != nil {
|
|
||||||
return c.loginViaAuthFunc(ctx, username, password)
|
|
||||||
}
|
|
||||||
return "", nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *fakeLicensingClient) GetHubUserOrgs(ctx context.Context, authToken string) (orgs []model.Org, err error) {
|
|
||||||
if c.getHubUserOrgsFunc != nil {
|
|
||||||
return c.getHubUserOrgsFunc(ctx, authToken)
|
|
||||||
}
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *fakeLicensingClient) GetHubUserByName(ctx context.Context, username string) (user *model.User, err error) {
|
|
||||||
if c.getHubUserByNameFunc != nil {
|
|
||||||
return c.getHubUserByNameFunc(ctx, username)
|
|
||||||
}
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *fakeLicensingClient) VerifyLicense(ctx context.Context, license model.IssuedLicense) (res *model.CheckResponse, err error) {
|
|
||||||
if c.verifyLicenseFunc != nil {
|
|
||||||
return c.verifyLicenseFunc(ctx, license)
|
|
||||||
}
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *fakeLicensingClient) GenerateNewTrialSubscription(ctx context.Context, authToken, dockerID string) (subscriptionID string, err error) {
|
|
||||||
if c.generateNewTrialSubscriptionFunc != nil {
|
|
||||||
return c.generateNewTrialSubscriptionFunc(ctx, authToken, dockerID)
|
|
||||||
}
|
|
||||||
return "", nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *fakeLicensingClient) ListSubscriptions(ctx context.Context, authToken, dockerID string) (response []*model.Subscription, err error) {
|
|
||||||
if c.listSubscriptionsFunc != nil {
|
|
||||||
return c.listSubscriptionsFunc(ctx, authToken, dockerID)
|
|
||||||
}
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *fakeLicensingClient) ListSubscriptionsDetails(ctx context.Context, authToken, dockerID string) (response []*model.SubscriptionDetail, err error) {
|
|
||||||
if c.listSubscriptionsDetailsFunc != nil {
|
|
||||||
return c.listSubscriptionsDetailsFunc(ctx, authToken, dockerID)
|
|
||||||
}
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *fakeLicensingClient) DownloadLicenseFromHub(ctx context.Context, authToken, subscriptionID string) (license *model.IssuedLicense, err error) {
|
|
||||||
if c.downloadLicenseFromHubFunc != nil {
|
|
||||||
return c.downloadLicenseFromHubFunc(ctx, authToken, subscriptionID)
|
|
||||||
}
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *fakeLicensingClient) ParseLicense(license []byte) (parsedLicense *model.IssuedLicense, err error) {
|
|
||||||
if c.parseLicenseFunc != nil {
|
|
||||||
return c.parseLicenseFunc(license)
|
|
||||||
}
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *fakeLicensingClient) StoreLicense(ctx context.Context, dclnt licensing.WrappedDockerClient, licenses *model.IssuedLicense, localRootDir string) error {
|
|
||||||
if c.storeLicenseFunc != nil {
|
|
||||||
return c.storeLicenseFunc(ctx, dclnt, licenses, localRootDir)
|
|
||||||
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *fakeLicensingClient) LoadLocalLicense(ctx context.Context, dclnt licensing.WrappedDockerClient) (*model.Subscription, error) {
|
|
||||||
if c.loadLocalLicenseFunc != nil {
|
|
||||||
return c.loadLocalLicenseFunc(ctx, dclnt)
|
|
||||||
|
|
||||||
}
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *fakeLicensingClient) SummarizeLicense(cr *model.CheckResponse) *model.Subscription {
|
|
||||||
if c.summarizeLicenseFunc != nil {
|
|
||||||
return c.summarizeLicenseFunc(cr)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
|
@ -1,28 +0,0 @@
|
||||||
package licenseutils
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/docker/licensing/model"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// licensingDefaultBaseURI is the default license server base URL
|
|
||||||
licensingDefaultBaseURI = "https://store.docker.com"
|
|
||||||
|
|
||||||
// licensingPublicKey is the official public license key for store.docker.com
|
|
||||||
// nolint: lll
|
|
||||||
licensingPublicKeys = []string{
|
|
||||||
"LS0tLS1CRUdJTiBQVUJMSUMgS0VZLS0tLS0Ka2lkOiBKN0xEOjY3VlI6TDVIWjpVN0JBOjJPNEc6NEFMMzpPRjJOOkpIR0I6RUZUSDo1Q1ZROk1GRU86QUVJVAoKTUlJQ0lqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FnOEFNSUlDQ2dLQ0FnRUF5ZEl5K2xVN283UGNlWSs0K3MrQwpRNU9FZ0N5RjhDeEljUUlXdUs4NHBJaVpjaVk2NzMweUNZbndMU0tUbHcrVTZVQy9RUmVXUmlvTU5ORTVEczVUCllFWGJHRzZvbG0ycWRXYkJ3Y0NnKzJVVUgvT2NCOVd1UDZnUlBIcE1GTXN4RHpXd3ZheThKVXVIZ1lVTFVwbTEKSXYrbXE3bHA1blEvUnhyVDBLWlJBUVRZTEVNRWZHd20zaE1PL2dlTFBTK2hnS1B0SUhsa2c2L1djb3hUR29LUAo3OWQvd2FIWXhHTmw3V2hTbmVpQlN4YnBiUUFLazIxbGc3OThYYjd2WnlFQVRETXJSUjlNZUU2QWRqNUhKcFkzCkNveVJBUENtYUtHUkNLNHVvWlNvSXUwaEZWbEtVUHliYncwMDBHTyt3YTJLTjhVd2dJSW0waTVJMXVXOUdrcTQKempCeTV6aGdxdVVYYkc5YldQQU9ZcnE1UWE4MUR4R2NCbEp5SFlBcCtERFBFOVRHZzR6WW1YakpueFpxSEVkdQpHcWRldlo4WE1JMHVrZmtHSUkxNHdVT2lNSUlJclhsRWNCZi80Nkk4Z1FXRHp4eWNaZS9KR1grTEF1YXlYcnlyClVGZWhWTlVkWlVsOXdYTmFKQitrYUNxejVRd2FSOTNzR3crUVNmdEQwTnZMZTdDeU9IK0U2dmc2U3QvTmVUdmcKdjhZbmhDaVhJbFo4SE9mSXdOZTd0RUYvVWN6NU9iUHlrbTN0eWxyTlVqdDBWeUFtdHRhY1ZJMmlHaWhjVVBybQprNGxWSVo3VkQvTFNXK2k3eW9TdXJ0cHNQWGNlMnBLRElvMzBsSkdoTy8zS1VtbDJTVVpDcXpKMXlFbUtweXNICjVIRFc5Y3NJRkNBM2RlQWpmWlV2TjdVQ0F3RUFBUT09Ci0tLS0tRU5EIFBVQkxJQyBLRVktLS0tLQo=",
|
|
||||||
"LS0tLS1CRUdJTiBQVUJMSUMgS0VZLS0tLS0Ka2V5SUQ6IFpGSjI6Q1c1Szo1M0tSOlo0NUg6NlpVQzpJNFhFOlpUS1A6TVQ1UjpQWFpMOlNTNE46RjQ0NDo0U1Q0CmtpZDogWkZKMjpDVzVLOjUzS1I6WjQ1SDo2WlVDOkk0WEU6WlRLUDpNVDVSOlBYWkw6U1M0TjpGNDQ0OjRTVDQKCk1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBd1FhVVRaUFhQZnloZFZVdkJkbXkKZlViYXZYL1pmdkNkMCtGREdNb0ZQazlUTlE1aVZPSkhaUVVNa2N2d2QrdVdaV3dvdWtEUGhZaWxEQTZ6Y3krQQowdERFQkF0Nmc5TGM3UFNXU1BZMTJpbWxnbC85RmJzQnZsSjFRc1RJNGlPUjQ1K0FsMHMxMWhaNG0wR1k4UXQ4CnpFN0RYU1BNUzVRTHlUcHlEemZkQURVcWFGRVcxNTVOQ3BaKzZ6N0lHZCt0V2xjalB3QzQwb3ppbWM1bXVUSWgKb2w1WG1hUFREYk45VzhDWGQ1ZWdUeEExZU43YTA3MWR0R1RialFMUEhvb0QxRURsbitvZjZ2VGFReUphWWJmQgpNRHF2NFdraG9QSzJPWWZ5OXVLR1lTNS9ieHIzUWVTUGRoWVFrQzl2YVZsRUtuTjFZaER6VXZVZGR1c3lyRUdICjd3SURBUUFCCi0tLS0tRU5EIFBVQkxJQyBLRVktLS0tLQo=",
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
type (
|
|
||||||
|
|
||||||
// LicenseDisplay stores license details for display
|
|
||||||
LicenseDisplay struct {
|
|
||||||
model.Subscription
|
|
||||||
Num int
|
|
||||||
Owner string
|
|
||||||
ComponentsString string
|
|
||||||
}
|
|
||||||
)
|
|
|
@ -1,202 +0,0 @@
|
||||||
package licenseutils
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/docker/docker/api/types"
|
|
||||||
"github.com/docker/licensing"
|
|
||||||
"github.com/docker/licensing/model"
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
)
|
|
||||||
|
|
||||||
// HubUser wraps a licensing client and holds key information
|
|
||||||
// for a user to avoid multiple lookups
|
|
||||||
type HubUser struct {
|
|
||||||
Client licensing.Client
|
|
||||||
token string
|
|
||||||
User model.User
|
|
||||||
Orgs []model.Org
|
|
||||||
}
|
|
||||||
|
|
||||||
//GetOrgByID finds the org by the ID in the users list of orgs
|
|
||||||
func (u HubUser) GetOrgByID(orgID string) (model.Org, error) {
|
|
||||||
for _, org := range u.Orgs {
|
|
||||||
if org.ID == orgID {
|
|
||||||
return org, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return model.Org{}, fmt.Errorf("org %s not found", orgID)
|
|
||||||
}
|
|
||||||
|
|
||||||
func getClient() (licensing.Client, error) {
|
|
||||||
baseURI, err := url.Parse(licensingDefaultBaseURI)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return licensing.New(&licensing.Config{
|
|
||||||
BaseURI: *baseURI,
|
|
||||||
HTTPClient: &http.Client{},
|
|
||||||
PublicKeys: licensingPublicKeys,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Login to the license server and return a client that can be used to look up and download license files or generate new trial licenses
|
|
||||||
func Login(ctx context.Context, authConfig *types.AuthConfig) (HubUser, error) {
|
|
||||||
lclient, err := getClient()
|
|
||||||
if err != nil {
|
|
||||||
return HubUser{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// For licensing we know they must have a valid login session
|
|
||||||
if authConfig.Username == "" {
|
|
||||||
return HubUser{}, fmt.Errorf("you must be logged in to access licenses. Please use 'docker login' then try again")
|
|
||||||
}
|
|
||||||
token, err := lclient.LoginViaAuth(ctx, authConfig.Username, authConfig.Password)
|
|
||||||
if err != nil {
|
|
||||||
return HubUser{}, err
|
|
||||||
}
|
|
||||||
user, err := lclient.GetHubUserByName(ctx, authConfig.Username)
|
|
||||||
if err != nil {
|
|
||||||
return HubUser{}, err
|
|
||||||
}
|
|
||||||
orgs, err := lclient.GetHubUserOrgs(ctx, token)
|
|
||||||
if err != nil {
|
|
||||||
return HubUser{}, err
|
|
||||||
}
|
|
||||||
return HubUser{
|
|
||||||
Client: lclient,
|
|
||||||
token: token,
|
|
||||||
User: *user,
|
|
||||||
Orgs: orgs,
|
|
||||||
}, nil
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetAvailableLicenses finds all available licenses for a given account and their orgs
|
|
||||||
func (u HubUser) GetAvailableLicenses(ctx context.Context) ([]LicenseDisplay, error) {
|
|
||||||
subs, err := u.Client.ListSubscriptions(ctx, u.token, u.User.ID)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
for _, org := range u.Orgs {
|
|
||||||
orgSub, err := u.Client.ListSubscriptions(ctx, u.token, org.ID)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
subs = append(subs, orgSub...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Convert the SubscriptionDetails to a more user-friendly type to render in the CLI
|
|
||||||
|
|
||||||
res := []LicenseDisplay{}
|
|
||||||
|
|
||||||
// Filter out expired licenses
|
|
||||||
i := 0
|
|
||||||
for _, s := range subs {
|
|
||||||
if s.State == "active" && s.Expires != nil {
|
|
||||||
owner := ""
|
|
||||||
if s.DockerID == u.User.ID {
|
|
||||||
owner = u.User.Username
|
|
||||||
} else {
|
|
||||||
ownerOrg, err := u.GetOrgByID(s.DockerID)
|
|
||||||
if err == nil {
|
|
||||||
owner = ownerOrg.Orgname
|
|
||||||
} else {
|
|
||||||
owner = "unknown"
|
|
||||||
logrus.Debugf("Unable to lookup org ID %s: %s", s.DockerID, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
comps := []string{}
|
|
||||||
for _, pc := range s.PricingComponents {
|
|
||||||
comps = append(comps, fmt.Sprintf("%s:%d", pc.Name, pc.Value))
|
|
||||||
}
|
|
||||||
res = append(res, LicenseDisplay{
|
|
||||||
Subscription: *s,
|
|
||||||
Num: i,
|
|
||||||
Owner: owner,
|
|
||||||
ComponentsString: strings.Join(comps, ","),
|
|
||||||
})
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return res, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GenerateTrialLicense will generate a new trial license for the specified user or org
|
|
||||||
func (u HubUser) GenerateTrialLicense(ctx context.Context, targetID string) (*model.IssuedLicense, error) {
|
|
||||||
subID, err := u.Client.GenerateNewTrialSubscription(ctx, u.token, targetID)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return u.Client.DownloadLicenseFromHub(ctx, u.token, subID)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetIssuedLicense will download a license by ID
|
|
||||||
func (u HubUser) GetIssuedLicense(ctx context.Context, ID string) (*model.IssuedLicense, error) {
|
|
||||||
return u.Client.DownloadLicenseFromHub(ctx, u.token, ID)
|
|
||||||
}
|
|
||||||
|
|
||||||
// LoadLocalIssuedLicense will load a local license file
|
|
||||||
func LoadLocalIssuedLicense(ctx context.Context, filename string) (*model.IssuedLicense, error) {
|
|
||||||
lclient, err := getClient()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return doLoadLocalIssuedLicense(ctx, filename, lclient)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetLicenseSummary summarizes the license for the user
|
|
||||||
func GetLicenseSummary(ctx context.Context, license model.IssuedLicense) (string, error) {
|
|
||||||
lclient, err := getClient()
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
cr, err := lclient.VerifyLicense(ctx, license)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return lclient.SummarizeLicense(cr).String(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func doLoadLocalIssuedLicense(ctx context.Context, filename string, lclient licensing.Client) (*model.IssuedLicense, error) {
|
|
||||||
var license model.IssuedLicense
|
|
||||||
data, err := ioutil.ReadFile(filename)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
// The file may contain a leading BOM, which will choke the
|
|
||||||
// json deserializer.
|
|
||||||
data = bytes.TrimPrefix(data, []byte("\xef\xbb\xbf"))
|
|
||||||
|
|
||||||
err = json.Unmarshal(data, &license)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrap(err, "malformed license file")
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = lclient.VerifyLicense(ctx, license)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return &license, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ApplyLicense will store a license on the local system
|
|
||||||
func ApplyLicense(ctx context.Context, dclient licensing.WrappedDockerClient, license *model.IssuedLicense) error {
|
|
||||||
info, err := dclient.Info(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return licensing.StoreLicense(ctx, dclient, license, info.DockerRootDir)
|
|
||||||
}
|
|
|
@ -1,234 +0,0 @@
|
||||||
package licenseutils
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/docker/docker/api/types"
|
|
||||||
"github.com/docker/licensing/model"
|
|
||||||
"gotest.tools/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestLoginNoAuth(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
|
|
||||||
_, err := Login(ctx, &types.AuthConfig{})
|
|
||||||
|
|
||||||
assert.ErrorContains(t, err, "must be logged in")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGetOrgByID(t *testing.T) {
|
|
||||||
orgs := []model.Org{
|
|
||||||
{ID: "id1"},
|
|
||||||
{ID: "id2"},
|
|
||||||
}
|
|
||||||
u := HubUser{
|
|
||||||
Orgs: orgs,
|
|
||||||
}
|
|
||||||
o, err := u.GetOrgByID("id1")
|
|
||||||
assert.NilError(t, err)
|
|
||||||
assert.Assert(t, o.ID == "id1")
|
|
||||||
o, err = u.GetOrgByID("id2")
|
|
||||||
assert.NilError(t, err)
|
|
||||||
assert.Assert(t, o.ID == "id2")
|
|
||||||
o, err = u.GetOrgByID("id3")
|
|
||||||
assert.ErrorContains(t, err, "not found")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGetAvailableLicensesListFail(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
user := HubUser{
|
|
||||||
Client: &fakeLicensingClient{
|
|
||||||
listSubscriptionsFunc: func(ctx context.Context, authToken, dockerID string) (response []*model.Subscription, err error) {
|
|
||||||
return nil, fmt.Errorf("list subscriptions error")
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
_, err := user.GetAvailableLicenses(ctx)
|
|
||||||
assert.ErrorContains(t, err, "list subscriptions error")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGetAvailableLicensesOrgFail(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
user := HubUser{
|
|
||||||
Orgs: []model.Org{
|
|
||||||
{ID: "orgid"},
|
|
||||||
},
|
|
||||||
Client: &fakeLicensingClient{
|
|
||||||
listSubscriptionsFunc: func(ctx context.Context, authToken, dockerID string) (response []*model.Subscription, err error) {
|
|
||||||
if dockerID == "orgid" {
|
|
||||||
return nil, fmt.Errorf("list subscriptions org error")
|
|
||||||
}
|
|
||||||
return nil, nil
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
_, err := user.GetAvailableLicenses(ctx)
|
|
||||||
assert.ErrorContains(t, err, "list subscriptions org error")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGetAvailableLicensesHappy(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
expiration := time.Now().Add(3600 * time.Second)
|
|
||||||
user := HubUser{
|
|
||||||
User: model.User{
|
|
||||||
ID: "userid",
|
|
||||||
Username: "username",
|
|
||||||
},
|
|
||||||
Orgs: []model.Org{
|
|
||||||
{
|
|
||||||
ID: "orgid",
|
|
||||||
Orgname: "orgname",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Client: &fakeLicensingClient{
|
|
||||||
listSubscriptionsFunc: func(ctx context.Context, authToken, dockerID string) (response []*model.Subscription, err error) {
|
|
||||||
if dockerID == "orgid" {
|
|
||||||
return []*model.Subscription{
|
|
||||||
{
|
|
||||||
State: "expired",
|
|
||||||
Expires: &expiration,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
State: "active",
|
|
||||||
DockerID: "orgid",
|
|
||||||
Expires: &expiration,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
State: "active",
|
|
||||||
DockerID: "invalidid",
|
|
||||||
Expires: &expiration,
|
|
||||||
},
|
|
||||||
}, nil
|
|
||||||
} else if dockerID == "userid" {
|
|
||||||
return []*model.Subscription{
|
|
||||||
{
|
|
||||||
State: "expired",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
State: "active",
|
|
||||||
DockerID: "userid",
|
|
||||||
Expires: &expiration,
|
|
||||||
PricingComponents: model.PricingComponents{
|
|
||||||
{
|
|
||||||
Name: "comp1",
|
|
||||||
Value: 1,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "comp2",
|
|
||||||
Value: 2,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
return nil, nil
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
subs, err := user.GetAvailableLicenses(ctx)
|
|
||||||
assert.NilError(t, err)
|
|
||||||
assert.Assert(t, len(subs) == 3)
|
|
||||||
assert.Assert(t, subs[0].Owner == "username")
|
|
||||||
assert.Assert(t, subs[0].State == "active")
|
|
||||||
assert.Assert(t, subs[0].ComponentsString == "comp1:1,comp2:2")
|
|
||||||
assert.Assert(t, subs[1].Owner == "orgname")
|
|
||||||
assert.Assert(t, subs[1].State == "active")
|
|
||||||
assert.Assert(t, subs[2].Owner == "unknown")
|
|
||||||
assert.Assert(t, subs[2].State == "active")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGenerateTrialFail(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
user := HubUser{
|
|
||||||
Client: &fakeLicensingClient{
|
|
||||||
generateNewTrialSubscriptionFunc: func(ctx context.Context, authToken, dockerID string) (subscriptionID string, err error) {
|
|
||||||
return "", fmt.Errorf("generate trial failure")
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
targetID := "targetidgoeshere"
|
|
||||||
_, err := user.GenerateTrialLicense(ctx, targetID)
|
|
||||||
assert.ErrorContains(t, err, "generate trial failure")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGenerateTrialHappy(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
user := HubUser{
|
|
||||||
Client: &fakeLicensingClient{
|
|
||||||
generateNewTrialSubscriptionFunc: func(ctx context.Context, authToken, dockerID string) (subscriptionID string, err error) {
|
|
||||||
return "subid", nil
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
targetID := "targetidgoeshere"
|
|
||||||
_, err := user.GenerateTrialLicense(ctx, targetID)
|
|
||||||
assert.NilError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGetIssuedLicense(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
user := HubUser{
|
|
||||||
Client: &fakeLicensingClient{},
|
|
||||||
}
|
|
||||||
id := "idgoeshere"
|
|
||||||
_, err := user.GetIssuedLicense(ctx, id)
|
|
||||||
assert.NilError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLoadLocalIssuedLicenseNotExist(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
tmpdir, err := ioutil.TempDir("", "licensing-test")
|
|
||||||
assert.NilError(t, err)
|
|
||||||
defer os.RemoveAll(tmpdir)
|
|
||||||
filename := filepath.Join(tmpdir, "subscription.lic")
|
|
||||||
_, err = LoadLocalIssuedLicense(ctx, filename)
|
|
||||||
assert.ErrorContains(t, err, "no such file")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLoadLocalIssuedLicenseNotJson(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
tmpdir, err := ioutil.TempDir("", "licensing-test")
|
|
||||||
assert.NilError(t, err)
|
|
||||||
defer os.RemoveAll(tmpdir)
|
|
||||||
filename := filepath.Join(tmpdir, "subscription.lic")
|
|
||||||
err = ioutil.WriteFile(filename, []byte("not json"), 0644)
|
|
||||||
assert.NilError(t, err)
|
|
||||||
_, err = LoadLocalIssuedLicense(ctx, filename)
|
|
||||||
assert.ErrorContains(t, err, "malformed license file")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLoadLocalIssuedLicenseNoVerify(t *testing.T) {
|
|
||||||
lclient := &fakeLicensingClient{
|
|
||||||
verifyLicenseFunc: func(ctx context.Context, license model.IssuedLicense) (res *model.CheckResponse, err error) {
|
|
||||||
return nil, fmt.Errorf("verification failed")
|
|
||||||
},
|
|
||||||
}
|
|
||||||
ctx := context.Background()
|
|
||||||
tmpdir, err := ioutil.TempDir("", "licensing-test")
|
|
||||||
assert.NilError(t, err)
|
|
||||||
defer os.RemoveAll(tmpdir)
|
|
||||||
filename := filepath.Join(tmpdir, "subscription.lic")
|
|
||||||
err = ioutil.WriteFile(filename, []byte("{}"), 0644)
|
|
||||||
assert.NilError(t, err)
|
|
||||||
_, err = doLoadLocalIssuedLicense(ctx, filename, lclient)
|
|
||||||
assert.ErrorContains(t, err, "verification failed")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLoadLocalIssuedLicenseHappy(t *testing.T) {
|
|
||||||
lclient := &fakeLicensingClient{}
|
|
||||||
ctx := context.Background()
|
|
||||||
tmpdir, err := ioutil.TempDir("", "licensing-test")
|
|
||||||
assert.NilError(t, err)
|
|
||||||
defer os.RemoveAll(tmpdir)
|
|
||||||
filename := filepath.Join(tmpdir, "subscription.lic")
|
|
||||||
err = ioutil.WriteFile(filename, []byte("{}"), 0644)
|
|
||||||
assert.NilError(t, err)
|
|
||||||
_, err = doLoadLocalIssuedLicense(ctx, filename, lclient)
|
|
||||||
assert.NilError(t, err)
|
|
||||||
}
|
|
|
@ -1,61 +0,0 @@
|
||||||
package containerized
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
|
|
||||||
"github.com/containerd/containerd/containers"
|
|
||||||
"github.com/containerd/containerd/oci"
|
|
||||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
|
||||||
)
|
|
||||||
|
|
||||||
// WithAllCapabilities enables all capabilities required to run privileged containers
|
|
||||||
func WithAllCapabilities(_ context.Context, _ oci.Client, c *containers.Container, s *specs.Spec) error {
|
|
||||||
caps := []string{
|
|
||||||
"CAP_CHOWN",
|
|
||||||
"CAP_DAC_OVERRIDE",
|
|
||||||
"CAP_DAC_READ_SEARCH",
|
|
||||||
"CAP_FOWNER",
|
|
||||||
"CAP_FSETID",
|
|
||||||
"CAP_KILL",
|
|
||||||
"CAP_SETGID",
|
|
||||||
"CAP_SETUID",
|
|
||||||
"CAP_SETPCAP",
|
|
||||||
"CAP_LINUX_IMMUTABLE",
|
|
||||||
"CAP_NET_BIND_SERVICE",
|
|
||||||
"CAP_NET_BROADCAST",
|
|
||||||
"CAP_NET_ADMIN",
|
|
||||||
"CAP_NET_RAW",
|
|
||||||
"CAP_IPC_LOCK",
|
|
||||||
"CAP_IPC_OWNER",
|
|
||||||
"CAP_SYS_MODULE",
|
|
||||||
"CAP_SYS_RAWIO",
|
|
||||||
"CAP_SYS_CHROOT",
|
|
||||||
"CAP_SYS_PTRACE",
|
|
||||||
"CAP_SYS_PACCT",
|
|
||||||
"CAP_SYS_ADMIN",
|
|
||||||
"CAP_SYS_BOOT",
|
|
||||||
"CAP_SYS_NICE",
|
|
||||||
"CAP_SYS_RESOURCE",
|
|
||||||
"CAP_SYS_TIME",
|
|
||||||
"CAP_SYS_TTY_CONFIG",
|
|
||||||
"CAP_MKNOD",
|
|
||||||
"CAP_LEASE",
|
|
||||||
"CAP_AUDIT_WRITE",
|
|
||||||
"CAP_AUDIT_CONTROL",
|
|
||||||
"CAP_SETFCAP",
|
|
||||||
"CAP_MAC_OVERRIDE",
|
|
||||||
"CAP_MAC_ADMIN",
|
|
||||||
"CAP_SYSLOG",
|
|
||||||
"CAP_WAKE_ALARM",
|
|
||||||
"CAP_BLOCK_SUSPEND",
|
|
||||||
"CAP_AUDIT_READ",
|
|
||||||
}
|
|
||||||
if s.Process.Capabilities == nil {
|
|
||||||
s.Process.Capabilities = &specs.LinuxCapabilities{}
|
|
||||||
}
|
|
||||||
s.Process.Capabilities.Bounding = caps
|
|
||||||
s.Process.Capabilities.Effective = caps
|
|
||||||
s.Process.Capabilities.Inheritable = caps
|
|
||||||
s.Process.Capabilities.Permitted = caps
|
|
||||||
return nil
|
|
||||||
}
|
|
|
@ -1,21 +0,0 @@
|
||||||
package containerized
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/containerd/containerd/containers"
|
|
||||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
|
||||||
"gotest.tools/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestWithAllCapabilities(t *testing.T) {
|
|
||||||
c := &containers.Container{}
|
|
||||||
s := &specs.Spec{
|
|
||||||
Process: &specs.Process{},
|
|
||||||
}
|
|
||||||
ctx := context.Background()
|
|
||||||
err := WithAllCapabilities(ctx, nil, c, s)
|
|
||||||
assert.NilError(t, err)
|
|
||||||
assert.Assert(t, len(s.Process.Capabilities.Bounding) > 0)
|
|
||||||
}
|
|
|
@ -1,74 +0,0 @@
|
||||||
package containerized
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
|
|
||||||
"github.com/containerd/containerd"
|
|
||||||
"github.com/containerd/containerd/errdefs"
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
)
|
|
||||||
|
|
||||||
// AtomicImageUpdate will perform an update of the given container with the new image
|
|
||||||
// and verify success via the provided healthcheckFn. If the healthcheck fails, the
|
|
||||||
// container will be reverted to the prior image
|
|
||||||
func AtomicImageUpdate(ctx context.Context, container containerd.Container, image containerd.Image, healthcheckFn func() error) error {
|
|
||||||
updateCompleted := false
|
|
||||||
err := pauseAndRun(ctx, container, func() error {
|
|
||||||
if err := container.Update(ctx, WithUpgrade(image)); err != nil {
|
|
||||||
return errors.Wrap(err, "failed to update to new image")
|
|
||||||
}
|
|
||||||
updateCompleted = true
|
|
||||||
task, err := container.Task(ctx, nil)
|
|
||||||
if err != nil {
|
|
||||||
if errdefs.IsNotFound(err) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return errors.Wrap(err, "failed to lookup task")
|
|
||||||
}
|
|
||||||
return task.Kill(ctx, sigTERM)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
if updateCompleted {
|
|
||||||
logrus.WithError(err).Error("failed to update, rolling back")
|
|
||||||
return rollBack(ctx, container)
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := healthcheckFn(); err != nil {
|
|
||||||
logrus.WithError(err).Error("failed health check, rolling back")
|
|
||||||
return rollBack(ctx, container)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func rollBack(ctx context.Context, container containerd.Container) error {
|
|
||||||
return pauseAndRun(ctx, container, func() error {
|
|
||||||
if err := container.Update(ctx, WithRollback); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
task, err := container.Task(ctx, nil)
|
|
||||||
if err != nil {
|
|
||||||
if errdefs.IsNotFound(err) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return errors.Wrap(err, "failed to lookup task")
|
|
||||||
}
|
|
||||||
return task.Kill(ctx, sigTERM)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func pauseAndRun(ctx context.Context, container containerd.Container, fn func() error) error {
|
|
||||||
task, err := container.Task(ctx, nil)
|
|
||||||
if err != nil {
|
|
||||||
if errdefs.IsNotFound(err) {
|
|
||||||
return fn()
|
|
||||||
}
|
|
||||||
return errors.Wrap(err, "failed to lookup task")
|
|
||||||
}
|
|
||||||
if err := task.Pause(ctx); err != nil {
|
|
||||||
return errors.Wrap(err, "failed to pause task")
|
|
||||||
}
|
|
||||||
defer task.Resume(ctx)
|
|
||||||
return fn()
|
|
||||||
}
|
|
|
@ -1,12 +0,0 @@
|
||||||
// +build !windows
|
|
||||||
|
|
||||||
package containerized
|
|
||||||
|
|
||||||
import (
|
|
||||||
"golang.org/x/sys/unix"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// sigTERM maps to unix.SIGTERM
|
|
||||||
sigTERM = unix.SIGTERM
|
|
||||||
)
|
|
|
@ -1,12 +0,0 @@
|
||||||
// +build windows
|
|
||||||
|
|
||||||
package containerized
|
|
||||||
|
|
||||||
import (
|
|
||||||
"syscall"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// sigTERM all signals are ignored by containerd kill windows
|
|
||||||
sigTERM = syscall.Signal(0)
|
|
||||||
)
|
|
|
@ -1,158 +0,0 @@
|
||||||
package containerized
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/containerd/containerd"
|
|
||||||
"github.com/containerd/containerd/containers"
|
|
||||||
"github.com/containerd/containerd/diff/apply"
|
|
||||||
"github.com/containerd/containerd/mount"
|
|
||||||
"github.com/containerd/containerd/rootfs"
|
|
||||||
"github.com/containerd/containerd/snapshots"
|
|
||||||
"github.com/opencontainers/image-spec/identity"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
gcRoot = "containerd.io/gc.root"
|
|
||||||
timestampFormat = "01-02-2006-15:04:05"
|
|
||||||
previousRevision = "docker.com/revision.previous"
|
|
||||||
imageLabel = "docker.com/revision.image"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ErrNoPreviousRevision returned if the container has to previous revision
|
|
||||||
var ErrNoPreviousRevision = errors.New("no previous revision")
|
|
||||||
|
|
||||||
// WithNewSnapshot creates a new snapshot managed by containerized
|
|
||||||
func WithNewSnapshot(i containerd.Image) containerd.NewContainerOpts {
|
|
||||||
return func(ctx context.Context, client *containerd.Client, c *containers.Container) error {
|
|
||||||
if c.Snapshotter == "" {
|
|
||||||
c.Snapshotter = containerd.DefaultSnapshotter
|
|
||||||
}
|
|
||||||
r, err := create(ctx, client, i, c.ID, "")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
c.SnapshotKey = r.Key
|
|
||||||
c.Image = i.Name()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithUpgrade upgrades an existing container's image to a new one
|
|
||||||
func WithUpgrade(i containerd.Image) containerd.UpdateContainerOpts {
|
|
||||||
return func(ctx context.Context, client *containerd.Client, c *containers.Container) error {
|
|
||||||
revision, err := save(ctx, client, i, c)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
c.Image = i.Name()
|
|
||||||
c.SnapshotKey = revision.Key
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithRollback rolls back to the previous container's revision
|
|
||||||
func WithRollback(ctx context.Context, client *containerd.Client, c *containers.Container) error {
|
|
||||||
prev, err := previous(ctx, client, c)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
ss := client.SnapshotService(c.Snapshotter)
|
|
||||||
sInfo, err := ss.Stat(ctx, prev.Key)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
snapshotImage, ok := sInfo.Labels[imageLabel]
|
|
||||||
if !ok {
|
|
||||||
return fmt.Errorf("snapshot %s does not have a service image label", prev.Key)
|
|
||||||
}
|
|
||||||
if snapshotImage == "" {
|
|
||||||
return fmt.Errorf("snapshot %s has an empty service image label", prev.Key)
|
|
||||||
}
|
|
||||||
c.Image = snapshotImage
|
|
||||||
c.SnapshotKey = prev.Key
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func newRevision(id string) *revision {
|
|
||||||
now := time.Now()
|
|
||||||
return &revision{
|
|
||||||
Timestamp: now,
|
|
||||||
Key: fmt.Sprintf("boss.io.%s.%s", id, now.Format(timestampFormat)),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type revision struct {
|
|
||||||
Timestamp time.Time
|
|
||||||
Key string
|
|
||||||
mounts []mount.Mount
|
|
||||||
}
|
|
||||||
|
|
||||||
// nolint: interfacer
|
|
||||||
func create(ctx context.Context, client *containerd.Client, i containerd.Image, id string, previous string) (*revision, error) {
|
|
||||||
diffIDs, err := i.RootFS(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var (
|
|
||||||
parent = identity.ChainID(diffIDs).String()
|
|
||||||
r = newRevision(id)
|
|
||||||
)
|
|
||||||
labels := map[string]string{
|
|
||||||
gcRoot: r.Timestamp.Format(time.RFC3339),
|
|
||||||
imageLabel: i.Name(),
|
|
||||||
}
|
|
||||||
if previous != "" {
|
|
||||||
labels[previousRevision] = previous
|
|
||||||
}
|
|
||||||
mounts, err := client.SnapshotService(containerd.DefaultSnapshotter).Prepare(ctx, r.Key, parent, snapshots.WithLabels(labels))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
r.mounts = mounts
|
|
||||||
return r, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func save(ctx context.Context, client *containerd.Client, updatedImage containerd.Image, c *containers.Container) (*revision, error) {
|
|
||||||
snapshot, err := create(ctx, client, updatedImage, c.ID, c.SnapshotKey)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
service := client.SnapshotService(c.Snapshotter)
|
|
||||||
// create a diff from the existing snapshot
|
|
||||||
diff, err := rootfs.CreateDiff(ctx, c.SnapshotKey, service, client.DiffService())
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
applier := apply.NewFileSystemApplier(client.ContentStore())
|
|
||||||
if _, err := applier.Apply(ctx, diff, snapshot.mounts); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return snapshot, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// nolint: interfacer
|
|
||||||
func previous(ctx context.Context, client *containerd.Client, c *containers.Container) (*revision, error) {
|
|
||||||
service := client.SnapshotService(c.Snapshotter)
|
|
||||||
sInfo, err := service.Stat(ctx, c.SnapshotKey)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
key := sInfo.Labels[previousRevision]
|
|
||||||
if key == "" {
|
|
||||||
return nil, ErrNoPreviousRevision
|
|
||||||
}
|
|
||||||
parts := strings.Split(key, ".")
|
|
||||||
timestamp, err := time.Parse(timestampFormat, parts[len(parts)-1])
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &revision{
|
|
||||||
Timestamp: timestamp,
|
|
||||||
Key: key,
|
|
||||||
}, nil
|
|
||||||
}
|
|
|
@ -15,7 +15,6 @@ import (
|
||||||
registryclient "github.com/docker/cli/cli/registry/client"
|
registryclient "github.com/docker/cli/cli/registry/client"
|
||||||
"github.com/docker/cli/cli/streams"
|
"github.com/docker/cli/cli/streams"
|
||||||
"github.com/docker/cli/cli/trust"
|
"github.com/docker/cli/cli/trust"
|
||||||
clitypes "github.com/docker/cli/types"
|
|
||||||
"github.com/docker/docker/client"
|
"github.com/docker/docker/client"
|
||||||
notaryclient "github.com/theupdateframework/notary/client"
|
notaryclient "github.com/theupdateframework/notary/client"
|
||||||
)
|
)
|
||||||
|
@ -23,27 +22,25 @@ import (
|
||||||
// NotaryClientFuncType defines a function that returns a fake notary client
|
// NotaryClientFuncType defines a function that returns a fake notary client
|
||||||
type NotaryClientFuncType func(imgRefAndAuth trust.ImageRefAndAuth, actions []string) (notaryclient.Repository, error)
|
type NotaryClientFuncType func(imgRefAndAuth trust.ImageRefAndAuth, actions []string) (notaryclient.Repository, error)
|
||||||
type clientInfoFuncType func() command.ClientInfo
|
type clientInfoFuncType func() command.ClientInfo
|
||||||
type containerizedEngineFuncType func(string) (clitypes.ContainerizedClient, error)
|
|
||||||
|
|
||||||
// FakeCli emulates the default DockerCli
|
// FakeCli emulates the default DockerCli
|
||||||
type FakeCli struct {
|
type FakeCli struct {
|
||||||
command.DockerCli
|
command.DockerCli
|
||||||
client client.APIClient
|
client client.APIClient
|
||||||
configfile *configfile.ConfigFile
|
configfile *configfile.ConfigFile
|
||||||
out *streams.Out
|
out *streams.Out
|
||||||
outBuffer *bytes.Buffer
|
outBuffer *bytes.Buffer
|
||||||
err *bytes.Buffer
|
err *bytes.Buffer
|
||||||
in *streams.In
|
in *streams.In
|
||||||
server command.ServerInfo
|
server command.ServerInfo
|
||||||
clientInfoFunc clientInfoFuncType
|
clientInfoFunc clientInfoFuncType
|
||||||
notaryClientFunc NotaryClientFuncType
|
notaryClientFunc NotaryClientFuncType
|
||||||
manifestStore manifeststore.Store
|
manifestStore manifeststore.Store
|
||||||
registryClient registryclient.RegistryClient
|
registryClient registryclient.RegistryClient
|
||||||
contentTrust bool
|
contentTrust bool
|
||||||
containerizedEngineClientFunc containerizedEngineFuncType
|
contextStore store.Store
|
||||||
contextStore store.Store
|
currentContext string
|
||||||
currentContext string
|
dockerEndpoint docker.Endpoint
|
||||||
dockerEndpoint docker.Endpoint
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewFakeCli returns a fake for the command.Cli interface
|
// NewFakeCli returns a fake for the command.Cli interface
|
||||||
|
@ -218,19 +215,6 @@ func EnableContentTrust(c *FakeCli) {
|
||||||
c.contentTrust = true
|
c.contentTrust = true
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewContainerizedEngineClient returns a containerized engine client
|
|
||||||
func (c *FakeCli) NewContainerizedEngineClient(sockPath string) (clitypes.ContainerizedClient, error) {
|
|
||||||
if c.containerizedEngineClientFunc != nil {
|
|
||||||
return c.containerizedEngineClientFunc(sockPath)
|
|
||||||
}
|
|
||||||
return nil, fmt.Errorf("no containerized engine client available unless defined")
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetContainerizedEngineClient on the fake cli
|
|
||||||
func (c *FakeCli) SetContainerizedEngineClient(containerizedEngineClientFunc containerizedEngineFuncType) {
|
|
||||||
c.containerizedEngineClientFunc = containerizedEngineClientFunc
|
|
||||||
}
|
|
||||||
|
|
||||||
// StackOrchestrator return the selected stack orchestrator
|
// StackOrchestrator return the selected stack orchestrator
|
||||||
func (c *FakeCli) StackOrchestrator(flagValue string) (command.Orchestrator, error) {
|
func (c *FakeCli) StackOrchestrator(flagValue string) (command.Orchestrator, error) {
|
||||||
configOrchestrator := ""
|
configOrchestrator := ""
|
||||||
|
|
|
@ -1,126 +0,0 @@
|
||||||
package versions
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"path"
|
|
||||||
"path/filepath"
|
|
||||||
"sort"
|
|
||||||
|
|
||||||
registryclient "github.com/docker/cli/cli/registry/client"
|
|
||||||
clitypes "github.com/docker/cli/types"
|
|
||||||
"github.com/docker/distribution/reference"
|
|
||||||
ver "github.com/hashicorp/go-version"
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// defaultRuntimeMetadataDir is the location where the metadata file is stored
|
|
||||||
defaultRuntimeMetadataDir = "/var/lib/docker-engine"
|
|
||||||
)
|
|
||||||
|
|
||||||
// GetEngineVersions reports the versions of the engine that are available
|
|
||||||
func GetEngineVersions(ctx context.Context, registryClient registryclient.RegistryClient, registryPrefix, imageName, versionString string) (clitypes.AvailableVersions, error) {
|
|
||||||
if imageName == "" {
|
|
||||||
var err error
|
|
||||||
localMetadata, err := GetCurrentRuntimeMetadata("")
|
|
||||||
if err != nil {
|
|
||||||
return clitypes.AvailableVersions{}, err
|
|
||||||
}
|
|
||||||
imageName = localMetadata.EngineImage
|
|
||||||
}
|
|
||||||
imageRef, err := reference.ParseNormalizedNamed(path.Join(registryPrefix, imageName))
|
|
||||||
if err != nil {
|
|
||||||
return clitypes.AvailableVersions{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
tags, err := registryClient.GetTags(ctx, imageRef)
|
|
||||||
if err != nil {
|
|
||||||
return clitypes.AvailableVersions{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return parseTags(tags, versionString)
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseTags(tags []string, currentVersion string) (clitypes.AvailableVersions, error) {
|
|
||||||
var ret clitypes.AvailableVersions
|
|
||||||
currentVer, err := ver.NewVersion(currentVersion)
|
|
||||||
if err != nil {
|
|
||||||
return ret, errors.Wrapf(err, "failed to parse existing version %s", currentVersion)
|
|
||||||
}
|
|
||||||
downgrades := []clitypes.DockerVersion{}
|
|
||||||
patches := []clitypes.DockerVersion{}
|
|
||||||
upgrades := []clitypes.DockerVersion{}
|
|
||||||
currentSegments := currentVer.Segments()
|
|
||||||
for _, tag := range tags {
|
|
||||||
tmp, err := ver.NewVersion(tag)
|
|
||||||
if err != nil {
|
|
||||||
logrus.Debugf("Unable to parse %s: %s", tag, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
testVersion := clitypes.DockerVersion{Version: *tmp, Tag: tag}
|
|
||||||
if testVersion.LessThan(currentVer) {
|
|
||||||
downgrades = append(downgrades, testVersion)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
testSegments := testVersion.Segments()
|
|
||||||
// lib always provides min 3 segments
|
|
||||||
if testSegments[0] == currentSegments[0] &&
|
|
||||||
testSegments[1] == currentSegments[1] {
|
|
||||||
patches = append(patches, testVersion)
|
|
||||||
} else {
|
|
||||||
upgrades = append(upgrades, testVersion)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
sort.Slice(downgrades, func(i, j int) bool {
|
|
||||||
return downgrades[i].Version.LessThan(&downgrades[j].Version)
|
|
||||||
})
|
|
||||||
sort.Slice(patches, func(i, j int) bool {
|
|
||||||
return patches[i].Version.LessThan(&patches[j].Version)
|
|
||||||
})
|
|
||||||
sort.Slice(upgrades, func(i, j int) bool {
|
|
||||||
return upgrades[i].Version.LessThan(&upgrades[j].Version)
|
|
||||||
})
|
|
||||||
ret.Downgrades = downgrades
|
|
||||||
ret.Patches = patches
|
|
||||||
ret.Upgrades = upgrades
|
|
||||||
return ret, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetCurrentRuntimeMetadata loads the current daemon runtime metadata information from the local host
|
|
||||||
func GetCurrentRuntimeMetadata(metadataDir string) (*clitypes.RuntimeMetadata, error) {
|
|
||||||
if metadataDir == "" {
|
|
||||||
metadataDir = defaultRuntimeMetadataDir
|
|
||||||
}
|
|
||||||
filename := filepath.Join(metadataDir, clitypes.RuntimeMetadataName+".json")
|
|
||||||
|
|
||||||
data, err := ioutil.ReadFile(filename)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var res clitypes.RuntimeMetadata
|
|
||||||
err = json.Unmarshal(data, &res)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrapf(err, "malformed runtime metadata file %s", filename)
|
|
||||||
}
|
|
||||||
return &res, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteRuntimeMetadata stores the metadata on the local system
|
|
||||||
func WriteRuntimeMetadata(metadataDir string, metadata *clitypes.RuntimeMetadata) error {
|
|
||||||
if metadataDir == "" {
|
|
||||||
metadataDir = defaultRuntimeMetadataDir
|
|
||||||
}
|
|
||||||
filename := filepath.Join(metadataDir, clitypes.RuntimeMetadataName+".json")
|
|
||||||
|
|
||||||
data, err := json.Marshal(metadata)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
os.Remove(filename)
|
|
||||||
return ioutil.WriteFile(filename, data, 0644)
|
|
||||||
}
|
|
|
@ -1,105 +0,0 @@
|
||||||
package versions
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
clitypes "github.com/docker/cli/types"
|
|
||||||
"gotest.tools/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestParseTagsSimple(t *testing.T) {
|
|
||||||
tags := []string{"1.0.0", "1.1.2", "1.1.1", "1.2.2"}
|
|
||||||
currentVersion := "1.1.0"
|
|
||||||
res, err := parseTags(tags, currentVersion)
|
|
||||||
assert.NilError(t, err)
|
|
||||||
|
|
||||||
assert.Assert(t, err, "already present")
|
|
||||||
assert.Assert(t, len(res.Downgrades) == 1 && res.Downgrades[0].Tag == "1.0.0")
|
|
||||||
assert.Assert(t, len(res.Patches) == 2 && res.Patches[0].Tag == "1.1.1" && res.Patches[1].Tag == "1.1.2")
|
|
||||||
assert.Assert(t, len(res.Upgrades) == 1 && res.Upgrades[0].Tag == "1.2.2")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestParseConfirmMinSegments(t *testing.T) {
|
|
||||||
tags := []string{"1", "1.1.1", "2"}
|
|
||||||
currentVersion := "1.1"
|
|
||||||
res, err := parseTags(tags, currentVersion)
|
|
||||||
assert.NilError(t, err)
|
|
||||||
|
|
||||||
assert.Assert(t, err, "already present")
|
|
||||||
assert.Assert(t, len(res.Downgrades) == 1 && res.Downgrades[0].Tag == "1")
|
|
||||||
assert.Assert(t, len(res.Patches) == 1 && res.Patches[0].Tag == "1.1.1")
|
|
||||||
assert.Assert(t, len(res.Upgrades) == 1 && res.Upgrades[0].Tag == "2")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestParseTagsFilterPrerelease(t *testing.T) {
|
|
||||||
tags := []string{"1.0.0", "1.1.1", "1.2.2", "1.1.0-beta1"}
|
|
||||||
currentVersion := "1.1.0"
|
|
||||||
res, err := parseTags(tags, currentVersion)
|
|
||||||
assert.NilError(t, err)
|
|
||||||
|
|
||||||
assert.Assert(t, err, "already present")
|
|
||||||
assert.Assert(t, len(res.Downgrades) == 2 && res.Downgrades[0].Tag == "1.0.0")
|
|
||||||
assert.Assert(t, len(res.Patches) == 1 && res.Patches[0].Tag == "1.1.1")
|
|
||||||
assert.Assert(t, len(res.Upgrades) == 1 && res.Upgrades[0].Tag == "1.2.2")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestParseTagsBadTag(t *testing.T) {
|
|
||||||
tags := []string{"1.0.0", "1.1.1", "1.2.2", "notasemanticversion"}
|
|
||||||
currentVersion := "1.1.0"
|
|
||||||
res, err := parseTags(tags, currentVersion)
|
|
||||||
assert.NilError(t, err)
|
|
||||||
|
|
||||||
assert.Assert(t, err, "already present")
|
|
||||||
assert.Assert(t, len(res.Downgrades) == 1 && res.Downgrades[0].Tag == "1.0.0")
|
|
||||||
assert.Assert(t, len(res.Patches) == 1 && res.Patches[0].Tag == "1.1.1")
|
|
||||||
assert.Assert(t, len(res.Upgrades) == 1 && res.Upgrades[0].Tag == "1.2.2")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestParseBadCurrent(t *testing.T) {
|
|
||||||
tags := []string{"1.0.0", "1.1.2", "1.1.1", "1.2.2"}
|
|
||||||
currentVersion := "notasemanticversion"
|
|
||||||
_, err := parseTags(tags, currentVersion)
|
|
||||||
assert.ErrorContains(t, err, "failed to parse existing")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestParseBadCurrent2(t *testing.T) {
|
|
||||||
tags := []string{"1.0.0", "1.1.2", "1.1.1", "1.2.2"}
|
|
||||||
currentVersion := ""
|
|
||||||
_, err := parseTags(tags, currentVersion)
|
|
||||||
assert.ErrorContains(t, err, "failed to parse existing")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGetCurrentRuntimeMetadataNotPresent(t *testing.T) {
|
|
||||||
tmpdir, err := ioutil.TempDir("", "docker-root")
|
|
||||||
assert.NilError(t, err)
|
|
||||||
defer os.RemoveAll(tmpdir)
|
|
||||||
_, err = GetCurrentRuntimeMetadata(tmpdir)
|
|
||||||
assert.ErrorType(t, err, os.IsNotExist)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGetCurrentRuntimeMetadataBadJson(t *testing.T) {
|
|
||||||
tmpdir, err := ioutil.TempDir("", "docker-root")
|
|
||||||
assert.NilError(t, err)
|
|
||||||
defer os.RemoveAll(tmpdir)
|
|
||||||
filename := filepath.Join(tmpdir, clitypes.RuntimeMetadataName+".json")
|
|
||||||
err = ioutil.WriteFile(filename, []byte("not json"), 0644)
|
|
||||||
assert.NilError(t, err)
|
|
||||||
_, err = GetCurrentRuntimeMetadata(tmpdir)
|
|
||||||
assert.ErrorContains(t, err, "malformed runtime metadata file")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGetCurrentRuntimeMetadataHappyPath(t *testing.T) {
|
|
||||||
tmpdir, err := ioutil.TempDir("", "docker-root")
|
|
||||||
assert.NilError(t, err)
|
|
||||||
defer os.RemoveAll(tmpdir)
|
|
||||||
metadata := clitypes.RuntimeMetadata{Platform: "platformgoeshere"}
|
|
||||||
err = WriteRuntimeMetadata(tmpdir, &metadata)
|
|
||||||
assert.NilError(t, err)
|
|
||||||
|
|
||||||
res, err := GetCurrentRuntimeMetadata(tmpdir)
|
|
||||||
assert.NilError(t, err)
|
|
||||||
assert.Equal(t, res.Platform, "platformgoeshere")
|
|
||||||
}
|
|
|
@ -1,88 +0,0 @@
|
||||||
package types
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"io"
|
|
||||||
|
|
||||||
"github.com/docker/docker/api/types"
|
|
||||||
ver "github.com/hashicorp/go-version"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// CommunityEngineImage is the repo name for the community engine
|
|
||||||
CommunityEngineImage = "engine-community"
|
|
||||||
|
|
||||||
// EnterpriseEngineImage is the repo name for the enterprise engine
|
|
||||||
EnterpriseEngineImage = "engine-enterprise"
|
|
||||||
|
|
||||||
// RegistryPrefix is the default prefix used to pull engine images
|
|
||||||
RegistryPrefix = "docker.io/store/docker"
|
|
||||||
|
|
||||||
// ReleaseNotePrefix is where to point users to for release notes
|
|
||||||
ReleaseNotePrefix = "https://docs.docker.com/releasenotes"
|
|
||||||
|
|
||||||
// RuntimeMetadataName is the name of the runtime metadata file
|
|
||||||
// When stored as a label on the container it is prefixed by "com.docker."
|
|
||||||
RuntimeMetadataName = "distribution_based_engine"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ContainerizedClient can be used to manage the lifecycle of
|
|
||||||
// dockerd running as a container on containerd.
|
|
||||||
type ContainerizedClient interface {
|
|
||||||
Close() error
|
|
||||||
ActivateEngine(ctx context.Context,
|
|
||||||
opts EngineInitOptions,
|
|
||||||
out OutStream,
|
|
||||||
authConfig *types.AuthConfig) error
|
|
||||||
DoUpdate(ctx context.Context,
|
|
||||||
opts EngineInitOptions,
|
|
||||||
out OutStream,
|
|
||||||
authConfig *types.AuthConfig) error
|
|
||||||
}
|
|
||||||
|
|
||||||
// EngineInitOptions contains the configuration settings
|
|
||||||
// used during initialization of a containerized docker engine
|
|
||||||
type EngineInitOptions struct {
|
|
||||||
RegistryPrefix string
|
|
||||||
EngineImage string
|
|
||||||
EngineVersion string
|
|
||||||
ConfigFile string
|
|
||||||
RuntimeMetadataDir string
|
|
||||||
}
|
|
||||||
|
|
||||||
// AvailableVersions groups the available versions which were discovered
|
|
||||||
type AvailableVersions struct {
|
|
||||||
Downgrades []DockerVersion
|
|
||||||
Patches []DockerVersion
|
|
||||||
Upgrades []DockerVersion
|
|
||||||
}
|
|
||||||
|
|
||||||
// DockerVersion wraps a semantic version to retain the original tag
|
|
||||||
// since the docker date based versions don't strictly follow semantic
|
|
||||||
// versioning (leading zeros, etc.)
|
|
||||||
type DockerVersion struct {
|
|
||||||
ver.Version
|
|
||||||
Tag string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update stores available updates for rendering in a table
|
|
||||||
type Update struct {
|
|
||||||
Type string
|
|
||||||
Version string
|
|
||||||
Notes string
|
|
||||||
}
|
|
||||||
|
|
||||||
// OutStream is an output stream used to write normal program output.
|
|
||||||
type OutStream interface {
|
|
||||||
io.Writer
|
|
||||||
FD() uintptr
|
|
||||||
IsTerminal() bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// RuntimeMetadata holds platform information about the daemon
|
|
||||||
type RuntimeMetadata struct {
|
|
||||||
Platform string `json:"platform"`
|
|
||||||
ContainerdMinVersion string `json:"containerd_min_version"`
|
|
||||||
Runtime string `json:"runtime"`
|
|
||||||
EngineImage string `json:"engine_image"`
|
|
||||||
}
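// The tests earlier in this diff exercise WriteRuntimeMetadata and GetCurrentRuntimeMetadata
// against this struct, stored as a JSON file named after RuntimeMetadataName. A minimal
// sketch of such helpers, assuming plain JSON on disk and encoding/json, io/ioutil and
// path/filepath imports (an illustration only, not the removed implementation):
//
//	func WriteRuntimeMetadata(dir string, md *RuntimeMetadata) error {
//		data, err := json.Marshal(md)
//		if err != nil {
//			return err
//		}
//		filename := filepath.Join(dir, RuntimeMetadataName+".json")
//		return ioutil.WriteFile(filename, data, 0644)
//	}
//
//	func GetCurrentRuntimeMetadata(dir string) (*RuntimeMetadata, error) {
//		filename := filepath.Join(dir, RuntimeMetadataName+".json")
//		data, err := ioutil.ReadFile(filename)
//		if err != nil {
//			return nil, err // os.IsNotExist(err) when the file has never been written
//		}
//		var md RuntimeMetadata
//		if err := json.Unmarshal(data, &md); err != nil {
//			return nil, fmt.Errorf("malformed runtime metadata file %s: %v", filename, err)
//		}
//		return &md, nil
//	}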
|
|
|
@ -1,19 +1,14 @@
|
||||||
cloud.google.com/go 0ebda48a7f143b1cce9eb37a8c1106ac762a3430 # v0.34.0
|
cloud.google.com/go 0ebda48a7f143b1cce9eb37a8c1106ac762a3430 # v0.34.0
|
||||||
github.com/agl/ed25519 5312a61534124124185d41f09206b9fef1d88403
|
github.com/agl/ed25519 5312a61534124124185d41f09206b9fef1d88403
|
||||||
github.com/asaskevich/govalidator f9ffefc3facfbe0caee3fea233cbb6e8208f4541
|
|
||||||
github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109
|
github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109
|
||||||
github.com/beorn7/perks e7f67b54abbeac9c40a31de0f81159e4cafebd6a
|
github.com/beorn7/perks e7f67b54abbeac9c40a31de0f81159e4cafebd6a
|
||||||
github.com/containerd/console 0650fd9eeb50bab4fc99dceb9f2e14cf58f36e7f
|
github.com/containerd/console 0650fd9eeb50bab4fc99dceb9f2e14cf58f36e7f
|
||||||
github.com/containerd/containerd 7c1e88399ec0b0b077121d9d5ad97e647b11c870
|
github.com/containerd/containerd 7c1e88399ec0b0b077121d9d5ad97e647b11c870
|
||||||
github.com/containerd/continuity aaeac12a7ffcd198ae25440a9dff125c2e2703a7
|
github.com/containerd/continuity aaeac12a7ffcd198ae25440a9dff125c2e2703a7
|
||||||
github.com/containerd/fifo a9fb20d87448d386e6d50b1f2e1fa70dcf0de43c
|
|
||||||
github.com/containerd/ttrpc 92c8520ef9f86600c650dd540266a007bf03670f
|
|
||||||
github.com/containerd/typeurl 2a93cfde8c20b23de8eb84a5adbc234ddf7a9e8d
|
|
||||||
github.com/coreos/etcd d57e8b8d97adfc4a6c224fe116714bf1a1f3beb9 # v3.3.12
|
github.com/coreos/etcd d57e8b8d97adfc4a6c224fe116714bf1a1f3beb9 # v3.3.12
|
||||||
github.com/cpuguy83/go-md2man 20f5889cbdc3c73dbd2862796665e7c465ade7d1 # v1.0.8
|
github.com/cpuguy83/go-md2man 20f5889cbdc3c73dbd2862796665e7c465ade7d1 # v1.0.8
|
||||||
github.com/creack/pty 3a6a957789163cacdfe0e291617a1c8e80612c11 # v1.1.9
|
github.com/creack/pty 3a6a957789163cacdfe0e291617a1c8e80612c11 # v1.1.9
|
||||||
github.com/davecgh/go-spew 8991bc29aa16c548c550c7ff78260e27b9ab7c73 # v1.1.1
|
github.com/davecgh/go-spew 8991bc29aa16c548c550c7ff78260e27b9ab7c73 # v1.1.1
|
||||||
github.com/dgrijalva/jwt-go 06ea1031745cb8b3dab3f6a236daf2b0aa468b7e # v3.2.0
|
|
||||||
github.com/docker/compose-on-kubernetes 78e6a00beda64ac8ccb9fec787e601fe2ce0d5bb # v0.5.0-alpha1
|
github.com/docker/compose-on-kubernetes 78e6a00beda64ac8ccb9fec787e601fe2ce0d5bb # v0.5.0-alpha1
|
||||||
github.com/docker/distribution 0d3efadf0154c2b8a4e7b6621fff9809655cc580
|
github.com/docker/distribution 0d3efadf0154c2b8a4e7b6621fff9809655cc580
|
||||||
github.com/docker/docker a09e6e323e55e1a9b21df9c2c555f5668df3ac9b
|
github.com/docker/docker a09e6e323e55e1a9b21df9c2c555f5668df3ac9b
|
||||||
|
@ -23,8 +18,6 @@ github.com/docker/go-connections 7395e3f8aa162843a74ed6d48e79
|
||||||
github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
|
github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
|
||||||
github.com/docker/go-metrics d466d4f6fd960e01820085bd7e1a24426ee7ef18
|
github.com/docker/go-metrics d466d4f6fd960e01820085bd7e1a24426ee7ef18
|
||||||
github.com/docker/go-units 519db1ee28dcc9fd2474ae59fca29a810482bfb1 # v0.4.0
|
github.com/docker/go-units 519db1ee28dcc9fd2474ae59fca29a810482bfb1 # v0.4.0
|
||||||
github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a
|
|
||||||
github.com/docker/licensing 5f0f1276fe42dd721c1c093604995a9f758ace21
|
|
||||||
github.com/docker/swarmkit 7dded76ec532741c1ad9736cd2bb6d6661f0a386
|
github.com/docker/swarmkit 7dded76ec532741c1ad9736cd2bb6d6661f0a386
|
||||||
github.com/evanphx/json-patch 72bf35d0ff611848c1dc9df0f976c81192392fa5 # v4.1.0
|
github.com/evanphx/json-patch 72bf35d0ff611848c1dc9df0f976c81192392fa5 # v4.1.0
|
||||||
github.com/gofrs/flock 392e7fae8f1b0bdbd67dad7237d23f618feb6dbb # v0.7.1
|
github.com/gofrs/flock 392e7fae8f1b0bdbd67dad7237d23f618feb6dbb # v0.7.1
|
||||||
|
@ -35,12 +28,10 @@ github.com/golang/protobuf aa810b61a9c79d51363740d207bb
|
||||||
github.com/google/go-cmp 3af367b6b30c263d47e8895973edcca9a49cf029 # v0.2.0
|
github.com/google/go-cmp 3af367b6b30c263d47e8895973edcca9a49cf029 # v0.2.0
|
||||||
github.com/google/gofuzz 24818f796faf91cd76ec7bddd72458fbced7a6c1
|
github.com/google/gofuzz 24818f796faf91cd76ec7bddd72458fbced7a6c1
|
||||||
github.com/google/shlex c34317bd91bf98fab745d77b03933cf8769299fe
|
github.com/google/shlex c34317bd91bf98fab745d77b03933cf8769299fe
|
||||||
github.com/google/uuid 0cd6bf5da1e1c83f8b45653022c74f71af0538a4 # v1.1.1
|
|
||||||
github.com/googleapis/gnostic 7c663266750e7d82587642f65e60bc4083f1f84e # v0.2.0
|
github.com/googleapis/gnostic 7c663266750e7d82587642f65e60bc4083f1f84e # v0.2.0
|
||||||
github.com/gorilla/mux 00bdffe0f3c77e27d2cf6f5c70232a2d3e4d9c15 # v1.7.3
|
github.com/gorilla/mux 00bdffe0f3c77e27d2cf6f5c70232a2d3e4d9c15 # v1.7.3
|
||||||
github.com/grpc-ecosystem/grpc-gateway 1a03ca3bad1e1ebadaedd3abb76bc58d4ac8143b
|
github.com/grpc-ecosystem/grpc-gateway 1a03ca3bad1e1ebadaedd3abb76bc58d4ac8143b
|
||||||
github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746
|
github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746
|
||||||
github.com/hashicorp/go-version ac23dc3fea5d1a983c43f6a0f6e2c13f0195d8bd # v1.2.0
|
|
||||||
github.com/hashicorp/golang-lru 0fb14efe8c47ae851c0034ed7a448854d3d34cf3
|
github.com/hashicorp/golang-lru 0fb14efe8c47ae851c0034ed7a448854d3d34cf3
|
||||||
github.com/imdario/mergo 7c29201646fa3de8506f701213473dd407f19646 # v0.3.7
|
github.com/imdario/mergo 7c29201646fa3de8506f701213473dd407f19646 # v0.3.7
|
||||||
github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 # v1.0.0
|
github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 # v1.0.0
|
||||||
|
|
|
@ -1,21 +0,0 @@
|
||||||
The MIT License (MIT)
|
|
||||||
|
|
||||||
Copyright (c) 2014 Alex Saskevich
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
|
||||||
in the Software without restriction, including without limitation the rights
|
|
||||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
copies of the Software, and to permit persons to whom the Software is
|
|
||||||
furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in all
|
|
||||||
copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
SOFTWARE.
|
|
|
@ -1,496 +0,0 @@
|
||||||
govalidator
|
|
||||||
===========
|
|
||||||
[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/asaskevich/govalidator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![GoDoc](https://godoc.org/github.com/asaskevich/govalidator?status.png)](https://godoc.org/github.com/asaskevich/govalidator) [![Coverage Status](https://img.shields.io/coveralls/asaskevich/govalidator.svg)](https://coveralls.io/r/asaskevich/govalidator?branch=master) [![wercker status](https://app.wercker.com/status/1ec990b09ea86c910d5f08b0e02c6043/s "wercker status")](https://app.wercker.com/project/bykey/1ec990b09ea86c910d5f08b0e02c6043)
|
|
||||||
[![Build Status](https://travis-ci.org/asaskevich/govalidator.svg?branch=master)](https://travis-ci.org/asaskevich/govalidator) [![Go Report Card](https://goreportcard.com/badge/github.com/asaskevich/govalidator)](https://goreportcard.com/report/github.com/asaskevich/govalidator) [![GoSearch](http://go-search.org/badge?id=github.com%2Fasaskevich%2Fgovalidator)](http://go-search.org/view?id=github.com%2Fasaskevich%2Fgovalidator) [![Backers on Open Collective](https://opencollective.com/govalidator/backers/badge.svg)](#backers) [![Sponsors on Open Collective](https://opencollective.com/govalidator/sponsors/badge.svg)](#sponsors) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_shield)
|
|
||||||
|
|
||||||
A package of validators and sanitizers for strings, structs and collections. Based on [validator.js](https://github.com/chriso/validator.js).
|
|
||||||
|
|
||||||
#### Installation
|
|
||||||
Make sure that Go is installed on your computer.
|
|
||||||
Type the following command in your terminal:
|
|
||||||
|
|
||||||
go get github.com/asaskevich/govalidator
|
|
||||||
|
|
||||||
or you can get a specific release of the package via `gopkg.in`:
|
|
||||||
|
|
||||||
go get gopkg.in/asaskevich/govalidator.v4
|
|
||||||
|
|
||||||
After that, the package is ready to use.
|
|
||||||
|
|
||||||
|
|
||||||
#### Import package in your project
|
|
||||||
Add the following line to your `*.go` file:
|
|
||||||
```go
|
|
||||||
import "github.com/asaskevich/govalidator"
|
|
||||||
```
|
|
||||||
If you would rather not type the long `govalidator` name everywhere, you can alias the import:
|
|
||||||
```go
|
|
||||||
import (
|
|
||||||
valid "github.com/asaskevich/govalidator"
|
|
||||||
)
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Activate behavior to require all fields have a validation tag by default
|
|
||||||
`SetFieldsRequiredByDefault` causes validation to fail when struct fields do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). A good place to activate this is a package init function or the main() function.
|
|
||||||
|
|
||||||
`SetNilPtrAllowedByRequired` causes validation to pass when struct fields marked by `required` are set to nil. This is disabled by default for consistency, but packages that need to distinguish between `nil` and `zero value` state can use it. If disabled, both `nil` and `zero` values cause validation errors.
|
|
||||||
|
|
||||||
```go
|
|
||||||
import "github.com/asaskevich/govalidator"
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
govalidator.SetFieldsRequiredByDefault(true)
|
|
||||||
}
|
|
||||||
```
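The companion setting described above can be enabled in the same place if your structs need to distinguish `nil` pointers from zero values (illustrative usage only):

```go
import "github.com/asaskevich/govalidator"

func init() {
	govalidator.SetFieldsRequiredByDefault(true)
	// let `required` accept nil pointers while zero values still fail validation
	govalidator.SetNilPtrAllowedByRequired(true)
}
```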
|
|
||||||
|
|
||||||
Here's some code to explain it:
|
|
||||||
```go
|
|
||||||
// this struct definition will fail govalidator.ValidateStruct() (and the field values do not matter):
|
|
||||||
type exampleStruct struct {
|
|
||||||
Name string ``
|
|
||||||
Email string `valid:"email"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// this, however, will only fail when Email is empty or an invalid email address:
|
|
||||||
type exampleStruct2 struct {
|
|
||||||
Name string `valid:"-"`
|
|
||||||
Email string `valid:"email"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// lastly, this will only fail when Email is an invalid email address but not when it's empty:
|
|
||||||
type exampleStruct3 struct {
|
|
||||||
Name string `valid:"-"`
|
|
||||||
Email string `valid:"email,optional"`
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Recent breaking changes (see [#123](https://github.com/asaskevich/govalidator/pull/123))
|
|
||||||
##### Custom validator function signature
|
|
||||||
A context was added as the second parameter; for structs, this is the object being validated, which makes dependent validation possible.
|
|
||||||
```go
|
|
||||||
import "github.com/asaskevich/govalidator"
|
|
||||||
|
|
||||||
// old signature
|
|
||||||
func(i interface{}) bool
|
|
||||||
|
|
||||||
// new signature
|
|
||||||
func(i interface{}, o interface{}) bool
|
|
||||||
```
|
|
||||||
|
|
||||||
##### Adding a custom validator
|
|
||||||
This was changed to prevent data races when accessing custom validators.
|
|
||||||
```go
|
|
||||||
import "github.com/asaskevich/govalidator"
|
|
||||||
|
|
||||||
// before
|
|
||||||
govalidator.CustomTypeTagMap["customByteArrayValidator"] = CustomTypeValidator(func(i interface{}, o interface{}) bool {
|
|
||||||
// ...
|
|
||||||
})
|
|
||||||
|
|
||||||
// after
|
|
||||||
govalidator.CustomTypeTagMap.Set("customByteArrayValidator", CustomTypeValidator(func(i interface{}, o interface{}) bool {
|
|
||||||
// ...
|
|
||||||
}))
|
|
||||||
```
|
|
||||||
|
|
||||||
#### List of functions:
|
|
||||||
```go
|
|
||||||
func Abs(value float64) float64
|
|
||||||
func BlackList(str, chars string) string
|
|
||||||
func ByteLength(str string, params ...string) bool
|
|
||||||
func CamelCaseToUnderscore(str string) string
|
|
||||||
func Contains(str, substring string) bool
|
|
||||||
func Count(array []interface{}, iterator ConditionIterator) int
|
|
||||||
func Each(array []interface{}, iterator Iterator)
|
|
||||||
func ErrorByField(e error, field string) string
|
|
||||||
func ErrorsByField(e error) map[string]string
|
|
||||||
func Filter(array []interface{}, iterator ConditionIterator) []interface{}
|
|
||||||
func Find(array []interface{}, iterator ConditionIterator) interface{}
|
|
||||||
func GetLine(s string, index int) (string, error)
|
|
||||||
func GetLines(s string) []string
|
|
||||||
func InRange(value, left, right float64) bool
|
|
||||||
func IsASCII(str string) bool
|
|
||||||
func IsAlpha(str string) bool
|
|
||||||
func IsAlphanumeric(str string) bool
|
|
||||||
func IsBase64(str string) bool
|
|
||||||
func IsByteLength(str string, min, max int) bool
|
|
||||||
func IsCIDR(str string) bool
|
|
||||||
func IsCreditCard(str string) bool
|
|
||||||
func IsDNSName(str string) bool
|
|
||||||
func IsDataURI(str string) bool
|
|
||||||
func IsDialString(str string) bool
|
|
||||||
func IsDivisibleBy(str, num string) bool
|
|
||||||
func IsEmail(str string) bool
|
|
||||||
func IsFilePath(str string) (bool, int)
|
|
||||||
func IsFloat(str string) bool
|
|
||||||
func IsFullWidth(str string) bool
|
|
||||||
func IsHalfWidth(str string) bool
|
|
||||||
func IsHexadecimal(str string) bool
|
|
||||||
func IsHexcolor(str string) bool
|
|
||||||
func IsHost(str string) bool
|
|
||||||
func IsIP(str string) bool
|
|
||||||
func IsIPv4(str string) bool
|
|
||||||
func IsIPv6(str string) bool
|
|
||||||
func IsISBN(str string, version int) bool
|
|
||||||
func IsISBN10(str string) bool
|
|
||||||
func IsISBN13(str string) bool
|
|
||||||
func IsISO3166Alpha2(str string) bool
|
|
||||||
func IsISO3166Alpha3(str string) bool
|
|
||||||
func IsISO693Alpha2(str string) bool
|
|
||||||
func IsISO693Alpha3b(str string) bool
|
|
||||||
func IsISO4217(str string) bool
|
|
||||||
func IsIn(str string, params ...string) bool
|
|
||||||
func IsInt(str string) bool
|
|
||||||
func IsJSON(str string) bool
|
|
||||||
func IsLatitude(str string) bool
|
|
||||||
func IsLongitude(str string) bool
|
|
||||||
func IsLowerCase(str string) bool
|
|
||||||
func IsMAC(str string) bool
|
|
||||||
func IsMongoID(str string) bool
|
|
||||||
func IsMultibyte(str string) bool
|
|
||||||
func IsNatural(value float64) bool
|
|
||||||
func IsNegative(value float64) bool
|
|
||||||
func IsNonNegative(value float64) bool
|
|
||||||
func IsNonPositive(value float64) bool
|
|
||||||
func IsNull(str string) bool
|
|
||||||
func IsNumeric(str string) bool
|
|
||||||
func IsPort(str string) bool
|
|
||||||
func IsPositive(value float64) bool
|
|
||||||
func IsPrintableASCII(str string) bool
|
|
||||||
func IsRFC3339(str string) bool
|
|
||||||
func IsRFC3339WithoutZone(str string) bool
|
|
||||||
func IsRGBcolor(str string) bool
|
|
||||||
func IsRequestURI(rawurl string) bool
|
|
||||||
func IsRequestURL(rawurl string) bool
|
|
||||||
func IsSSN(str string) bool
|
|
||||||
func IsSemver(str string) bool
|
|
||||||
func IsTime(str string, format string) bool
|
|
||||||
func IsURL(str string) bool
|
|
||||||
func IsUTFDigit(str string) bool
|
|
||||||
func IsUTFLetter(str string) bool
|
|
||||||
func IsUTFLetterNumeric(str string) bool
|
|
||||||
func IsUTFNumeric(str string) bool
|
|
||||||
func IsUUID(str string) bool
|
|
||||||
func IsUUIDv3(str string) bool
|
|
||||||
func IsUUIDv4(str string) bool
|
|
||||||
func IsUUIDv5(str string) bool
|
|
||||||
func IsUpperCase(str string) bool
|
|
||||||
func IsVariableWidth(str string) bool
|
|
||||||
func IsWhole(value float64) bool
|
|
||||||
func LeftTrim(str, chars string) string
|
|
||||||
func Map(array []interface{}, iterator ResultIterator) []interface{}
|
|
||||||
func Matches(str, pattern string) bool
|
|
||||||
func NormalizeEmail(str string) (string, error)
|
|
||||||
func PadBoth(str string, padStr string, padLen int) string
|
|
||||||
func PadLeft(str string, padStr string, padLen int) string
|
|
||||||
func PadRight(str string, padStr string, padLen int) string
|
|
||||||
func Range(str string, params ...string) bool
|
|
||||||
func RemoveTags(s string) string
|
|
||||||
func ReplacePattern(str, pattern, replace string) string
|
|
||||||
func Reverse(s string) string
|
|
||||||
func RightTrim(str, chars string) string
|
|
||||||
func RuneLength(str string, params ...string) bool
|
|
||||||
func SafeFileName(str string) string
|
|
||||||
func SetFieldsRequiredByDefault(value bool)
|
|
||||||
func Sign(value float64) float64
|
|
||||||
func StringLength(str string, params ...string) bool
|
|
||||||
func StringMatches(s string, params ...string) bool
|
|
||||||
func StripLow(str string, keepNewLines bool) string
|
|
||||||
func ToBoolean(str string) (bool, error)
|
|
||||||
func ToFloat(str string) (float64, error)
|
|
||||||
func ToInt(str string) (int64, error)
|
|
||||||
func ToJSON(obj interface{}) (string, error)
|
|
||||||
func ToString(obj interface{}) string
|
|
||||||
func Trim(str, chars string) string
|
|
||||||
func Truncate(str string, length int, ending string) string
|
|
||||||
func UnderscoreToCamelCase(s string) string
|
|
||||||
func ValidateStruct(s interface{}) (bool, error)
|
|
||||||
func WhiteList(str, chars string) string
|
|
||||||
type ConditionIterator
|
|
||||||
type CustomTypeValidator
|
|
||||||
type Error
|
|
||||||
func (e Error) Error() string
|
|
||||||
type Errors
|
|
||||||
func (es Errors) Error() string
|
|
||||||
func (es Errors) Errors() []error
|
|
||||||
type ISO3166Entry
|
|
||||||
type Iterator
|
|
||||||
type ParamValidator
|
|
||||||
type ResultIterator
|
|
||||||
type UnsupportedTypeError
|
|
||||||
func (e *UnsupportedTypeError) Error() string
|
|
||||||
type Validator
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Examples
|
|
||||||
###### IsURL
|
|
||||||
```go
|
|
||||||
println(govalidator.IsURL(`http://user@pass:domain.com/path/page`))
|
|
||||||
```
|
|
||||||
###### ToString
|
|
||||||
```go
|
|
||||||
type User struct {
|
|
||||||
FirstName string
|
|
||||||
LastName string
|
|
||||||
}
|
|
||||||
|
|
||||||
str := govalidator.ToString(&User{"John", "Juan"})
|
|
||||||
println(str)
|
|
||||||
```
|
|
||||||
###### Each, Map, Filter, Count for slices
|
|
||||||
Each iterates over the slice/array and calls Iterator for every item
|
|
||||||
```go
|
|
||||||
data := []interface{}{1, 2, 3, 4, 5}
|
|
||||||
var fn govalidator.Iterator = func(value interface{}, index int) {
|
|
||||||
println(value.(int))
|
|
||||||
}
|
|
||||||
govalidator.Each(data, fn)
|
|
||||||
```
|
|
||||||
```go
|
|
||||||
data := []interface{}{1, 2, 3, 4, 5}
|
|
||||||
var fn govalidator.ResultIterator = func(value interface{}, index int) interface{} {
|
|
||||||
return value.(int) * 3
|
|
||||||
}
|
|
||||||
_ = govalidator.Map(data, fn) // result = []interface{}{3, 6, 9, 12, 15}
|
|
||||||
```
|
|
||||||
```go
|
|
||||||
data := []interface{}{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
|
|
||||||
var fn govalidator.ConditionIterator = func(value interface{}, index int) bool {
|
|
||||||
return value.(int)%2 == 0
|
|
||||||
}
|
|
||||||
_ = govalidator.Filter(data, fn) // result = []interface{}{2, 4, 6, 8, 10}
|
|
||||||
_ = govalidator.Count(data, fn) // result = 5
|
|
||||||
```
|
|
||||||
###### ValidateStruct [#2](https://github.com/asaskevich/govalidator/pull/2)
|
|
||||||
If you want to validate structs, you can add a `valid` tag to any field in your structure. Multiple validators for one field are listed in the same tag, separated by commas. If you want to skip validation, place `-` in your tag. If you need a validator that is not on the list below, you can add one like this:
|
|
||||||
```go
|
|
||||||
govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool {
|
|
||||||
return str == "duck"
|
|
||||||
})
|
|
||||||
```
|
|
||||||
For completely custom validators (interface-based), see below.
|
|
||||||
|
|
||||||
Here is a list of available validators for struct fields (validator - used function):
|
|
||||||
```go
|
|
||||||
"email": IsEmail,
|
|
||||||
"url": IsURL,
|
|
||||||
"dialstring": IsDialString,
|
|
||||||
"requrl": IsRequestURL,
|
|
||||||
"requri": IsRequestURI,
|
|
||||||
"alpha": IsAlpha,
|
|
||||||
"utfletter": IsUTFLetter,
|
|
||||||
"alphanum": IsAlphanumeric,
|
|
||||||
"utfletternum": IsUTFLetterNumeric,
|
|
||||||
"numeric": IsNumeric,
|
|
||||||
"utfnumeric": IsUTFNumeric,
|
|
||||||
"utfdigit": IsUTFDigit,
|
|
||||||
"hexadecimal": IsHexadecimal,
|
|
||||||
"hexcolor": IsHexcolor,
|
|
||||||
"rgbcolor": IsRGBcolor,
|
|
||||||
"lowercase": IsLowerCase,
|
|
||||||
"uppercase": IsUpperCase,
|
|
||||||
"int": IsInt,
|
|
||||||
"float": IsFloat,
|
|
||||||
"null": IsNull,
|
|
||||||
"uuid": IsUUID,
|
|
||||||
"uuidv3": IsUUIDv3,
|
|
||||||
"uuidv4": IsUUIDv4,
|
|
||||||
"uuidv5": IsUUIDv5,
|
|
||||||
"creditcard": IsCreditCard,
|
|
||||||
"isbn10": IsISBN10,
|
|
||||||
"isbn13": IsISBN13,
|
|
||||||
"json": IsJSON,
|
|
||||||
"multibyte": IsMultibyte,
|
|
||||||
"ascii": IsASCII,
|
|
||||||
"printableascii": IsPrintableASCII,
|
|
||||||
"fullwidth": IsFullWidth,
|
|
||||||
"halfwidth": IsHalfWidth,
|
|
||||||
"variablewidth": IsVariableWidth,
|
|
||||||
"base64": IsBase64,
|
|
||||||
"datauri": IsDataURI,
|
|
||||||
"ip": IsIP,
|
|
||||||
"port": IsPort,
|
|
||||||
"ipv4": IsIPv4,
|
|
||||||
"ipv6": IsIPv6,
|
|
||||||
"dns": IsDNSName,
|
|
||||||
"host": IsHost,
|
|
||||||
"mac": IsMAC,
|
|
||||||
"latitude": IsLatitude,
|
|
||||||
"longitude": IsLongitude,
|
|
||||||
"ssn": IsSSN,
|
|
||||||
"semver": IsSemver,
|
|
||||||
"rfc3339": IsRFC3339,
|
|
||||||
"rfc3339WithoutZone": IsRFC3339WithoutZone,
|
|
||||||
"ISO3166Alpha2": IsISO3166Alpha2,
|
|
||||||
"ISO3166Alpha3": IsISO3166Alpha3,
|
|
||||||
```
|
|
||||||
Validators with parameters
|
|
||||||
|
|
||||||
```go
|
|
||||||
"range(min|max)": Range,
|
|
||||||
"length(min|max)": ByteLength,
|
|
||||||
"runelength(min|max)": RuneLength,
|
|
||||||
"matches(pattern)": StringMatches,
|
|
||||||
"in(string1|string2|...|stringN)": IsIn,
|
|
||||||
```
|
|
||||||
|
|
||||||
And here is a small example of usage:
|
|
||||||
```go
|
|
||||||
type Post struct {
|
|
||||||
Title string `valid:"alphanum,required"`
|
|
||||||
Message string `valid:"duck,ascii"`
|
|
||||||
AuthorIP string `valid:"ipv4"`
|
|
||||||
Date string `valid:"-"`
|
|
||||||
}
|
|
||||||
post := &Post{
|
|
||||||
Title: "My Example Post",
|
|
||||||
Message: "duck",
|
|
||||||
AuthorIP: "123.234.54.3",
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add your own struct validation tags
|
|
||||||
govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool {
|
|
||||||
return str == "duck"
|
|
||||||
})
|
|
||||||
|
|
||||||
result, err := govalidator.ValidateStruct(post)
|
|
||||||
if err != nil {
|
|
||||||
println("error: " + err.Error())
|
|
||||||
}
|
|
||||||
println(result)
|
|
||||||
```
|
|
||||||
###### WhiteList
|
|
||||||
```go
|
|
||||||
// Remove all characters from the string except those in the range "a" to "z"
|
|
||||||
println(govalidator.WhiteList("a3a43a5a4a3a2a23a4a5a4a3a4", "a-z") == "aaaaaaaaaaaa")
|
|
||||||
```
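The inverse helper, `BlackList`, removes the listed characters instead, per the function list above (illustrative):

```go
// Remove all characters in the range "0" to "9", keeping everything else
println(govalidator.BlackList("a3a43a5a4a3a2a23a4a5a4a3a4", "0-9") == "aaaaaaaaaaaa")
```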
|
|
||||||
|
|
||||||
###### Custom validation functions
|
|
||||||
Custom validation using your own domain specific validators is also available - here's an example of how to use it:
|
|
||||||
```go
|
|
||||||
import "github.com/asaskevich/govalidator"
|
|
||||||
|
|
||||||
type CustomByteArray [6]byte // custom types are supported and can be validated
|
|
||||||
|
|
||||||
type StructWithCustomByteArray struct {
|
|
||||||
ID CustomByteArray `valid:"customByteArrayValidator,customMinLengthValidator"` // multiple custom validators are possible as well and will be evaluated in sequence
|
|
||||||
Email string `valid:"email"`
|
|
||||||
CustomMinLength int `valid:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
govalidator.CustomTypeTagMap.Set("customByteArrayValidator", CustomTypeValidator(func(i interface{}, context interface{}) bool {
|
|
||||||
switch v := context.(type) { // you can type switch on the context interface being validated
|
|
||||||
case StructWithCustomByteArray:
|
|
||||||
// you can check and validate against some other field in the context,
|
|
||||||
// return early or not validate against the context at all – your choice
|
|
||||||
case SomeOtherType:
|
|
||||||
// ...
|
|
||||||
default:
|
|
||||||
// expecting some other type? Throw/panic here or continue
|
|
||||||
}
|
|
||||||
|
|
||||||
switch v := i.(type) { // type switch on the struct field being validated
|
|
||||||
case CustomByteArray:
|
|
||||||
for _, e := range v { // this validator checks that the byte array is not empty, i.e. not all zeroes
|
|
||||||
if e != 0 {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}))
|
|
||||||
govalidator.CustomTypeTagMap.Set("customMinLengthValidator", CustomTypeValidator(func(i interface{}, context interface{}) bool {
|
|
||||||
switch v := context.(type) { // this validates a field against the value in another field, i.e. dependent validation
|
|
||||||
case StructWithCustomByteArray:
|
|
||||||
return len(v.ID) >= v.CustomMinLength
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}))
|
|
||||||
```
|
|
||||||
|
|
||||||
###### Custom error messages
|
|
||||||
Custom error messages are supported via annotations by adding the `~` separator - here's an example of how to use it:
|
|
||||||
```go
|
|
||||||
type Ticket struct {
|
|
||||||
Id int64 `json:"id"`
|
|
||||||
FirstName string `json:"firstname" valid:"required~First name is blank"`
|
|
||||||
}
|
|
||||||
```
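A short usage sketch showing how the custom message surfaces; the `Error`/`Errors` types later in this diff return the custom message verbatim when one exists (illustration only):

```go
ticket := Ticket{Id: 1} // FirstName left empty, so the `required` check fails
if _, err := govalidator.ValidateStruct(ticket); err != nil {
	println(err.Error()) // "First name is blank" for this single failing field
}
```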
|
|
||||||
|
|
||||||
#### Notes
|
|
||||||
Documentation is available here: [godoc.org](https://godoc.org/github.com/asaskevich/govalidator).
|
|
||||||
Full information about code coverage is also available here: [govalidator on gocover.io](http://gocover.io/github.com/asaskevich/govalidator).
|
|
||||||
|
|
||||||
#### Support
|
|
||||||
If you do have a contribution to the package, feel free to create a Pull Request or an Issue.
|
|
||||||
|
|
||||||
#### What to contribute
|
|
||||||
If you don't know what to work on, here are some features and functions that still need to be done
|
|
||||||
|
|
||||||
- [ ] Refactor code
|
|
||||||
- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check
|
|
||||||
- [ ] Create an up-to-date list of contributors and of the projects that currently use this package
|
|
||||||
- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues)
|
|
||||||
- [ ] Update the [list of functions](https://github.com/asaskevich/govalidator#list-of-functions)
|
|
||||||
- [ ] Update the [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) available for `ValidateStruct` and add new ones
|
|
||||||
- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC`, etc.
|
|
||||||
- [ ] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224)
|
|
||||||
- [ ] Implement fuzzing testing
|
|
||||||
- [ ] Implement some struct/map/array utilities
|
|
||||||
- [ ] Implement map/array validation
|
|
||||||
- [ ] Implement benchmarking
|
|
||||||
- [ ] Implement batch of examples
|
|
||||||
- [ ] Look at forks for new features and fixes
|
|
||||||
|
|
||||||
#### Advice
|
|
||||||
Feel free to create what you want, but keep in mind when you implement new features:
|
|
||||||
- Code must be clear and readable, and the names of variables/constants must clearly describe what they represent
|
|
||||||
- Public functions must be documented in the source file and added to the list of available functions in README.md
|
|
||||||
- There must be unit tests for any new functions and improvements
|
|
||||||
|
|
||||||
## Credits
|
|
||||||
### Contributors
|
|
||||||
|
|
||||||
This project exists thanks to all the people who contribute. [[Contribute](CONTRIBUTING.md)].
|
|
||||||
|
|
||||||
#### Special thanks to [contributors](https://github.com/asaskevich/govalidator/graphs/contributors)
|
|
||||||
* [Daniel Lohse](https://github.com/annismckenzie)
|
|
||||||
* [Attila Oláh](https://github.com/attilaolah)
|
|
||||||
* [Daniel Korner](https://github.com/Dadie)
|
|
||||||
* [Steven Wilkin](https://github.com/stevenwilkin)
|
|
||||||
* [Deiwin Sarjas](https://github.com/deiwin)
|
|
||||||
* [Noah Shibley](https://github.com/slugmobile)
|
|
||||||
* [Nathan Davies](https://github.com/nathj07)
|
|
||||||
* [Matt Sanford](https://github.com/mzsanford)
|
|
||||||
* [Simon ccl1115](https://github.com/ccl1115)
|
|
||||||
|
|
||||||
<a href="graphs/contributors"><img src="https://opencollective.com/govalidator/contributors.svg?width=890" /></a>
|
|
||||||
|
|
||||||
|
|
||||||
### Backers
|
|
||||||
|
|
||||||
Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/govalidator#backer)]
|
|
||||||
|
|
||||||
<a href="https://opencollective.com/govalidator#backers" target="_blank"><img src="https://opencollective.com/govalidator/backers.svg?width=890"></a>
|
|
||||||
|
|
||||||
|
|
||||||
### Sponsors
|
|
||||||
|
|
||||||
Support this project by becoming a sponsor. Your logo will show up here with a link to your website. [[Become a sponsor](https://opencollective.com/govalidator#sponsor)]
|
|
||||||
|
|
||||||
<a href="https://opencollective.com/govalidator/sponsor/0/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/0/avatar.svg"></a>
|
|
||||||
<a href="https://opencollective.com/govalidator/sponsor/1/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/1/avatar.svg"></a>
|
|
||||||
<a href="https://opencollective.com/govalidator/sponsor/2/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/2/avatar.svg"></a>
|
|
||||||
<a href="https://opencollective.com/govalidator/sponsor/3/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/3/avatar.svg"></a>
|
|
||||||
<a href="https://opencollective.com/govalidator/sponsor/4/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/4/avatar.svg"></a>
|
|
||||||
<a href="https://opencollective.com/govalidator/sponsor/5/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/5/avatar.svg"></a>
|
|
||||||
<a href="https://opencollective.com/govalidator/sponsor/6/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/6/avatar.svg"></a>
|
|
||||||
<a href="https://opencollective.com/govalidator/sponsor/7/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/7/avatar.svg"></a>
|
|
||||||
<a href="https://opencollective.com/govalidator/sponsor/8/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/8/avatar.svg"></a>
|
|
||||||
<a href="https://opencollective.com/govalidator/sponsor/9/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/9/avatar.svg"></a>
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
## License
|
|
||||||
[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_large)
|
|
|
@ -1,58 +0,0 @@
|
||||||
package govalidator
|
|
||||||
|
|
||||||
// Iterator is a function that accepts an element of a slice/array and its index
|
|
||||||
type Iterator func(interface{}, int)
|
|
||||||
|
|
||||||
// ResultIterator is a function that accepts an element of a slice/array and its index and returns a result
|
|
||||||
type ResultIterator func(interface{}, int) interface{}
|
|
||||||
|
|
||||||
// ConditionIterator is a function that accepts an element of a slice/array and its index and returns a boolean
|
|
||||||
type ConditionIterator func(interface{}, int) bool
|
|
||||||
|
|
||||||
// Each iterates over the slice and applies the Iterator to every item
|
|
||||||
func Each(array []interface{}, iterator Iterator) {
|
|
||||||
for index, data := range array {
|
|
||||||
iterator(data, index)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Map iterates over the slice and applies the ResultIterator to every item. Returns a new slice with the results.
|
|
||||||
func Map(array []interface{}, iterator ResultIterator) []interface{} {
|
|
||||||
var result = make([]interface{}, len(array))
|
|
||||||
for index, data := range array {
|
|
||||||
result[index] = iterator(data, index)
|
|
||||||
}
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
// Find iterates over the slice and applies the ConditionIterator to every item. Returns the first item that satisfies the ConditionIterator, or nil otherwise.
|
|
||||||
func Find(array []interface{}, iterator ConditionIterator) interface{} {
|
|
||||||
for index, data := range array {
|
|
||||||
if iterator(data, index) {
|
|
||||||
return data
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Filter iterates over the slice and applies the ConditionIterator to every item. Returns a new slice with the matching items.
|
|
||||||
func Filter(array []interface{}, iterator ConditionIterator) []interface{} {
|
|
||||||
var result = make([]interface{}, 0)
|
|
||||||
for index, data := range array {
|
|
||||||
if iterator(data, index) {
|
|
||||||
result = append(result, data)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
// Count iterates over the slice and applies the ConditionIterator to every item. Returns the number of items that satisfy the ConditionIterator.
|
|
||||||
func Count(array []interface{}, iterator ConditionIterator) int {
|
|
||||||
count := 0
|
|
||||||
for index, data := range array {
|
|
||||||
if iterator(data, index) {
|
|
||||||
count = count + 1
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return count
|
|
||||||
}
|
|
|
@ -1,64 +0,0 @@
|
||||||
package govalidator
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
"strconv"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ToString converts the input to a string.
|
|
||||||
func ToString(obj interface{}) string {
|
|
||||||
res := fmt.Sprintf("%v", obj)
|
|
||||||
return string(res)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToJSON converts the input to a valid JSON string
|
|
||||||
func ToJSON(obj interface{}) (string, error) {
|
|
||||||
res, err := json.Marshal(obj)
|
|
||||||
if err != nil {
|
|
||||||
res = []byte("")
|
|
||||||
}
|
|
||||||
return string(res), err
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToFloat converts the input string to a float, or 0.0 if the input is not a float.
|
|
||||||
func ToFloat(str string) (float64, error) {
|
|
||||||
res, err := strconv.ParseFloat(str, 64)
|
|
||||||
if err != nil {
|
|
||||||
res = 0.0
|
|
||||||
}
|
|
||||||
return res, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToInt converts the input string or any integer type to an int64, or 0 if the input is not an integer.
|
|
||||||
func ToInt(value interface{}) (res int64, err error) {
|
|
||||||
val := reflect.ValueOf(value)
|
|
||||||
|
|
||||||
switch value.(type) {
|
|
||||||
case int, int8, int16, int32, int64:
|
|
||||||
res = val.Int()
|
|
||||||
case uint, uint8, uint16, uint32, uint64:
|
|
||||||
res = int64(val.Uint())
|
|
||||||
case string:
|
|
||||||
if IsInt(val.String()) {
|
|
||||||
res, err = strconv.ParseInt(val.String(), 0, 64)
|
|
||||||
if err != nil {
|
|
||||||
res = 0
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
err = fmt.Errorf("math: square root of negative number %g", value)
|
|
||||||
res = 0
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
err = fmt.Errorf("math: square root of negative number %g", value)
|
|
||||||
res = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToBoolean converts the input string to a boolean.
|
|
||||||
func ToBoolean(str string) (bool, error) {
|
|
||||||
return strconv.ParseBool(str)
|
|
||||||
}
|
|
|
@ -1,43 +0,0 @@
|
||||||
package govalidator
|
|
||||||
|
|
||||||
import "strings"
|
|
||||||
|
|
||||||
// Errors is an array of multiple errors and conforms to the error interface.
|
|
||||||
type Errors []error
|
|
||||||
|
|
||||||
// Errors returns itself.
|
|
||||||
func (es Errors) Errors() []error {
|
|
||||||
return es
|
|
||||||
}
|
|
||||||
|
|
||||||
func (es Errors) Error() string {
|
|
||||||
var errs []string
|
|
||||||
for _, e := range es {
|
|
||||||
errs = append(errs, e.Error())
|
|
||||||
}
|
|
||||||
return strings.Join(errs, ";")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error encapsulates a name, an error and whether there's a custom error message or not.
|
|
||||||
type Error struct {
|
|
||||||
Name string
|
|
||||||
Err error
|
|
||||||
CustomErrorMessageExists bool
|
|
||||||
|
|
||||||
// Validator indicates the name of the validator that failed
|
|
||||||
Validator string
|
|
||||||
Path []string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e Error) Error() string {
|
|
||||||
if e.CustomErrorMessageExists {
|
|
||||||
return e.Err.Error()
|
|
||||||
}
|
|
||||||
|
|
||||||
errName := e.Name
|
|
||||||
if len(e.Path) > 0 {
|
|
||||||
errName = strings.Join(append(e.Path, e.Name), ".")
|
|
||||||
}
|
|
||||||
|
|
||||||
return errName + ": " + e.Err.Error()
|
|
||||||
}
|
|
|
@ -1,97 +0,0 @@
|
||||||
package govalidator
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math"
|
|
||||||
"reflect"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Abs returns absolute value of number
|
|
||||||
func Abs(value float64) float64 {
|
|
||||||
return math.Abs(value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sign returns signum of number: 1 in case of value > 0, -1 in case of value < 0, 0 otherwise
|
|
||||||
func Sign(value float64) float64 {
|
|
||||||
if value > 0 {
|
|
||||||
return 1
|
|
||||||
} else if value < 0 {
|
|
||||||
return -1
|
|
||||||
} else {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsNegative returns true if value < 0
|
|
||||||
func IsNegative(value float64) bool {
|
|
||||||
return value < 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsPositive returns true if value > 0
|
|
||||||
func IsPositive(value float64) bool {
|
|
||||||
return value > 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsNonNegative returns true if value >= 0
|
|
||||||
func IsNonNegative(value float64) bool {
|
|
||||||
return value >= 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsNonPositive returns true if value <= 0
|
|
||||||
func IsNonPositive(value float64) bool {
|
|
||||||
return value <= 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// InRangeInt returns true if value lies between left and right border
|
|
||||||
func InRangeInt(value, left, right interface{}) bool {
|
|
||||||
value64, _ := ToInt(value)
|
|
||||||
left64, _ := ToInt(left)
|
|
||||||
right64, _ := ToInt(right)
|
|
||||||
if left64 > right64 {
|
|
||||||
left64, right64 = right64, left64
|
|
||||||
}
|
|
||||||
return value64 >= left64 && value64 <= right64
|
|
||||||
}
|
|
||||||
|
|
||||||
// InRangeFloat32 returns true if value lies between left and right border
|
|
||||||
func InRangeFloat32(value, left, right float32) bool {
|
|
||||||
if left > right {
|
|
||||||
left, right = right, left
|
|
||||||
}
|
|
||||||
return value >= left && value <= right
|
|
||||||
}
|
|
||||||
|
|
||||||
// InRangeFloat64 returns true if value lies between left and right border
|
|
||||||
func InRangeFloat64(value, left, right float64) bool {
|
|
||||||
if left > right {
|
|
||||||
left, right = right, left
|
|
||||||
}
|
|
||||||
return value >= left && value <= right
|
|
||||||
}
|
|
||||||
|
|
||||||
// InRange returns true if value lies between left and right border; it handles int, float32 and float64, and all three arguments must be of the same type
|
|
||||||
func InRange(value interface{}, left interface{}, right interface{}) bool {
|
|
||||||
|
|
||||||
reflectValue := reflect.TypeOf(value).Kind()
|
|
||||||
reflectLeft := reflect.TypeOf(left).Kind()
|
|
||||||
reflectRight := reflect.TypeOf(right).Kind()
|
|
||||||
|
|
||||||
if reflectValue == reflect.Int && reflectLeft == reflect.Int && reflectRight == reflect.Int {
|
|
||||||
return InRangeInt(value.(int), left.(int), right.(int))
|
|
||||||
} else if reflectValue == reflect.Float32 && reflectLeft == reflect.Float32 && reflectRight == reflect.Float32 {
|
|
||||||
return InRangeFloat32(value.(float32), left.(float32), right.(float32))
|
|
||||||
} else if reflectValue == reflect.Float64 && reflectLeft == reflect.Float64 && reflectRight == reflect.Float64 {
|
|
||||||
return InRangeFloat64(value.(float64), left.(float64), right.(float64))
|
|
||||||
} else {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
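// Illustrative usage (not part of the original file): because of the reflect.Kind
// checks above, all three arguments must share the same Go type:
//
//	InRange(5, 1, 10)       // true  (all int)
//	InRange(2.5, 1.0, 3.0)  // true  (all float64)
//	InRange(5.0, 1, 10)     // false (float64 mixed with int)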
|
|
||||||
|
|
||||||
// IsWhole returns true if value is whole number
|
|
||||||
func IsWhole(value float64) bool {
|
|
||||||
return math.Remainder(value, 1) == 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsNatural returns true if value is natural number (positive and whole)
|
|
||||||
func IsNatural(value float64) bool {
|
|
||||||
return IsWhole(value) && IsPositive(value)
|
|
||||||
}
|
|
|
@ -1,101 +0,0 @@
|
||||||
package govalidator
|
|
||||||
|
|
||||||
import "regexp"
|
|
||||||
|
|
||||||
// Basic regular expressions for validating strings
|
|
||||||
const (
|
|
||||||
Email string = "^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$"
|
|
||||||
CreditCard string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11})$"
|
|
||||||
ISBN10 string = "^(?:[0-9]{9}X|[0-9]{10})$"
|
|
||||||
ISBN13 string = "^(?:[0-9]{13})$"
|
|
||||||
UUID3 string = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$"
|
|
||||||
UUID4 string = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
|
|
||||||
UUID5 string = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
|
|
||||||
UUID string = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$"
|
|
||||||
Alpha string = "^[a-zA-Z]+$"
|
|
||||||
Alphanumeric string = "^[a-zA-Z0-9]+$"
|
|
||||||
Numeric string = "^[0-9]+$"
|
|
||||||
Int string = "^(?:[-+]?(?:0|[1-9][0-9]*))$"
|
|
||||||
Float string = "^(?:[-+]?(?:[0-9]+))?(?:\\.[0-9]*)?(?:[eE][\\+\\-]?(?:[0-9]+))?$"
|
|
||||||
Hexadecimal string = "^[0-9a-fA-F]+$"
|
|
||||||
Hexcolor string = "^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$"
|
|
||||||
RGBcolor string = "^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$"
|
|
||||||
ASCII string = "^[\x00-\x7F]+$"
|
|
||||||
Multibyte string = "[^\x00-\x7F]"
|
|
||||||
FullWidth string = "[^\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]"
|
|
||||||
HalfWidth string = "[\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]"
|
|
||||||
Base64 string = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$"
|
|
||||||
PrintableASCII string = "^[\x20-\x7E]+$"
|
|
||||||
DataURI string = "^data:.+\\/(.+);base64$"
|
|
||||||
Latitude string = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$"
|
|
||||||
Longitude string = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$"
|
|
||||||
DNSName string = `^([a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*[\._]?$`
|
|
||||||
IP string = `(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))`
|
|
||||||
URLSchema string = `((ftp|tcp|udp|wss?|https?):\/\/)`
|
|
||||||
URLUsername string = `(\S+(:\S*)?@)`
|
|
||||||
URLPath string = `((\/|\?|#)[^\s]*)`
|
|
||||||
URLPort string = `(:(\d{1,5}))`
|
|
||||||
URLIP string = `([1-9]\d?|1\d\d|2[01]\d|22[0-3])(\.(1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-4]))`
|
|
||||||
URLSubdomain string = `((www\.)|([a-zA-Z0-9]+([-_\.]?[a-zA-Z0-9])*[a-zA-Z0-9]\.[a-zA-Z0-9]+))`
|
|
||||||
URL string = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-_]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))\.?` + URLPort + `?` + URLPath + `?$`
|
|
||||||
SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$`
|
|
||||||
WinPath string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$`
|
|
||||||
UnixPath string = `^(/[^/\x00]*)+/?$`
|
|
||||||
Semver string = "^v?(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)(-(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(\\.(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*)?(\\+[0-9a-zA-Z-]+(\\.[0-9a-zA-Z-]+)*)?$"
|
|
||||||
tagName string = "valid"
|
|
||||||
hasLowerCase string = ".*[[:lower:]]"
|
|
||||||
hasUpperCase string = ".*[[:upper:]]"
|
|
||||||
hasWhitespace string = ".*[[:space:]]"
|
|
||||||
hasWhitespaceOnly string = "^[[:space:]]+$"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Used by IsFilePath func
|
|
||||||
const (
|
|
||||||
// Unknown is unresolved OS type
|
|
||||||
Unknown = iota
|
|
||||||
// Win is Windows type
|
|
||||||
Win
|
|
||||||
// Unix is *nix OS types
|
|
||||||
Unix
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
userRegexp = regexp.MustCompile("^[a-zA-Z0-9!#$%&'*+/=?^_`{|}~.-]+$")
|
|
||||||
hostRegexp = regexp.MustCompile("^[^\\s]+\\.[^\\s]+$")
|
|
||||||
userDotRegexp = regexp.MustCompile("(^[.]{1})|([.]{1}$)|([.]{2,})")
|
|
||||||
rxEmail = regexp.MustCompile(Email)
|
|
||||||
rxCreditCard = regexp.MustCompile(CreditCard)
|
|
||||||
rxISBN10 = regexp.MustCompile(ISBN10)
|
|
||||||
rxISBN13 = regexp.MustCompile(ISBN13)
|
|
||||||
rxUUID3 = regexp.MustCompile(UUID3)
|
|
||||||
rxUUID4 = regexp.MustCompile(UUID4)
|
|
||||||
rxUUID5 = regexp.MustCompile(UUID5)
|
|
||||||
rxUUID = regexp.MustCompile(UUID)
|
|
||||||
rxAlpha = regexp.MustCompile(Alpha)
|
|
||||||
rxAlphanumeric = regexp.MustCompile(Alphanumeric)
|
|
||||||
rxNumeric = regexp.MustCompile(Numeric)
|
|
||||||
rxInt = regexp.MustCompile(Int)
|
|
||||||
rxFloat = regexp.MustCompile(Float)
|
|
||||||
rxHexadecimal = regexp.MustCompile(Hexadecimal)
|
|
||||||
rxHexcolor = regexp.MustCompile(Hexcolor)
|
|
||||||
rxRGBcolor = regexp.MustCompile(RGBcolor)
|
|
||||||
rxASCII = regexp.MustCompile(ASCII)
|
|
||||||
rxPrintableASCII = regexp.MustCompile(PrintableASCII)
|
|
||||||
rxMultibyte = regexp.MustCompile(Multibyte)
|
|
||||||
rxFullWidth = regexp.MustCompile(FullWidth)
|
|
||||||
rxHalfWidth = regexp.MustCompile(HalfWidth)
|
|
||||||
rxBase64 = regexp.MustCompile(Base64)
|
|
||||||
rxDataURI = regexp.MustCompile(DataURI)
|
|
||||||
rxLatitude = regexp.MustCompile(Latitude)
|
|
||||||
rxLongitude = regexp.MustCompile(Longitude)
|
|
||||||
rxDNSName = regexp.MustCompile(DNSName)
|
|
||||||
rxURL = regexp.MustCompile(URL)
|
|
||||||
rxSSN = regexp.MustCompile(SSN)
|
|
||||||
rxWinPath = regexp.MustCompile(WinPath)
|
|
||||||
rxUnixPath = regexp.MustCompile(UnixPath)
|
|
||||||
rxSemver = regexp.MustCompile(Semver)
|
|
||||||
rxHasLowerCase = regexp.MustCompile(hasLowerCase)
|
|
||||||
rxHasUpperCase = regexp.MustCompile(hasUpperCase)
|
|
||||||
rxHasWhitespace = regexp.MustCompile(hasWhitespace)
|
|
||||||
rxHasWhitespaceOnly = regexp.MustCompile(hasWhitespaceOnly)
|
|
||||||
)
|
|
|
@ -1,636 +0,0 @@
|
||||||
package govalidator
|
|
||||||
|
|
||||||
import (
|
|
||||||
"reflect"
|
|
||||||
"regexp"
|
|
||||||
"sort"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Validator is a wrapper for a validator function that returns bool and accepts string.
|
|
||||||
type Validator func(str string) bool
|
|
||||||
|
|
||||||
// CustomTypeValidator is a wrapper for validator functions that returns bool and accepts any type.
|
|
||||||
// The second parameter should be the context (in the case of validating a struct: the whole object being validated).
|
|
||||||
type CustomTypeValidator func(i interface{}, o interface{}) bool
|
|
||||||
|
|
||||||
// ParamValidator is a wrapper for validator functions that accepts additional parameters.
|
|
||||||
type ParamValidator func(str string, params ...string) bool
|
|
||||||
type tagOptionsMap map[string]tagOption
|
|
||||||
|
|
||||||
func (t tagOptionsMap) orderedKeys() []string {
|
|
||||||
var keys []string
|
|
||||||
for k := range t {
|
|
||||||
keys = append(keys, k)
|
|
||||||
}
|
|
||||||
|
|
||||||
sort.Slice(keys, func(a, b int) bool {
|
|
||||||
return t[keys[a]].order < t[keys[b]].order
|
|
||||||
})
|
|
||||||
|
|
||||||
return keys
|
|
||||||
}
|
|
||||||
|
|
||||||
type tagOption struct {
|
|
||||||
name string
|
|
||||||
customErrorMessage string
|
|
||||||
order int
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnsupportedTypeError is a wrapper for reflect.Type
|
|
||||||
type UnsupportedTypeError struct {
|
|
||||||
Type reflect.Type
|
|
||||||
}
|
|
||||||
|
|
||||||
// stringValues is a slice of reflect.Value holding *reflect.StringValue.
|
|
||||||
// It implements the methods to sort by string.
|
|
||||||
type stringValues []reflect.Value
|
|
||||||
|
|
||||||
// ParamTagMap is a map of validator functions that accept additional parameters
|
|
||||||
var ParamTagMap = map[string]ParamValidator{
|
|
||||||
"length": ByteLength,
|
|
||||||
"range": Range,
|
|
||||||
"runelength": RuneLength,
|
|
||||||
"stringlength": StringLength,
|
|
||||||
"matches": StringMatches,
|
|
||||||
"in": isInRaw,
|
|
||||||
"rsapub": IsRsaPub,
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParamTagRegexMap maps param tags to their respective regexes.
|
|
||||||
var ParamTagRegexMap = map[string]*regexp.Regexp{
|
|
||||||
"range": regexp.MustCompile("^range\\((\\d+)\\|(\\d+)\\)$"),
|
|
||||||
"length": regexp.MustCompile("^length\\((\\d+)\\|(\\d+)\\)$"),
|
|
||||||
"runelength": regexp.MustCompile("^runelength\\((\\d+)\\|(\\d+)\\)$"),
|
|
||||||
"stringlength": regexp.MustCompile("^stringlength\\((\\d+)\\|(\\d+)\\)$"),
|
|
||||||
"in": regexp.MustCompile(`^in\((.*)\)`),
|
|
||||||
"matches": regexp.MustCompile(`^matches\((.+)\)$`),
|
|
||||||
"rsapub": regexp.MustCompile("^rsapub\\((\\d+)\\)$"),
|
|
||||||
}
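// Illustrative note (not part of the original file): both maps are exported, so a
// parameterized validator can in principle be registered from user code. Sketch for a
// hypothetical "maxlen(N)" tag, using the ToInt helper shown earlier in this diff:
//
//	govalidator.ParamTagMap["maxlen"] = func(str string, params ...string) bool {
//		n, _ := govalidator.ToInt(params[0])
//		return int64(len(str)) <= n
//	}
//	govalidator.ParamTagRegexMap["maxlen"] = regexp.MustCompile(`^maxlen\((\d+)\)$`)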
|
|
||||||
|
|
||||||
type customTypeTagMap struct {
|
|
||||||
validators map[string]CustomTypeValidator
|
|
||||||
|
|
||||||
sync.RWMutex
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tm *customTypeTagMap) Get(name string) (CustomTypeValidator, bool) {
|
|
||||||
tm.RLock()
|
|
||||||
defer tm.RUnlock()
|
|
||||||
v, ok := tm.validators[name]
|
|
||||||
return v, ok
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tm *customTypeTagMap) Set(name string, ctv CustomTypeValidator) {
|
|
||||||
tm.Lock()
|
|
||||||
defer tm.Unlock()
|
|
||||||
tm.validators[name] = ctv
|
|
||||||
}
|
|
||||||
|
|
||||||
// CustomTypeTagMap is a map of functions that can be used as tags for ValidateStruct function.
|
|
||||||
// Use this to validate compound or custom types that need to be handled as a whole, e.g.
|
|
||||||
// `type UUID [16]byte` (this would be handled as an array of bytes).
|
|
||||||
var CustomTypeTagMap = &customTypeTagMap{validators: make(map[string]CustomTypeValidator)}
|
|
||||||
|
|
||||||
// TagMap is a map of functions that can be used as tags for the ValidateStruct function.
var TagMap = map[string]Validator{
    "email":              IsEmail,
    "url":                IsURL,
    "dialstring":         IsDialString,
    "requrl":             IsRequestURL,
    "requri":             IsRequestURI,
    "alpha":              IsAlpha,
    "utfletter":          IsUTFLetter,
    "alphanum":           IsAlphanumeric,
    "utfletternum":       IsUTFLetterNumeric,
    "numeric":            IsNumeric,
    "utfnumeric":         IsUTFNumeric,
    "utfdigit":           IsUTFDigit,
    "hexadecimal":        IsHexadecimal,
    "hexcolor":           IsHexcolor,
    "rgbcolor":           IsRGBcolor,
    "lowercase":          IsLowerCase,
    "uppercase":          IsUpperCase,
    "int":                IsInt,
    "float":              IsFloat,
    "null":               IsNull,
    "uuid":               IsUUID,
    "uuidv3":             IsUUIDv3,
    "uuidv4":             IsUUIDv4,
    "uuidv5":             IsUUIDv5,
    "creditcard":         IsCreditCard,
    "isbn10":             IsISBN10,
    "isbn13":             IsISBN13,
    "json":               IsJSON,
    "multibyte":          IsMultibyte,
    "ascii":              IsASCII,
    "printableascii":     IsPrintableASCII,
    "fullwidth":          IsFullWidth,
    "halfwidth":          IsHalfWidth,
    "variablewidth":      IsVariableWidth,
    "base64":             IsBase64,
    "datauri":            IsDataURI,
    "ip":                 IsIP,
    "port":               IsPort,
    "ipv4":               IsIPv4,
    "ipv6":               IsIPv6,
    "dns":                IsDNSName,
    "host":               IsHost,
    "mac":                IsMAC,
    "latitude":           IsLatitude,
    "longitude":          IsLongitude,
    "ssn":                IsSSN,
    "semver":             IsSemver,
    "rfc3339":            IsRFC3339,
    "rfc3339WithoutZone": IsRFC3339WithoutZone,
    "ISO3166Alpha2":      IsISO3166Alpha2,
    "ISO3166Alpha3":      IsISO3166Alpha3,
    "ISO4217":            IsISO4217,
}
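Purely illustrative, not part of this diff: how a caller can reach the registered validators through these maps. The Validator signature is not shown in this hunk; it is assumed to be the single-argument counterpart of ParamValidator, func(str string) bool.

package main

import (
    "fmt"

    "github.com/asaskevich/govalidator" // import path assumed
)

func main() {
    // Single-argument validators registered in TagMap (assumed Validator = func(string) bool).
    fmt.Println(govalidator.TagMap["email"]("user@example.com")) // true
    fmt.Println(govalidator.TagMap["ipv4"]("256.1.1.1"))         // false

    // Parameterized validators from ParamTagMap; the params correspond to the
    // capture groups of the matching ParamTagRegexMap pattern, e.g. range(1|10).
    fmt.Println(govalidator.ParamTagMap["range"]("7", "1", "10"))         // true
    fmt.Println(govalidator.ParamTagMap["stringlength"]("abc", "1", "2")) // false
}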
|
|
||||||
|
|
||||||
// ISO3166Entry stores country codes
type ISO3166Entry struct {
    EnglishShortName string
    FrenchShortName  string
    Alpha2Code       string
    Alpha3Code       string
    Numeric          string
}
|
|
||||||
|
|
||||||
// ISO3166List based on https://www.iso.org/obp/ui/#search/code/ Code Type "Officially Assigned Codes"
|
|
||||||
var ISO3166List = []ISO3166Entry{
|
|
||||||
{"Afghanistan", "Afghanistan (l')", "AF", "AFG", "004"},
|
|
||||||
{"Albania", "Albanie (l')", "AL", "ALB", "008"},
|
|
||||||
{"Antarctica", "Antarctique (l')", "AQ", "ATA", "010"},
|
|
||||||
{"Algeria", "Algérie (l')", "DZ", "DZA", "012"},
|
|
||||||
{"American Samoa", "Samoa américaines (les)", "AS", "ASM", "016"},
|
|
||||||
{"Andorra", "Andorre (l')", "AD", "AND", "020"},
|
|
||||||
{"Angola", "Angola (l')", "AO", "AGO", "024"},
|
|
||||||
{"Antigua and Barbuda", "Antigua-et-Barbuda", "AG", "ATG", "028"},
|
|
||||||
{"Azerbaijan", "Azerbaïdjan (l')", "AZ", "AZE", "031"},
|
|
||||||
{"Argentina", "Argentine (l')", "AR", "ARG", "032"},
|
|
||||||
{"Australia", "Australie (l')", "AU", "AUS", "036"},
|
|
||||||
{"Austria", "Autriche (l')", "AT", "AUT", "040"},
|
|
||||||
{"Bahamas (the)", "Bahamas (les)", "BS", "BHS", "044"},
|
|
||||||
{"Bahrain", "Bahreïn", "BH", "BHR", "048"},
|
|
||||||
{"Bangladesh", "Bangladesh (le)", "BD", "BGD", "050"},
|
|
||||||
{"Armenia", "Arménie (l')", "AM", "ARM", "051"},
|
|
||||||
{"Barbados", "Barbade (la)", "BB", "BRB", "052"},
|
|
||||||
{"Belgium", "Belgique (la)", "BE", "BEL", "056"},
|
|
||||||
{"Bermuda", "Bermudes (les)", "BM", "BMU", "060"},
|
|
||||||
{"Bhutan", "Bhoutan (le)", "BT", "BTN", "064"},
|
|
||||||
{"Bolivia (Plurinational State of)", "Bolivie (État plurinational de)", "BO", "BOL", "068"},
|
|
||||||
{"Bosnia and Herzegovina", "Bosnie-Herzégovine (la)", "BA", "BIH", "070"},
|
|
||||||
{"Botswana", "Botswana (le)", "BW", "BWA", "072"},
|
|
||||||
{"Bouvet Island", "Bouvet (l'Île)", "BV", "BVT", "074"},
|
|
||||||
{"Brazil", "Brésil (le)", "BR", "BRA", "076"},
|
|
||||||
{"Belize", "Belize (le)", "BZ", "BLZ", "084"},
|
|
||||||
{"British Indian Ocean Territory (the)", "Indien (le Territoire britannique de l'océan)", "IO", "IOT", "086"},
|
|
||||||
{"Solomon Islands", "Salomon (Îles)", "SB", "SLB", "090"},
|
|
||||||
{"Virgin Islands (British)", "Vierges britanniques (les Îles)", "VG", "VGB", "092"},
|
|
||||||
{"Brunei Darussalam", "Brunéi Darussalam (le)", "BN", "BRN", "096"},
|
|
||||||
{"Bulgaria", "Bulgarie (la)", "BG", "BGR", "100"},
|
|
||||||
{"Myanmar", "Myanmar (le)", "MM", "MMR", "104"},
|
|
||||||
{"Burundi", "Burundi (le)", "BI", "BDI", "108"},
|
|
||||||
{"Belarus", "Bélarus (le)", "BY", "BLR", "112"},
|
|
||||||
{"Cambodia", "Cambodge (le)", "KH", "KHM", "116"},
|
|
||||||
{"Cameroon", "Cameroun (le)", "CM", "CMR", "120"},
|
|
||||||
{"Canada", "Canada (le)", "CA", "CAN", "124"},
|
|
||||||
{"Cabo Verde", "Cabo Verde", "CV", "CPV", "132"},
|
|
||||||
{"Cayman Islands (the)", "Caïmans (les Îles)", "KY", "CYM", "136"},
|
|
||||||
{"Central African Republic (the)", "République centrafricaine (la)", "CF", "CAF", "140"},
|
|
||||||
{"Sri Lanka", "Sri Lanka", "LK", "LKA", "144"},
|
|
||||||
{"Chad", "Tchad (le)", "TD", "TCD", "148"},
|
|
||||||
{"Chile", "Chili (le)", "CL", "CHL", "152"},
|
|
||||||
{"China", "Chine (la)", "CN", "CHN", "156"},
|
|
||||||
{"Taiwan (Province of China)", "Taïwan (Province de Chine)", "TW", "TWN", "158"},
|
|
||||||
{"Christmas Island", "Christmas (l'Île)", "CX", "CXR", "162"},
|
|
||||||
{"Cocos (Keeling) Islands (the)", "Cocos (les Îles)/ Keeling (les Îles)", "CC", "CCK", "166"},
|
|
||||||
{"Colombia", "Colombie (la)", "CO", "COL", "170"},
|
|
||||||
{"Comoros (the)", "Comores (les)", "KM", "COM", "174"},
|
|
||||||
{"Mayotte", "Mayotte", "YT", "MYT", "175"},
|
|
||||||
{"Congo (the)", "Congo (le)", "CG", "COG", "178"},
|
|
||||||
{"Congo (the Democratic Republic of the)", "Congo (la République démocratique du)", "CD", "COD", "180"},
|
|
||||||
{"Cook Islands (the)", "Cook (les Îles)", "CK", "COK", "184"},
|
|
||||||
{"Costa Rica", "Costa Rica (le)", "CR", "CRI", "188"},
|
|
||||||
{"Croatia", "Croatie (la)", "HR", "HRV", "191"},
|
|
||||||
{"Cuba", "Cuba", "CU", "CUB", "192"},
|
|
||||||
{"Cyprus", "Chypre", "CY", "CYP", "196"},
|
|
||||||
{"Czech Republic (the)", "tchèque (la République)", "CZ", "CZE", "203"},
|
|
||||||
{"Benin", "Bénin (le)", "BJ", "BEN", "204"},
|
|
||||||
{"Denmark", "Danemark (le)", "DK", "DNK", "208"},
|
|
||||||
{"Dominica", "Dominique (la)", "DM", "DMA", "212"},
|
|
||||||
{"Dominican Republic (the)", "dominicaine (la République)", "DO", "DOM", "214"},
|
|
||||||
{"Ecuador", "Équateur (l')", "EC", "ECU", "218"},
|
|
||||||
{"El Salvador", "El Salvador", "SV", "SLV", "222"},
|
|
||||||
{"Equatorial Guinea", "Guinée équatoriale (la)", "GQ", "GNQ", "226"},
|
|
||||||
{"Ethiopia", "Éthiopie (l')", "ET", "ETH", "231"},
|
|
||||||
{"Eritrea", "Érythrée (l')", "ER", "ERI", "232"},
|
|
||||||
{"Estonia", "Estonie (l')", "EE", "EST", "233"},
|
|
||||||
{"Faroe Islands (the)", "Féroé (les Îles)", "FO", "FRO", "234"},
|
|
||||||
{"Falkland Islands (the) [Malvinas]", "Falkland (les Îles)/Malouines (les Îles)", "FK", "FLK", "238"},
|
|
||||||
{"South Georgia and the South Sandwich Islands", "Géorgie du Sud-et-les Îles Sandwich du Sud (la)", "GS", "SGS", "239"},
|
|
||||||
{"Fiji", "Fidji (les)", "FJ", "FJI", "242"},
|
|
||||||
{"Finland", "Finlande (la)", "FI", "FIN", "246"},
|
|
||||||
{"Åland Islands", "Åland(les Îles)", "AX", "ALA", "248"},
|
|
||||||
{"France", "France (la)", "FR", "FRA", "250"},
|
|
||||||
{"French Guiana", "Guyane française (la )", "GF", "GUF", "254"},
|
|
||||||
{"French Polynesia", "Polynésie française (la)", "PF", "PYF", "258"},
|
|
||||||
{"French Southern Territories (the)", "Terres australes françaises (les)", "TF", "ATF", "260"},
|
|
||||||
{"Djibouti", "Djibouti", "DJ", "DJI", "262"},
|
|
||||||
{"Gabon", "Gabon (le)", "GA", "GAB", "266"},
|
|
||||||
{"Georgia", "Géorgie (la)", "GE", "GEO", "268"},
|
|
||||||
{"Gambia (the)", "Gambie (la)", "GM", "GMB", "270"},
|
|
||||||
{"Palestine, State of", "Palestine, État de", "PS", "PSE", "275"},
|
|
||||||
{"Germany", "Allemagne (l')", "DE", "DEU", "276"},
|
|
||||||
{"Ghana", "Ghana (le)", "GH", "GHA", "288"},
|
|
||||||
{"Gibraltar", "Gibraltar", "GI", "GIB", "292"},
|
|
||||||
{"Kiribati", "Kiribati", "KI", "KIR", "296"},
|
|
||||||
{"Greece", "Grèce (la)", "GR", "GRC", "300"},
|
|
||||||
{"Greenland", "Groenland (le)", "GL", "GRL", "304"},
|
|
||||||
{"Grenada", "Grenade (la)", "GD", "GRD", "308"},
|
|
||||||
{"Guadeloupe", "Guadeloupe (la)", "GP", "GLP", "312"},
|
|
||||||
{"Guam", "Guam", "GU", "GUM", "316"},
|
|
||||||
{"Guatemala", "Guatemala (le)", "GT", "GTM", "320"},
|
|
||||||
{"Guinea", "Guinée (la)", "GN", "GIN", "324"},
|
|
||||||
{"Guyana", "Guyana (le)", "GY", "GUY", "328"},
|
|
||||||
{"Haiti", "Haïti", "HT", "HTI", "332"},
|
|
||||||
{"Heard Island and McDonald Islands", "Heard-et-Îles MacDonald (l'Île)", "HM", "HMD", "334"},
|
|
||||||
{"Holy See (the)", "Saint-Siège (le)", "VA", "VAT", "336"},
|
|
||||||
{"Honduras", "Honduras (le)", "HN", "HND", "340"},
|
|
||||||
{"Hong Kong", "Hong Kong", "HK", "HKG", "344"},
|
|
||||||
{"Hungary", "Hongrie (la)", "HU", "HUN", "348"},
|
|
||||||
{"Iceland", "Islande (l')", "IS", "ISL", "352"},
|
|
||||||
{"India", "Inde (l')", "IN", "IND", "356"},
|
|
||||||
{"Indonesia", "Indonésie (l')", "ID", "IDN", "360"},
|
|
||||||
{"Iran (Islamic Republic of)", "Iran (République Islamique d')", "IR", "IRN", "364"},
|
|
||||||
{"Iraq", "Iraq (l')", "IQ", "IRQ", "368"},
|
|
||||||
{"Ireland", "Irlande (l')", "IE", "IRL", "372"},
|
|
||||||
{"Israel", "Israël", "IL", "ISR", "376"},
|
|
||||||
{"Italy", "Italie (l')", "IT", "ITA", "380"},
|
|
||||||
{"Côte d'Ivoire", "Côte d'Ivoire (la)", "CI", "CIV", "384"},
|
|
||||||
{"Jamaica", "Jamaïque (la)", "JM", "JAM", "388"},
|
|
||||||
{"Japan", "Japon (le)", "JP", "JPN", "392"},
|
|
||||||
{"Kazakhstan", "Kazakhstan (le)", "KZ", "KAZ", "398"},
|
|
||||||
{"Jordan", "Jordanie (la)", "JO", "JOR", "400"},
|
|
||||||
{"Kenya", "Kenya (le)", "KE", "KEN", "404"},
|
|
||||||
{"Korea (the Democratic People's Republic of)", "Corée (la République populaire démocratique de)", "KP", "PRK", "408"},
|
|
||||||
{"Korea (the Republic of)", "Corée (la République de)", "KR", "KOR", "410"},
|
|
||||||
{"Kuwait", "Koweït (le)", "KW", "KWT", "414"},
|
|
||||||
{"Kyrgyzstan", "Kirghizistan (le)", "KG", "KGZ", "417"},
|
|
||||||
{"Lao People's Democratic Republic (the)", "Lao, République démocratique populaire", "LA", "LAO", "418"},
|
|
||||||
{"Lebanon", "Liban (le)", "LB", "LBN", "422"},
|
|
||||||
{"Lesotho", "Lesotho (le)", "LS", "LSO", "426"},
|
|
||||||
{"Latvia", "Lettonie (la)", "LV", "LVA", "428"},
|
|
||||||
{"Liberia", "Libéria (le)", "LR", "LBR", "430"},
|
|
||||||
{"Libya", "Libye (la)", "LY", "LBY", "434"},
|
|
||||||
{"Liechtenstein", "Liechtenstein (le)", "LI", "LIE", "438"},
|
|
||||||
{"Lithuania", "Lituanie (la)", "LT", "LTU", "440"},
|
|
||||||
{"Luxembourg", "Luxembourg (le)", "LU", "LUX", "442"},
|
|
||||||
{"Macao", "Macao", "MO", "MAC", "446"},
|
|
||||||
{"Madagascar", "Madagascar", "MG", "MDG", "450"},
|
|
||||||
{"Malawi", "Malawi (le)", "MW", "MWI", "454"},
|
|
||||||
{"Malaysia", "Malaisie (la)", "MY", "MYS", "458"},
|
|
||||||
{"Maldives", "Maldives (les)", "MV", "MDV", "462"},
|
|
||||||
{"Mali", "Mali (le)", "ML", "MLI", "466"},
|
|
||||||
{"Malta", "Malte", "MT", "MLT", "470"},
|
|
||||||
{"Martinique", "Martinique (la)", "MQ", "MTQ", "474"},
|
|
||||||
{"Mauritania", "Mauritanie (la)", "MR", "MRT", "478"},
|
|
||||||
{"Mauritius", "Maurice", "MU", "MUS", "480"},
|
|
||||||
{"Mexico", "Mexique (le)", "MX", "MEX", "484"},
|
|
||||||
{"Monaco", "Monaco", "MC", "MCO", "492"},
|
|
||||||
{"Mongolia", "Mongolie (la)", "MN", "MNG", "496"},
|
|
||||||
{"Moldova (the Republic of)", "Moldova , République de", "MD", "MDA", "498"},
|
|
||||||
{"Montenegro", "Monténégro (le)", "ME", "MNE", "499"},
|
|
||||||
{"Montserrat", "Montserrat", "MS", "MSR", "500"},
|
|
||||||
{"Morocco", "Maroc (le)", "MA", "MAR", "504"},
|
|
||||||
{"Mozambique", "Mozambique (le)", "MZ", "MOZ", "508"},
|
|
||||||
{"Oman", "Oman", "OM", "OMN", "512"},
|
|
||||||
{"Namibia", "Namibie (la)", "NA", "NAM", "516"},
|
|
||||||
{"Nauru", "Nauru", "NR", "NRU", "520"},
|
|
||||||
{"Nepal", "Népal (le)", "NP", "NPL", "524"},
|
|
||||||
{"Netherlands (the)", "Pays-Bas (les)", "NL", "NLD", "528"},
|
|
||||||
{"Curaçao", "Curaçao", "CW", "CUW", "531"},
|
|
||||||
{"Aruba", "Aruba", "AW", "ABW", "533"},
|
|
||||||
{"Sint Maarten (Dutch part)", "Saint-Martin (partie néerlandaise)", "SX", "SXM", "534"},
|
|
||||||
{"Bonaire, Sint Eustatius and Saba", "Bonaire, Saint-Eustache et Saba", "BQ", "BES", "535"},
|
|
||||||
{"New Caledonia", "Nouvelle-Calédonie (la)", "NC", "NCL", "540"},
|
|
||||||
{"Vanuatu", "Vanuatu (le)", "VU", "VUT", "548"},
|
|
||||||
{"New Zealand", "Nouvelle-Zélande (la)", "NZ", "NZL", "554"},
|
|
||||||
{"Nicaragua", "Nicaragua (le)", "NI", "NIC", "558"},
|
|
||||||
{"Niger (the)", "Niger (le)", "NE", "NER", "562"},
|
|
||||||
{"Nigeria", "Nigéria (le)", "NG", "NGA", "566"},
|
|
||||||
{"Niue", "Niue", "NU", "NIU", "570"},
|
|
||||||
{"Norfolk Island", "Norfolk (l'Île)", "NF", "NFK", "574"},
|
|
||||||
{"Norway", "Norvège (la)", "NO", "NOR", "578"},
|
|
||||||
{"Northern Mariana Islands (the)", "Mariannes du Nord (les Îles)", "MP", "MNP", "580"},
|
|
||||||
{"United States Minor Outlying Islands (the)", "Îles mineures éloignées des États-Unis (les)", "UM", "UMI", "581"},
|
|
||||||
{"Micronesia (Federated States of)", "Micronésie (États fédérés de)", "FM", "FSM", "583"},
|
|
||||||
{"Marshall Islands (the)", "Marshall (Îles)", "MH", "MHL", "584"},
|
|
||||||
{"Palau", "Palaos (les)", "PW", "PLW", "585"},
|
|
||||||
{"Pakistan", "Pakistan (le)", "PK", "PAK", "586"},
|
|
||||||
{"Panama", "Panama (le)", "PA", "PAN", "591"},
|
|
||||||
{"Papua New Guinea", "Papouasie-Nouvelle-Guinée (la)", "PG", "PNG", "598"},
|
|
||||||
{"Paraguay", "Paraguay (le)", "PY", "PRY", "600"},
|
|
||||||
{"Peru", "Pérou (le)", "PE", "PER", "604"},
|
|
||||||
{"Philippines (the)", "Philippines (les)", "PH", "PHL", "608"},
|
|
||||||
{"Pitcairn", "Pitcairn", "PN", "PCN", "612"},
|
|
||||||
{"Poland", "Pologne (la)", "PL", "POL", "616"},
|
|
||||||
{"Portugal", "Portugal (le)", "PT", "PRT", "620"},
|
|
||||||
{"Guinea-Bissau", "Guinée-Bissau (la)", "GW", "GNB", "624"},
|
|
||||||
{"Timor-Leste", "Timor-Leste (le)", "TL", "TLS", "626"},
|
|
||||||
{"Puerto Rico", "Porto Rico", "PR", "PRI", "630"},
|
|
||||||
{"Qatar", "Qatar (le)", "QA", "QAT", "634"},
|
|
||||||
{"Réunion", "Réunion (La)", "RE", "REU", "638"},
|
|
||||||
{"Romania", "Roumanie (la)", "RO", "ROU", "642"},
|
|
||||||
{"Russian Federation (the)", "Russie (la Fédération de)", "RU", "RUS", "643"},
|
|
||||||
{"Rwanda", "Rwanda (le)", "RW", "RWA", "646"},
|
|
||||||
{"Saint Barthélemy", "Saint-Barthélemy", "BL", "BLM", "652"},
|
|
||||||
{"Saint Helena, Ascension and Tristan da Cunha", "Sainte-Hélène, Ascension et Tristan da Cunha", "SH", "SHN", "654"},
|
|
||||||
{"Saint Kitts and Nevis", "Saint-Kitts-et-Nevis", "KN", "KNA", "659"},
|
|
||||||
{"Anguilla", "Anguilla", "AI", "AIA", "660"},
|
|
||||||
{"Saint Lucia", "Sainte-Lucie", "LC", "LCA", "662"},
|
|
||||||
{"Saint Martin (French part)", "Saint-Martin (partie française)", "MF", "MAF", "663"},
|
|
||||||
{"Saint Pierre and Miquelon", "Saint-Pierre-et-Miquelon", "PM", "SPM", "666"},
|
|
||||||
{"Saint Vincent and the Grenadines", "Saint-Vincent-et-les Grenadines", "VC", "VCT", "670"},
|
|
||||||
{"San Marino", "Saint-Marin", "SM", "SMR", "674"},
|
|
||||||
{"Sao Tome and Principe", "Sao Tomé-et-Principe", "ST", "STP", "678"},
|
|
||||||
{"Saudi Arabia", "Arabie saoudite (l')", "SA", "SAU", "682"},
|
|
||||||
{"Senegal", "Sénégal (le)", "SN", "SEN", "686"},
|
|
||||||
{"Serbia", "Serbie (la)", "RS", "SRB", "688"},
|
|
||||||
{"Seychelles", "Seychelles (les)", "SC", "SYC", "690"},
|
|
||||||
{"Sierra Leone", "Sierra Leone (la)", "SL", "SLE", "694"},
|
|
||||||
{"Singapore", "Singapour", "SG", "SGP", "702"},
|
|
||||||
{"Slovakia", "Slovaquie (la)", "SK", "SVK", "703"},
|
|
||||||
{"Viet Nam", "Viet Nam (le)", "VN", "VNM", "704"},
|
|
||||||
{"Slovenia", "Slovénie (la)", "SI", "SVN", "705"},
|
|
||||||
{"Somalia", "Somalie (la)", "SO", "SOM", "706"},
|
|
||||||
{"South Africa", "Afrique du Sud (l')", "ZA", "ZAF", "710"},
|
|
||||||
{"Zimbabwe", "Zimbabwe (le)", "ZW", "ZWE", "716"},
|
|
||||||
{"Spain", "Espagne (l')", "ES", "ESP", "724"},
|
|
||||||
{"South Sudan", "Soudan du Sud (le)", "SS", "SSD", "728"},
|
|
||||||
{"Sudan (the)", "Soudan (le)", "SD", "SDN", "729"},
|
|
||||||
{"Western Sahara*", "Sahara occidental (le)*", "EH", "ESH", "732"},
|
|
||||||
{"Suriname", "Suriname (le)", "SR", "SUR", "740"},
|
|
||||||
{"Svalbard and Jan Mayen", "Svalbard et l'Île Jan Mayen (le)", "SJ", "SJM", "744"},
|
|
||||||
{"Swaziland", "Swaziland (le)", "SZ", "SWZ", "748"},
|
|
||||||
{"Sweden", "Suède (la)", "SE", "SWE", "752"},
|
|
||||||
{"Switzerland", "Suisse (la)", "CH", "CHE", "756"},
|
|
||||||
{"Syrian Arab Republic", "République arabe syrienne (la)", "SY", "SYR", "760"},
|
|
||||||
{"Tajikistan", "Tadjikistan (le)", "TJ", "TJK", "762"},
|
|
||||||
{"Thailand", "Thaïlande (la)", "TH", "THA", "764"},
|
|
||||||
{"Togo", "Togo (le)", "TG", "TGO", "768"},
|
|
||||||
{"Tokelau", "Tokelau (les)", "TK", "TKL", "772"},
|
|
||||||
{"Tonga", "Tonga (les)", "TO", "TON", "776"},
|
|
||||||
{"Trinidad and Tobago", "Trinité-et-Tobago (la)", "TT", "TTO", "780"},
|
|
||||||
{"United Arab Emirates (the)", "Émirats arabes unis (les)", "AE", "ARE", "784"},
|
|
||||||
{"Tunisia", "Tunisie (la)", "TN", "TUN", "788"},
|
|
||||||
{"Turkey", "Turquie (la)", "TR", "TUR", "792"},
|
|
||||||
{"Turkmenistan", "Turkménistan (le)", "TM", "TKM", "795"},
|
|
||||||
{"Turks and Caicos Islands (the)", "Turks-et-Caïcos (les Îles)", "TC", "TCA", "796"},
|
|
||||||
{"Tuvalu", "Tuvalu (les)", "TV", "TUV", "798"},
|
|
||||||
{"Uganda", "Ouganda (l')", "UG", "UGA", "800"},
|
|
||||||
{"Ukraine", "Ukraine (l')", "UA", "UKR", "804"},
|
|
||||||
{"Macedonia (the former Yugoslav Republic of)", "Macédoine (l'ex‑République yougoslave de)", "MK", "MKD", "807"},
|
|
||||||
{"Egypt", "Égypte (l')", "EG", "EGY", "818"},
|
|
||||||
{"United Kingdom of Great Britain and Northern Ireland (the)", "Royaume-Uni de Grande-Bretagne et d'Irlande du Nord (le)", "GB", "GBR", "826"},
|
|
||||||
{"Guernsey", "Guernesey", "GG", "GGY", "831"},
|
|
||||||
{"Jersey", "Jersey", "JE", "JEY", "832"},
|
|
||||||
{"Isle of Man", "Île de Man", "IM", "IMN", "833"},
|
|
||||||
{"Tanzania, United Republic of", "Tanzanie, République-Unie de", "TZ", "TZA", "834"},
|
|
||||||
{"United States of America (the)", "États-Unis d'Amérique (les)", "US", "USA", "840"},
|
|
||||||
{"Virgin Islands (U.S.)", "Vierges des États-Unis (les Îles)", "VI", "VIR", "850"},
|
|
||||||
{"Burkina Faso", "Burkina Faso (le)", "BF", "BFA", "854"},
|
|
||||||
{"Uruguay", "Uruguay (l')", "UY", "URY", "858"},
|
|
||||||
{"Uzbekistan", "Ouzbékistan (l')", "UZ", "UZB", "860"},
|
|
||||||
{"Venezuela (Bolivarian Republic of)", "Venezuela (République bolivarienne du)", "VE", "VEN", "862"},
|
|
||||||
{"Wallis and Futuna", "Wallis-et-Futuna", "WF", "WLF", "876"},
|
|
||||||
{"Samoa", "Samoa (le)", "WS", "WSM", "882"},
|
|
||||||
{"Yemen", "Yémen (le)", "YE", "YEM", "887"},
|
|
||||||
{"Zambia", "Zambie (la)", "ZM", "ZMB", "894"},
|
|
||||||
}
|
|
||||||
|
|
||||||
// ISO4217List is the list of ISO currency codes
var ISO4217List = []string{
    "AED", "AFN", "ALL", "AMD", "ANG", "AOA", "ARS", "AUD", "AWG", "AZN",
    "BAM", "BBD", "BDT", "BGN", "BHD", "BIF", "BMD", "BND", "BOB", "BOV", "BRL", "BSD", "BTN", "BWP", "BYN", "BZD",
    "CAD", "CDF", "CHE", "CHF", "CHW", "CLF", "CLP", "CNY", "COP", "COU", "CRC", "CUC", "CUP", "CVE", "CZK",
    "DJF", "DKK", "DOP", "DZD",
    "EGP", "ERN", "ETB", "EUR",
    "FJD", "FKP",
    "GBP", "GEL", "GHS", "GIP", "GMD", "GNF", "GTQ", "GYD",
    "HKD", "HNL", "HRK", "HTG", "HUF",
    "IDR", "ILS", "INR", "IQD", "IRR", "ISK",
    "JMD", "JOD", "JPY",
    "KES", "KGS", "KHR", "KMF", "KPW", "KRW", "KWD", "KYD", "KZT",
    "LAK", "LBP", "LKR", "LRD", "LSL", "LYD",
    "MAD", "MDL", "MGA", "MKD", "MMK", "MNT", "MOP", "MRO", "MUR", "MVR", "MWK", "MXN", "MXV", "MYR", "MZN",
    "NAD", "NGN", "NIO", "NOK", "NPR", "NZD",
    "OMR",
    "PAB", "PEN", "PGK", "PHP", "PKR", "PLN", "PYG",
    "QAR",
    "RON", "RSD", "RUB", "RWF",
    "SAR", "SBD", "SCR", "SDG", "SEK", "SGD", "SHP", "SLL", "SOS", "SRD", "SSP", "STD", "SVC", "SYP", "SZL",
    "THB", "TJS", "TMT", "TND", "TOP", "TRY", "TTD", "TWD", "TZS",
    "UAH", "UGX", "USD", "USN", "UYI", "UYU", "UZS",
    "VEF", "VND", "VUV",
    "WST",
    "XAF", "XAG", "XAU", "XBA", "XBB", "XBC", "XBD", "XCD", "XDR", "XOF", "XPD", "XPF", "XPT", "XSU", "XTS", "XUA", "XXX",
    "YER",
    "ZAR", "ZMW", "ZWL",
}
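Illustrative only: the validators backed by these code tables (both registered in TagMap above) can also be called directly.

package main

import (
    "fmt"

    "github.com/asaskevich/govalidator" // import path assumed
)

func main() {
    fmt.Println(govalidator.IsISO4217("EUR"))       // true
    fmt.Println(govalidator.IsISO4217("EURO"))      // false
    fmt.Println(govalidator.IsISO3166Alpha2("DE"))  // true
    fmt.Println(govalidator.IsISO3166Alpha3("DEU")) // true
}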
|
|
||||||
|
|
||||||
// ISO693Entry stores ISO language codes
type ISO693Entry struct {
    Alpha3bCode string
    Alpha2Code  string
    English     string
}
|
|
||||||
|
|
||||||
// ISO693List based on http://data.okfn.org/data/core/language-codes/r/language-codes-3b2.json
|
|
||||||
var ISO693List = []ISO693Entry{
|
|
||||||
{Alpha3bCode: "aar", Alpha2Code: "aa", English: "Afar"},
|
|
||||||
{Alpha3bCode: "abk", Alpha2Code: "ab", English: "Abkhazian"},
|
|
||||||
{Alpha3bCode: "afr", Alpha2Code: "af", English: "Afrikaans"},
|
|
||||||
{Alpha3bCode: "aka", Alpha2Code: "ak", English: "Akan"},
|
|
||||||
{Alpha3bCode: "alb", Alpha2Code: "sq", English: "Albanian"},
|
|
||||||
{Alpha3bCode: "amh", Alpha2Code: "am", English: "Amharic"},
|
|
||||||
{Alpha3bCode: "ara", Alpha2Code: "ar", English: "Arabic"},
|
|
||||||
{Alpha3bCode: "arg", Alpha2Code: "an", English: "Aragonese"},
|
|
||||||
{Alpha3bCode: "arm", Alpha2Code: "hy", English: "Armenian"},
|
|
||||||
{Alpha3bCode: "asm", Alpha2Code: "as", English: "Assamese"},
|
|
||||||
{Alpha3bCode: "ava", Alpha2Code: "av", English: "Avaric"},
|
|
||||||
{Alpha3bCode: "ave", Alpha2Code: "ae", English: "Avestan"},
|
|
||||||
{Alpha3bCode: "aym", Alpha2Code: "ay", English: "Aymara"},
|
|
||||||
{Alpha3bCode: "aze", Alpha2Code: "az", English: "Azerbaijani"},
|
|
||||||
{Alpha3bCode: "bak", Alpha2Code: "ba", English: "Bashkir"},
|
|
||||||
{Alpha3bCode: "bam", Alpha2Code: "bm", English: "Bambara"},
|
|
||||||
{Alpha3bCode: "baq", Alpha2Code: "eu", English: "Basque"},
|
|
||||||
{Alpha3bCode: "bel", Alpha2Code: "be", English: "Belarusian"},
|
|
||||||
{Alpha3bCode: "ben", Alpha2Code: "bn", English: "Bengali"},
|
|
||||||
{Alpha3bCode: "bih", Alpha2Code: "bh", English: "Bihari languages"},
|
|
||||||
{Alpha3bCode: "bis", Alpha2Code: "bi", English: "Bislama"},
|
|
||||||
{Alpha3bCode: "bos", Alpha2Code: "bs", English: "Bosnian"},
|
|
||||||
{Alpha3bCode: "bre", Alpha2Code: "br", English: "Breton"},
|
|
||||||
{Alpha3bCode: "bul", Alpha2Code: "bg", English: "Bulgarian"},
|
|
||||||
{Alpha3bCode: "bur", Alpha2Code: "my", English: "Burmese"},
|
|
||||||
{Alpha3bCode: "cat", Alpha2Code: "ca", English: "Catalan; Valencian"},
|
|
||||||
{Alpha3bCode: "cha", Alpha2Code: "ch", English: "Chamorro"},
|
|
||||||
{Alpha3bCode: "che", Alpha2Code: "ce", English: "Chechen"},
|
|
||||||
{Alpha3bCode: "chi", Alpha2Code: "zh", English: "Chinese"},
|
|
||||||
{Alpha3bCode: "chu", Alpha2Code: "cu", English: "Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic"},
|
|
||||||
{Alpha3bCode: "chv", Alpha2Code: "cv", English: "Chuvash"},
|
|
||||||
{Alpha3bCode: "cor", Alpha2Code: "kw", English: "Cornish"},
|
|
||||||
{Alpha3bCode: "cos", Alpha2Code: "co", English: "Corsican"},
|
|
||||||
{Alpha3bCode: "cre", Alpha2Code: "cr", English: "Cree"},
|
|
||||||
{Alpha3bCode: "cze", Alpha2Code: "cs", English: "Czech"},
|
|
||||||
{Alpha3bCode: "dan", Alpha2Code: "da", English: "Danish"},
|
|
||||||
{Alpha3bCode: "div", Alpha2Code: "dv", English: "Divehi; Dhivehi; Maldivian"},
|
|
||||||
{Alpha3bCode: "dut", Alpha2Code: "nl", English: "Dutch; Flemish"},
|
|
||||||
{Alpha3bCode: "dzo", Alpha2Code: "dz", English: "Dzongkha"},
|
|
||||||
{Alpha3bCode: "eng", Alpha2Code: "en", English: "English"},
|
|
||||||
{Alpha3bCode: "epo", Alpha2Code: "eo", English: "Esperanto"},
|
|
||||||
{Alpha3bCode: "est", Alpha2Code: "et", English: "Estonian"},
|
|
||||||
{Alpha3bCode: "ewe", Alpha2Code: "ee", English: "Ewe"},
|
|
||||||
{Alpha3bCode: "fao", Alpha2Code: "fo", English: "Faroese"},
|
|
||||||
{Alpha3bCode: "fij", Alpha2Code: "fj", English: "Fijian"},
|
|
||||||
{Alpha3bCode: "fin", Alpha2Code: "fi", English: "Finnish"},
|
|
||||||
{Alpha3bCode: "fre", Alpha2Code: "fr", English: "French"},
|
|
||||||
{Alpha3bCode: "fry", Alpha2Code: "fy", English: "Western Frisian"},
|
|
||||||
{Alpha3bCode: "ful", Alpha2Code: "ff", English: "Fulah"},
|
|
||||||
{Alpha3bCode: "geo", Alpha2Code: "ka", English: "Georgian"},
|
|
||||||
{Alpha3bCode: "ger", Alpha2Code: "de", English: "German"},
|
|
||||||
{Alpha3bCode: "gla", Alpha2Code: "gd", English: "Gaelic; Scottish Gaelic"},
|
|
||||||
{Alpha3bCode: "gle", Alpha2Code: "ga", English: "Irish"},
|
|
||||||
{Alpha3bCode: "glg", Alpha2Code: "gl", English: "Galician"},
|
|
||||||
{Alpha3bCode: "glv", Alpha2Code: "gv", English: "Manx"},
|
|
||||||
{Alpha3bCode: "gre", Alpha2Code: "el", English: "Greek, Modern (1453-)"},
|
|
||||||
{Alpha3bCode: "grn", Alpha2Code: "gn", English: "Guarani"},
|
|
||||||
{Alpha3bCode: "guj", Alpha2Code: "gu", English: "Gujarati"},
|
|
||||||
{Alpha3bCode: "hat", Alpha2Code: "ht", English: "Haitian; Haitian Creole"},
|
|
||||||
{Alpha3bCode: "hau", Alpha2Code: "ha", English: "Hausa"},
|
|
||||||
{Alpha3bCode: "heb", Alpha2Code: "he", English: "Hebrew"},
|
|
||||||
{Alpha3bCode: "her", Alpha2Code: "hz", English: "Herero"},
|
|
||||||
{Alpha3bCode: "hin", Alpha2Code: "hi", English: "Hindi"},
|
|
||||||
{Alpha3bCode: "hmo", Alpha2Code: "ho", English: "Hiri Motu"},
|
|
||||||
{Alpha3bCode: "hrv", Alpha2Code: "hr", English: "Croatian"},
|
|
||||||
{Alpha3bCode: "hun", Alpha2Code: "hu", English: "Hungarian"},
|
|
||||||
{Alpha3bCode: "ibo", Alpha2Code: "ig", English: "Igbo"},
|
|
||||||
{Alpha3bCode: "ice", Alpha2Code: "is", English: "Icelandic"},
|
|
||||||
{Alpha3bCode: "ido", Alpha2Code: "io", English: "Ido"},
|
|
||||||
{Alpha3bCode: "iii", Alpha2Code: "ii", English: "Sichuan Yi; Nuosu"},
|
|
||||||
{Alpha3bCode: "iku", Alpha2Code: "iu", English: "Inuktitut"},
|
|
||||||
{Alpha3bCode: "ile", Alpha2Code: "ie", English: "Interlingue; Occidental"},
|
|
||||||
{Alpha3bCode: "ina", Alpha2Code: "ia", English: "Interlingua (International Auxiliary Language Association)"},
|
|
||||||
{Alpha3bCode: "ind", Alpha2Code: "id", English: "Indonesian"},
|
|
||||||
{Alpha3bCode: "ipk", Alpha2Code: "ik", English: "Inupiaq"},
|
|
||||||
{Alpha3bCode: "ita", Alpha2Code: "it", English: "Italian"},
|
|
||||||
{Alpha3bCode: "jav", Alpha2Code: "jv", English: "Javanese"},
|
|
||||||
{Alpha3bCode: "jpn", Alpha2Code: "ja", English: "Japanese"},
|
|
||||||
{Alpha3bCode: "kal", Alpha2Code: "kl", English: "Kalaallisut; Greenlandic"},
|
|
||||||
{Alpha3bCode: "kan", Alpha2Code: "kn", English: "Kannada"},
|
|
||||||
{Alpha3bCode: "kas", Alpha2Code: "ks", English: "Kashmiri"},
|
|
||||||
{Alpha3bCode: "kau", Alpha2Code: "kr", English: "Kanuri"},
|
|
||||||
{Alpha3bCode: "kaz", Alpha2Code: "kk", English: "Kazakh"},
|
|
||||||
{Alpha3bCode: "khm", Alpha2Code: "km", English: "Central Khmer"},
|
|
||||||
{Alpha3bCode: "kik", Alpha2Code: "ki", English: "Kikuyu; Gikuyu"},
|
|
||||||
{Alpha3bCode: "kin", Alpha2Code: "rw", English: "Kinyarwanda"},
|
|
||||||
{Alpha3bCode: "kir", Alpha2Code: "ky", English: "Kirghiz; Kyrgyz"},
|
|
||||||
{Alpha3bCode: "kom", Alpha2Code: "kv", English: "Komi"},
|
|
||||||
{Alpha3bCode: "kon", Alpha2Code: "kg", English: "Kongo"},
|
|
||||||
{Alpha3bCode: "kor", Alpha2Code: "ko", English: "Korean"},
|
|
||||||
{Alpha3bCode: "kua", Alpha2Code: "kj", English: "Kuanyama; Kwanyama"},
|
|
||||||
{Alpha3bCode: "kur", Alpha2Code: "ku", English: "Kurdish"},
|
|
||||||
{Alpha3bCode: "lao", Alpha2Code: "lo", English: "Lao"},
|
|
||||||
{Alpha3bCode: "lat", Alpha2Code: "la", English: "Latin"},
|
|
||||||
{Alpha3bCode: "lav", Alpha2Code: "lv", English: "Latvian"},
|
|
||||||
{Alpha3bCode: "lim", Alpha2Code: "li", English: "Limburgan; Limburger; Limburgish"},
|
|
||||||
{Alpha3bCode: "lin", Alpha2Code: "ln", English: "Lingala"},
|
|
||||||
{Alpha3bCode: "lit", Alpha2Code: "lt", English: "Lithuanian"},
|
|
||||||
{Alpha3bCode: "ltz", Alpha2Code: "lb", English: "Luxembourgish; Letzeburgesch"},
|
|
||||||
{Alpha3bCode: "lub", Alpha2Code: "lu", English: "Luba-Katanga"},
|
|
||||||
{Alpha3bCode: "lug", Alpha2Code: "lg", English: "Ganda"},
|
|
||||||
{Alpha3bCode: "mac", Alpha2Code: "mk", English: "Macedonian"},
|
|
||||||
{Alpha3bCode: "mah", Alpha2Code: "mh", English: "Marshallese"},
|
|
||||||
{Alpha3bCode: "mal", Alpha2Code: "ml", English: "Malayalam"},
|
|
||||||
{Alpha3bCode: "mao", Alpha2Code: "mi", English: "Maori"},
|
|
||||||
{Alpha3bCode: "mar", Alpha2Code: "mr", English: "Marathi"},
|
|
||||||
{Alpha3bCode: "may", Alpha2Code: "ms", English: "Malay"},
|
|
||||||
{Alpha3bCode: "mlg", Alpha2Code: "mg", English: "Malagasy"},
|
|
||||||
{Alpha3bCode: "mlt", Alpha2Code: "mt", English: "Maltese"},
|
|
||||||
{Alpha3bCode: "mon", Alpha2Code: "mn", English: "Mongolian"},
|
|
||||||
{Alpha3bCode: "nau", Alpha2Code: "na", English: "Nauru"},
|
|
||||||
{Alpha3bCode: "nav", Alpha2Code: "nv", English: "Navajo; Navaho"},
|
|
||||||
{Alpha3bCode: "nbl", Alpha2Code: "nr", English: "Ndebele, South; South Ndebele"},
|
|
||||||
{Alpha3bCode: "nde", Alpha2Code: "nd", English: "Ndebele, North; North Ndebele"},
|
|
||||||
{Alpha3bCode: "ndo", Alpha2Code: "ng", English: "Ndonga"},
|
|
||||||
{Alpha3bCode: "nep", Alpha2Code: "ne", English: "Nepali"},
|
|
||||||
{Alpha3bCode: "nno", Alpha2Code: "nn", English: "Norwegian Nynorsk; Nynorsk, Norwegian"},
|
|
||||||
{Alpha3bCode: "nob", Alpha2Code: "nb", English: "Bokmål, Norwegian; Norwegian Bokmål"},
|
|
||||||
{Alpha3bCode: "nor", Alpha2Code: "no", English: "Norwegian"},
|
|
||||||
{Alpha3bCode: "nya", Alpha2Code: "ny", English: "Chichewa; Chewa; Nyanja"},
|
|
||||||
{Alpha3bCode: "oci", Alpha2Code: "oc", English: "Occitan (post 1500); Provençal"},
|
|
||||||
{Alpha3bCode: "oji", Alpha2Code: "oj", English: "Ojibwa"},
|
|
||||||
{Alpha3bCode: "ori", Alpha2Code: "or", English: "Oriya"},
|
|
||||||
{Alpha3bCode: "orm", Alpha2Code: "om", English: "Oromo"},
|
|
||||||
{Alpha3bCode: "oss", Alpha2Code: "os", English: "Ossetian; Ossetic"},
|
|
||||||
{Alpha3bCode: "pan", Alpha2Code: "pa", English: "Panjabi; Punjabi"},
|
|
||||||
{Alpha3bCode: "per", Alpha2Code: "fa", English: "Persian"},
|
|
||||||
{Alpha3bCode: "pli", Alpha2Code: "pi", English: "Pali"},
|
|
||||||
{Alpha3bCode: "pol", Alpha2Code: "pl", English: "Polish"},
|
|
||||||
{Alpha3bCode: "por", Alpha2Code: "pt", English: "Portuguese"},
|
|
||||||
{Alpha3bCode: "pus", Alpha2Code: "ps", English: "Pushto; Pashto"},
|
|
||||||
{Alpha3bCode: "que", Alpha2Code: "qu", English: "Quechua"},
|
|
||||||
{Alpha3bCode: "roh", Alpha2Code: "rm", English: "Romansh"},
|
|
||||||
{Alpha3bCode: "rum", Alpha2Code: "ro", English: "Romanian; Moldavian; Moldovan"},
|
|
||||||
{Alpha3bCode: "run", Alpha2Code: "rn", English: "Rundi"},
|
|
||||||
{Alpha3bCode: "rus", Alpha2Code: "ru", English: "Russian"},
|
|
||||||
{Alpha3bCode: "sag", Alpha2Code: "sg", English: "Sango"},
|
|
||||||
{Alpha3bCode: "san", Alpha2Code: "sa", English: "Sanskrit"},
|
|
||||||
{Alpha3bCode: "sin", Alpha2Code: "si", English: "Sinhala; Sinhalese"},
|
|
||||||
{Alpha3bCode: "slo", Alpha2Code: "sk", English: "Slovak"},
|
|
||||||
{Alpha3bCode: "slv", Alpha2Code: "sl", English: "Slovenian"},
|
|
||||||
{Alpha3bCode: "sme", Alpha2Code: "se", English: "Northern Sami"},
|
|
||||||
{Alpha3bCode: "smo", Alpha2Code: "sm", English: "Samoan"},
|
|
||||||
{Alpha3bCode: "sna", Alpha2Code: "sn", English: "Shona"},
|
|
||||||
{Alpha3bCode: "snd", Alpha2Code: "sd", English: "Sindhi"},
|
|
||||||
{Alpha3bCode: "som", Alpha2Code: "so", English: "Somali"},
|
|
||||||
{Alpha3bCode: "sot", Alpha2Code: "st", English: "Sotho, Southern"},
|
|
||||||
{Alpha3bCode: "spa", Alpha2Code: "es", English: "Spanish; Castilian"},
|
|
||||||
{Alpha3bCode: "srd", Alpha2Code: "sc", English: "Sardinian"},
|
|
||||||
{Alpha3bCode: "srp", Alpha2Code: "sr", English: "Serbian"},
|
|
||||||
{Alpha3bCode: "ssw", Alpha2Code: "ss", English: "Swati"},
|
|
||||||
{Alpha3bCode: "sun", Alpha2Code: "su", English: "Sundanese"},
|
|
||||||
{Alpha3bCode: "swa", Alpha2Code: "sw", English: "Swahili"},
|
|
||||||
{Alpha3bCode: "swe", Alpha2Code: "sv", English: "Swedish"},
|
|
||||||
{Alpha3bCode: "tah", Alpha2Code: "ty", English: "Tahitian"},
|
|
||||||
{Alpha3bCode: "tam", Alpha2Code: "ta", English: "Tamil"},
|
|
||||||
{Alpha3bCode: "tat", Alpha2Code: "tt", English: "Tatar"},
|
|
||||||
{Alpha3bCode: "tel", Alpha2Code: "te", English: "Telugu"},
|
|
||||||
{Alpha3bCode: "tgk", Alpha2Code: "tg", English: "Tajik"},
|
|
||||||
{Alpha3bCode: "tgl", Alpha2Code: "tl", English: "Tagalog"},
|
|
||||||
{Alpha3bCode: "tha", Alpha2Code: "th", English: "Thai"},
|
|
||||||
{Alpha3bCode: "tib", Alpha2Code: "bo", English: "Tibetan"},
|
|
||||||
{Alpha3bCode: "tir", Alpha2Code: "ti", English: "Tigrinya"},
|
|
||||||
{Alpha3bCode: "ton", Alpha2Code: "to", English: "Tonga (Tonga Islands)"},
|
|
||||||
{Alpha3bCode: "tsn", Alpha2Code: "tn", English: "Tswana"},
|
|
||||||
{Alpha3bCode: "tso", Alpha2Code: "ts", English: "Tsonga"},
|
|
||||||
{Alpha3bCode: "tuk", Alpha2Code: "tk", English: "Turkmen"},
|
|
||||||
{Alpha3bCode: "tur", Alpha2Code: "tr", English: "Turkish"},
|
|
||||||
{Alpha3bCode: "twi", Alpha2Code: "tw", English: "Twi"},
|
|
||||||
{Alpha3bCode: "uig", Alpha2Code: "ug", English: "Uighur; Uyghur"},
|
|
||||||
{Alpha3bCode: "ukr", Alpha2Code: "uk", English: "Ukrainian"},
|
|
||||||
{Alpha3bCode: "urd", Alpha2Code: "ur", English: "Urdu"},
|
|
||||||
{Alpha3bCode: "uzb", Alpha2Code: "uz", English: "Uzbek"},
|
|
||||||
{Alpha3bCode: "ven", Alpha2Code: "ve", English: "Venda"},
|
|
||||||
{Alpha3bCode: "vie", Alpha2Code: "vi", English: "Vietnamese"},
|
|
||||||
{Alpha3bCode: "vol", Alpha2Code: "vo", English: "Volapük"},
|
|
||||||
{Alpha3bCode: "wel", Alpha2Code: "cy", English: "Welsh"},
|
|
||||||
{Alpha3bCode: "wln", Alpha2Code: "wa", English: "Walloon"},
|
|
||||||
{Alpha3bCode: "wol", Alpha2Code: "wo", English: "Wolof"},
|
|
||||||
{Alpha3bCode: "xho", Alpha2Code: "xh", English: "Xhosa"},
|
|
||||||
{Alpha3bCode: "yid", Alpha2Code: "yi", English: "Yiddish"},
|
|
||||||
{Alpha3bCode: "yor", Alpha2Code: "yo", English: "Yoruba"},
|
|
||||||
{Alpha3bCode: "zha", Alpha2Code: "za", English: "Zhuang; Chuang"},
|
|
||||||
{Alpha3bCode: "zul", Alpha2Code: "zu", English: "Zulu"},
|
|
||||||
}
|
|
|
@@ -1,270 +0,0 @@
|
||||||
package govalidator
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"html"
|
|
||||||
"math"
|
|
||||||
"path"
|
|
||||||
"regexp"
|
|
||||||
"strings"
|
|
||||||
"unicode"
|
|
||||||
"unicode/utf8"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Contains checks whether the string contains the substring.
|
|
||||||
func Contains(str, substring string) bool {
|
|
||||||
return strings.Contains(str, substring)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Matches checks whether the string matches the pattern (a regular expression).
// If the pattern fails to compile or does not match, it returns false.
|
|
||||||
func Matches(str, pattern string) bool {
|
|
||||||
match, _ := regexp.MatchString(pattern, str)
|
|
||||||
return match
|
|
||||||
}
|
|
||||||
|
|
||||||
// LeftTrim trims characters from the left side of the input.
// If the second argument is empty, leading whitespace is removed.
|
|
||||||
func LeftTrim(str, chars string) string {
|
|
||||||
if chars == "" {
|
|
||||||
return strings.TrimLeftFunc(str, unicode.IsSpace)
|
|
||||||
}
|
|
||||||
r, _ := regexp.Compile("^[" + chars + "]+")
|
|
||||||
return r.ReplaceAllString(str, "")
|
|
||||||
}
|
|
||||||
|
|
||||||
// RightTrim trims characters from the right side of the input.
// If the second argument is empty, trailing whitespace is removed.
|
|
||||||
func RightTrim(str, chars string) string {
|
|
||||||
if chars == "" {
|
|
||||||
return strings.TrimRightFunc(str, unicode.IsSpace)
|
|
||||||
}
|
|
||||||
r, _ := regexp.Compile("[" + chars + "]+$")
|
|
||||||
return r.ReplaceAllString(str, "")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Trim trims characters from both sides of the input.
// If the second argument is empty, surrounding whitespace is removed.
|
|
||||||
func Trim(str, chars string) string {
|
|
||||||
return LeftTrim(RightTrim(str, chars), chars)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WhiteList removes characters that do not appear in the whitelist.
|
|
||||||
func WhiteList(str, chars string) string {
|
|
||||||
pattern := "[^" + chars + "]+"
|
|
||||||
r, _ := regexp.Compile(pattern)
|
|
||||||
return r.ReplaceAllString(str, "")
|
|
||||||
}
|
|
||||||
|
|
||||||
// BlackList removes characters that appear in the blacklist.
|
|
||||||
func BlackList(str, chars string) string {
|
|
||||||
pattern := "[" + chars + "]+"
|
|
||||||
r, _ := regexp.Compile(pattern)
|
|
||||||
return r.ReplaceAllString(str, "")
|
|
||||||
}
|
|
||||||
|
|
||||||
// StripLow removes characters with a numerical value below 32, as well as 127 (DEL); these are mostly control characters.
// If keepNewLines is true, newline characters are preserved (\n and \r, hex 0xA and 0xD).
|
|
||||||
func StripLow(str string, keepNewLines bool) string {
|
|
||||||
chars := ""
|
|
||||||
if keepNewLines {
|
|
||||||
chars = "\x00-\x09\x0B\x0C\x0E-\x1F\x7F"
|
|
||||||
} else {
|
|
||||||
chars = "\x00-\x1F\x7F"
|
|
||||||
}
|
|
||||||
return BlackList(str, chars)
|
|
||||||
}
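An illustrative snippet, not part of this diff, showing the character-filtering helpers above on a small input:

package main

import (
    "fmt"

    "github.com/asaskevich/govalidator" // import path assumed
)

func main() {
    s := "ab\x00c123\n"
    fmt.Printf("%q\n", govalidator.WhiteList(s, "a-z")) // "abc": only lowercase letters kept
    fmt.Printf("%q\n", govalidator.BlackList(s, "0-9")) // "ab\x00c\n": digits removed
    fmt.Printf("%q\n", govalidator.StripLow(s, true))   // "abc123\n": control chars removed, newline kept
    fmt.Printf("%q\n", govalidator.StripLow(s, false))  // "abc123": newline removed as well
}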
|
|
||||||
|
|
||||||
// ReplacePattern replaces every match of the regular expression pattern in the string.
|
|
||||||
func ReplacePattern(str, pattern, replace string) string {
|
|
||||||
r, _ := regexp.Compile(pattern)
|
|
||||||
return r.ReplaceAllString(str, replace)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Escape replace <, >, & and " with HTML entities.
|
|
||||||
var Escape = html.EscapeString
|
|
||||||
|
|
||||||
func addSegment(inrune, segment []rune) []rune {
|
|
||||||
if len(segment) == 0 {
|
|
||||||
return inrune
|
|
||||||
}
|
|
||||||
if len(inrune) != 0 {
|
|
||||||
inrune = append(inrune, '_')
|
|
||||||
}
|
|
||||||
inrune = append(inrune, segment...)
|
|
||||||
return inrune
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnderscoreToCamelCase converts from underscore separated form to camel case form.
|
|
||||||
// Ex.: my_func => MyFunc
|
|
||||||
func UnderscoreToCamelCase(s string) string {
|
|
||||||
return strings.Replace(strings.Title(strings.Replace(strings.ToLower(s), "_", " ", -1)), " ", "", -1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// CamelCaseToUnderscore converts from camel case form to underscore separated form.
|
|
||||||
// Ex.: MyFunc => my_func
|
|
||||||
func CamelCaseToUnderscore(str string) string {
|
|
||||||
var output []rune
|
|
||||||
var segment []rune
|
|
||||||
for _, r := range str {
|
|
||||||
|
|
||||||
// a number does not start a new segment
|
|
||||||
if !unicode.IsLower(r) && string(r) != "_" && !unicode.IsNumber(r) {
|
|
||||||
output = addSegment(output, segment)
|
|
||||||
segment = nil
|
|
||||||
}
|
|
||||||
segment = append(segment, unicode.ToLower(r))
|
|
||||||
}
|
|
||||||
output = addSegment(output, segment)
|
|
||||||
return string(output)
|
|
||||||
}
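Illustrative only: the two case-conversion helpers above, including the behaviour that digits do not start a new segment.

package main

import (
    "fmt"

    "github.com/asaskevich/govalidator" // import path assumed
)

func main() {
    fmt.Println(govalidator.UnderscoreToCamelCase("my_func"))   // MyFunc
    fmt.Println(govalidator.CamelCaseToUnderscore("MyFunc"))    // my_func
    fmt.Println(govalidator.CamelCaseToUnderscore("ParseURL2")) // parse_u_r_l2
}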
|
|
||||||
|
|
||||||
// Reverse returns the reversed string.
|
|
||||||
func Reverse(s string) string {
|
|
||||||
r := []rune(s)
|
|
||||||
for i, j := 0, len(r)-1; i < j; i, j = i+1, j-1 {
|
|
||||||
r[i], r[j] = r[j], r[i]
|
|
||||||
}
|
|
||||||
return string(r)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetLines splits the string by "\n" and returns the lines as a slice.
|
|
||||||
func GetLines(s string) []string {
|
|
||||||
return strings.Split(s, "\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetLine returns the specified line of a multiline string.
|
|
||||||
func GetLine(s string, index int) (string, error) {
|
|
||||||
lines := GetLines(s)
|
|
||||||
if index < 0 || index >= len(lines) {
|
|
||||||
return "", errors.New("line index out of bounds")
|
|
||||||
}
|
|
||||||
return lines[index], nil
|
|
||||||
}
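Illustrative only: line access via GetLines and GetLine, including the out-of-bounds error path.

package main

import (
    "fmt"

    "github.com/asaskevich/govalidator" // import path assumed
)

func main() {
    text := "first\nsecond\nthird"
    fmt.Println(len(govalidator.GetLines(text))) // 3

    line, err := govalidator.GetLine(text, 1)
    fmt.Println(line, err) // second <nil>

    _, err = govalidator.GetLine(text, 9)
    fmt.Println(err) // line index out of bounds
}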
|
|
||||||
|
|
||||||
// RemoveTags removes all tags from an HTML string.
|
|
||||||
func RemoveTags(s string) string {
|
|
||||||
return ReplacePattern(s, "<[^>]*>", "")
|
|
||||||
}
|
|
||||||
|
|
||||||
// SafeFileName returns a sanitized string that can safely be used in file names.
|
|
||||||
func SafeFileName(str string) string {
|
|
||||||
name := strings.ToLower(str)
|
|
||||||
name = path.Clean(path.Base(name))
|
|
||||||
name = strings.Trim(name, " ")
|
|
||||||
separators, err := regexp.Compile(`[ &_=+:]`)
|
|
||||||
if err == nil {
|
|
||||||
name = separators.ReplaceAllString(name, "-")
|
|
||||||
}
|
|
||||||
legal, err := regexp.Compile(`[^[:alnum:]-.]`)
|
|
||||||
if err == nil {
|
|
||||||
name = legal.ReplaceAllString(name, "")
|
|
||||||
}
|
|
||||||
for strings.Contains(name, "--") {
|
|
||||||
name = strings.Replace(name, "--", "-", -1)
|
|
||||||
}
|
|
||||||
return name
|
|
||||||
}
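Illustrative only: RemoveTags and SafeFileName applied to typical inputs.

package main

import (
    "fmt"

    "github.com/asaskevich/govalidator" // import path assumed
)

func main() {
    fmt.Println(govalidator.RemoveTags("<p>hello <b>world</b></p>")) // hello world
    fmt.Println(govalidator.SafeFileName("My Report: Q1+Q2 (final).PDF"))
    // my-report-q1-q2-final.pdf
}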
|
|
||||||
|
|
||||||
// NormalizeEmail canonicalizes an email address.
// The hostname is always lowercased; the local part is lowercased as well, which is only guaranteed to be safe
// for hosts that are known to be case-insensitive (currently only GMail).
// Normalization follows special rules for known providers: currently, GMail addresses have dots removed in the local
// part, are stripped of tags (e.g. some.one+tag@gmail.com becomes someone@gmail.com), and all @googlemail.com
// addresses are normalized to @gmail.com.
|
|
||||||
func NormalizeEmail(str string) (string, error) {
|
|
||||||
if !IsEmail(str) {
|
|
||||||
return "", fmt.Errorf("%s is not an email", str)
|
|
||||||
}
|
|
||||||
parts := strings.Split(str, "@")
|
|
||||||
parts[0] = strings.ToLower(parts[0])
|
|
||||||
parts[1] = strings.ToLower(parts[1])
|
|
||||||
if parts[1] == "gmail.com" || parts[1] == "googlemail.com" {
|
|
||||||
parts[1] = "gmail.com"
|
|
||||||
parts[0] = strings.Split(ReplacePattern(parts[0], `\.`, ""), "+")[0]
|
|
||||||
}
|
|
||||||
return strings.Join(parts, "@"), nil
|
|
||||||
}
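Illustrative only: NormalizeEmail applying the GMail rules described above.

package main

import (
    "fmt"

    "github.com/asaskevich/govalidator" // import path assumed
)

func main() {
    addr, err := govalidator.NormalizeEmail("Some.One+news@GoogleMail.com")
    fmt.Println(addr, err) // someone@gmail.com <nil>

    _, err = govalidator.NormalizeEmail("not-an-email")
    fmt.Println(err) // not-an-email is not an email
}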
|
|
||||||
|
|
||||||
// Truncate shortens a string to the closest word boundary around the given length, appending ending when truncation occurs.
|
|
||||||
func Truncate(str string, length int, ending string) string {
|
|
||||||
var aftstr, befstr string
|
|
||||||
if len(str) > length {
|
|
||||||
words := strings.Fields(str)
|
|
||||||
before, present := 0, 0
|
|
||||||
for i := range words {
|
|
||||||
befstr = aftstr
|
|
||||||
before = present
|
|
||||||
aftstr = aftstr + words[i] + " "
|
|
||||||
present = len(aftstr)
|
|
||||||
if present > length && i != 0 {
|
|
||||||
if (length - before) < (present - length) {
|
|
||||||
return Trim(befstr, " /\\.,\"'#!?&@+-") + ending
|
|
||||||
}
|
|
||||||
return Trim(aftstr, " /\\.,\"'#!?&@+-") + ending
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return str
|
|
||||||
}
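Illustrative only: Truncate cutting at the nearest word boundary.

package main

import (
    "fmt"

    "github.com/asaskevich/govalidator" // import path assumed
)

func main() {
    s := "The quick brown fox jumps over the lazy dog"
    fmt.Println(govalidator.Truncate(s, 25, "..."))
    // The quick brown fox jumps...
    fmt.Println(govalidator.Truncate(s, 100, "...")) // shorter than 100, returned unchanged
}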
|
|
||||||
|
|
||||||
// PadLeft pads the left side of the string if its length is less than the indicated pad length
|
|
||||||
func PadLeft(str string, padStr string, padLen int) string {
|
|
||||||
return buildPadStr(str, padStr, padLen, true, false)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PadRight pads the right side of the string if its length is less than the indicated pad length
|
|
||||||
func PadRight(str string, padStr string, padLen int) string {
|
|
||||||
return buildPadStr(str, padStr, padLen, false, true)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PadBoth pads both sides of the string if its length is less than the indicated pad length
|
|
||||||
func PadBoth(str string, padStr string, padLen int) string {
|
|
||||||
return buildPadStr(str, padStr, padLen, true, true)
|
|
||||||
}
|
|
||||||
|
|
||||||
// buildPadStr pads the string on the left, the right, or both sides; note that the padding string can be unicode and
// longer than one character.
|
|
||||||
func buildPadStr(str string, padStr string, padLen int, padLeft bool, padRight bool) string {
|
|
||||||
|
|
||||||
// When the padded length is less than the current string size
|
|
||||||
if padLen < utf8.RuneCountInString(str) {
|
|
||||||
return str
|
|
||||||
}
|
|
||||||
|
|
||||||
padLen -= utf8.RuneCountInString(str)
|
|
||||||
|
|
||||||
targetLen := padLen
|
|
||||||
|
|
||||||
targetLenLeft := targetLen
|
|
||||||
targetLenRight := targetLen
|
|
||||||
if padLeft && padRight {
|
|
||||||
targetLenLeft = padLen / 2
|
|
||||||
targetLenRight = padLen - targetLenLeft
|
|
||||||
}
|
|
||||||
|
|
||||||
strToRepeatLen := utf8.RuneCountInString(padStr)
|
|
||||||
|
|
||||||
repeatTimes := int(math.Ceil(float64(targetLen) / float64(strToRepeatLen)))
|
|
||||||
repeatedString := strings.Repeat(padStr, repeatTimes)
|
|
||||||
|
|
||||||
leftSide := ""
|
|
||||||
if padLeft {
|
|
||||||
leftSide = repeatedString[0:targetLenLeft]
|
|
||||||
}
|
|
||||||
|
|
||||||
rightSide := ""
|
|
||||||
if padRight {
|
|
||||||
rightSide = repeatedString[0:targetLenRight]
|
|
||||||
}
|
|
||||||
|
|
||||||
return leftSide + str + rightSide
|
|
||||||
}
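Illustrative only: the three padding helpers built on buildPadStr.

package main

import (
    "fmt"

    "github.com/asaskevich/govalidator" // import path assumed
)

func main() {
    fmt.Println(govalidator.PadLeft("7", "0", 4))   // 0007
    fmt.Println(govalidator.PadRight("go", "*", 5)) // go***
    fmt.Println(govalidator.PadBoth("hi", "-", 6))  // --hi--
}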
|
|
||||||
|
|
||||||
// TruncatingErrorf drops any trailing args that have no matching %s verb in str before calling fmt.Errorf
|
|
||||||
func TruncatingErrorf(str string, args ...interface{}) error {
|
|
||||||
n := strings.Count(str, "%s")
|
|
||||||
return fmt.Errorf(str, args[:n]...)
|
|
||||||
}
|
|
3525  vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go (generated, vendored)
File diff suppressed because it is too large

163  vendor/github.com/containerd/containerd/api/services/containers/v1/containers.proto (generated, vendored)
|
@@ -1,163 +0,0 @@
|
||||||
syntax = "proto3";
|
|
||||||
|
|
||||||
package containerd.services.containers.v1;
|
|
||||||
|
|
||||||
import weak "gogoproto/gogo.proto";
|
|
||||||
import "google/protobuf/any.proto";
|
|
||||||
import "google/protobuf/empty.proto";
|
|
||||||
import "google/protobuf/field_mask.proto";
|
|
||||||
import "google/protobuf/timestamp.proto";
|
|
||||||
|
|
||||||
option go_package = "github.com/containerd/containerd/api/services/containers/v1;containers";
|
|
||||||
|
|
||||||
// Containers provides metadata storage for containers used in the execution
|
|
||||||
// service.
|
|
||||||
//
|
|
||||||
// The objects here provide an state-independent view of containers for use in
|
|
||||||
// management and resource pinning. From that perspective, containers do not
|
|
||||||
// have a "state" but rather this is the set of resources that will be
|
|
||||||
// considered in use by the container.
|
|
||||||
//
|
|
||||||
// From the perspective of the execution service, these objects represent the
|
|
||||||
// base parameters for creating a container process.
|
|
||||||
//
|
|
||||||
// In general, when looking to add fields for this type, first ask yourself
|
|
||||||
// whether or not the function of the field has to do with runtime execution or
|
|
||||||
// is invariant of the runtime state of the container. If it has to do with
|
|
||||||
// runtime, or changes as the "container" is started and stops, it probably
|
|
||||||
// doesn't belong on this object.
|
|
||||||
service Containers {
|
|
||||||
rpc Get(GetContainerRequest) returns (GetContainerResponse);
|
|
||||||
rpc List(ListContainersRequest) returns (ListContainersResponse);
|
|
||||||
rpc ListStream(ListContainersRequest) returns (stream ListContainerMessage);
|
|
||||||
rpc Create(CreateContainerRequest) returns (CreateContainerResponse);
|
|
||||||
rpc Update(UpdateContainerRequest) returns (UpdateContainerResponse);
|
|
||||||
rpc Delete(DeleteContainerRequest) returns (google.protobuf.Empty);
|
|
||||||
}
|
|
||||||
|
|
||||||
message Container {
|
|
||||||
// ID is the user-specified identifier.
|
|
||||||
//
|
|
||||||
// This field may not be updated.
|
|
||||||
string id = 1;
|
|
||||||
|
|
||||||
// Labels provides an area to include arbitrary data on containers.
|
|
||||||
//
|
|
||||||
// The combined size of a key/value pair cannot exceed 4096 bytes.
|
|
||||||
//
|
|
||||||
// Note that to add a new value to this field, read the existing set and
|
|
||||||
// include the entire result in the update call.
|
|
||||||
map<string, string> labels = 2;
|
|
||||||
|
|
||||||
// Image contains the reference of the image used to build the
|
|
||||||
// specification and snapshots for running this container.
|
|
||||||
//
|
|
||||||
// If this field is updated, the spec and rootfs needed to updated, as well.
|
|
||||||
string image = 3;
|
|
||||||
|
|
||||||
message Runtime {
|
|
||||||
// Name is the name of the runtime.
|
|
||||||
string name = 1;
|
|
||||||
// Options specify additional runtime initialization options.
|
|
||||||
google.protobuf.Any options = 2;
|
|
||||||
}
|
|
||||||
// Runtime specifies which runtime to use for executing this container.
|
|
||||||
Runtime runtime = 4;
|
|
||||||
|
|
||||||
// Spec to be used when creating the container. This is runtime specific.
|
|
||||||
google.protobuf.Any spec = 5;
|
|
||||||
|
|
||||||
// Snapshotter specifies the snapshotter name used for rootfs
|
|
||||||
string snapshotter = 6;
|
|
||||||
|
|
||||||
// SnapshotKey specifies the snapshot key to use for the container's root
|
|
||||||
// filesystem. When starting a task from this container, a caller should
|
|
||||||
// look up the mounts from the snapshot service and include those on the
|
|
||||||
// task create request.
|
|
||||||
//
|
|
||||||
// Snapshots referenced in this field will not be garbage collected.
|
|
||||||
//
|
|
||||||
// This field is set to empty when the rootfs is not a snapshot.
|
|
||||||
//
|
|
||||||
// This field may be updated.
|
|
||||||
string snapshot_key = 7;
|
|
||||||
|
|
||||||
// CreatedAt is the time the container was first created.
|
|
||||||
google.protobuf.Timestamp created_at = 8 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
|
|
||||||
|
|
||||||
// UpdatedAt is the last time the container was mutated.
|
|
||||||
google.protobuf.Timestamp updated_at = 9 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
|
|
||||||
|
|
||||||
// Extensions allow clients to provide zero or more blobs that are directly
|
|
||||||
// associated with the container. One may provide protobuf, json, or other
|
|
||||||
// encoding formats. The primary use of this is to further decorate the
|
|
||||||
// container object with fields that may be specific to a client integration.
|
|
||||||
//
|
|
||||||
// The key portion of this map should identify a "name" for the extension
|
|
||||||
// that should be unique against other extensions. When updating extension
|
|
||||||
// data, one should only update the specified extension using field paths
|
|
||||||
// to select a specific map key.
|
|
||||||
map<string, google.protobuf.Any> extensions = 10 [(gogoproto.nullable) = false];
|
|
||||||
}
|
|
||||||
|
|
||||||
message GetContainerRequest {
|
|
||||||
string id = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message GetContainerResponse {
|
|
||||||
Container container = 1 [(gogoproto.nullable) = false];
|
|
||||||
}
|
|
||||||
|
|
||||||
message ListContainersRequest {
|
|
||||||
// Filters contains one or more filters using the syntax defined in the
|
|
||||||
// containerd filter package.
|
|
||||||
//
|
|
||||||
// The returned result will be those that match any of the provided
|
|
||||||
// filters. Expanded, containers that match the following will be
|
|
||||||
// returned:
|
|
||||||
//
|
|
||||||
// filters[0] or filters[1] or ... or filters[n-1] or filters[n]
|
|
||||||
//
|
|
||||||
// If filters is zero-length or nil, all items will be returned.
|
|
||||||
repeated string filters = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message ListContainersResponse {
|
|
||||||
repeated Container containers = 1 [(gogoproto.nullable) = false];
|
|
||||||
}
|
|
||||||
|
|
||||||
message CreateContainerRequest {
|
|
||||||
Container container = 1 [(gogoproto.nullable) = false];
|
|
||||||
}
|
|
||||||
|
|
||||||
message CreateContainerResponse {
|
|
||||||
Container container = 1 [(gogoproto.nullable) = false];
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateContainerRequest updates the metadata on one or more container.
|
|
||||||
//
|
|
||||||
// The operation should follow semantics described in
|
|
||||||
// https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-mask,
|
|
||||||
// unless otherwise qualified.
|
|
||||||
message UpdateContainerRequest {
|
|
||||||
// Container provides the target values, as declared by the mask, for the update.
|
|
||||||
//
|
|
||||||
// The ID field must be set.
|
|
||||||
Container container = 1 [(gogoproto.nullable) = false];
|
|
||||||
|
|
||||||
// UpdateMask specifies which fields to perform the update on. If empty,
|
|
||||||
// the operation applies to all fields.
|
|
||||||
google.protobuf.FieldMask update_mask = 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
message UpdateContainerResponse {
|
|
||||||
Container container = 1 [(gogoproto.nullable) = false];
|
|
||||||
}
|
|
||||||
|
|
||||||
message DeleteContainerRequest {
|
|
||||||
string id = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message ListContainerMessage {
|
|
||||||
Container container = 1;
|
|
||||||
}
|
|
File diff suppressed because it is too large
|
@@ -1,62 +0,0 @@
|
||||||
syntax = "proto3";
|
|
||||||
|
|
||||||
package containerd.services.diff.v1;
|
|
||||||
|
|
||||||
import weak "gogoproto/gogo.proto";
|
|
||||||
import "github.com/containerd/containerd/api/types/mount.proto";
|
|
||||||
import "github.com/containerd/containerd/api/types/descriptor.proto";
|
|
||||||
|
|
||||||
option go_package = "github.com/containerd/containerd/api/services/diff/v1;diff";
|
|
||||||
|
|
||||||
// Diff service creates and applies diffs
|
|
||||||
service Diff {
|
|
||||||
// Apply applies the content associated with the provided digests onto
|
|
||||||
// the provided mounts. Archive content will be extracted and
|
|
||||||
// decompressed if necessary.
|
|
||||||
rpc Apply(ApplyRequest) returns (ApplyResponse);
|
|
||||||
|
|
||||||
// Diff creates a diff between the given mounts and uploads the result
|
|
||||||
// to the content store.
|
|
||||||
rpc Diff(DiffRequest) returns (DiffResponse);
|
|
||||||
}
|
|
||||||
|
|
||||||
message ApplyRequest {
|
|
||||||
// Diff is the descriptor of the diff to be extracted
|
|
||||||
containerd.types.Descriptor diff = 1;
|
|
||||||
|
|
||||||
repeated containerd.types.Mount mounts = 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
message ApplyResponse {
|
|
||||||
// Applied is the descriptor for the object which was applied.
|
|
||||||
// If the input was a compressed blob then the result will be
|
|
||||||
// the descriptor for the uncompressed blob.
|
|
||||||
containerd.types.Descriptor applied = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message DiffRequest {
|
|
||||||
// Left are the mounts which represent the older copy
|
|
||||||
// in which is the base of the computed changes.
|
|
||||||
repeated containerd.types.Mount left = 1;
|
|
||||||
|
|
||||||
// Right are the mounts which represents the newer copy
|
|
||||||
// in which changes from the left were made into.
|
|
||||||
repeated containerd.types.Mount right = 2;
|
|
||||||
|
|
||||||
// MediaType is the media type descriptor for the created diff
|
|
||||||
// object
|
|
||||||
string media_type = 3;
|
|
||||||
|
|
||||||
// Ref identifies the pre-commit content store object. This
|
|
||||||
// reference can be used to get the status from the content store.
|
|
||||||
string ref = 4;
|
|
||||||
|
|
||||||
// Labels are the labels to apply to the generated content
|
|
||||||
// on content store commit.
|
|
||||||
map<string, string> labels = 5;
|
|
||||||
}
|
|
||||||
|
|
||||||
message DiffResponse {
|
|
||||||
// Diff is the descriptor of the diff which can be applied
|
|
||||||
containerd.types.Descriptor diff = 3;
|
|
||||||
}
|
|
|
@@ -1,18 +0,0 @@
|
||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
// Package events defines the event pushing and subscription service.
|
|
||||||
package events
|
|
File diff suppressed because it is too large
@@ -1,56 +0,0 @@
syntax = "proto3";

package containerd.services.events.v1;

import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
import weak "gogoproto/gogo.proto";
import "google/protobuf/any.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/timestamp.proto";

option go_package = "github.com/containerd/containerd/api/services/events/v1;events";

service Events {
	// Publish an event to a topic.
	//
	// The event will be packed into a timestamp envelope with the namespace
	// introspected from the context. The envelope will then be dispatched.
	rpc Publish(PublishRequest) returns (google.protobuf.Empty);

	// Forward sends an event that has already been packaged into an envelope
	// with a timestamp and namespace.
	//
	// This is useful if earlier timestamping is required or when forwarding on
	// behalf of another component, namespace or publisher.
	rpc Forward(ForwardRequest) returns (google.protobuf.Empty);

	// Subscribe to a stream of events, possibly returning only that match any
	// of the provided filters.
	//
	// Unlike many other methods in containerd, subscribers will get messages
	// from all namespaces unless otherwise specified. If this is not desired,
	// a filter can be provided in the format 'namespace==<namespace>' to
	// restrict the received events.
	rpc Subscribe(SubscribeRequest) returns (stream Envelope);
}

message PublishRequest {
	string topic = 1;
	google.protobuf.Any event = 2;
}

message ForwardRequest {
	Envelope envelope = 1;
}

message SubscribeRequest {
	repeated string filters = 1;
}

message Envelope {
	option (containerd.plugin.fieldpath) = true;
	google.protobuf.Timestamp timestamp = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
	string namespace = 2;
	string topic = 3;
	google.protobuf.Any event = 4;
}
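A minimal sketch of consuming the Subscribe stream above with the containerd Go client, assuming its EventService API of this era; the namespace filter uses the 'namespace==<namespace>' syntax described in the service comment, and the socket path is a placeholder.

package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Subscribe streams Envelope messages; without a filter, events from all
	// namespaces are delivered, so restrict to a single namespace here.
	ch, errs := client.EventService().Subscribe(ctx, "namespace==default")
	for {
		select {
		case env := <-ch:
			log.Printf("%s %s %s", env.Timestamp, env.Namespace, env.Topic)
		case err := <-errs:
			if err != nil {
				log.Fatal(err)
			}
			return
		}
	}
}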
@@ -1,17 +0,0 @@
|
||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package images
|
|
File diff suppressed because it is too large
|
@@ -1,124 +0,0 @@
|
||||||
syntax = "proto3";
|
|
||||||
|
|
||||||
package containerd.services.images.v1;
|
|
||||||
|
|
||||||
import weak "gogoproto/gogo.proto";
|
|
||||||
import "google/protobuf/empty.proto";
|
|
||||||
import "google/protobuf/field_mask.proto";
|
|
||||||
import "google/protobuf/timestamp.proto";
|
|
||||||
import "github.com/containerd/containerd/api/types/descriptor.proto";
|
|
||||||
|
|
||||||
option go_package = "github.com/containerd/containerd/api/services/images/v1;images";
|
|
||||||
|
|
||||||
// Images is a service that allows one to register images with containerd.
|
|
||||||
//
|
|
||||||
// In containerd, an image is merely the mapping of a name to a content root,
|
|
||||||
// described by a descriptor. The behavior and state of image is purely
|
|
||||||
// dictated by the type of the descriptor.
|
|
||||||
//
|
|
||||||
// From the perspective of this service, these references are mostly shallow,
|
|
||||||
// in that the existence of the required content won't be validated until
|
|
||||||
// required by consuming services.
|
|
||||||
//
|
|
||||||
// As such, this can really be considered a "metadata service".
|
|
||||||
service Images {
|
|
||||||
// Get returns an image by name.
|
|
||||||
rpc Get(GetImageRequest) returns (GetImageResponse);
|
|
||||||
|
|
||||||
// List returns a list of all images known to containerd.
|
|
||||||
rpc List(ListImagesRequest) returns (ListImagesResponse);
|
|
||||||
|
|
||||||
// Create an image record in the metadata store.
|
|
||||||
//
|
|
||||||
// The name of the image must be unique.
|
|
||||||
rpc Create(CreateImageRequest) returns (CreateImageResponse);
|
|
||||||
|
|
||||||
// Update assigns the name to a given target image based on the provided
|
|
||||||
// image.
|
|
||||||
rpc Update(UpdateImageRequest) returns (UpdateImageResponse);
|
|
||||||
|
|
||||||
// Delete deletes the image by name.
|
|
||||||
rpc Delete(DeleteImageRequest) returns (google.protobuf.Empty);
|
|
||||||
}
|
|
||||||
|
|
||||||
message Image {
|
|
||||||
// Name provides a unique name for the image.
|
|
||||||
//
|
|
||||||
// Containerd treats this as the primary identifier.
|
|
||||||
string name = 1;
|
|
||||||
|
|
||||||
// Labels provides free form labels for the image. These are runtime only
|
|
||||||
// and do not get inherited into the package image in any way.
|
|
||||||
//
|
|
||||||
// Labels may be updated using the field mask.
|
|
||||||
// The combined size of a key/value pair cannot exceed 4096 bytes.
|
|
||||||
map<string, string> labels = 2;
|
|
||||||
|
|
||||||
// Target describes the content entry point of the image.
|
|
||||||
containerd.types.Descriptor target = 3 [(gogoproto.nullable) = false];
|
|
||||||
|
|
||||||
// CreatedAt is the time the image was first created.
|
|
||||||
google.protobuf.Timestamp created_at = 7 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
|
|
||||||
|
|
||||||
// UpdatedAt is the last time the image was mutated.
|
|
||||||
google.protobuf.Timestamp updated_at = 8 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
|
|
||||||
}
|
|
||||||
|
|
||||||
message GetImageRequest {
|
|
||||||
string name = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message GetImageResponse {
|
|
||||||
Image image = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message CreateImageRequest {
|
|
||||||
Image image = 1 [(gogoproto.nullable) = false];
|
|
||||||
}
|
|
||||||
|
|
||||||
message CreateImageResponse {
|
|
||||||
Image image = 1 [(gogoproto.nullable) = false];
|
|
||||||
}
|
|
||||||
|
|
||||||
message UpdateImageRequest {
|
|
||||||
// Image provides a full or partial image for update.
|
|
||||||
//
|
|
||||||
// The name field must be set or an error will be returned.
|
|
||||||
Image image = 1 [(gogoproto.nullable) = false];
|
|
||||||
|
|
||||||
// UpdateMask specifies which fields to perform the update on. If empty,
|
|
||||||
// the operation applies to all fields.
|
|
||||||
google.protobuf.FieldMask update_mask = 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
message UpdateImageResponse {
|
|
||||||
Image image = 1 [(gogoproto.nullable) = false];
|
|
||||||
}
|
|
||||||
|
|
||||||
message ListImagesRequest {
|
|
||||||
// Filters contains one or more filters using the syntax defined in the
|
|
||||||
// containerd filter package.
|
|
||||||
//
|
|
||||||
// The returned result will be those that match any of the provided
|
|
||||||
// filters. Expanded, images that match the following will be
|
|
||||||
// returned:
|
|
||||||
//
|
|
||||||
// filters[0] or filters[1] or ... or filters[n-1] or filters[n]
|
|
||||||
//
|
|
||||||
// If filters is zero-length or nil, all items will be returned.
|
|
||||||
repeated string filters = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message ListImagesResponse {
|
|
||||||
repeated Image images = 1 [(gogoproto.nullable) = false];
|
|
||||||
}
|
|
||||||
|
|
||||||
message DeleteImageRequest {
|
|
||||||
string name = 1;
|
|
||||||
|
|
||||||
// Sync indicates that the delete and cleanup should be done
|
|
||||||
// synchronously before returning to the caller
|
|
||||||
//
|
|
||||||
// Default is false
|
|
||||||
bool sync = 2;
|
|
||||||
}
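The image metadata store above is exposed on the containerd Go client as ImageService. A minimal sketch under that assumption; socket path and namespace are placeholders.

package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	ctx := namespaces.WithNamespace(context.Background(), "default")

	// List maps to Images.List; each record is just a name mapped to a target descriptor.
	imgs, err := client.ImageService().List(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, img := range imgs {
		log.Printf("%s -> %s", img.Name, img.Target.Digest)
	}
}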
|
|
|
@@ -1,17 +0,0 @@
|
||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package introspection
|
|
1318  vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go (generated, vendored)
File diff suppressed because it is too large
|
@@ -1,81 +0,0 @@
|
||||||
syntax = "proto3";
|
|
||||||
|
|
||||||
package containerd.services.introspection.v1;
|
|
||||||
|
|
||||||
import "github.com/containerd/containerd/api/types/platform.proto";
|
|
||||||
import "google/rpc/status.proto";
|
|
||||||
import weak "gogoproto/gogo.proto";
|
|
||||||
|
|
||||||
option go_package = "github.com/containerd/containerd/api/services/introspection/v1;introspection";
|
|
||||||
|
|
||||||
service Introspection {
|
|
||||||
// Plugins returns a list of plugins in containerd.
|
|
||||||
//
|
|
||||||
// Clients can use this to detect features and capabilities when using
|
|
||||||
// containerd.
|
|
||||||
rpc Plugins(PluginsRequest) returns (PluginsResponse);
|
|
||||||
}
|
|
||||||
|
|
||||||
message Plugin {
|
|
||||||
// Type defines the type of plugin.
|
|
||||||
//
|
|
||||||
// See package plugin for a list of possible values. Non core plugins may
|
|
||||||
// define their own values during registration.
|
|
||||||
string type = 1;
|
|
||||||
|
|
||||||
// ID identifies the plugin uniquely in the system.
|
|
||||||
string id = 2;
|
|
||||||
|
|
||||||
// Requires lists the plugin types required by this plugin.
|
|
||||||
repeated string requires = 3;
|
|
||||||
|
|
||||||
// Platforms enumerates the platforms this plugin will support.
|
|
||||||
//
|
|
||||||
// If values are provided here, the plugin will only be operable under the
|
|
||||||
// provided platforms.
|
|
||||||
//
|
|
||||||
// If this is empty, the plugin will work across all platforms.
|
|
||||||
//
|
|
||||||
// If the plugin prefers certain platforms over others, they should be
|
|
||||||
// listed from most to least preferred.
|
|
||||||
repeated types.Platform platforms = 4 [(gogoproto.nullable) = false];
|
|
||||||
|
|
||||||
// Exports allows plugins to provide values about state or configuration to
|
|
||||||
// interested parties.
|
|
||||||
//
|
|
||||||
// One example is exposing the configured path of a snapshotter plugin.
|
|
||||||
map<string, string> exports = 5;
|
|
||||||
|
|
||||||
// Capabilities allows plugins to communicate feature switches to allow
|
|
||||||
// clients to detect features that may not be on be default or may be
|
|
||||||
// different from version to version.
|
|
||||||
//
|
|
||||||
// Use this sparingly.
|
|
||||||
repeated string capabilities = 6;
|
|
||||||
|
|
||||||
// InitErr will be set if the plugin fails initialization.
|
|
||||||
//
|
|
||||||
// This means the plugin may have been registered but a non-terminal error
|
|
||||||
// was encountered during initialization.
|
|
||||||
//
|
|
||||||
// Plugins that have this value set cannot be used.
|
|
||||||
google.rpc.Status init_err = 7;
|
|
||||||
}
|
|
||||||
|
|
||||||
message PluginsRequest {
|
|
||||||
// Filters contains one or more filters using the syntax defined in the
|
|
||||||
// containerd filter package.
|
|
||||||
//
|
|
||||||
// The returned result will be those that match any of the provided
|
|
||||||
// filters. Expanded, plugins that match the following will be
|
|
||||||
// returned:
|
|
||||||
//
|
|
||||||
// filters[0] or filters[1] or ... or filters[n-1] or filters[n]
|
|
||||||
//
|
|
||||||
// If filters is zero-length or nil, all items will be returned.
|
|
||||||
repeated string filters = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message PluginsResponse {
|
|
||||||
repeated Plugin plugins = 1 [(gogoproto.nullable) = false];
|
|
||||||
}
|
|
|
@@ -1,17 +0,0 @@
|
||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package leases
|
|
File diff suppressed because it is too large
|
@@ -1,64 +0,0 @@
syntax = "proto3";

package containerd.services.leases.v1;

import weak "gogoproto/gogo.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/timestamp.proto";

option go_package = "github.com/containerd/containerd/api/services/leases/v1;leases";

// Leases service manages resources leases within the metadata store.
service Leases {
	// Create creates a new lease for managing changes to metadata. A lease
	// can be used to protect objects from being removed.
	rpc Create(CreateRequest) returns (CreateResponse);

	// Delete deletes the lease and makes any unreferenced objects created
	// during the lease eligible for garbage collection if not referenced
	// or retained by other resources during the lease.
	rpc Delete(DeleteRequest) returns (google.protobuf.Empty);

	// List lists all active leases, returning the full list of
	// leases and optionally including the referenced resources.
	rpc List(ListRequest) returns (ListResponse);
}

// Lease is an object which retains resources while it exists.
message Lease {
	string id = 1;

	google.protobuf.Timestamp created_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];

	map<string, string> labels = 3;
}

message CreateRequest {
	// ID is used to identity the lease, when the id is not set the service
	// generates a random identifier for the lease.
	string id = 1;

	map<string, string> labels = 3;
}

message CreateResponse {
	Lease lease = 1;
}

message DeleteRequest {
	string id = 1;

	// Sync indicates that the delete and cleanup should be done
	// synchronously before returning to the caller
	//
	// Default is false
	bool sync = 2;
}

message ListRequest {
	repeated string filters = 1;
}

message ListResponse {
	repeated Lease leases = 1;
}
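A minimal sketch of the lease lifecycle above via the containerd Go client, assuming its LeasesService and the option helpers in the leases package; identifiers and labels are placeholders.

package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/leases"
	"github.com/containerd/containerd/namespaces"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	ctx := namespaces.WithNamespace(context.Background(), "default")
	manager := client.LeasesService()

	// Create maps to Leases.Create; when no ID is given, a random one is generated.
	lease, err := manager.Create(ctx, leases.WithRandomID(), leases.WithLabels(map[string]string{"purpose": "example"}))
	if err != nil {
		log.Fatal(err)
	}

	// ... perform work whose resources should stay pinned by the lease ...

	// Delete releases the lease, making unreferenced resources eligible for GC again.
	if err := manager.Delete(ctx, lease); err != nil {
		log.Fatal(err)
	}
}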
2471  vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.pb.go (generated, vendored)
File diff suppressed because it is too large
92  vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto (generated, vendored)
|
@@ -1,92 +0,0 @@
|
||||||
syntax = "proto3";
|
|
||||||
|
|
||||||
package containerd.services.namespaces.v1;
|
|
||||||
|
|
||||||
import weak "gogoproto/gogo.proto";
|
|
||||||
import "google/protobuf/empty.proto";
|
|
||||||
import "google/protobuf/field_mask.proto";
|
|
||||||
|
|
||||||
option go_package = "github.com/containerd/containerd/api/services/namespaces/v1;namespaces";
|
|
||||||
|
|
||||||
// Namespaces provides the ability to manipulate containerd namespaces.
|
|
||||||
//
|
|
||||||
// All objects in the system are required to be a member of a namespace. If a
|
|
||||||
// namespace is deleted, all objects, including containers, images and
|
|
||||||
// snapshots, will be deleted, as well.
|
|
||||||
//
|
|
||||||
// Unless otherwise noted, operations in containerd apply only to the namespace
|
|
||||||
// supplied per request.
|
|
||||||
//
|
|
||||||
// I hope this goes without saying, but namespaces are themselves NOT
|
|
||||||
// namespaced.
|
|
||||||
service Namespaces {
|
|
||||||
rpc Get(GetNamespaceRequest) returns (GetNamespaceResponse);
|
|
||||||
rpc List(ListNamespacesRequest) returns (ListNamespacesResponse);
|
|
||||||
rpc Create(CreateNamespaceRequest) returns (CreateNamespaceResponse);
|
|
||||||
rpc Update(UpdateNamespaceRequest) returns (UpdateNamespaceResponse);
|
|
||||||
rpc Delete(DeleteNamespaceRequest) returns (google.protobuf.Empty);
|
|
||||||
}
|
|
||||||
|
|
||||||
message Namespace {
|
|
||||||
string name = 1;
|
|
||||||
|
|
||||||
// Labels provides an area to include arbitrary data on namespaces.
|
|
||||||
//
|
|
||||||
// The combined size of a key/value pair cannot exceed 4096 bytes.
|
|
||||||
//
|
|
||||||
// Note that to add a new value to this field, read the existing set and
|
|
||||||
// include the entire result in the update call.
|
|
||||||
map<string, string> labels = 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
message GetNamespaceRequest {
|
|
||||||
string name = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message GetNamespaceResponse {
|
|
||||||
Namespace namespace = 1 [(gogoproto.nullable) = false];
|
|
||||||
}
|
|
||||||
|
|
||||||
message ListNamespacesRequest {
|
|
||||||
string filter = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message ListNamespacesResponse {
|
|
||||||
repeated Namespace namespaces = 1 [(gogoproto.nullable) = false];
|
|
||||||
}
|
|
||||||
|
|
||||||
message CreateNamespaceRequest {
|
|
||||||
Namespace namespace = 1 [(gogoproto.nullable) = false];
|
|
||||||
}
|
|
||||||
|
|
||||||
message CreateNamespaceResponse {
|
|
||||||
Namespace namespace = 1 [(gogoproto.nullable) = false];
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateNamespaceRequest updates the metadata for a namespace.
|
|
||||||
//
|
|
||||||
// The operation should follow semantics described in
|
|
||||||
// https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-mask,
|
|
||||||
// unless otherwise qualified.
|
|
||||||
message UpdateNamespaceRequest {
|
|
||||||
// Namespace provides the target value, as declared by the mask, for the update.
|
|
||||||
//
|
|
||||||
// The namespace field must be set.
|
|
||||||
Namespace namespace = 1 [(gogoproto.nullable) = false];
|
|
||||||
|
|
||||||
// UpdateMask specifies which fields to perform the update on. If empty,
|
|
||||||
// the operation applies to all fields.
|
|
||||||
//
|
|
||||||
// For the most part, this applies only to selectively updating labels on
|
|
||||||
// the namespace. While field masks are typically limited to ascii alphas
|
|
||||||
// and digits, we just take everything after the "labels." as the map key.
|
|
||||||
google.protobuf.FieldMask update_mask = 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
message UpdateNamespaceResponse {
|
|
||||||
Namespace namespace = 1 [(gogoproto.nullable) = false];
|
|
||||||
}
|
|
||||||
|
|
||||||
message DeleteNamespaceRequest {
|
|
||||||
string name = 1;
|
|
||||||
}
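A minimal sketch of driving the Namespaces RPCs above through the containerd Go client's NamespaceService, assuming that API; the namespace name and label are placeholders. As the comment above notes, these calls are not themselves namespaced, so no namespace is set on the context.

package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	ctx := context.Background()

	// NamespaceService wraps the Namespaces RPCs; Create and List work with
	// plain names plus labels.
	store := client.NamespaceService()
	if err := store.Create(ctx, "example", map[string]string{"owner": "demo"}); err != nil {
		log.Fatal(err)
	}
	names, err := store.List(ctx)
	if err != nil {
		log.Fatal(err)
	}
	log.Println(names)
}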
|
|
5139  vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.pb.go (generated, vendored)
File diff suppressed because it is too large
150  vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto (generated, vendored)
|
@@ -1,150 +0,0 @@
|
||||||
syntax = "proto3";
|
|
||||||
|
|
||||||
package containerd.services.snapshots.v1;
|
|
||||||
|
|
||||||
import weak "gogoproto/gogo.proto";
|
|
||||||
import "google/protobuf/empty.proto";
|
|
||||||
import "google/protobuf/field_mask.proto";
|
|
||||||
import "google/protobuf/timestamp.proto";
|
|
||||||
import "github.com/containerd/containerd/api/types/mount.proto";
|
|
||||||
|
|
||||||
option go_package = "github.com/containerd/containerd/api/services/snapshots/v1;snapshots";
|
|
||||||
|
|
||||||
// Snapshot service manages snapshots
|
|
||||||
service Snapshots {
|
|
||||||
rpc Prepare(PrepareSnapshotRequest) returns (PrepareSnapshotResponse);
|
|
||||||
rpc View(ViewSnapshotRequest) returns (ViewSnapshotResponse);
|
|
||||||
rpc Mounts(MountsRequest) returns (MountsResponse);
|
|
||||||
rpc Commit(CommitSnapshotRequest) returns (google.protobuf.Empty);
|
|
||||||
rpc Remove(RemoveSnapshotRequest) returns (google.protobuf.Empty);
|
|
||||||
rpc Stat(StatSnapshotRequest) returns (StatSnapshotResponse);
|
|
||||||
rpc Update(UpdateSnapshotRequest) returns (UpdateSnapshotResponse);
|
|
||||||
rpc List(ListSnapshotsRequest) returns (stream ListSnapshotsResponse);
|
|
||||||
rpc Usage(UsageRequest) returns (UsageResponse);
|
|
||||||
}
|
|
||||||
|
|
||||||
message PrepareSnapshotRequest {
|
|
||||||
string snapshotter = 1;
|
|
||||||
string key = 2;
|
|
||||||
string parent = 3;
|
|
||||||
|
|
||||||
// Labels are arbitrary data on snapshots.
|
|
||||||
//
|
|
||||||
// The combined size of a key/value pair cannot exceed 4096 bytes.
|
|
||||||
map<string, string> labels = 4;
|
|
||||||
}
|
|
||||||
|
|
||||||
message PrepareSnapshotResponse {
|
|
||||||
repeated containerd.types.Mount mounts = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message ViewSnapshotRequest {
|
|
||||||
string snapshotter = 1;
|
|
||||||
string key = 2;
|
|
||||||
string parent = 3;
|
|
||||||
|
|
||||||
// Labels are arbitrary data on snapshots.
|
|
||||||
//
|
|
||||||
// The combined size of a key/value pair cannot exceed 4096 bytes.
|
|
||||||
map<string, string> labels = 4;
|
|
||||||
}
|
|
||||||
|
|
||||||
message ViewSnapshotResponse {
|
|
||||||
repeated containerd.types.Mount mounts = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message MountsRequest {
|
|
||||||
string snapshotter = 1;
|
|
||||||
string key = 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
message MountsResponse {
|
|
||||||
repeated containerd.types.Mount mounts = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message RemoveSnapshotRequest {
|
|
||||||
string snapshotter = 1;
|
|
||||||
string key = 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
message CommitSnapshotRequest {
|
|
||||||
string snapshotter = 1;
|
|
||||||
string name = 2;
|
|
||||||
string key = 3;
|
|
||||||
|
|
||||||
// Labels are arbitrary data on snapshots.
|
|
||||||
//
|
|
||||||
// The combined size of a key/value pair cannot exceed 4096 bytes.
|
|
||||||
map<string, string> labels = 4;
|
|
||||||
}
|
|
||||||
|
|
||||||
message StatSnapshotRequest {
|
|
||||||
string snapshotter = 1;
|
|
||||||
string key = 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
enum Kind {
|
|
||||||
option (gogoproto.goproto_enum_prefix) = false;
|
|
||||||
option (gogoproto.enum_customname) = "Kind";
|
|
||||||
|
|
||||||
UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "KindUnknown"];
|
|
||||||
VIEW = 1 [(gogoproto.enumvalue_customname) = "KindView"];
|
|
||||||
ACTIVE = 2 [(gogoproto.enumvalue_customname) = "KindActive"];
|
|
||||||
COMMITTED = 3 [(gogoproto.enumvalue_customname) = "KindCommitted"];
|
|
||||||
}
|
|
||||||
|
|
||||||
message Info {
|
|
||||||
string name = 1;
|
|
||||||
string parent = 2;
|
|
||||||
Kind kind = 3;
|
|
||||||
|
|
||||||
// CreatedAt provides the time at which the snapshot was created.
|
|
||||||
google.protobuf.Timestamp created_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
|
|
||||||
|
|
||||||
// UpdatedAt provides the time the info was last updated.
|
|
||||||
google.protobuf.Timestamp updated_at = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
|
|
||||||
|
|
||||||
// Labels are arbitrary data on snapshots.
|
|
||||||
//
|
|
||||||
// The combined size of a key/value pair cannot exceed 4096 bytes.
|
|
||||||
map<string, string> labels = 6;
|
|
||||||
}
|
|
||||||
|
|
||||||
message StatSnapshotResponse {
|
|
||||||
Info info = 1 [(gogoproto.nullable) = false];
|
|
||||||
}
|
|
||||||
|
|
||||||
message UpdateSnapshotRequest {
|
|
||||||
string snapshotter = 1;
|
|
||||||
Info info = 2 [(gogoproto.nullable) = false];
|
|
||||||
|
|
||||||
// UpdateMask specifies which fields to perform the update on. If empty,
|
|
||||||
// the operation applies to all fields.
|
|
||||||
//
|
|
||||||
// In info, Name, Parent, Kind, Created are immutable,
|
|
||||||
// other field may be updated using this mask.
|
|
||||||
// If no mask is provided, all mutable field are updated.
|
|
||||||
google.protobuf.FieldMask update_mask = 3;
|
|
||||||
}
|
|
||||||
|
|
||||||
message UpdateSnapshotResponse {
|
|
||||||
Info info = 1 [(gogoproto.nullable) = false];
|
|
||||||
}
|
|
||||||
|
|
||||||
message ListSnapshotsRequest{
|
|
||||||
string snapshotter = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message ListSnapshotsResponse {
|
|
||||||
repeated Info info = 1 [(gogoproto.nullable) = false];
|
|
||||||
}
|
|
||||||
|
|
||||||
message UsageRequest {
|
|
||||||
string snapshotter = 1;
|
|
||||||
string key = 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
message UsageResponse {
|
|
||||||
int64 size = 1;
|
|
||||||
int64 inodes = 2;
|
|
||||||
}
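A minimal sketch of the Prepare/Commit flow above using the containerd Go client's SnapshotService, assuming an overlayfs snapshotter is registered; the snapshot keys are placeholders.

package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	ctx := namespaces.WithNamespace(context.Background(), "default")

	// SnapshotService("overlayfs") proxies the Snapshots RPCs for one snapshotter.
	sn := client.SnapshotService("overlayfs")

	// Prepare an active snapshot on top of an existing committed parent,
	// then commit it under a new name.
	mounts, err := sn.Prepare(ctx, "example-active", "some-committed-parent")
	if err != nil {
		log.Fatal(err)
	}
	_ = mounts // mount and modify the filesystem here

	if err := sn.Commit(ctx, "example-committed", "example-active"); err != nil {
		log.Fatal(err)
	}
}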
|
|
File diff suppressed because it is too large
|
@@ -1,209 +0,0 @@
|
||||||
syntax = "proto3";
|
|
||||||
|
|
||||||
package containerd.services.tasks.v1;
|
|
||||||
|
|
||||||
import "google/protobuf/empty.proto";
|
|
||||||
import "google/protobuf/any.proto";
|
|
||||||
import weak "gogoproto/gogo.proto";
|
|
||||||
import "github.com/containerd/containerd/api/types/mount.proto";
|
|
||||||
import "github.com/containerd/containerd/api/types/metrics.proto";
|
|
||||||
import "github.com/containerd/containerd/api/types/descriptor.proto";
|
|
||||||
import "github.com/containerd/containerd/api/types/task/task.proto";
|
|
||||||
import "google/protobuf/timestamp.proto";
|
|
||||||
|
|
||||||
option go_package = "github.com/containerd/containerd/api/services/tasks/v1;tasks";
|
|
||||||
|
|
||||||
service Tasks {
|
|
||||||
// Create a task.
|
|
||||||
rpc Create(CreateTaskRequest) returns (CreateTaskResponse);
|
|
||||||
|
|
||||||
// Start a process.
|
|
||||||
rpc Start(StartRequest) returns (StartResponse);
|
|
||||||
|
|
||||||
// Delete a task and on disk state.
|
|
||||||
rpc Delete(DeleteTaskRequest) returns (DeleteResponse);
|
|
||||||
|
|
||||||
rpc DeleteProcess(DeleteProcessRequest) returns (DeleteResponse);
|
|
||||||
|
|
||||||
rpc Get(GetRequest) returns (GetResponse);
|
|
||||||
|
|
||||||
rpc List(ListTasksRequest) returns (ListTasksResponse);
|
|
||||||
|
|
||||||
// Kill a task or process.
|
|
||||||
rpc Kill(KillRequest) returns (google.protobuf.Empty);
|
|
||||||
|
|
||||||
rpc Exec(ExecProcessRequest) returns (google.protobuf.Empty);
|
|
||||||
|
|
||||||
rpc ResizePty(ResizePtyRequest) returns (google.protobuf.Empty);
|
|
||||||
|
|
||||||
rpc CloseIO(CloseIORequest) returns (google.protobuf.Empty);
|
|
||||||
|
|
||||||
rpc Pause(PauseTaskRequest) returns (google.protobuf.Empty);
|
|
||||||
|
|
||||||
rpc Resume(ResumeTaskRequest) returns (google.protobuf.Empty);
|
|
||||||
|
|
||||||
rpc ListPids(ListPidsRequest) returns (ListPidsResponse);
|
|
||||||
|
|
||||||
rpc Checkpoint(CheckpointTaskRequest) returns (CheckpointTaskResponse);
|
|
||||||
|
|
||||||
rpc Update(UpdateTaskRequest) returns (google.protobuf.Empty);
|
|
||||||
|
|
||||||
rpc Metrics(MetricsRequest) returns (MetricsResponse);
|
|
||||||
|
|
||||||
rpc Wait(WaitRequest) returns (WaitResponse);
|
|
||||||
}
|
|
||||||
|
|
||||||
message CreateTaskRequest {
|
|
||||||
string container_id = 1;
|
|
||||||
|
|
||||||
// RootFS provides the pre-chroot mounts to perform in the shim before
|
|
||||||
// executing the container task.
|
|
||||||
//
|
|
||||||
// These are for mounts that cannot be performed in the user namespace.
|
|
||||||
// Typically, these mounts should be resolved from snapshots specified on
|
|
||||||
// the container object.
|
|
||||||
repeated containerd.types.Mount rootfs = 3;
|
|
||||||
|
|
||||||
string stdin = 4;
|
|
||||||
string stdout = 5;
|
|
||||||
string stderr = 6;
|
|
||||||
bool terminal = 7;
|
|
||||||
|
|
||||||
containerd.types.Descriptor checkpoint = 8;
|
|
||||||
|
|
||||||
google.protobuf.Any options = 9;
|
|
||||||
}
|
|
||||||
|
|
||||||
message CreateTaskResponse {
|
|
||||||
string container_id = 1;
|
|
||||||
uint32 pid = 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
message StartRequest {
|
|
||||||
string container_id = 1;
|
|
||||||
string exec_id = 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
message StartResponse {
|
|
||||||
uint32 pid = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message DeleteTaskRequest {
|
|
||||||
string container_id = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message DeleteResponse {
|
|
||||||
string id = 1;
|
|
||||||
uint32 pid = 2;
|
|
||||||
uint32 exit_status = 3;
|
|
||||||
google.protobuf.Timestamp exited_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
|
|
||||||
}
|
|
||||||
|
|
||||||
message DeleteProcessRequest {
|
|
||||||
string container_id = 1;
|
|
||||||
string exec_id = 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
message GetRequest {
|
|
||||||
string container_id = 1;
|
|
||||||
string exec_id = 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
message GetResponse {
|
|
||||||
containerd.v1.types.Process process = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message ListTasksRequest {
|
|
||||||
string filter = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message ListTasksResponse {
|
|
||||||
repeated containerd.v1.types.Process tasks = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message KillRequest {
|
|
||||||
string container_id = 1;
|
|
||||||
string exec_id = 2;
|
|
||||||
uint32 signal = 3;
|
|
||||||
bool all = 4;
|
|
||||||
}
|
|
||||||
|
|
||||||
message ExecProcessRequest {
|
|
||||||
string container_id = 1;
|
|
||||||
string stdin = 2;
|
|
||||||
string stdout = 3;
|
|
||||||
string stderr = 4;
|
|
||||||
bool terminal = 5;
|
|
||||||
// Spec for starting a process in the target container.
|
|
||||||
//
|
|
||||||
// For runc, this is a process spec, for example.
|
|
||||||
google.protobuf.Any spec = 6;
|
|
||||||
// id of the exec process
|
|
||||||
string exec_id = 7;
|
|
||||||
}
|
|
||||||
|
|
||||||
message ExecProcessResponse {
|
|
||||||
}
|
|
||||||
|
|
||||||
message ResizePtyRequest {
|
|
||||||
string container_id = 1;
|
|
||||||
string exec_id = 2;
|
|
||||||
uint32 width = 3;
|
|
||||||
uint32 height = 4;
|
|
||||||
}
|
|
||||||
|
|
||||||
message CloseIORequest {
|
|
||||||
string container_id = 1;
|
|
||||||
string exec_id = 2;
|
|
||||||
bool stdin = 3;
|
|
||||||
}
|
|
||||||
|
|
||||||
message PauseTaskRequest {
|
|
||||||
string container_id = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message ResumeTaskRequest {
|
|
||||||
string container_id = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message ListPidsRequest {
|
|
||||||
string container_id = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message ListPidsResponse {
|
|
||||||
// Processes includes the process ID and additional process information
|
|
||||||
repeated containerd.v1.types.ProcessInfo processes = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message CheckpointTaskRequest {
|
|
||||||
string container_id = 1;
|
|
||||||
string parent_checkpoint = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
|
|
||||||
google.protobuf.Any options = 3;
|
|
||||||
}
|
|
||||||
|
|
||||||
message CheckpointTaskResponse {
|
|
||||||
repeated containerd.types.Descriptor descriptors = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message UpdateTaskRequest {
|
|
||||||
string container_id = 1;
|
|
||||||
google.protobuf.Any resources = 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
message MetricsRequest {
|
|
||||||
repeated string filters = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message MetricsResponse {
|
|
||||||
repeated types.Metric metrics = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message WaitRequest {
|
|
||||||
string container_id = 1;
|
|
||||||
string exec_id = 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
message WaitResponse {
|
|
||||||
uint32 exit_status = 1;
|
|
||||||
google.protobuf.Timestamp exited_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
|
|
||||||
}
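A minimal sketch of the task lifecycle above (create, start, kill, wait, delete) via the containerd Go client, assuming a container named "example" already exists in the namespace; socket path and names are placeholders.

package main

import (
	"context"
	"log"
	"syscall"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/cio"
	"github.com/containerd/containerd/namespaces"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	ctx := namespaces.WithNamespace(context.Background(), "default")

	// Assume a container named "example" already exists in this namespace.
	container, err := client.LoadContainer(ctx, "example")
	if err != nil {
		log.Fatal(err)
	}

	// NewTask/Start/Kill/Wait/Delete line up with the Tasks RPCs above.
	task, err := container.NewTask(ctx, cio.NewCreator(cio.WithStdio))
	if err != nil {
		log.Fatal(err)
	}
	exitCh, err := task.Wait(ctx)
	if err != nil {
		log.Fatal(err)
	}
	if err := task.Start(ctx); err != nil {
		log.Fatal(err)
	}
	if err := task.Kill(ctx, syscall.SIGTERM); err != nil {
		log.Fatal(err)
	}
	status := <-exitCh
	log.Printf("exit status: %d", status.ExitCode())
	if _, err := task.Delete(ctx); err != nil {
		log.Fatal(err)
	}
}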
|
|
491  vendor/github.com/containerd/containerd/api/services/version/v1/version.pb.go (generated, vendored)
|
@@ -1,491 +0,0 @@
|
||||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
|
||||||
// source: github.com/containerd/containerd/api/services/version/v1/version.proto
|
|
||||||
|
|
||||||
package version
|
|
||||||
|
|
||||||
import (
|
|
||||||
context "context"
|
|
||||||
fmt "fmt"
|
|
||||||
proto "github.com/gogo/protobuf/proto"
|
|
||||||
types "github.com/gogo/protobuf/types"
|
|
||||||
grpc "google.golang.org/grpc"
|
|
||||||
io "io"
|
|
||||||
math "math"
|
|
||||||
reflect "reflect"
|
|
||||||
strings "strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
|
||||||
var _ = proto.Marshal
|
|
||||||
var _ = fmt.Errorf
|
|
||||||
var _ = math.Inf
|
|
||||||
|
|
||||||
// This is a compile-time assertion to ensure that this generated file
|
|
||||||
// is compatible with the proto package it is being compiled against.
|
|
||||||
// A compilation error at this line likely means your copy of the
|
|
||||||
// proto package needs to be updated.
|
|
||||||
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
|
|
||||||
|
|
||||||
type VersionResponse struct {
|
|
||||||
Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
|
|
||||||
Revision string `protobuf:"bytes,2,opt,name=revision,proto3" json:"revision,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *VersionResponse) Reset() { *m = VersionResponse{} }
|
|
||||||
func (*VersionResponse) ProtoMessage() {}
|
|
||||||
func (*VersionResponse) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_128109001e578ffe, []int{0}
|
|
||||||
}
|
|
||||||
func (m *VersionResponse) XXX_Unmarshal(b []byte) error {
|
|
||||||
return m.Unmarshal(b)
|
|
||||||
}
|
|
||||||
func (m *VersionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
if deterministic {
|
|
||||||
return xxx_messageInfo_VersionResponse.Marshal(b, m, deterministic)
|
|
||||||
} else {
|
|
||||||
b = b[:cap(b)]
|
|
||||||
n, err := m.MarshalTo(b)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return b[:n], nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
func (m *VersionResponse) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_VersionResponse.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *VersionResponse) XXX_Size() int {
|
|
||||||
return m.Size()
|
|
||||||
}
|
|
||||||
func (m *VersionResponse) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_VersionResponse.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_VersionResponse proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
proto.RegisterType((*VersionResponse)(nil), "containerd.services.version.v1.VersionResponse")
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
proto.RegisterFile("github.com/containerd/containerd/api/services/version/v1/version.proto", fileDescriptor_128109001e578ffe)
|
|
||||||
}
|
|
||||||
|
|
||||||
var fileDescriptor_128109001e578ffe = []byte{
|
|
||||||
// 243 bytes of a gzipped FileDescriptorProto
|
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4b, 0xcf, 0x2c, 0xc9,
|
|
||||||
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
|
|
||||||
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x17, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb,
|
|
||||||
0x97, 0xa5, 0x16, 0x15, 0x67, 0xe6, 0xe7, 0xe9, 0x97, 0x19, 0xc2, 0x98, 0x7a, 0x05, 0x45, 0xf9,
|
|
||||||
0x25, 0xf9, 0x42, 0x72, 0x08, 0x1d, 0x7a, 0x30, 0xd5, 0x7a, 0x30, 0x25, 0x65, 0x86, 0x52, 0xd2,
|
|
||||||
0xe9, 0xf9, 0xf9, 0xe9, 0x39, 0xa9, 0xfa, 0x60, 0xd5, 0x49, 0xa5, 0x69, 0xfa, 0xa9, 0xb9, 0x05,
|
|
||||||
0x25, 0x95, 0x10, 0xcd, 0x52, 0x22, 0xe9, 0xf9, 0xe9, 0xf9, 0x60, 0xa6, 0x3e, 0x88, 0x05, 0x11,
|
|
||||||
0x55, 0x72, 0xe7, 0xe2, 0x0f, 0x83, 0x18, 0x10, 0x94, 0x5a, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x2a,
|
|
||||||
0x24, 0xc1, 0xc5, 0x0e, 0x35, 0x53, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08, 0xc6, 0x15, 0x92,
|
|
||||||
0xe2, 0xe2, 0x28, 0x4a, 0x2d, 0xcb, 0x04, 0x4b, 0x31, 0x81, 0xa5, 0xe0, 0x7c, 0xa3, 0x58, 0x2e,
|
|
||||||
0x76, 0xa8, 0x41, 0x42, 0x41, 0x08, 0xa6, 0x98, 0x1e, 0xc4, 0x49, 0x7a, 0x30, 0x27, 0xe9, 0xb9,
|
|
||||||
0x82, 0x9c, 0x24, 0xa5, 0xaf, 0x87, 0xdf, 0x2b, 0x7a, 0x68, 0x8e, 0x72, 0x8a, 0x3a, 0xf1, 0x50,
|
|
||||||
0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x86, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78,
|
|
||||||
0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0x63, 0x94, 0x03, 0xb9, 0x81, 0x6b, 0x0d, 0x65, 0x46, 0x30,
|
|
||||||
0x26, 0xb1, 0x81, 0x9d, 0x67, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x95, 0x0d, 0x52, 0x23, 0xa9,
|
|
||||||
0x01, 0x00, 0x00,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
|
||||||
var _ context.Context
|
|
||||||
var _ grpc.ClientConn
|
|
||||||
|
|
||||||
// This is a compile-time assertion to ensure that this generated file
|
|
||||||
// is compatible with the grpc package it is being compiled against.
|
|
||||||
const _ = grpc.SupportPackageIsVersion4
|
|
||||||
|
|
||||||
// VersionClient is the client API for Version service.
|
|
||||||
//
|
|
||||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
|
|
||||||
type VersionClient interface {
|
|
||||||
Version(ctx context.Context, in *types.Empty, opts ...grpc.CallOption) (*VersionResponse, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
type versionClient struct {
|
|
||||||
cc *grpc.ClientConn
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewVersionClient(cc *grpc.ClientConn) VersionClient {
|
|
||||||
return &versionClient{cc}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *versionClient) Version(ctx context.Context, in *types.Empty, opts ...grpc.CallOption) (*VersionResponse, error) {
|
|
||||||
out := new(VersionResponse)
|
|
||||||
err := c.cc.Invoke(ctx, "/containerd.services.version.v1.Version/Version", in, out, opts...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return out, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// VersionServer is the server API for Version service.
|
|
||||||
type VersionServer interface {
|
|
||||||
Version(context.Context, *types.Empty) (*VersionResponse, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
func RegisterVersionServer(s *grpc.Server, srv VersionServer) {
|
|
||||||
s.RegisterService(&_Version_serviceDesc, srv)
|
|
||||||
}
|
|
||||||
|
|
||||||
func _Version_Version_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
|
||||||
in := new(types.Empty)
|
|
||||||
if err := dec(in); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if interceptor == nil {
|
|
||||||
return srv.(VersionServer).Version(ctx, in)
|
|
||||||
}
|
|
||||||
info := &grpc.UnaryServerInfo{
|
|
||||||
Server: srv,
|
|
||||||
FullMethod: "/containerd.services.version.v1.Version/Version",
|
|
||||||
}
|
|
||||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
|
||||||
return srv.(VersionServer).Version(ctx, req.(*types.Empty))
|
|
||||||
}
|
|
||||||
return interceptor(ctx, in, info, handler)
|
|
||||||
}
|
|
||||||
|
|
||||||
var _Version_serviceDesc = grpc.ServiceDesc{
|
|
||||||
ServiceName: "containerd.services.version.v1.Version",
|
|
||||||
HandlerType: (*VersionServer)(nil),
|
|
||||||
Methods: []grpc.MethodDesc{
|
|
||||||
{
|
|
||||||
MethodName: "Version",
|
|
||||||
Handler: _Version_Version_Handler,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Streams: []grpc.StreamDesc{},
|
|
||||||
Metadata: "github.com/containerd/containerd/api/services/version/v1/version.proto",
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *VersionResponse) Marshal() (dAtA []byte, err error) {
|
|
||||||
size := m.Size()
|
|
||||||
dAtA = make([]byte, size)
|
|
||||||
n, err := m.MarshalTo(dAtA)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return dAtA[:n], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *VersionResponse) MarshalTo(dAtA []byte) (int, error) {
|
|
||||||
var i int
|
|
||||||
_ = i
|
|
||||||
var l int
|
|
||||||
_ = l
|
|
||||||
if len(m.Version) > 0 {
|
|
||||||
dAtA[i] = 0xa
|
|
||||||
i++
|
|
||||||
i = encodeVarintVersion(dAtA, i, uint64(len(m.Version)))
|
|
||||||
i += copy(dAtA[i:], m.Version)
|
|
||||||
}
|
|
||||||
if len(m.Revision) > 0 {
|
|
||||||
dAtA[i] = 0x12
|
|
||||||
i++
|
|
||||||
i = encodeVarintVersion(dAtA, i, uint64(len(m.Revision)))
|
|
||||||
i += copy(dAtA[i:], m.Revision)
|
|
||||||
}
|
|
||||||
if m.XXX_unrecognized != nil {
|
|
||||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
|
||||||
}
|
|
||||||
return i, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func encodeVarintVersion(dAtA []byte, offset int, v uint64) int {
|
|
||||||
for v >= 1<<7 {
|
|
||||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
|
||||||
v >>= 7
|
|
||||||
offset++
|
|
||||||
}
|
|
||||||
dAtA[offset] = uint8(v)
|
|
||||||
return offset + 1
|
|
||||||
}
|
|
||||||
func (m *VersionResponse) Size() (n int) {
|
|
||||||
if m == nil {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
var l int
|
|
||||||
_ = l
|
|
||||||
l = len(m.Version)
|
|
||||||
if l > 0 {
|
|
||||||
n += 1 + l + sovVersion(uint64(l))
|
|
||||||
}
|
|
||||||
l = len(m.Revision)
|
|
||||||
if l > 0 {
|
|
||||||
n += 1 + l + sovVersion(uint64(l))
|
|
||||||
}
|
|
||||||
if m.XXX_unrecognized != nil {
|
|
||||||
n += len(m.XXX_unrecognized)
|
|
||||||
}
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
|
|
||||||
func sovVersion(x uint64) (n int) {
|
|
||||||
for {
|
|
||||||
n++
|
|
||||||
x >>= 7
|
|
||||||
if x == 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
func sozVersion(x uint64) (n int) {
|
|
||||||
return sovVersion(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
|
||||||
}
|
|
||||||
func (this *VersionResponse) String() string {
|
|
||||||
if this == nil {
|
|
||||||
return "nil"
|
|
||||||
}
|
|
||||||
s := strings.Join([]string{`&VersionResponse{`,
|
|
||||||
`Version:` + fmt.Sprintf("%v", this.Version) + `,`,
|
|
||||||
`Revision:` + fmt.Sprintf("%v", this.Revision) + `,`,
|
|
||||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
|
||||||
`}`,
|
|
||||||
}, "")
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
func valueToStringVersion(v interface{}) string {
|
|
||||||
rv := reflect.ValueOf(v)
|
|
||||||
if rv.IsNil() {
|
|
||||||
return "nil"
|
|
||||||
}
|
|
||||||
pv := reflect.Indirect(rv).Interface()
|
|
||||||
return fmt.Sprintf("*%v", pv)
|
|
||||||
}
|
|
||||||
func (m *VersionResponse) Unmarshal(dAtA []byte) error {
|
|
||||||
l := len(dAtA)
|
|
||||||
iNdEx := 0
|
|
||||||
for iNdEx < l {
|
|
||||||
preIndex := iNdEx
|
|
||||||
var wire uint64
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return ErrIntOverflowVersion
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
wire |= uint64(b&0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fieldNum := int32(wire >> 3)
|
|
||||||
wireType := int(wire & 0x7)
|
|
||||||
if wireType == 4 {
|
|
||||||
return fmt.Errorf("proto: VersionResponse: wiretype end group for non-group")
|
|
||||||
}
|
|
||||||
if fieldNum <= 0 {
|
|
||||||
return fmt.Errorf("proto: VersionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
|
|
||||||
}
|
|
||||||
switch fieldNum {
|
|
||||||
case 1:
|
|
||||||
if wireType != 2 {
|
|
||||||
return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
|
|
||||||
}
|
|
||||||
var stringLen uint64
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return ErrIntOverflowVersion
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
stringLen |= uint64(b&0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
intStringLen := int(stringLen)
|
|
||||||
if intStringLen < 0 {
|
|
||||||
return ErrInvalidLengthVersion
|
|
||||||
}
|
|
||||||
postIndex := iNdEx + intStringLen
|
|
||||||
if postIndex < 0 {
|
|
||||||
return ErrInvalidLengthVersion
|
|
||||||
}
|
|
||||||
if postIndex > l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
m.Version = string(dAtA[iNdEx:postIndex])
|
|
||||||
iNdEx = postIndex
|
|
||||||
case 2:
|
|
||||||
if wireType != 2 {
|
|
||||||
return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
|
|
||||||
}
|
|
||||||
var stringLen uint64
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return ErrIntOverflowVersion
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
stringLen |= uint64(b&0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
intStringLen := int(stringLen)
|
|
||||||
if intStringLen < 0 {
|
|
||||||
return ErrInvalidLengthVersion
|
|
||||||
}
|
|
||||||
postIndex := iNdEx + intStringLen
|
|
||||||
if postIndex < 0 {
|
|
||||||
return ErrInvalidLengthVersion
|
|
||||||
}
|
|
||||||
if postIndex > l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
m.Revision = string(dAtA[iNdEx:postIndex])
|
|
||||||
iNdEx = postIndex
|
|
||||||
default:
|
|
||||||
iNdEx = preIndex
|
|
||||||
skippy, err := skipVersion(dAtA[iNdEx:])
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if skippy < 0 {
|
|
||||||
return ErrInvalidLengthVersion
|
|
||||||
}
|
|
||||||
if (iNdEx + skippy) < 0 {
|
|
||||||
return ErrInvalidLengthVersion
|
|
||||||
}
|
|
||||||
if (iNdEx + skippy) > l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
|
||||||
iNdEx += skippy
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if iNdEx > l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
func skipVersion(dAtA []byte) (n int, err error) {
|
|
||||||
l := len(dAtA)
|
|
||||||
iNdEx := 0
|
|
||||||
for iNdEx < l {
|
|
||||||
var wire uint64
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return 0, ErrIntOverflowVersion
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return 0, io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
wire |= (uint64(b) & 0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
wireType := int(wire & 0x7)
|
|
||||||
switch wireType {
|
|
||||||
case 0:
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return 0, ErrIntOverflowVersion
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return 0, io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
iNdEx++
|
|
||||||
if dAtA[iNdEx-1] < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return iNdEx, nil
|
|
||||||
case 1:
|
|
||||||
iNdEx += 8
|
|
||||||
return iNdEx, nil
|
|
||||||
case 2:
|
|
||||||
var length int
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return 0, ErrIntOverflowVersion
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return 0, io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
length |= (int(b) & 0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if length < 0 {
|
|
||||||
return 0, ErrInvalidLengthVersion
|
|
||||||
}
|
|
||||||
iNdEx += length
|
|
||||||
if iNdEx < 0 {
|
|
||||||
return 0, ErrInvalidLengthVersion
|
|
||||||
}
|
|
||||||
return iNdEx, nil
|
|
||||||
case 3:
|
|
||||||
for {
|
|
||||||
var innerWire uint64
|
|
||||||
var start int = iNdEx
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return 0, ErrIntOverflowVersion
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return 0, io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
innerWire |= (uint64(b) & 0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
innerWireType := int(innerWire & 0x7)
|
|
||||||
if innerWireType == 4 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
next, err := skipVersion(dAtA[start:])
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
iNdEx = start + next
|
|
||||||
if iNdEx < 0 {
|
|
||||||
return 0, ErrInvalidLengthVersion
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return iNdEx, nil
|
|
||||||
case 4:
|
|
||||||
return iNdEx, nil
|
|
||||||
case 5:
|
|
||||||
iNdEx += 4
|
|
||||||
return iNdEx, nil
|
|
||||||
default:
|
|
||||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
panic("unreachable")
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
ErrInvalidLengthVersion = fmt.Errorf("proto: negative length found during unmarshaling")
|
|
||||||
ErrIntOverflowVersion = fmt.Errorf("proto: integer overflow")
|
|
||||||
)
|
|
|
@@ -1,18 +0,0 @@
syntax = "proto3";

package containerd.services.version.v1;

import "google/protobuf/empty.proto";
import weak "gogoproto/gogo.proto";

// TODO(stevvooe): Should version service actually be versioned?
option go_package = "github.com/containerd/containerd/api/services/version/v1;version";

service Version {
	rpc Version(google.protobuf.Empty) returns (VersionResponse);
}

message VersionResponse {
	string version = 1;
	string revision = 2;
}
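A minimal sketch of calling the Version RPC above through the containerd Go client; the socket path is a placeholder.

package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Version wraps the single RPC above; the response carries the daemon
	// version string and its VCS revision.
	v, err := client.Version(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("containerd %s (%s)", v.Version, v.Revision)
}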
@@ -1,621 +0,0 @@
|
||||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
|
||||||
// source: github.com/containerd/containerd/api/types/descriptor.proto
|
|
||||||
|
|
||||||
package types
|
|
||||||
|
|
||||||
import (
|
|
||||||
fmt "fmt"
|
|
||||||
proto "github.com/gogo/protobuf/proto"
|
|
||||||
github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
|
|
||||||
github_com_opencontainers_go_digest "github.com/opencontainers/go-digest"
|
|
||||||
io "io"
|
|
||||||
math "math"
|
|
||||||
reflect "reflect"
|
|
||||||
strings "strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
|
||||||
var _ = proto.Marshal
|
|
||||||
var _ = fmt.Errorf
|
|
||||||
var _ = math.Inf
|
|
||||||
|
|
||||||
// This is a compile-time assertion to ensure that this generated file
|
|
||||||
// is compatible with the proto package it is being compiled against.
|
|
||||||
// A compilation error at this line likely means your copy of the
|
|
||||||
// proto package needs to be updated.
|
|
||||||
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
|
|
||||||
|
|
||||||
// Descriptor describes a blob in a content store.
|
|
||||||
//
|
|
||||||
// This descriptor can be used to reference content from an
|
|
||||||
// oci descriptor found in a manifest.
|
|
||||||
// See https://godoc.org/github.com/opencontainers/image-spec/specs-go/v1#Descriptor
|
|
||||||
type Descriptor struct {
|
|
||||||
MediaType string `protobuf:"bytes,1,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"`
|
|
||||||
Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,2,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"`
|
|
||||||
Size_ int64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"`
|
|
||||||
Annotations map[string]string `protobuf:"bytes,5,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Descriptor) Reset() { *m = Descriptor{} }
|
|
||||||
func (*Descriptor) ProtoMessage() {}
|
|
||||||
func (*Descriptor) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_37f958df3707db9e, []int{0}
|
|
||||||
}
|
|
||||||
func (m *Descriptor) XXX_Unmarshal(b []byte) error {
|
|
||||||
return m.Unmarshal(b)
|
|
||||||
}
|
|
||||||
func (m *Descriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
if deterministic {
|
|
||||||
return xxx_messageInfo_Descriptor.Marshal(b, m, deterministic)
|
|
||||||
} else {
|
|
||||||
b = b[:cap(b)]
|
|
||||||
n, err := m.MarshalTo(b)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return b[:n], nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
func (m *Descriptor) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_Descriptor.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *Descriptor) XXX_Size() int {
|
|
||||||
return m.Size()
|
|
||||||
}
|
|
||||||
func (m *Descriptor) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_Descriptor.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_Descriptor proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
proto.RegisterType((*Descriptor)(nil), "containerd.types.Descriptor")
|
|
||||||
proto.RegisterMapType((map[string]string)(nil), "containerd.types.Descriptor.AnnotationsEntry")
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
proto.RegisterFile("github.com/containerd/containerd/api/types/descriptor.proto", fileDescriptor_37f958df3707db9e)
|
|
||||||
}
|
|
||||||
|
|
||||||
var fileDescriptor_37f958df3707db9e = []byte{
|
|
||||||
// 311 bytes of a gzipped FileDescriptorProto
|
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9,
|
|
||||||
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
|
|
||||||
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xa7, 0xa4, 0x16,
|
|
||||||
0x27, 0x17, 0x65, 0x16, 0x94, 0xe4, 0x17, 0xe9, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20,
|
|
||||||
0x94, 0xe9, 0x81, 0x95, 0x48, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x25, 0xf5, 0x41, 0x2c, 0x88,
|
|
||||||
0x3a, 0xa5, 0x39, 0x4c, 0x5c, 0x5c, 0x2e, 0x70, 0xcd, 0x42, 0xb2, 0x5c, 0x5c, 0xb9, 0xa9, 0x29,
|
|
||||||
0x99, 0x89, 0xf1, 0x20, 0x3d, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x9c, 0x60, 0x91, 0x90,
|
|
||||||
0xca, 0x82, 0x54, 0x21, 0x2f, 0x2e, 0xb6, 0x94, 0xcc, 0xf4, 0xd4, 0xe2, 0x12, 0x09, 0x26, 0x90,
|
|
||||||
0x94, 0x93, 0xd1, 0x89, 0x7b, 0xf2, 0x0c, 0xb7, 0xee, 0xc9, 0x6b, 0x21, 0x39, 0x35, 0xbf, 0x20,
|
|
||||||
0x35, 0x0f, 0x6e, 0x79, 0xb1, 0x7e, 0x7a, 0xbe, 0x2e, 0x44, 0x8b, 0x9e, 0x0b, 0x98, 0x0a, 0x82,
|
|
||||||
0x9a, 0x20, 0x24, 0xc4, 0xc5, 0x52, 0x9c, 0x59, 0x95, 0x2a, 0xc1, 0xac, 0xc0, 0xa8, 0xc1, 0x1c,
|
|
||||||
0x04, 0x66, 0x0b, 0xf9, 0x73, 0x71, 0x27, 0xe6, 0xe5, 0xe5, 0x97, 0x24, 0x96, 0x64, 0xe6, 0xe7,
|
|
||||||
0x15, 0x4b, 0xb0, 0x2a, 0x30, 0x6b, 0x70, 0x1b, 0xe9, 0xea, 0xa1, 0xfb, 0x45, 0x0f, 0xe1, 0x62,
|
|
||||||
0x3d, 0x47, 0x84, 0x7a, 0xd7, 0xbc, 0x92, 0xa2, 0xca, 0x20, 0x64, 0x13, 0xa4, 0xec, 0xb8, 0x04,
|
|
||||||
0xd0, 0x15, 0x08, 0x09, 0x70, 0x31, 0x67, 0xa7, 0x56, 0x42, 0x3d, 0x07, 0x62, 0x0a, 0x89, 0x70,
|
|
||||||
0xb1, 0x96, 0x25, 0xe6, 0x94, 0xa6, 0x42, 0x7c, 0x15, 0x04, 0xe1, 0x58, 0x31, 0x59, 0x30, 0x3a,
|
|
||||||
0x79, 0x9d, 0x78, 0x28, 0xc7, 0x70, 0xe3, 0xa1, 0x1c, 0x43, 0xc3, 0x23, 0x39, 0xc6, 0x13, 0x8f,
|
|
||||||
0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x31, 0xca, 0x80, 0xf8, 0xd8, 0xb1,
|
|
||||||
0x06, 0x93, 0x11, 0x0c, 0x49, 0x6c, 0xe0, 0x30, 0x37, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x22,
|
|
||||||
0x8a, 0x20, 0x4a, 0xda, 0x01, 0x00, 0x00,
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Descriptor) Marshal() (dAtA []byte, err error) {
|
|
||||||
size := m.Size()
|
|
||||||
dAtA = make([]byte, size)
|
|
||||||
n, err := m.MarshalTo(dAtA)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return dAtA[:n], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Descriptor) MarshalTo(dAtA []byte) (int, error) {
|
|
||||||
var i int
|
|
||||||
_ = i
|
|
||||||
var l int
|
|
||||||
_ = l
|
|
||||||
if len(m.MediaType) > 0 {
|
|
||||||
dAtA[i] = 0xa
|
|
||||||
i++
|
|
||||||
i = encodeVarintDescriptor(dAtA, i, uint64(len(m.MediaType)))
|
|
||||||
i += copy(dAtA[i:], m.MediaType)
|
|
||||||
}
|
|
||||||
if len(m.Digest) > 0 {
|
|
||||||
dAtA[i] = 0x12
|
|
||||||
i++
|
|
||||||
i = encodeVarintDescriptor(dAtA, i, uint64(len(m.Digest)))
|
|
||||||
i += copy(dAtA[i:], m.Digest)
|
|
||||||
}
|
|
||||||
if m.Size_ != 0 {
|
|
||||||
dAtA[i] = 0x18
|
|
||||||
i++
|
|
||||||
i = encodeVarintDescriptor(dAtA, i, uint64(m.Size_))
|
|
||||||
}
|
|
||||||
if len(m.Annotations) > 0 {
|
|
||||||
for k, _ := range m.Annotations {
|
|
||||||
dAtA[i] = 0x2a
|
|
||||||
i++
|
|
||||||
v := m.Annotations[k]
|
|
||||||
mapSize := 1 + len(k) + sovDescriptor(uint64(len(k))) + 1 + len(v) + sovDescriptor(uint64(len(v)))
|
|
||||||
i = encodeVarintDescriptor(dAtA, i, uint64(mapSize))
|
|
||||||
dAtA[i] = 0xa
|
|
||||||
i++
|
|
||||||
i = encodeVarintDescriptor(dAtA, i, uint64(len(k)))
|
|
||||||
i += copy(dAtA[i:], k)
|
|
||||||
dAtA[i] = 0x12
|
|
||||||
i++
|
|
||||||
i = encodeVarintDescriptor(dAtA, i, uint64(len(v)))
|
|
||||||
i += copy(dAtA[i:], v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if m.XXX_unrecognized != nil {
|
|
||||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
|
||||||
}
|
|
||||||
return i, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func encodeVarintDescriptor(dAtA []byte, offset int, v uint64) int {
|
|
||||||
for v >= 1<<7 {
|
|
||||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
|
||||||
v >>= 7
|
|
||||||
offset++
|
|
||||||
}
|
|
||||||
dAtA[offset] = uint8(v)
|
|
||||||
return offset + 1
|
|
||||||
}
|
|
||||||
func (m *Descriptor) Size() (n int) {
|
|
||||||
if m == nil {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
var l int
|
|
||||||
_ = l
|
|
||||||
l = len(m.MediaType)
|
|
||||||
if l > 0 {
|
|
||||||
n += 1 + l + sovDescriptor(uint64(l))
|
|
||||||
}
|
|
||||||
l = len(m.Digest)
|
|
||||||
if l > 0 {
|
|
||||||
n += 1 + l + sovDescriptor(uint64(l))
|
|
||||||
}
|
|
||||||
if m.Size_ != 0 {
|
|
||||||
n += 1 + sovDescriptor(uint64(m.Size_))
|
|
||||||
}
|
|
||||||
if len(m.Annotations) > 0 {
|
|
||||||
for k, v := range m.Annotations {
|
|
||||||
_ = k
|
|
||||||
_ = v
|
|
||||||
mapEntrySize := 1 + len(k) + sovDescriptor(uint64(len(k))) + 1 + len(v) + sovDescriptor(uint64(len(v)))
|
|
||||||
n += mapEntrySize + 1 + sovDescriptor(uint64(mapEntrySize))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if m.XXX_unrecognized != nil {
|
|
||||||
n += len(m.XXX_unrecognized)
|
|
||||||
}
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
|
|
||||||
func sovDescriptor(x uint64) (n int) {
|
|
||||||
for {
|
|
||||||
n++
|
|
||||||
x >>= 7
|
|
||||||
if x == 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
func sozDescriptor(x uint64) (n int) {
|
|
||||||
return sovDescriptor(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
|
||||||
}
|
|
||||||
func (this *Descriptor) String() string {
|
|
||||||
if this == nil {
|
|
||||||
return "nil"
|
|
||||||
}
|
|
||||||
keysForAnnotations := make([]string, 0, len(this.Annotations))
|
|
||||||
for k, _ := range this.Annotations {
|
|
||||||
keysForAnnotations = append(keysForAnnotations, k)
|
|
||||||
}
|
|
||||||
github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
|
|
||||||
mapStringForAnnotations := "map[string]string{"
|
|
||||||
for _, k := range keysForAnnotations {
|
|
||||||
mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k])
|
|
||||||
}
|
|
||||||
mapStringForAnnotations += "}"
|
|
||||||
s := strings.Join([]string{`&Descriptor{`,
|
|
||||||
`MediaType:` + fmt.Sprintf("%v", this.MediaType) + `,`,
|
|
||||||
`Digest:` + fmt.Sprintf("%v", this.Digest) + `,`,
|
|
||||||
`Size_:` + fmt.Sprintf("%v", this.Size_) + `,`,
|
|
||||||
`Annotations:` + mapStringForAnnotations + `,`,
|
|
||||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
|
||||||
`}`,
|
|
||||||
}, "")
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
func valueToStringDescriptor(v interface{}) string {
|
|
||||||
rv := reflect.ValueOf(v)
|
|
||||||
if rv.IsNil() {
|
|
||||||
return "nil"
|
|
||||||
}
|
|
||||||
pv := reflect.Indirect(rv).Interface()
|
|
||||||
return fmt.Sprintf("*%v", pv)
|
|
||||||
}
|
|
||||||
func (m *Descriptor) Unmarshal(dAtA []byte) error {
|
|
||||||
l := len(dAtA)
|
|
||||||
iNdEx := 0
|
|
||||||
for iNdEx < l {
|
|
||||||
preIndex := iNdEx
|
|
||||||
var wire uint64
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return ErrIntOverflowDescriptor
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
wire |= uint64(b&0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fieldNum := int32(wire >> 3)
|
|
||||||
wireType := int(wire & 0x7)
|
|
||||||
if wireType == 4 {
|
|
||||||
return fmt.Errorf("proto: Descriptor: wiretype end group for non-group")
|
|
||||||
}
|
|
||||||
if fieldNum <= 0 {
|
|
||||||
return fmt.Errorf("proto: Descriptor: illegal tag %d (wire type %d)", fieldNum, wire)
|
|
||||||
}
|
|
||||||
switch fieldNum {
|
|
||||||
case 1:
|
|
||||||
if wireType != 2 {
|
|
||||||
return fmt.Errorf("proto: wrong wireType = %d for field MediaType", wireType)
|
|
||||||
}
|
|
||||||
var stringLen uint64
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return ErrIntOverflowDescriptor
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
stringLen |= uint64(b&0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
intStringLen := int(stringLen)
|
|
||||||
if intStringLen < 0 {
|
|
||||||
return ErrInvalidLengthDescriptor
|
|
||||||
}
|
|
||||||
postIndex := iNdEx + intStringLen
|
|
||||||
if postIndex < 0 {
|
|
||||||
return ErrInvalidLengthDescriptor
|
|
||||||
}
|
|
||||||
if postIndex > l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
m.MediaType = string(dAtA[iNdEx:postIndex])
|
|
||||||
iNdEx = postIndex
|
|
||||||
case 2:
|
|
||||||
if wireType != 2 {
|
|
||||||
return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType)
|
|
||||||
}
|
|
||||||
var stringLen uint64
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return ErrIntOverflowDescriptor
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
stringLen |= uint64(b&0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
intStringLen := int(stringLen)
|
|
||||||
if intStringLen < 0 {
|
|
||||||
return ErrInvalidLengthDescriptor
|
|
||||||
}
|
|
||||||
postIndex := iNdEx + intStringLen
|
|
||||||
if postIndex < 0 {
|
|
||||||
return ErrInvalidLengthDescriptor
|
|
||||||
}
|
|
||||||
if postIndex > l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])
|
|
||||||
iNdEx = postIndex
|
|
||||||
case 3:
|
|
||||||
if wireType != 0 {
|
|
||||||
return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType)
|
|
||||||
}
|
|
||||||
m.Size_ = 0
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return ErrIntOverflowDescriptor
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
m.Size_ |= int64(b&0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case 5:
|
|
||||||
if wireType != 2 {
|
|
||||||
return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType)
|
|
||||||
}
|
|
||||||
var msglen int
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return ErrIntOverflowDescriptor
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
msglen |= int(b&0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if msglen < 0 {
|
|
||||||
return ErrInvalidLengthDescriptor
|
|
||||||
}
|
|
||||||
postIndex := iNdEx + msglen
|
|
||||||
if postIndex < 0 {
|
|
||||||
return ErrInvalidLengthDescriptor
|
|
||||||
}
|
|
||||||
if postIndex > l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
if m.Annotations == nil {
|
|
||||||
m.Annotations = make(map[string]string)
|
|
||||||
}
|
|
||||||
var mapkey string
|
|
||||||
var mapvalue string
|
|
||||||
for iNdEx < postIndex {
|
|
||||||
entryPreIndex := iNdEx
|
|
||||||
var wire uint64
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return ErrIntOverflowDescriptor
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
wire |= uint64(b&0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fieldNum := int32(wire >> 3)
|
|
||||||
if fieldNum == 1 {
|
|
||||||
var stringLenmapkey uint64
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return ErrIntOverflowDescriptor
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
stringLenmapkey |= uint64(b&0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
intStringLenmapkey := int(stringLenmapkey)
|
|
||||||
if intStringLenmapkey < 0 {
|
|
||||||
return ErrInvalidLengthDescriptor
|
|
||||||
}
|
|
||||||
postStringIndexmapkey := iNdEx + intStringLenmapkey
|
|
||||||
if postStringIndexmapkey < 0 {
|
|
||||||
return ErrInvalidLengthDescriptor
|
|
||||||
}
|
|
||||||
if postStringIndexmapkey > l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
|
|
||||||
iNdEx = postStringIndexmapkey
|
|
||||||
} else if fieldNum == 2 {
|
|
||||||
var stringLenmapvalue uint64
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return ErrIntOverflowDescriptor
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
stringLenmapvalue |= uint64(b&0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
intStringLenmapvalue := int(stringLenmapvalue)
|
|
||||||
if intStringLenmapvalue < 0 {
|
|
||||||
return ErrInvalidLengthDescriptor
|
|
||||||
}
|
|
||||||
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
|
|
||||||
if postStringIndexmapvalue < 0 {
|
|
||||||
return ErrInvalidLengthDescriptor
|
|
||||||
}
|
|
||||||
if postStringIndexmapvalue > l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
|
|
||||||
iNdEx = postStringIndexmapvalue
|
|
||||||
} else {
|
|
||||||
iNdEx = entryPreIndex
|
|
||||||
skippy, err := skipDescriptor(dAtA[iNdEx:])
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if skippy < 0 {
|
|
||||||
return ErrInvalidLengthDescriptor
|
|
||||||
}
|
|
||||||
if (iNdEx + skippy) > postIndex {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
iNdEx += skippy
|
|
||||||
}
|
|
||||||
}
|
|
||||||
m.Annotations[mapkey] = mapvalue
|
|
||||||
iNdEx = postIndex
|
|
||||||
default:
|
|
||||||
iNdEx = preIndex
|
|
||||||
skippy, err := skipDescriptor(dAtA[iNdEx:])
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if skippy < 0 {
|
|
||||||
return ErrInvalidLengthDescriptor
|
|
||||||
}
|
|
||||||
if (iNdEx + skippy) < 0 {
|
|
||||||
return ErrInvalidLengthDescriptor
|
|
||||||
}
|
|
||||||
if (iNdEx + skippy) > l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
|
||||||
iNdEx += skippy
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if iNdEx > l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
func skipDescriptor(dAtA []byte) (n int, err error) {
|
|
||||||
l := len(dAtA)
|
|
||||||
iNdEx := 0
|
|
||||||
for iNdEx < l {
|
|
||||||
var wire uint64
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return 0, ErrIntOverflowDescriptor
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return 0, io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
wire |= (uint64(b) & 0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
wireType := int(wire & 0x7)
|
|
||||||
switch wireType {
|
|
||||||
case 0:
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return 0, ErrIntOverflowDescriptor
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return 0, io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
iNdEx++
|
|
||||||
if dAtA[iNdEx-1] < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return iNdEx, nil
|
|
||||||
case 1:
|
|
||||||
iNdEx += 8
|
|
||||||
return iNdEx, nil
|
|
||||||
case 2:
|
|
||||||
var length int
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return 0, ErrIntOverflowDescriptor
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return 0, io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
length |= (int(b) & 0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if length < 0 {
|
|
||||||
return 0, ErrInvalidLengthDescriptor
|
|
||||||
}
|
|
||||||
iNdEx += length
|
|
||||||
if iNdEx < 0 {
|
|
||||||
return 0, ErrInvalidLengthDescriptor
|
|
||||||
}
|
|
||||||
return iNdEx, nil
|
|
||||||
case 3:
|
|
||||||
for {
|
|
||||||
var innerWire uint64
|
|
||||||
var start int = iNdEx
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return 0, ErrIntOverflowDescriptor
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return 0, io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
innerWire |= (uint64(b) & 0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
innerWireType := int(innerWire & 0x7)
|
|
||||||
if innerWireType == 4 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
next, err := skipDescriptor(dAtA[start:])
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
iNdEx = start + next
|
|
||||||
if iNdEx < 0 {
|
|
||||||
return 0, ErrInvalidLengthDescriptor
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return iNdEx, nil
|
|
||||||
case 4:
|
|
||||||
return iNdEx, nil
|
|
||||||
case 5:
|
|
||||||
iNdEx += 4
|
|
||||||
return iNdEx, nil
|
|
||||||
default:
|
|
||||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
panic("unreachable")
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
ErrInvalidLengthDescriptor = fmt.Errorf("proto: negative length found during unmarshaling")
|
|
||||||
ErrIntOverflowDescriptor = fmt.Errorf("proto: integer overflow")
|
|
||||||
)
|
|
|
@ -1,19 +0,0 @@
syntax = "proto3";

package containerd.types;

import weak "gogoproto/gogo.proto";

option go_package = "github.com/containerd/containerd/api/types;types";

// Descriptor describes a blob in a content store.
//
// This descriptor can be used to reference content from an
// oci descriptor found in a manifest.
// See https://godoc.org/github.com/opencontainers/image-spec/specs-go/v1#Descriptor
message Descriptor {
	string media_type = 1;
	string digest = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
	int64 size = 3;
	map<string, string> annotations = 5;
}
@ -1,17 +0,0 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package types
@ -1,474 +0,0 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/containerd/containerd/api/types/metrics.proto

package types

import (
	fmt "fmt"
	proto "github.com/gogo/protobuf/proto"
	github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
	types "github.com/gogo/protobuf/types"
	io "io"
	math "math"
	reflect "reflect"
	strings "strings"
	time "time"
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
var _ = time.Kitchen

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package

type Metric struct {
	Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,proto3,stdtime" json:"timestamp"`
	ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
	Data *types.Any `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *Metric) Reset() { *m = Metric{} }
func (*Metric) ProtoMessage() {}
func (*Metric) Descriptor() ([]byte, []int) {
	return fileDescriptor_8d594d87edf6e6bc, []int{0}
}
func (m *Metric) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_Metric.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *Metric) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Metric.Merge(m, src)
}
func (m *Metric) XXX_Size() int {
	return m.Size()
}
func (m *Metric) XXX_DiscardUnknown() {
	xxx_messageInfo_Metric.DiscardUnknown(m)
}

var xxx_messageInfo_Metric proto.InternalMessageInfo

func init() {
	proto.RegisterType((*Metric)(nil), "containerd.types.Metric")
}

func init() {
	proto.RegisterFile("github.com/containerd/containerd/api/types/metrics.proto", fileDescriptor_8d594d87edf6e6bc)
}
|
|
||||||
|
|
||||||
var fileDescriptor_8d594d87edf6e6bc = []byte{
|
|
||||||
// 258 bytes of a gzipped FileDescriptorProto
|
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x48, 0xcf, 0x2c, 0xc9,
|
|
||||||
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
|
|
||||||
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xe7, 0xa6, 0x96,
|
|
||||||
0x14, 0x65, 0x26, 0x17, 0xeb, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, 0xd4, 0xe8, 0x81,
|
|
||||||
0xe5, 0xa5, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x92, 0xfa, 0x20, 0x16, 0x44, 0x9d, 0x94, 0x64,
|
|
||||||
0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x3e, 0x98, 0x97, 0x54, 0x9a, 0xa6, 0x9f, 0x98, 0x57, 0x09,
|
|
||||||
0x95, 0x92, 0x47, 0x97, 0x2a, 0xc9, 0xcc, 0x4d, 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0x80, 0x28, 0x50,
|
|
||||||
0xea, 0x63, 0xe4, 0x62, 0xf3, 0x05, 0xdb, 0x2a, 0xe4, 0xc4, 0xc5, 0x09, 0x97, 0x95, 0x60, 0x54,
|
|
||||||
0x60, 0xd4, 0xe0, 0x36, 0x92, 0xd2, 0x83, 0xe8, 0xd7, 0x83, 0xe9, 0xd7, 0x0b, 0x81, 0xa9, 0x70,
|
|
||||||
0xe2, 0x38, 0x71, 0x4f, 0x9e, 0x61, 0xc2, 0x7d, 0x79, 0xc6, 0x20, 0x84, 0x36, 0x21, 0x31, 0x2e,
|
|
||||||
0xa6, 0xcc, 0x14, 0x09, 0x26, 0x05, 0x46, 0x0d, 0x4e, 0x27, 0xb6, 0x47, 0xf7, 0xe4, 0x99, 0x3c,
|
|
||||||
0x5d, 0x82, 0x98, 0x32, 0x53, 0x84, 0x34, 0xb8, 0x58, 0x52, 0x12, 0x4b, 0x12, 0x25, 0x98, 0xc1,
|
|
||||||
0xc6, 0x8a, 0x60, 0x18, 0xeb, 0x98, 0x57, 0x19, 0x04, 0x56, 0xe1, 0xe4, 0x75, 0xe2, 0xa1, 0x1c,
|
|
||||||
0xc3, 0x8d, 0x87, 0x72, 0x0c, 0x0d, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48,
|
|
||||||
0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x28, 0x03, 0xe2, 0x03, 0xd2, 0x1a, 0x4c, 0x46, 0x30, 0x24,
|
|
||||||
0xb1, 0x81, 0x6d, 0x30, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xde, 0x0d, 0x02, 0xfe, 0x85, 0x01,
|
|
||||||
0x00, 0x00,
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Metric) Marshal() (dAtA []byte, err error) {
|
|
||||||
size := m.Size()
|
|
||||||
dAtA = make([]byte, size)
|
|
||||||
n, err := m.MarshalTo(dAtA)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return dAtA[:n], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Metric) MarshalTo(dAtA []byte) (int, error) {
|
|
||||||
var i int
|
|
||||||
_ = i
|
|
||||||
var l int
|
|
||||||
_ = l
|
|
||||||
dAtA[i] = 0xa
|
|
||||||
i++
|
|
||||||
i = encodeVarintMetrics(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp)))
|
|
||||||
n1, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i:])
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
i += n1
|
|
||||||
if len(m.ID) > 0 {
|
|
||||||
dAtA[i] = 0x12
|
|
||||||
i++
|
|
||||||
i = encodeVarintMetrics(dAtA, i, uint64(len(m.ID)))
|
|
||||||
i += copy(dAtA[i:], m.ID)
|
|
||||||
}
|
|
||||||
if m.Data != nil {
|
|
||||||
dAtA[i] = 0x1a
|
|
||||||
i++
|
|
||||||
i = encodeVarintMetrics(dAtA, i, uint64(m.Data.Size()))
|
|
||||||
n2, err := m.Data.MarshalTo(dAtA[i:])
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
i += n2
|
|
||||||
}
|
|
||||||
if m.XXX_unrecognized != nil {
|
|
||||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
|
||||||
}
|
|
||||||
return i, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int {
|
|
||||||
for v >= 1<<7 {
|
|
||||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
|
||||||
v >>= 7
|
|
||||||
offset++
|
|
||||||
}
|
|
||||||
dAtA[offset] = uint8(v)
|
|
||||||
return offset + 1
|
|
||||||
}
|
|
||||||
func (m *Metric) Size() (n int) {
|
|
||||||
if m == nil {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
var l int
|
|
||||||
_ = l
|
|
||||||
l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp)
|
|
||||||
n += 1 + l + sovMetrics(uint64(l))
|
|
||||||
l = len(m.ID)
|
|
||||||
if l > 0 {
|
|
||||||
n += 1 + l + sovMetrics(uint64(l))
|
|
||||||
}
|
|
||||||
if m.Data != nil {
|
|
||||||
l = m.Data.Size()
|
|
||||||
n += 1 + l + sovMetrics(uint64(l))
|
|
||||||
}
|
|
||||||
if m.XXX_unrecognized != nil {
|
|
||||||
n += len(m.XXX_unrecognized)
|
|
||||||
}
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
|
|
||||||
func sovMetrics(x uint64) (n int) {
|
|
||||||
for {
|
|
||||||
n++
|
|
||||||
x >>= 7
|
|
||||||
if x == 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
func sozMetrics(x uint64) (n int) {
|
|
||||||
return sovMetrics(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
|
||||||
}
|
|
||||||
func (this *Metric) String() string {
|
|
||||||
if this == nil {
|
|
||||||
return "nil"
|
|
||||||
}
|
|
||||||
s := strings.Join([]string{`&Metric{`,
|
|
||||||
`Timestamp:` + strings.Replace(strings.Replace(this.Timestamp.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
|
|
||||||
`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
|
|
||||||
`Data:` + strings.Replace(fmt.Sprintf("%v", this.Data), "Any", "types.Any", 1) + `,`,
|
|
||||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
|
||||||
`}`,
|
|
||||||
}, "")
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
func valueToStringMetrics(v interface{}) string {
|
|
||||||
rv := reflect.ValueOf(v)
|
|
||||||
if rv.IsNil() {
|
|
||||||
return "nil"
|
|
||||||
}
|
|
||||||
pv := reflect.Indirect(rv).Interface()
|
|
||||||
return fmt.Sprintf("*%v", pv)
|
|
||||||
}
|
|
||||||
func (m *Metric) Unmarshal(dAtA []byte) error {
|
|
||||||
l := len(dAtA)
|
|
||||||
iNdEx := 0
|
|
||||||
for iNdEx < l {
|
|
||||||
preIndex := iNdEx
|
|
||||||
var wire uint64
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return ErrIntOverflowMetrics
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
wire |= uint64(b&0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fieldNum := int32(wire >> 3)
|
|
||||||
wireType := int(wire & 0x7)
|
|
||||||
if wireType == 4 {
|
|
||||||
return fmt.Errorf("proto: Metric: wiretype end group for non-group")
|
|
||||||
}
|
|
||||||
if fieldNum <= 0 {
|
|
||||||
return fmt.Errorf("proto: Metric: illegal tag %d (wire type %d)", fieldNum, wire)
|
|
||||||
}
|
|
||||||
switch fieldNum {
|
|
||||||
case 1:
|
|
||||||
if wireType != 2 {
|
|
||||||
return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
|
|
||||||
}
|
|
||||||
var msglen int
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return ErrIntOverflowMetrics
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
msglen |= int(b&0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if msglen < 0 {
|
|
||||||
return ErrInvalidLengthMetrics
|
|
||||||
}
|
|
||||||
postIndex := iNdEx + msglen
|
|
||||||
if postIndex < 0 {
|
|
||||||
return ErrInvalidLengthMetrics
|
|
||||||
}
|
|
||||||
if postIndex > l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
iNdEx = postIndex
|
|
||||||
case 2:
|
|
||||||
if wireType != 2 {
|
|
||||||
return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
|
|
||||||
}
|
|
||||||
var stringLen uint64
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return ErrIntOverflowMetrics
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
stringLen |= uint64(b&0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
intStringLen := int(stringLen)
|
|
||||||
if intStringLen < 0 {
|
|
||||||
return ErrInvalidLengthMetrics
|
|
||||||
}
|
|
||||||
postIndex := iNdEx + intStringLen
|
|
||||||
if postIndex < 0 {
|
|
||||||
return ErrInvalidLengthMetrics
|
|
||||||
}
|
|
||||||
if postIndex > l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
m.ID = string(dAtA[iNdEx:postIndex])
|
|
||||||
iNdEx = postIndex
|
|
||||||
case 3:
|
|
||||||
if wireType != 2 {
|
|
||||||
return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
|
|
||||||
}
|
|
||||||
var msglen int
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return ErrIntOverflowMetrics
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
msglen |= int(b&0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if msglen < 0 {
|
|
||||||
return ErrInvalidLengthMetrics
|
|
||||||
}
|
|
||||||
postIndex := iNdEx + msglen
|
|
||||||
if postIndex < 0 {
|
|
||||||
return ErrInvalidLengthMetrics
|
|
||||||
}
|
|
||||||
if postIndex > l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
if m.Data == nil {
|
|
||||||
m.Data = &types.Any{}
|
|
||||||
}
|
|
||||||
if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
iNdEx = postIndex
|
|
||||||
default:
|
|
||||||
iNdEx = preIndex
|
|
||||||
skippy, err := skipMetrics(dAtA[iNdEx:])
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if skippy < 0 {
|
|
||||||
return ErrInvalidLengthMetrics
|
|
||||||
}
|
|
||||||
if (iNdEx + skippy) < 0 {
|
|
||||||
return ErrInvalidLengthMetrics
|
|
||||||
}
|
|
||||||
if (iNdEx + skippy) > l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
|
||||||
iNdEx += skippy
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if iNdEx > l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
func skipMetrics(dAtA []byte) (n int, err error) {
|
|
||||||
l := len(dAtA)
|
|
||||||
iNdEx := 0
|
|
||||||
for iNdEx < l {
|
|
||||||
var wire uint64
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return 0, ErrIntOverflowMetrics
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return 0, io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
wire |= (uint64(b) & 0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
wireType := int(wire & 0x7)
|
|
||||||
switch wireType {
|
|
||||||
case 0:
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return 0, ErrIntOverflowMetrics
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return 0, io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
iNdEx++
|
|
||||||
if dAtA[iNdEx-1] < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return iNdEx, nil
|
|
||||||
case 1:
|
|
||||||
iNdEx += 8
|
|
||||||
return iNdEx, nil
|
|
||||||
case 2:
|
|
||||||
var length int
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return 0, ErrIntOverflowMetrics
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return 0, io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
length |= (int(b) & 0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if length < 0 {
|
|
||||||
return 0, ErrInvalidLengthMetrics
|
|
||||||
}
|
|
||||||
iNdEx += length
|
|
||||||
if iNdEx < 0 {
|
|
||||||
return 0, ErrInvalidLengthMetrics
|
|
||||||
}
|
|
||||||
return iNdEx, nil
|
|
||||||
case 3:
|
|
||||||
for {
|
|
||||||
var innerWire uint64
|
|
||||||
var start int = iNdEx
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return 0, ErrIntOverflowMetrics
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return 0, io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
innerWire |= (uint64(b) & 0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
innerWireType := int(innerWire & 0x7)
|
|
||||||
if innerWireType == 4 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
next, err := skipMetrics(dAtA[start:])
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
iNdEx = start + next
|
|
||||||
if iNdEx < 0 {
|
|
||||||
return 0, ErrInvalidLengthMetrics
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return iNdEx, nil
|
|
||||||
case 4:
|
|
||||||
return iNdEx, nil
|
|
||||||
case 5:
|
|
||||||
iNdEx += 4
|
|
||||||
return iNdEx, nil
|
|
||||||
default:
|
|
||||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
panic("unreachable")
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
ErrInvalidLengthMetrics = fmt.Errorf("proto: negative length found during unmarshaling")
|
|
||||||
ErrIntOverflowMetrics = fmt.Errorf("proto: integer overflow")
|
|
||||||
)
|
|
|
@ -1,15 +0,0 @@
syntax = "proto3";

package containerd.types;

import weak "gogoproto/gogo.proto";
import "google/protobuf/any.proto";
import "google/protobuf/timestamp.proto";

option go_package = "github.com/containerd/containerd/api/types;types";

message Metric {
	google.protobuf.Timestamp timestamp = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
	string id = 2;
	google.protobuf.Any data = 3;
}
@ -1,524 +0,0 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/containerd/containerd/api/types/mount.proto

package types

import (
	fmt "fmt"
	proto "github.com/gogo/protobuf/proto"
	io "io"
	math "math"
	reflect "reflect"
	strings "strings"
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package

// Mount describes mounts for a container.
//
// This type is the lingua franca of ContainerD. All services provide mounts
// to be used with the container at creation time.
//
// The Mount type follows the structure of the mount syscall, including a type,
// source, target and options.
type Mount struct {
	// Type defines the nature of the mount.
	Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
	// Source specifies the name of the mount. Depending on mount type, this
	// may be a volume name or a host path, or even ignored.
	Source string `protobuf:"bytes,2,opt,name=source,proto3" json:"source,omitempty"`
	// Target path in container
	Target string `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"`
	// Options specifies zero or more fstab style mount options.
	Options []string `protobuf:"bytes,4,rep,name=options,proto3" json:"options,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *Mount) Reset() { *m = Mount{} }
func (*Mount) ProtoMessage() {}
func (*Mount) Descriptor() ([]byte, []int) {
	return fileDescriptor_920196890d4a7b9f, []int{0}
}
func (m *Mount) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *Mount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_Mount.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *Mount) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Mount.Merge(m, src)
}
func (m *Mount) XXX_Size() int {
	return m.Size()
}
func (m *Mount) XXX_DiscardUnknown() {
	xxx_messageInfo_Mount.DiscardUnknown(m)
}

var xxx_messageInfo_Mount proto.InternalMessageInfo

func init() {
	proto.RegisterType((*Mount)(nil), "containerd.types.Mount")
}

func init() {
	proto.RegisterFile("github.com/containerd/containerd/api/types/mount.proto", fileDescriptor_920196890d4a7b9f)
}
|
|
||||||
|
|
||||||
var fileDescriptor_920196890d4a7b9f = []byte{
|
|
||||||
// 202 bytes of a gzipped FileDescriptorProto
|
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x4b, 0xcf, 0x2c, 0xc9,
|
|
||||||
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
|
|
||||||
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xe7, 0xe6, 0x97,
|
|
||||||
0xe6, 0x95, 0xe8, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, 0x54, 0xe8, 0x81, 0x65, 0xa5,
|
|
||||||
0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x92, 0xfa, 0x20, 0x16, 0x44, 0x9d, 0x52, 0x2a, 0x17, 0xab,
|
|
||||||
0x2f, 0x48, 0x9b, 0x90, 0x10, 0x17, 0x0b, 0x48, 0x9d, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10,
|
|
||||||
0x98, 0x2d, 0x24, 0xc6, 0xc5, 0x56, 0x9c, 0x5f, 0x5a, 0x94, 0x9c, 0x2a, 0xc1, 0x04, 0x16, 0x85,
|
|
||||||
0xf2, 0x40, 0xe2, 0x25, 0x89, 0x45, 0xe9, 0xa9, 0x25, 0x12, 0xcc, 0x10, 0x71, 0x08, 0x4f, 0x48,
|
|
||||||
0x82, 0x8b, 0x3d, 0xbf, 0xa0, 0x24, 0x33, 0x3f, 0xaf, 0x58, 0x82, 0x45, 0x81, 0x59, 0x83, 0x33,
|
|
||||||
0x08, 0xc6, 0x75, 0xf2, 0x3a, 0xf1, 0x50, 0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x86, 0x47, 0x72,
|
|
||||||
0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0x63, 0x94, 0x01,
|
|
||||||
0xf1, 0x1e, 0xb4, 0x06, 0x93, 0x11, 0x0c, 0x49, 0x6c, 0x60, 0xb7, 0x1b, 0x03, 0x02, 0x00, 0x00,
|
|
||||||
0xff, 0xff, 0x82, 0x1c, 0x02, 0x18, 0x1d, 0x01, 0x00, 0x00,
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Mount) Marshal() (dAtA []byte, err error) {
|
|
||||||
size := m.Size()
|
|
||||||
dAtA = make([]byte, size)
|
|
||||||
n, err := m.MarshalTo(dAtA)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return dAtA[:n], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Mount) MarshalTo(dAtA []byte) (int, error) {
|
|
||||||
var i int
|
|
||||||
_ = i
|
|
||||||
var l int
|
|
||||||
_ = l
|
|
||||||
if len(m.Type) > 0 {
|
|
||||||
dAtA[i] = 0xa
|
|
||||||
i++
|
|
||||||
i = encodeVarintMount(dAtA, i, uint64(len(m.Type)))
|
|
||||||
i += copy(dAtA[i:], m.Type)
|
|
||||||
}
|
|
||||||
if len(m.Source) > 0 {
|
|
||||||
dAtA[i] = 0x12
|
|
||||||
i++
|
|
||||||
i = encodeVarintMount(dAtA, i, uint64(len(m.Source)))
|
|
||||||
i += copy(dAtA[i:], m.Source)
|
|
||||||
}
|
|
||||||
if len(m.Target) > 0 {
|
|
||||||
dAtA[i] = 0x1a
|
|
||||||
i++
|
|
||||||
i = encodeVarintMount(dAtA, i, uint64(len(m.Target)))
|
|
||||||
i += copy(dAtA[i:], m.Target)
|
|
||||||
}
|
|
||||||
if len(m.Options) > 0 {
|
|
||||||
for _, s := range m.Options {
|
|
||||||
dAtA[i] = 0x22
|
|
||||||
i++
|
|
||||||
l = len(s)
|
|
||||||
for l >= 1<<7 {
|
|
||||||
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
|
|
||||||
l >>= 7
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
dAtA[i] = uint8(l)
|
|
||||||
i++
|
|
||||||
i += copy(dAtA[i:], s)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if m.XXX_unrecognized != nil {
|
|
||||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
|
||||||
}
|
|
||||||
return i, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func encodeVarintMount(dAtA []byte, offset int, v uint64) int {
|
|
||||||
for v >= 1<<7 {
|
|
||||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
|
||||||
v >>= 7
|
|
||||||
offset++
|
|
||||||
}
|
|
||||||
dAtA[offset] = uint8(v)
|
|
||||||
return offset + 1
|
|
||||||
}
|
|
||||||
func (m *Mount) Size() (n int) {
|
|
||||||
if m == nil {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
var l int
|
|
||||||
_ = l
|
|
||||||
l = len(m.Type)
|
|
||||||
if l > 0 {
|
|
||||||
n += 1 + l + sovMount(uint64(l))
|
|
||||||
}
|
|
||||||
l = len(m.Source)
|
|
||||||
if l > 0 {
|
|
||||||
n += 1 + l + sovMount(uint64(l))
|
|
||||||
}
|
|
||||||
l = len(m.Target)
|
|
||||||
if l > 0 {
|
|
||||||
n += 1 + l + sovMount(uint64(l))
|
|
||||||
}
|
|
||||||
if len(m.Options) > 0 {
|
|
||||||
for _, s := range m.Options {
|
|
||||||
l = len(s)
|
|
||||||
n += 1 + l + sovMount(uint64(l))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if m.XXX_unrecognized != nil {
|
|
||||||
n += len(m.XXX_unrecognized)
|
|
||||||
}
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
|
|
||||||
func sovMount(x uint64) (n int) {
|
|
||||||
for {
|
|
||||||
n++
|
|
||||||
x >>= 7
|
|
||||||
if x == 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
func sozMount(x uint64) (n int) {
|
|
||||||
return sovMount(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
|
||||||
}
|
|
||||||
func (this *Mount) String() string {
|
|
||||||
if this == nil {
|
|
||||||
return "nil"
|
|
||||||
}
|
|
||||||
s := strings.Join([]string{`&Mount{`,
|
|
||||||
`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
|
|
||||||
`Source:` + fmt.Sprintf("%v", this.Source) + `,`,
|
|
||||||
`Target:` + fmt.Sprintf("%v", this.Target) + `,`,
|
|
||||||
`Options:` + fmt.Sprintf("%v", this.Options) + `,`,
|
|
||||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
|
||||||
`}`,
|
|
||||||
}, "")
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
func valueToStringMount(v interface{}) string {
|
|
||||||
rv := reflect.ValueOf(v)
|
|
||||||
if rv.IsNil() {
|
|
||||||
return "nil"
|
|
||||||
}
|
|
||||||
pv := reflect.Indirect(rv).Interface()
|
|
||||||
return fmt.Sprintf("*%v", pv)
|
|
||||||
}
|
|
||||||
func (m *Mount) Unmarshal(dAtA []byte) error {
|
|
||||||
l := len(dAtA)
|
|
||||||
iNdEx := 0
|
|
||||||
for iNdEx < l {
|
|
||||||
preIndex := iNdEx
|
|
||||||
var wire uint64
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return ErrIntOverflowMount
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
wire |= uint64(b&0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fieldNum := int32(wire >> 3)
|
|
||||||
wireType := int(wire & 0x7)
|
|
||||||
if wireType == 4 {
|
|
||||||
return fmt.Errorf("proto: Mount: wiretype end group for non-group")
|
|
||||||
}
|
|
||||||
if fieldNum <= 0 {
|
|
||||||
return fmt.Errorf("proto: Mount: illegal tag %d (wire type %d)", fieldNum, wire)
|
|
||||||
}
|
|
||||||
switch fieldNum {
|
|
||||||
case 1:
|
|
||||||
if wireType != 2 {
|
|
||||||
return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
|
|
||||||
}
|
|
||||||
var stringLen uint64
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return ErrIntOverflowMount
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
stringLen |= uint64(b&0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
intStringLen := int(stringLen)
|
|
||||||
if intStringLen < 0 {
|
|
||||||
return ErrInvalidLengthMount
|
|
||||||
}
|
|
||||||
postIndex := iNdEx + intStringLen
|
|
||||||
if postIndex < 0 {
|
|
||||||
return ErrInvalidLengthMount
|
|
||||||
}
|
|
||||||
if postIndex > l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
m.Type = string(dAtA[iNdEx:postIndex])
|
|
||||||
iNdEx = postIndex
|
|
||||||
case 2:
|
|
||||||
if wireType != 2 {
|
|
||||||
return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType)
|
|
||||||
}
|
|
||||||
var stringLen uint64
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return ErrIntOverflowMount
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
stringLen |= uint64(b&0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
intStringLen := int(stringLen)
|
|
||||||
if intStringLen < 0 {
|
|
||||||
return ErrInvalidLengthMount
|
|
||||||
}
|
|
||||||
postIndex := iNdEx + intStringLen
|
|
||||||
if postIndex < 0 {
|
|
||||||
return ErrInvalidLengthMount
|
|
||||||
}
|
|
||||||
if postIndex > l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
m.Source = string(dAtA[iNdEx:postIndex])
|
|
||||||
iNdEx = postIndex
|
|
||||||
case 3:
|
|
||||||
if wireType != 2 {
|
|
||||||
return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType)
|
|
||||||
}
|
|
||||||
var stringLen uint64
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return ErrIntOverflowMount
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
stringLen |= uint64(b&0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
intStringLen := int(stringLen)
|
|
||||||
if intStringLen < 0 {
|
|
||||||
return ErrInvalidLengthMount
|
|
||||||
}
|
|
||||||
postIndex := iNdEx + intStringLen
|
|
||||||
if postIndex < 0 {
|
|
||||||
return ErrInvalidLengthMount
|
|
||||||
}
|
|
||||||
if postIndex > l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
m.Target = string(dAtA[iNdEx:postIndex])
|
|
||||||
iNdEx = postIndex
|
|
||||||
case 4:
|
|
||||||
if wireType != 2 {
|
|
||||||
return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType)
|
|
||||||
}
|
|
||||||
var stringLen uint64
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return ErrIntOverflowMount
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
stringLen |= uint64(b&0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
intStringLen := int(stringLen)
|
|
||||||
if intStringLen < 0 {
|
|
||||||
return ErrInvalidLengthMount
|
|
||||||
}
|
|
||||||
postIndex := iNdEx + intStringLen
|
|
||||||
if postIndex < 0 {
|
|
||||||
return ErrInvalidLengthMount
|
|
||||||
}
|
|
||||||
if postIndex > l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
m.Options = append(m.Options, string(dAtA[iNdEx:postIndex]))
|
|
||||||
iNdEx = postIndex
|
|
||||||
default:
|
|
||||||
iNdEx = preIndex
|
|
||||||
skippy, err := skipMount(dAtA[iNdEx:])
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if skippy < 0 {
|
|
||||||
return ErrInvalidLengthMount
|
|
||||||
}
|
|
||||||
if (iNdEx + skippy) < 0 {
|
|
||||||
return ErrInvalidLengthMount
|
|
||||||
}
|
|
||||||
if (iNdEx + skippy) > l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
|
||||||
iNdEx += skippy
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if iNdEx > l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
func skipMount(dAtA []byte) (n int, err error) {
|
|
||||||
l := len(dAtA)
|
|
||||||
iNdEx := 0
|
|
||||||
for iNdEx < l {
|
|
||||||
var wire uint64
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return 0, ErrIntOverflowMount
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return 0, io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
wire |= (uint64(b) & 0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
wireType := int(wire & 0x7)
|
|
||||||
switch wireType {
|
|
||||||
case 0:
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return 0, ErrIntOverflowMount
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return 0, io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
iNdEx++
|
|
||||||
if dAtA[iNdEx-1] < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return iNdEx, nil
|
|
||||||
case 1:
|
|
||||||
iNdEx += 8
|
|
||||||
return iNdEx, nil
|
|
||||||
case 2:
|
|
||||||
var length int
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return 0, ErrIntOverflowMount
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return 0, io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
length |= (int(b) & 0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if length < 0 {
|
|
||||||
return 0, ErrInvalidLengthMount
|
|
||||||
}
|
|
||||||
iNdEx += length
|
|
||||||
if iNdEx < 0 {
|
|
||||||
return 0, ErrInvalidLengthMount
|
|
||||||
}
|
|
||||||
return iNdEx, nil
|
|
||||||
case 3:
|
|
||||||
for {
|
|
||||||
var innerWire uint64
|
|
||||||
var start int = iNdEx
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return 0, ErrIntOverflowMount
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return 0, io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
innerWire |= (uint64(b) & 0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
innerWireType := int(innerWire & 0x7)
|
|
||||||
if innerWireType == 4 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
next, err := skipMount(dAtA[start:])
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
iNdEx = start + next
|
|
||||||
if iNdEx < 0 {
|
|
||||||
return 0, ErrInvalidLengthMount
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return iNdEx, nil
|
|
||||||
case 4:
|
|
||||||
return iNdEx, nil
|
|
||||||
case 5:
|
|
||||||
iNdEx += 4
|
|
||||||
return iNdEx, nil
|
|
||||||
default:
|
|
||||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
panic("unreachable")
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
ErrInvalidLengthMount = fmt.Errorf("proto: negative length found during unmarshaling")
|
|
||||||
ErrIntOverflowMount = fmt.Errorf("proto: integer overflow")
|
|
||||||
)
|
|
|
@ -1,29 +0,0 @@
syntax = "proto3";

package containerd.types;

import weak "gogoproto/gogo.proto";

option go_package = "github.com/containerd/containerd/api/types;types";

// Mount describes mounts for a container.
//
// This type is the lingua franca of ContainerD. All services provide mounts
// to be used with the container at creation time.
//
// The Mount type follows the structure of the mount syscall, including a type,
// source, target and options.
message Mount {
	// Type defines the nature of the mount.
	string type = 1;

	// Source specifies the name of the mount. Depending on mount type, this
	// may be a volume name or a host path, or even ignored.
	string source = 2;

	// Target path in container
	string target = 3;

	// Options specifies zero or more fstab style mount options.
	repeated string options = 4;
}
@ -1,459 +0,0 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/containerd/containerd/api/types/platform.proto

package types

import (
	fmt "fmt"
	proto "github.com/gogo/protobuf/proto"
	io "io"
	math "math"
	reflect "reflect"
	strings "strings"
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package

// Platform follows the structure of the OCI platform specification, from
// descriptors.
type Platform struct {
	OS string `protobuf:"bytes,1,opt,name=os,proto3" json:"os,omitempty"`
	Architecture string `protobuf:"bytes,2,opt,name=architecture,proto3" json:"architecture,omitempty"`
	Variant string `protobuf:"bytes,3,opt,name=variant,proto3" json:"variant,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *Platform) Reset() { *m = Platform{} }
func (*Platform) ProtoMessage() {}
func (*Platform) Descriptor() ([]byte, []int) {
	return fileDescriptor_24ba7a4b83e2367e, []int{0}
}
func (m *Platform) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *Platform) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_Platform.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalTo(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *Platform) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Platform.Merge(m, src)
}
func (m *Platform) XXX_Size() int {
	return m.Size()
}
func (m *Platform) XXX_DiscardUnknown() {
	xxx_messageInfo_Platform.DiscardUnknown(m)
}

var xxx_messageInfo_Platform proto.InternalMessageInfo

func init() {
	proto.RegisterType((*Platform)(nil), "containerd.types.Platform")
}

func init() {
	proto.RegisterFile("github.com/containerd/containerd/api/types/platform.proto", fileDescriptor_24ba7a4b83e2367e)
}
|
|
||||||
|
|
||||||
var fileDescriptor_24ba7a4b83e2367e = []byte{
|
|
||||||
// 205 bytes of a gzipped FileDescriptorProto
|
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9,
|
|
||||||
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
|
|
||||||
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0x17, 0xe4, 0x24,
|
|
||||||
0x96, 0xa4, 0xe5, 0x17, 0xe5, 0xea, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, 0x14, 0xe9,
|
|
||||||
0x81, 0x15, 0x48, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x25, 0xf5, 0x41, 0x2c, 0x88, 0x3a, 0xa5,
|
|
||||||
0x04, 0x2e, 0x8e, 0x00, 0xa8, 0x4e, 0x21, 0x31, 0x2e, 0xa6, 0xfc, 0x62, 0x09, 0x46, 0x05, 0x46,
|
|
||||||
0x0d, 0x4e, 0x27, 0xb6, 0x47, 0xf7, 0xe4, 0x99, 0xfc, 0x83, 0x83, 0x98, 0xf2, 0x8b, 0x85, 0x94,
|
|
||||||
0xb8, 0x78, 0x12, 0x8b, 0x92, 0x33, 0x32, 0x4b, 0x52, 0x93, 0x4b, 0x4a, 0x8b, 0x52, 0x25, 0x98,
|
|
||||||
0x40, 0x2a, 0x82, 0x50, 0xc4, 0x84, 0x24, 0xb8, 0xd8, 0xcb, 0x12, 0x8b, 0x32, 0x13, 0xf3, 0x4a,
|
|
||||||
0x24, 0x98, 0xc1, 0xd2, 0x30, 0xae, 0x93, 0xd7, 0x89, 0x87, 0x72, 0x0c, 0x37, 0x1e, 0xca, 0x31,
|
|
||||||
0x34, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4,
|
|
||||||
0x18, 0xa3, 0x0c, 0x88, 0xf7, 0x9e, 0x35, 0x98, 0x8c, 0x60, 0x48, 0x62, 0x03, 0x3b, 0xdb, 0x18,
|
|
||||||
0x10, 0x00, 0x00, 0xff, 0xff, 0x05, 0xaa, 0xda, 0xa1, 0x1b, 0x01, 0x00, 0x00,
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Platform) Marshal() (dAtA []byte, err error) {
|
|
||||||
size := m.Size()
|
|
||||||
dAtA = make([]byte, size)
|
|
||||||
n, err := m.MarshalTo(dAtA)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return dAtA[:n], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Platform) MarshalTo(dAtA []byte) (int, error) {
|
|
||||||
var i int
|
|
||||||
_ = i
|
|
||||||
var l int
|
|
||||||
_ = l
|
|
||||||
if len(m.OS) > 0 {
|
|
||||||
dAtA[i] = 0xa
|
|
||||||
i++
|
|
||||||
i = encodeVarintPlatform(dAtA, i, uint64(len(m.OS)))
|
|
||||||
i += copy(dAtA[i:], m.OS)
|
|
||||||
}
|
|
||||||
if len(m.Architecture) > 0 {
|
|
||||||
dAtA[i] = 0x12
|
|
||||||
i++
|
|
||||||
i = encodeVarintPlatform(dAtA, i, uint64(len(m.Architecture)))
|
|
||||||
i += copy(dAtA[i:], m.Architecture)
|
|
||||||
}
|
|
||||||
if len(m.Variant) > 0 {
|
|
||||||
dAtA[i] = 0x1a
|
|
||||||
i++
|
|
||||||
i = encodeVarintPlatform(dAtA, i, uint64(len(m.Variant)))
|
|
||||||
i += copy(dAtA[i:], m.Variant)
|
|
||||||
}
|
|
||||||
if m.XXX_unrecognized != nil {
|
|
||||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
|
||||||
}
|
|
||||||
return i, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func encodeVarintPlatform(dAtA []byte, offset int, v uint64) int {
|
|
||||||
for v >= 1<<7 {
|
|
||||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
|
||||||
v >>= 7
|
|
||||||
offset++
|
|
||||||
}
|
|
||||||
dAtA[offset] = uint8(v)
|
|
||||||
return offset + 1
|
|
||||||
}
|
|
||||||
func (m *Platform) Size() (n int) {
|
|
||||||
if m == nil {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
var l int
|
|
||||||
_ = l
|
|
||||||
l = len(m.OS)
|
|
||||||
if l > 0 {
|
|
||||||
n += 1 + l + sovPlatform(uint64(l))
|
|
||||||
}
|
|
||||||
l = len(m.Architecture)
|
|
||||||
if l > 0 {
|
|
||||||
n += 1 + l + sovPlatform(uint64(l))
|
|
||||||
}
|
|
||||||
l = len(m.Variant)
|
|
||||||
if l > 0 {
|
|
||||||
n += 1 + l + sovPlatform(uint64(l))
|
|
||||||
}
|
|
||||||
if m.XXX_unrecognized != nil {
|
|
||||||
n += len(m.XXX_unrecognized)
|
|
||||||
}
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
|
|
||||||
func sovPlatform(x uint64) (n int) {
|
|
||||||
for {
|
|
||||||
n++
|
|
||||||
x >>= 7
|
|
||||||
if x == 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
func sozPlatform(x uint64) (n int) {
|
|
||||||
return sovPlatform(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
|
||||||
}
|
|
||||||
func (this *Platform) String() string {
|
|
||||||
if this == nil {
|
|
||||||
return "nil"
|
|
||||||
}
|
|
||||||
s := strings.Join([]string{`&Platform{`,
|
|
||||||
`OS:` + fmt.Sprintf("%v", this.OS) + `,`,
|
|
||||||
`Architecture:` + fmt.Sprintf("%v", this.Architecture) + `,`,
|
|
||||||
`Variant:` + fmt.Sprintf("%v", this.Variant) + `,`,
|
|
||||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
|
||||||
`}`,
|
|
||||||
}, "")
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
func valueToStringPlatform(v interface{}) string {
|
|
||||||
rv := reflect.ValueOf(v)
|
|
||||||
if rv.IsNil() {
|
|
||||||
return "nil"
|
|
||||||
}
|
|
||||||
pv := reflect.Indirect(rv).Interface()
|
|
||||||
return fmt.Sprintf("*%v", pv)
|
|
||||||
}
|
|
||||||
func (m *Platform) Unmarshal(dAtA []byte) error {
|
|
||||||
l := len(dAtA)
|
|
||||||
iNdEx := 0
|
|
||||||
for iNdEx < l {
|
|
||||||
preIndex := iNdEx
|
|
||||||
var wire uint64
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return ErrIntOverflowPlatform
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
wire |= uint64(b&0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fieldNum := int32(wire >> 3)
|
|
||||||
wireType := int(wire & 0x7)
|
|
||||||
if wireType == 4 {
|
|
||||||
return fmt.Errorf("proto: Platform: wiretype end group for non-group")
|
|
||||||
}
|
|
||||||
if fieldNum <= 0 {
|
|
||||||
return fmt.Errorf("proto: Platform: illegal tag %d (wire type %d)", fieldNum, wire)
|
|
||||||
}
|
|
||||||
switch fieldNum {
|
|
||||||
case 1:
|
|
||||||
if wireType != 2 {
|
|
||||||
return fmt.Errorf("proto: wrong wireType = %d for field OS", wireType)
|
|
||||||
}
|
|
||||||
var stringLen uint64
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return ErrIntOverflowPlatform
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
stringLen |= uint64(b&0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
intStringLen := int(stringLen)
|
|
||||||
if intStringLen < 0 {
|
|
||||||
return ErrInvalidLengthPlatform
|
|
||||||
}
|
|
||||||
postIndex := iNdEx + intStringLen
|
|
||||||
if postIndex < 0 {
|
|
||||||
return ErrInvalidLengthPlatform
|
|
||||||
}
|
|
||||||
if postIndex > l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
m.OS = string(dAtA[iNdEx:postIndex])
|
|
||||||
iNdEx = postIndex
|
|
||||||
case 2:
|
|
||||||
if wireType != 2 {
|
|
||||||
return fmt.Errorf("proto: wrong wireType = %d for field Architecture", wireType)
|
|
||||||
}
|
|
||||||
var stringLen uint64
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return ErrIntOverflowPlatform
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
stringLen |= uint64(b&0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
intStringLen := int(stringLen)
|
|
||||||
if intStringLen < 0 {
|
|
||||||
return ErrInvalidLengthPlatform
|
|
||||||
}
|
|
||||||
postIndex := iNdEx + intStringLen
|
|
||||||
if postIndex < 0 {
|
|
||||||
return ErrInvalidLengthPlatform
|
|
||||||
}
|
|
||||||
if postIndex > l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
m.Architecture = string(dAtA[iNdEx:postIndex])
|
|
||||||
iNdEx = postIndex
|
|
||||||
case 3:
|
|
||||||
if wireType != 2 {
|
|
||||||
return fmt.Errorf("proto: wrong wireType = %d for field Variant", wireType)
|
|
||||||
}
|
|
||||||
var stringLen uint64
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return ErrIntOverflowPlatform
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
stringLen |= uint64(b&0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
intStringLen := int(stringLen)
|
|
||||||
if intStringLen < 0 {
|
|
||||||
return ErrInvalidLengthPlatform
|
|
||||||
}
|
|
||||||
postIndex := iNdEx + intStringLen
|
|
||||||
if postIndex < 0 {
|
|
||||||
return ErrInvalidLengthPlatform
|
|
||||||
}
|
|
||||||
if postIndex > l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
m.Variant = string(dAtA[iNdEx:postIndex])
|
|
||||||
iNdEx = postIndex
|
|
||||||
default:
|
|
||||||
iNdEx = preIndex
|
|
||||||
skippy, err := skipPlatform(dAtA[iNdEx:])
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if skippy < 0 {
|
|
||||||
return ErrInvalidLengthPlatform
|
|
||||||
}
|
|
||||||
if (iNdEx + skippy) < 0 {
|
|
||||||
return ErrInvalidLengthPlatform
|
|
||||||
}
|
|
||||||
if (iNdEx + skippy) > l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
|
||||||
iNdEx += skippy
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if iNdEx > l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
func skipPlatform(dAtA []byte) (n int, err error) {
|
|
||||||
l := len(dAtA)
|
|
||||||
iNdEx := 0
|
|
||||||
for iNdEx < l {
|
|
||||||
var wire uint64
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return 0, ErrIntOverflowPlatform
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return 0, io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
wire |= (uint64(b) & 0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
wireType := int(wire & 0x7)
|
|
||||||
switch wireType {
|
|
||||||
case 0:
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return 0, ErrIntOverflowPlatform
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return 0, io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
iNdEx++
|
|
||||||
if dAtA[iNdEx-1] < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return iNdEx, nil
|
|
||||||
case 1:
|
|
||||||
iNdEx += 8
|
|
||||||
return iNdEx, nil
|
|
||||||
case 2:
|
|
||||||
var length int
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return 0, ErrIntOverflowPlatform
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return 0, io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
length |= (int(b) & 0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if length < 0 {
|
|
||||||
return 0, ErrInvalidLengthPlatform
|
|
||||||
}
|
|
||||||
iNdEx += length
|
|
||||||
if iNdEx < 0 {
|
|
||||||
return 0, ErrInvalidLengthPlatform
|
|
||||||
}
|
|
||||||
return iNdEx, nil
|
|
||||||
case 3:
|
|
||||||
for {
|
|
||||||
var innerWire uint64
|
|
||||||
var start int = iNdEx
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return 0, ErrIntOverflowPlatform
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return 0, io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
innerWire |= (uint64(b) & 0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
innerWireType := int(innerWire & 0x7)
|
|
||||||
if innerWireType == 4 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
next, err := skipPlatform(dAtA[start:])
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
iNdEx = start + next
|
|
||||||
if iNdEx < 0 {
|
|
||||||
return 0, ErrInvalidLengthPlatform
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return iNdEx, nil
|
|
||||||
case 4:
|
|
||||||
return iNdEx, nil
|
|
||||||
case 5:
|
|
||||||
iNdEx += 4
|
|
||||||
return iNdEx, nil
|
|
||||||
default:
|
|
||||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
panic("unreachable")
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
ErrInvalidLengthPlatform = fmt.Errorf("proto: negative length found during unmarshaling")
|
|
||||||
ErrIntOverflowPlatform = fmt.Errorf("proto: integer overflow")
|
|
||||||
)
|
|
|
@ -1,15 +0,0 @@
syntax = "proto3";

package containerd.types;

import weak "gogoproto/gogo.proto";

option go_package = "github.com/containerd/containerd/api/types;types";

// Platform follows the structure of the OCI platform specification, from
// descriptors.
message Platform {
	string os = 1 [(gogoproto.customname) = "OS"];
	string architecture = 2;
	string variant = 3;
}
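A small sketch of how a Platform-shaped value could be filled in from the running Go toolchain's view of the host. The platformSpec struct is local to the example, and defaulting the ARM variant to "v7" is an assumption made only for illustration.

package main

import (
	"fmt"
	"runtime"
)

// platformSpec mirrors the Platform message above; defined locally for illustration.
type platformSpec struct {
	OS           string
	Architecture string
	Variant      string
}

func main() {
	// Describe the platform this example itself is running on.
	p := platformSpec{OS: runtime.GOOS, Architecture: runtime.GOARCH}
	if p.Architecture == "arm" {
		// Variant only applies to some architectures; "v7" is an assumed default here.
		p.Variant = "v7"
	}
	fmt.Printf("%s/%s %s\n", p.OS, p.Architecture, p.Variant)
}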
@ -1,997 +0,0 @@
|
||||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
|
||||||
// source: github.com/containerd/containerd/api/types/task/task.proto
|
|
||||||
|
|
||||||
package task
|
|
||||||
|
|
||||||
import (
|
|
||||||
fmt "fmt"
|
|
||||||
proto "github.com/gogo/protobuf/proto"
|
|
||||||
github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
|
|
||||||
types "github.com/gogo/protobuf/types"
|
|
||||||
io "io"
|
|
||||||
math "math"
|
|
||||||
reflect "reflect"
|
|
||||||
strings "strings"
|
|
||||||
time "time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
|
||||||
var _ = proto.Marshal
|
|
||||||
var _ = fmt.Errorf
|
|
||||||
var _ = math.Inf
|
|
||||||
var _ = time.Kitchen
|
|
||||||
|
|
||||||
// This is a compile-time assertion to ensure that this generated file
|
|
||||||
// is compatible with the proto package it is being compiled against.
|
|
||||||
// A compilation error at this line likely means your copy of the
|
|
||||||
// proto package needs to be updated.
|
|
||||||
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
|
|
||||||
|
|
||||||
type Status int32
|
|
||||||
|
|
||||||
const (
|
|
||||||
StatusUnknown Status = 0
|
|
||||||
StatusCreated Status = 1
|
|
||||||
StatusRunning Status = 2
|
|
||||||
StatusStopped Status = 3
|
|
||||||
StatusPaused Status = 4
|
|
||||||
StatusPausing Status = 5
|
|
||||||
)
|
|
||||||
|
|
||||||
var Status_name = map[int32]string{
|
|
||||||
0: "UNKNOWN",
|
|
||||||
1: "CREATED",
|
|
||||||
2: "RUNNING",
|
|
||||||
3: "STOPPED",
|
|
||||||
4: "PAUSED",
|
|
||||||
5: "PAUSING",
|
|
||||||
}
|
|
||||||
|
|
||||||
var Status_value = map[string]int32{
|
|
||||||
"UNKNOWN": 0,
|
|
||||||
"CREATED": 1,
|
|
||||||
"RUNNING": 2,
|
|
||||||
"STOPPED": 3,
|
|
||||||
"PAUSED": 4,
|
|
||||||
"PAUSING": 5,
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x Status) String() string {
|
|
||||||
return proto.EnumName(Status_name, int32(x))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (Status) EnumDescriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_391ef18c8ab0dc16, []int{0}
|
|
||||||
}
|
|
||||||
|
|
||||||
type Process struct {
|
|
||||||
ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
|
|
||||||
ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
|
|
||||||
Pid uint32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"`
|
|
||||||
Status Status `protobuf:"varint,4,opt,name=status,proto3,enum=containerd.v1.types.Status" json:"status,omitempty"`
|
|
||||||
Stdin string `protobuf:"bytes,5,opt,name=stdin,proto3" json:"stdin,omitempty"`
|
|
||||||
Stdout string `protobuf:"bytes,6,opt,name=stdout,proto3" json:"stdout,omitempty"`
|
|
||||||
Stderr string `protobuf:"bytes,7,opt,name=stderr,proto3" json:"stderr,omitempty"`
|
|
||||||
Terminal bool `protobuf:"varint,8,opt,name=terminal,proto3" json:"terminal,omitempty"`
|
|
||||||
ExitStatus uint32 `protobuf:"varint,9,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"`
|
|
||||||
ExitedAt time.Time `protobuf:"bytes,10,opt,name=exited_at,json=exitedAt,proto3,stdtime" json:"exited_at"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Process) Reset() { *m = Process{} }
|
|
||||||
func (*Process) ProtoMessage() {}
|
|
||||||
func (*Process) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_391ef18c8ab0dc16, []int{0}
|
|
||||||
}
|
|
||||||
func (m *Process) XXX_Unmarshal(b []byte) error {
|
|
||||||
return m.Unmarshal(b)
|
|
||||||
}
|
|
||||||
func (m *Process) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
if deterministic {
|
|
||||||
return xxx_messageInfo_Process.Marshal(b, m, deterministic)
|
|
||||||
} else {
|
|
||||||
b = b[:cap(b)]
|
|
||||||
n, err := m.MarshalTo(b)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return b[:n], nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
func (m *Process) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_Process.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *Process) XXX_Size() int {
|
|
||||||
return m.Size()
|
|
||||||
}
|
|
||||||
func (m *Process) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_Process.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_Process proto.InternalMessageInfo
|
|
||||||
|
|
||||||
type ProcessInfo struct {
|
|
||||||
// PID is the process ID.
|
|
||||||
Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"`
|
|
||||||
// Info contains additional process information.
|
|
||||||
//
|
|
||||||
// Info varies by platform.
|
|
||||||
Info *types.Any `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *ProcessInfo) Reset() { *m = ProcessInfo{} }
|
|
||||||
func (*ProcessInfo) ProtoMessage() {}
|
|
||||||
func (*ProcessInfo) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_391ef18c8ab0dc16, []int{1}
|
|
||||||
}
|
|
||||||
func (m *ProcessInfo) XXX_Unmarshal(b []byte) error {
|
|
||||||
return m.Unmarshal(b)
|
|
||||||
}
|
|
||||||
func (m *ProcessInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
if deterministic {
|
|
||||||
return xxx_messageInfo_ProcessInfo.Marshal(b, m, deterministic)
|
|
||||||
} else {
|
|
||||||
b = b[:cap(b)]
|
|
||||||
n, err := m.MarshalTo(b)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return b[:n], nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
func (m *ProcessInfo) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_ProcessInfo.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *ProcessInfo) XXX_Size() int {
|
|
||||||
return m.Size()
|
|
||||||
}
|
|
||||||
func (m *ProcessInfo) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_ProcessInfo.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_ProcessInfo proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
proto.RegisterEnum("containerd.v1.types.Status", Status_name, Status_value)
|
|
||||||
proto.RegisterType((*Process)(nil), "containerd.v1.types.Process")
|
|
||||||
proto.RegisterType((*ProcessInfo)(nil), "containerd.v1.types.ProcessInfo")
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
proto.RegisterFile("github.com/containerd/containerd/api/types/task/task.proto", fileDescriptor_391ef18c8ab0dc16)
|
|
||||||
}
|
|
||||||
|
|
||||||
var fileDescriptor_391ef18c8ab0dc16 = []byte{
|
|
||||||
// 545 bytes of a gzipped FileDescriptorProto
|
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x3f, 0x6f, 0xd3, 0x40,
|
|
||||||
0x18, 0xc6, 0x7d, 0x6e, 0xeb, 0xa6, 0xe7, 0xb6, 0x18, 0x13, 0x55, 0xc6, 0x20, 0xdb, 0xea, 0x64,
|
|
||||||
0x31, 0xd8, 0x22, 0xdd, 0xd8, 0xf2, 0x4f, 0xc8, 0x42, 0x72, 0x23, 0x27, 0x11, 0x6c, 0x91, 0x13,
|
|
||||||
0x5f, 0xcc, 0xa9, 0xcd, 0x9d, 0x65, 0x9f, 0x81, 0x6c, 0x8c, 0xa8, 0x13, 0x5f, 0xa0, 0x13, 0x7c,
|
|
||||||
0x0a, 0x3e, 0x41, 0x46, 0x26, 0xc4, 0x14, 0xa8, 0x3f, 0x09, 0x3a, 0xdb, 0x49, 0x23, 0x60, 0x39,
|
|
||||||
0xbd, 0xef, 0xf3, 0x7b, 0xee, 0xbd, 0xf7, 0x1e, 0xf8, 0x22, 0xc6, 0xec, 0x6d, 0x3e, 0x75, 0x66,
|
|
||||||
0x74, 0xe1, 0xce, 0x28, 0x61, 0x21, 0x26, 0x28, 0x8d, 0x76, 0xcb, 0x30, 0xc1, 0x2e, 0x5b, 0x26,
|
|
||||||
0x28, 0x73, 0x59, 0x98, 0x5d, 0x95, 0x87, 0x93, 0xa4, 0x94, 0x51, 0xf5, 0xd1, 0xbd, 0xcb, 0x79,
|
|
||||||
0xf7, 0xdc, 0x29, 0x4d, 0x7a, 0x33, 0xa6, 0x31, 0x2d, 0xb9, 0xcb, 0xab, 0xca, 0xaa, 0x9b, 0x31,
|
|
||||||
0xa5, 0xf1, 0x35, 0x72, 0xcb, 0x6e, 0x9a, 0xcf, 0x5d, 0x86, 0x17, 0x28, 0x63, 0xe1, 0x22, 0xa9,
|
|
||||||
0x0d, 0x8f, 0xff, 0x36, 0x84, 0x64, 0x59, 0xa1, 0xf3, 0x42, 0x84, 0x87, 0x83, 0x94, 0xce, 0x50,
|
|
||||||
0x96, 0xa9, 0x2d, 0x78, 0xbc, 0x7d, 0x74, 0x82, 0x23, 0x0d, 0x58, 0xc0, 0x3e, 0xea, 0x3c, 0x28,
|
|
||||||
0xd6, 0xa6, 0xdc, 0xdd, 0xe8, 0x5e, 0x2f, 0x90, 0xb7, 0x26, 0x2f, 0x52, 0xcf, 0xa0, 0x88, 0x23,
|
|
||||||
0x4d, 0x2c, 0x9d, 0x52, 0xb1, 0x36, 0x45, 0xaf, 0x17, 0x88, 0x38, 0x52, 0x15, 0xb8, 0x97, 0xe0,
|
|
||||||
0x48, 0xdb, 0xb3, 0x80, 0x7d, 0x12, 0xf0, 0x52, 0xbd, 0x80, 0x52, 0xc6, 0x42, 0x96, 0x67, 0xda,
|
|
||||||
0xbe, 0x05, 0xec, 0xd3, 0xd6, 0x13, 0xe7, 0x3f, 0x3f, 0x74, 0x86, 0xa5, 0x25, 0xa8, 0xad, 0x6a,
|
|
||||||
0x13, 0x1e, 0x64, 0x2c, 0xc2, 0x44, 0x3b, 0xe0, 0x2f, 0x04, 0x55, 0xa3, 0x9e, 0xf1, 0x51, 0x11,
|
|
||||||
0xcd, 0x99, 0x26, 0x95, 0x72, 0xdd, 0xd5, 0x3a, 0x4a, 0x53, 0xed, 0x70, 0xab, 0xa3, 0x34, 0x55,
|
|
||||||
0x75, 0xd8, 0x60, 0x28, 0x5d, 0x60, 0x12, 0x5e, 0x6b, 0x0d, 0x0b, 0xd8, 0x8d, 0x60, 0xdb, 0xab,
|
|
||||||
0x26, 0x94, 0xd1, 0x07, 0xcc, 0x26, 0xf5, 0x6e, 0x47, 0xe5, 0xc2, 0x90, 0x4b, 0xd5, 0x2a, 0x6a,
|
|
||||||
0x1b, 0x1e, 0xf1, 0x0e, 0x45, 0x93, 0x90, 0x69, 0xd0, 0x02, 0xb6, 0xdc, 0xd2, 0x9d, 0x2a, 0x50,
|
|
||||||
0x67, 0x13, 0xa8, 0x33, 0xda, 0x24, 0xde, 0x69, 0xac, 0xd6, 0xa6, 0xf0, 0xf9, 0x97, 0x09, 0x82,
|
|
||||||
0x46, 0x75, 0xad, 0xcd, 0xce, 0x3d, 0x28, 0xd7, 0x19, 0x7b, 0x64, 0x4e, 0x37, 0xd9, 0x80, 0xfb,
|
|
||||||
0x6c, 0x6c, 0xb8, 0x8f, 0xc9, 0x9c, 0x96, 0x39, 0xca, 0xad, 0xe6, 0x3f, 0xe3, 0xdb, 0x64, 0x19,
|
|
||||||
0x94, 0x8e, 0x67, 0x3f, 0x00, 0x94, 0xea, 0xc5, 0x0c, 0x78, 0x38, 0xf6, 0x5f, 0xf9, 0x97, 0xaf,
|
|
||||||
0x7d, 0x45, 0xd0, 0x1f, 0xde, 0xdc, 0x5a, 0x27, 0x15, 0x18, 0x93, 0x2b, 0x42, 0xdf, 0x13, 0xce,
|
|
||||||
0xbb, 0x41, 0xbf, 0x3d, 0xea, 0xf7, 0x14, 0xb0, 0xcb, 0xbb, 0x29, 0x0a, 0x19, 0x8a, 0x38, 0x0f,
|
|
||||||
0xc6, 0xbe, 0xef, 0xf9, 0x2f, 0x15, 0x71, 0x97, 0x07, 0x39, 0x21, 0x98, 0xc4, 0x9c, 0x0f, 0x47,
|
|
||||||
0x97, 0x83, 0x41, 0xbf, 0xa7, 0xec, 0xed, 0xf2, 0x21, 0xa3, 0x49, 0x82, 0x22, 0xf5, 0x29, 0x94,
|
|
||||||
0x06, 0xed, 0xf1, 0xb0, 0xdf, 0x53, 0xf6, 0x75, 0xe5, 0xe6, 0xd6, 0x3a, 0xae, 0xf0, 0x20, 0xcc,
|
|
||||||
0xb3, 0x6a, 0x3a, 0xa7, 0x7c, 0xfa, 0xc1, 0xee, 0x6d, 0x8e, 0x31, 0x89, 0xf5, 0xd3, 0x4f, 0x5f,
|
|
||||||
0x0c, 0xe1, 0xdb, 0x57, 0xa3, 0xfe, 0x4d, 0x47, 0x5b, 0xdd, 0x19, 0xc2, 0xcf, 0x3b, 0x43, 0xf8,
|
|
||||||
0x58, 0x18, 0x60, 0x55, 0x18, 0xe0, 0x7b, 0x61, 0x80, 0xdf, 0x85, 0x01, 0xde, 0x08, 0x53, 0xa9,
|
|
||||||
0x0c, 0xe2, 0xe2, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc3, 0x32, 0xd2, 0x86, 0x50, 0x03, 0x00,
|
|
||||||
0x00,
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Process) Marshal() (dAtA []byte, err error) {
|
|
||||||
size := m.Size()
|
|
||||||
dAtA = make([]byte, size)
|
|
||||||
n, err := m.MarshalTo(dAtA)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return dAtA[:n], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Process) MarshalTo(dAtA []byte) (int, error) {
|
|
||||||
var i int
|
|
||||||
_ = i
|
|
||||||
var l int
|
|
||||||
_ = l
|
|
||||||
if len(m.ContainerID) > 0 {
|
|
||||||
dAtA[i] = 0xa
|
|
||||||
i++
|
|
||||||
i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID)))
|
|
||||||
i += copy(dAtA[i:], m.ContainerID)
|
|
||||||
}
|
|
||||||
if len(m.ID) > 0 {
|
|
||||||
dAtA[i] = 0x12
|
|
||||||
i++
|
|
||||||
i = encodeVarintTask(dAtA, i, uint64(len(m.ID)))
|
|
||||||
i += copy(dAtA[i:], m.ID)
|
|
||||||
}
|
|
||||||
if m.Pid != 0 {
|
|
||||||
dAtA[i] = 0x18
|
|
||||||
i++
|
|
||||||
i = encodeVarintTask(dAtA, i, uint64(m.Pid))
|
|
||||||
}
|
|
||||||
if m.Status != 0 {
|
|
||||||
dAtA[i] = 0x20
|
|
||||||
i++
|
|
||||||
i = encodeVarintTask(dAtA, i, uint64(m.Status))
|
|
||||||
}
|
|
||||||
if len(m.Stdin) > 0 {
|
|
||||||
dAtA[i] = 0x2a
|
|
||||||
i++
|
|
||||||
i = encodeVarintTask(dAtA, i, uint64(len(m.Stdin)))
|
|
||||||
i += copy(dAtA[i:], m.Stdin)
|
|
||||||
}
|
|
||||||
if len(m.Stdout) > 0 {
|
|
||||||
dAtA[i] = 0x32
|
|
||||||
i++
|
|
||||||
i = encodeVarintTask(dAtA, i, uint64(len(m.Stdout)))
|
|
||||||
i += copy(dAtA[i:], m.Stdout)
|
|
||||||
}
|
|
||||||
if len(m.Stderr) > 0 {
|
|
||||||
dAtA[i] = 0x3a
|
|
||||||
i++
|
|
||||||
i = encodeVarintTask(dAtA, i, uint64(len(m.Stderr)))
|
|
||||||
i += copy(dAtA[i:], m.Stderr)
|
|
||||||
}
|
|
||||||
if m.Terminal {
|
|
||||||
dAtA[i] = 0x40
|
|
||||||
i++
|
|
||||||
if m.Terminal {
|
|
||||||
dAtA[i] = 1
|
|
||||||
} else {
|
|
||||||
dAtA[i] = 0
|
|
||||||
}
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
if m.ExitStatus != 0 {
|
|
||||||
dAtA[i] = 0x48
|
|
||||||
i++
|
|
||||||
i = encodeVarintTask(dAtA, i, uint64(m.ExitStatus))
|
|
||||||
}
|
|
||||||
dAtA[i] = 0x52
|
|
||||||
i++
|
|
||||||
i = encodeVarintTask(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)))
|
|
||||||
n1, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
i += n1
|
|
||||||
if m.XXX_unrecognized != nil {
|
|
||||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
|
||||||
}
|
|
||||||
return i, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *ProcessInfo) Marshal() (dAtA []byte, err error) {
|
|
||||||
size := m.Size()
|
|
||||||
dAtA = make([]byte, size)
|
|
||||||
n, err := m.MarshalTo(dAtA)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return dAtA[:n], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *ProcessInfo) MarshalTo(dAtA []byte) (int, error) {
|
|
||||||
var i int
|
|
||||||
_ = i
|
|
||||||
var l int
|
|
||||||
_ = l
|
|
||||||
if m.Pid != 0 {
|
|
||||||
dAtA[i] = 0x8
|
|
||||||
i++
|
|
||||||
i = encodeVarintTask(dAtA, i, uint64(m.Pid))
|
|
||||||
}
|
|
||||||
if m.Info != nil {
|
|
||||||
dAtA[i] = 0x12
|
|
||||||
i++
|
|
||||||
i = encodeVarintTask(dAtA, i, uint64(m.Info.Size()))
|
|
||||||
n2, err := m.Info.MarshalTo(dAtA[i:])
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
i += n2
|
|
||||||
}
|
|
||||||
if m.XXX_unrecognized != nil {
|
|
||||||
i += copy(dAtA[i:], m.XXX_unrecognized)
|
|
||||||
}
|
|
||||||
return i, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func encodeVarintTask(dAtA []byte, offset int, v uint64) int {
|
|
||||||
for v >= 1<<7 {
|
|
||||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
|
||||||
v >>= 7
|
|
||||||
offset++
|
|
||||||
}
|
|
||||||
dAtA[offset] = uint8(v)
|
|
||||||
return offset + 1
|
|
||||||
}
|
|
||||||
func (m *Process) Size() (n int) {
|
|
||||||
if m == nil {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
var l int
|
|
||||||
_ = l
|
|
||||||
l = len(m.ContainerID)
|
|
||||||
if l > 0 {
|
|
||||||
n += 1 + l + sovTask(uint64(l))
|
|
||||||
}
|
|
||||||
l = len(m.ID)
|
|
||||||
if l > 0 {
|
|
||||||
n += 1 + l + sovTask(uint64(l))
|
|
||||||
}
|
|
||||||
if m.Pid != 0 {
|
|
||||||
n += 1 + sovTask(uint64(m.Pid))
|
|
||||||
}
|
|
||||||
if m.Status != 0 {
|
|
||||||
n += 1 + sovTask(uint64(m.Status))
|
|
||||||
}
|
|
||||||
l = len(m.Stdin)
|
|
||||||
if l > 0 {
|
|
||||||
n += 1 + l + sovTask(uint64(l))
|
|
||||||
}
|
|
||||||
l = len(m.Stdout)
|
|
||||||
if l > 0 {
|
|
||||||
n += 1 + l + sovTask(uint64(l))
|
|
||||||
}
|
|
||||||
l = len(m.Stderr)
|
|
||||||
if l > 0 {
|
|
||||||
n += 1 + l + sovTask(uint64(l))
|
|
||||||
}
|
|
||||||
if m.Terminal {
|
|
||||||
n += 2
|
|
||||||
}
|
|
||||||
if m.ExitStatus != 0 {
|
|
||||||
n += 1 + sovTask(uint64(m.ExitStatus))
|
|
||||||
}
|
|
||||||
l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)
|
|
||||||
n += 1 + l + sovTask(uint64(l))
|
|
||||||
if m.XXX_unrecognized != nil {
|
|
||||||
n += len(m.XXX_unrecognized)
|
|
||||||
}
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *ProcessInfo) Size() (n int) {
|
|
||||||
if m == nil {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
var l int
|
|
||||||
_ = l
|
|
||||||
if m.Pid != 0 {
|
|
||||||
n += 1 + sovTask(uint64(m.Pid))
|
|
||||||
}
|
|
||||||
if m.Info != nil {
|
|
||||||
l = m.Info.Size()
|
|
||||||
n += 1 + l + sovTask(uint64(l))
|
|
||||||
}
|
|
||||||
if m.XXX_unrecognized != nil {
|
|
||||||
n += len(m.XXX_unrecognized)
|
|
||||||
}
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
|
|
||||||
func sovTask(x uint64) (n int) {
|
|
||||||
for {
|
|
||||||
n++
|
|
||||||
x >>= 7
|
|
||||||
if x == 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
func sozTask(x uint64) (n int) {
|
|
||||||
return sovTask(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
|
||||||
}
|
|
||||||
func (this *Process) String() string {
|
|
||||||
if this == nil {
|
|
||||||
return "nil"
|
|
||||||
}
|
|
||||||
s := strings.Join([]string{`&Process{`,
|
|
||||||
`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
|
|
||||||
`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
|
|
||||||
`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
|
|
||||||
`Status:` + fmt.Sprintf("%v", this.Status) + `,`,
|
|
||||||
`Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`,
|
|
||||||
`Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`,
|
|
||||||
`Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`,
|
|
||||||
`Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`,
|
|
||||||
`ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`,
|
|
||||||
`ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
|
|
||||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
|
||||||
`}`,
|
|
||||||
}, "")
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
func (this *ProcessInfo) String() string {
|
|
||||||
if this == nil {
|
|
||||||
return "nil"
|
|
||||||
}
|
|
||||||
s := strings.Join([]string{`&ProcessInfo{`,
|
|
||||||
`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
|
|
||||||
`Info:` + strings.Replace(fmt.Sprintf("%v", this.Info), "Any", "types.Any", 1) + `,`,
|
|
||||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
|
||||||
`}`,
|
|
||||||
}, "")
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
func valueToStringTask(v interface{}) string {
|
|
||||||
rv := reflect.ValueOf(v)
|
|
||||||
if rv.IsNil() {
|
|
||||||
return "nil"
|
|
||||||
}
|
|
||||||
pv := reflect.Indirect(rv).Interface()
|
|
||||||
return fmt.Sprintf("*%v", pv)
|
|
||||||
}
|
|
||||||
func (m *Process) Unmarshal(dAtA []byte) error {
|
|
||||||
l := len(dAtA)
|
|
||||||
iNdEx := 0
|
|
||||||
for iNdEx < l {
|
|
||||||
preIndex := iNdEx
|
|
||||||
var wire uint64
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return ErrIntOverflowTask
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
wire |= uint64(b&0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fieldNum := int32(wire >> 3)
|
|
||||||
wireType := int(wire & 0x7)
|
|
||||||
if wireType == 4 {
|
|
||||||
return fmt.Errorf("proto: Process: wiretype end group for non-group")
|
|
||||||
}
|
|
||||||
if fieldNum <= 0 {
|
|
||||||
return fmt.Errorf("proto: Process: illegal tag %d (wire type %d)", fieldNum, wire)
|
|
||||||
}
|
|
||||||
switch fieldNum {
|
|
||||||
case 1:
|
|
||||||
if wireType != 2 {
|
|
||||||
return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
|
|
||||||
}
|
|
||||||
var stringLen uint64
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return ErrIntOverflowTask
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
stringLen |= uint64(b&0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
intStringLen := int(stringLen)
|
|
||||||
if intStringLen < 0 {
|
|
||||||
return ErrInvalidLengthTask
|
|
||||||
}
|
|
||||||
postIndex := iNdEx + intStringLen
|
|
||||||
if postIndex < 0 {
|
|
||||||
return ErrInvalidLengthTask
|
|
||||||
}
|
|
||||||
if postIndex > l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
m.ContainerID = string(dAtA[iNdEx:postIndex])
|
|
||||||
iNdEx = postIndex
|
|
||||||
case 2:
|
|
||||||
if wireType != 2 {
|
|
||||||
return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
|
|
||||||
}
|
|
||||||
var stringLen uint64
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return ErrIntOverflowTask
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
stringLen |= uint64(b&0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
intStringLen := int(stringLen)
|
|
||||||
if intStringLen < 0 {
|
|
||||||
return ErrInvalidLengthTask
|
|
||||||
}
|
|
||||||
postIndex := iNdEx + intStringLen
|
|
||||||
if postIndex < 0 {
|
|
||||||
return ErrInvalidLengthTask
|
|
||||||
}
|
|
||||||
if postIndex > l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
m.ID = string(dAtA[iNdEx:postIndex])
|
|
||||||
iNdEx = postIndex
|
|
||||||
case 3:
|
|
||||||
if wireType != 0 {
|
|
||||||
return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType)
|
|
||||||
}
|
|
||||||
m.Pid = 0
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return ErrIntOverflowTask
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
m.Pid |= uint32(b&0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case 4:
|
|
||||||
if wireType != 0 {
|
|
||||||
return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
|
|
||||||
}
|
|
||||||
m.Status = 0
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return ErrIntOverflowTask
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
m.Status |= Status(b&0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case 5:
|
|
||||||
if wireType != 2 {
|
|
||||||
return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType)
|
|
||||||
}
|
|
||||||
var stringLen uint64
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return ErrIntOverflowTask
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
stringLen |= uint64(b&0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
intStringLen := int(stringLen)
|
|
||||||
if intStringLen < 0 {
|
|
||||||
return ErrInvalidLengthTask
|
|
||||||
}
|
|
||||||
postIndex := iNdEx + intStringLen
|
|
||||||
if postIndex < 0 {
|
|
||||||
return ErrInvalidLengthTask
|
|
||||||
}
|
|
||||||
if postIndex > l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
m.Stdin = string(dAtA[iNdEx:postIndex])
|
|
||||||
iNdEx = postIndex
|
|
||||||
case 6:
|
|
||||||
if wireType != 2 {
|
|
||||||
return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType)
|
|
||||||
}
|
|
||||||
var stringLen uint64
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return ErrIntOverflowTask
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
stringLen |= uint64(b&0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
intStringLen := int(stringLen)
|
|
||||||
if intStringLen < 0 {
|
|
||||||
return ErrInvalidLengthTask
|
|
||||||
}
|
|
||||||
postIndex := iNdEx + intStringLen
|
|
||||||
if postIndex < 0 {
|
|
||||||
return ErrInvalidLengthTask
|
|
||||||
}
|
|
||||||
if postIndex > l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
m.Stdout = string(dAtA[iNdEx:postIndex])
|
|
||||||
iNdEx = postIndex
|
|
||||||
case 7:
|
|
||||||
if wireType != 2 {
|
|
||||||
return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType)
|
|
||||||
}
|
|
||||||
var stringLen uint64
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return ErrIntOverflowTask
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
stringLen |= uint64(b&0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
intStringLen := int(stringLen)
|
|
||||||
if intStringLen < 0 {
|
|
||||||
return ErrInvalidLengthTask
|
|
||||||
}
|
|
||||||
postIndex := iNdEx + intStringLen
|
|
||||||
if postIndex < 0 {
|
|
||||||
return ErrInvalidLengthTask
|
|
||||||
}
|
|
||||||
if postIndex > l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
m.Stderr = string(dAtA[iNdEx:postIndex])
|
|
||||||
iNdEx = postIndex
|
|
||||||
case 8:
|
|
||||||
if wireType != 0 {
|
|
||||||
return fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType)
|
|
||||||
}
|
|
||||||
var v int
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return ErrIntOverflowTask
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
v |= int(b&0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
m.Terminal = bool(v != 0)
|
|
||||||
case 9:
|
|
||||||
if wireType != 0 {
|
|
||||||
return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType)
|
|
||||||
}
|
|
||||||
m.ExitStatus = 0
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return ErrIntOverflowTask
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
m.ExitStatus |= uint32(b&0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case 10:
|
|
||||||
if wireType != 2 {
|
|
||||||
return fmt.Errorf("proto: wrong wireType = %d for field ExitedAt", wireType)
|
|
||||||
}
|
|
||||||
var msglen int
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return ErrIntOverflowTask
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
msglen |= int(b&0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if msglen < 0 {
|
|
||||||
return ErrInvalidLengthTask
|
|
||||||
}
|
|
||||||
postIndex := iNdEx + msglen
|
|
||||||
if postIndex < 0 {
|
|
||||||
return ErrInvalidLengthTask
|
|
||||||
}
|
|
||||||
if postIndex > l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
iNdEx = postIndex
|
|
||||||
default:
|
|
||||||
iNdEx = preIndex
|
|
||||||
skippy, err := skipTask(dAtA[iNdEx:])
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if skippy < 0 {
|
|
||||||
return ErrInvalidLengthTask
|
|
||||||
}
|
|
||||||
if (iNdEx + skippy) < 0 {
|
|
||||||
return ErrInvalidLengthTask
|
|
||||||
}
|
|
||||||
if (iNdEx + skippy) > l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
|
||||||
iNdEx += skippy
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if iNdEx > l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
func (m *ProcessInfo) Unmarshal(dAtA []byte) error {
|
|
||||||
l := len(dAtA)
|
|
||||||
iNdEx := 0
|
|
||||||
for iNdEx < l {
|
|
||||||
preIndex := iNdEx
|
|
||||||
var wire uint64
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return ErrIntOverflowTask
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
wire |= uint64(b&0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fieldNum := int32(wire >> 3)
|
|
||||||
wireType := int(wire & 0x7)
|
|
||||||
if wireType == 4 {
|
|
||||||
return fmt.Errorf("proto: ProcessInfo: wiretype end group for non-group")
|
|
||||||
}
|
|
||||||
if fieldNum <= 0 {
|
|
||||||
return fmt.Errorf("proto: ProcessInfo: illegal tag %d (wire type %d)", fieldNum, wire)
|
|
||||||
}
|
|
||||||
switch fieldNum {
|
|
||||||
case 1:
|
|
||||||
if wireType != 0 {
|
|
||||||
return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType)
|
|
||||||
}
|
|
||||||
m.Pid = 0
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return ErrIntOverflowTask
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
m.Pid |= uint32(b&0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case 2:
|
|
||||||
if wireType != 2 {
|
|
||||||
return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType)
|
|
||||||
}
|
|
||||||
var msglen int
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return ErrIntOverflowTask
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
msglen |= int(b&0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if msglen < 0 {
|
|
||||||
return ErrInvalidLengthTask
|
|
||||||
}
|
|
||||||
postIndex := iNdEx + msglen
|
|
||||||
if postIndex < 0 {
|
|
||||||
return ErrInvalidLengthTask
|
|
||||||
}
|
|
||||||
if postIndex > l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
if m.Info == nil {
|
|
||||||
m.Info = &types.Any{}
|
|
||||||
}
|
|
||||||
if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
iNdEx = postIndex
|
|
||||||
default:
|
|
||||||
iNdEx = preIndex
|
|
||||||
skippy, err := skipTask(dAtA[iNdEx:])
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if skippy < 0 {
|
|
||||||
return ErrInvalidLengthTask
|
|
||||||
}
|
|
||||||
if (iNdEx + skippy) < 0 {
|
|
||||||
return ErrInvalidLengthTask
|
|
||||||
}
|
|
||||||
if (iNdEx + skippy) > l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
|
||||||
iNdEx += skippy
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if iNdEx > l {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
func skipTask(dAtA []byte) (n int, err error) {
|
|
||||||
l := len(dAtA)
|
|
||||||
iNdEx := 0
|
|
||||||
for iNdEx < l {
|
|
||||||
var wire uint64
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return 0, ErrIntOverflowTask
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return 0, io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
wire |= (uint64(b) & 0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
wireType := int(wire & 0x7)
|
|
||||||
switch wireType {
|
|
||||||
case 0:
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return 0, ErrIntOverflowTask
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return 0, io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
iNdEx++
|
|
||||||
if dAtA[iNdEx-1] < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return iNdEx, nil
|
|
||||||
case 1:
|
|
||||||
iNdEx += 8
|
|
||||||
return iNdEx, nil
|
|
||||||
case 2:
|
|
||||||
var length int
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return 0, ErrIntOverflowTask
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return 0, io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
length |= (int(b) & 0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if length < 0 {
|
|
||||||
return 0, ErrInvalidLengthTask
|
|
||||||
}
|
|
||||||
iNdEx += length
|
|
||||||
if iNdEx < 0 {
|
|
||||||
return 0, ErrInvalidLengthTask
|
|
||||||
}
|
|
||||||
return iNdEx, nil
|
|
||||||
case 3:
|
|
||||||
for {
|
|
||||||
var innerWire uint64
|
|
||||||
var start int = iNdEx
|
|
||||||
for shift := uint(0); ; shift += 7 {
|
|
||||||
if shift >= 64 {
|
|
||||||
return 0, ErrIntOverflowTask
|
|
||||||
}
|
|
||||||
if iNdEx >= l {
|
|
||||||
return 0, io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
b := dAtA[iNdEx]
|
|
||||||
iNdEx++
|
|
||||||
innerWire |= (uint64(b) & 0x7F) << shift
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
innerWireType := int(innerWire & 0x7)
|
|
||||||
if innerWireType == 4 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
next, err := skipTask(dAtA[start:])
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
iNdEx = start + next
|
|
||||||
if iNdEx < 0 {
|
|
||||||
return 0, ErrInvalidLengthTask
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return iNdEx, nil
|
|
||||||
case 4:
|
|
||||||
return iNdEx, nil
|
|
||||||
case 5:
|
|
||||||
iNdEx += 4
|
|
||||||
return iNdEx, nil
|
|
||||||
default:
|
|
||||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
panic("unreachable")
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
ErrInvalidLengthTask = fmt.Errorf("proto: negative length found during unmarshaling")
|
|
||||||
ErrIntOverflowTask = fmt.Errorf("proto: integer overflow")
|
|
||||||
)
|
|
|
@ -1,41 +0,0 @@
syntax = "proto3";

package containerd.v1.types;

import weak "gogoproto/gogo.proto";
import "google/protobuf/timestamp.proto";
import "google/protobuf/any.proto";

enum Status {
	option (gogoproto.goproto_enum_prefix) = false;
	option (gogoproto.enum_customname) = "Status";

	UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "StatusUnknown"];
	CREATED = 1 [(gogoproto.enumvalue_customname) = "StatusCreated"];
	RUNNING = 2 [(gogoproto.enumvalue_customname) = "StatusRunning"];
	STOPPED = 3 [(gogoproto.enumvalue_customname) = "StatusStopped"];
	PAUSED = 4 [(gogoproto.enumvalue_customname) = "StatusPaused"];
	PAUSING = 5 [(gogoproto.enumvalue_customname) = "StatusPausing"];
}

message Process {
	string container_id = 1;
	string id = 2;
	uint32 pid = 3;
	Status status = 4;
	string stdin = 5;
	string stdout = 6;
	string stderr = 7;
	bool terminal = 8;
	uint32 exit_status = 9;
	google.protobuf.Timestamp exited_at = 10 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
}

message ProcessInfo {
	// PID is the process ID.
	uint32 pid = 1;
	// Info contains additional process information.
	//
	// Info varies by platform.
	google.protobuf.Any info = 2;
}
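The Status values above describe a task lifecycle (unknown, created, running, stopped, paused, pausing). Below is a minimal sketch of working with such a lifecycle, using a locally defined enum that mirrors the ordering of the proto values; treating only the stopped state as terminal is an assumption of this example, not something the proto states.

package main

import "fmt"

// status mirrors the Status enum above; values follow the same order (UNKNOWN..PAUSING).
type status int32

const (
	statusUnknown status = iota
	statusCreated
	statusRunning
	statusStopped
	statusPaused
	statusPausing
)

// isTerminal reports whether a process in this state has finished running.
// Only STOPPED is treated as terminal in this sketch.
func isTerminal(s status) bool {
	return s == statusStopped
}

func main() {
	for _, s := range []status{statusCreated, statusRunning, statusStopped} {
		fmt.Println(s, isTerminal(s))
	}
}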
@ -1,266 +0,0 @@
|
||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package compression
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"bytes"
|
|
||||||
"compress/gzip"
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"strconv"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"github.com/containerd/containerd/log"
|
|
||||||
)
|
|
||||||
|
|
||||||
type (
|
|
||||||
// Compression is the state represents if compressed or not.
|
|
||||||
Compression int
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// Uncompressed represents the uncompressed.
|
|
||||||
Uncompressed Compression = iota
|
|
||||||
// Gzip is gzip compression algorithm.
|
|
||||||
Gzip
|
|
||||||
)
|
|
||||||
|
|
||||||
const disablePigzEnv = "CONTAINERD_DISABLE_PIGZ"
|
|
||||||
|
|
||||||
var (
|
|
||||||
initPigz sync.Once
|
|
||||||
unpigzPath string
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
bufioReader32KPool = &sync.Pool{
|
|
||||||
New: func() interface{} { return bufio.NewReaderSize(nil, 32*1024) },
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
// DecompressReadCloser include the stream after decompress and the compress method detected.
|
|
||||||
type DecompressReadCloser interface {
|
|
||||||
io.ReadCloser
|
|
||||||
// GetCompression returns the compress method which is used before decompressing
|
|
||||||
GetCompression() Compression
|
|
||||||
}
|
|
||||||
|
|
||||||
type readCloserWrapper struct {
|
|
||||||
io.Reader
|
|
||||||
compression Compression
|
|
||||||
closer func() error
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *readCloserWrapper) Close() error {
|
|
||||||
if r.closer != nil {
|
|
||||||
return r.closer()
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *readCloserWrapper) GetCompression() Compression {
|
|
||||||
return r.compression
|
|
||||||
}
|
|
||||||
|
|
||||||
type writeCloserWrapper struct {
|
|
||||||
io.Writer
|
|
||||||
closer func() error
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *writeCloserWrapper) Close() error {
|
|
||||||
if w.closer != nil {
|
|
||||||
w.closer()
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type bufferedReader struct {
|
|
||||||
buf *bufio.Reader
|
|
||||||
}
|
|
||||||
|
|
||||||
func newBufferedReader(r io.Reader) *bufferedReader {
|
|
||||||
buf := bufioReader32KPool.Get().(*bufio.Reader)
|
|
||||||
buf.Reset(r)
|
|
||||||
return &bufferedReader{buf}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *bufferedReader) Read(p []byte) (n int, err error) {
|
|
||||||
if r.buf == nil {
|
|
||||||
return 0, io.EOF
|
|
||||||
}
|
|
||||||
n, err = r.buf.Read(p)
|
|
||||||
if err == io.EOF {
|
|
||||||
r.buf.Reset(nil)
|
|
||||||
bufioReader32KPool.Put(r.buf)
|
|
||||||
r.buf = nil
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *bufferedReader) Peek(n int) ([]byte, error) {
|
|
||||||
if r.buf == nil {
|
|
||||||
return nil, io.EOF
|
|
||||||
}
|
|
||||||
return r.buf.Peek(n)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DetectCompression detects the compression algorithm of the source.
|
|
||||||
func DetectCompression(source []byte) Compression {
|
|
||||||
for compression, m := range map[Compression][]byte{
|
|
||||||
Gzip: {0x1F, 0x8B, 0x08},
|
|
||||||
} {
|
|
||||||
if len(source) < len(m) {
|
|
||||||
// Len too short
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if bytes.Equal(m, source[:len(m)]) {
|
|
||||||
return compression
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return Uncompressed
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive.
|
|
||||||
func DecompressStream(archive io.Reader) (DecompressReadCloser, error) {
|
|
||||||
buf := newBufferedReader(archive)
|
|
||||||
bs, err := buf.Peek(10)
|
|
||||||
if err != nil && err != io.EOF {
|
|
||||||
// Note: we'll ignore any io.EOF error because there are some odd
|
|
||||||
// cases where the layer.tar file will be empty (zero bytes) and
|
|
||||||
// that results in an io.EOF from the Peek() call. So, in those
|
|
||||||
// cases we'll just treat it as a non-compressed stream and
|
|
||||||
// that means just create an empty layer.
|
|
||||||
// See Issue docker/docker#18170
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
switch compression := DetectCompression(bs); compression {
|
|
||||||
case Uncompressed:
|
|
||||||
return &readCloserWrapper{
|
|
||||||
Reader: buf,
|
|
||||||
compression: compression,
|
|
||||||
}, nil
|
|
||||||
case Gzip:
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
gzReader, err := gzipDecompress(ctx, buf)
|
|
||||||
if err != nil {
|
|
||||||
cancel()
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return &readCloserWrapper{
|
|
||||||
Reader: gzReader,
|
|
||||||
compression: compression,
|
|
||||||
closer: func() error {
|
|
||||||
cancel()
|
|
||||||
return gzReader.Close()
|
|
||||||
},
|
|
||||||
}, nil
|
|
||||||
|
|
||||||
default:
|
|
||||||
return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// CompressStream compresseses the dest with specified compression algorithm.
|
|
||||||
func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
|
|
||||||
switch compression {
|
|
||||||
case Uncompressed:
|
|
||||||
return &writeCloserWrapper{dest, nil}, nil
|
|
||||||
case Gzip:
|
|
||||||
return gzip.NewWriter(dest), nil
|
|
||||||
default:
|
|
||||||
return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Extension returns the extension of a file that uses the specified compression algorithm.
|
|
||||||
func (compression *Compression) Extension() string {
|
|
||||||
switch *compression {
|
|
||||||
case Gzip:
|
|
||||||
return "gz"
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func gzipDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) {
|
|
||||||
initPigz.Do(func() {
|
|
||||||
if unpigzPath = detectPigz(); unpigzPath != "" {
|
|
||||||
log.L.Debug("using pigz for decompression")
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
if unpigzPath == "" {
|
|
||||||
return gzip.NewReader(buf)
|
|
||||||
}
|
|
||||||
|
|
||||||
return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf)
|
|
||||||
}
|
|
||||||
|
|
||||||
func cmdStream(cmd *exec.Cmd, in io.Reader) (io.ReadCloser, error) {
|
|
||||||
reader, writer := io.Pipe()
|
|
||||||
|
|
||||||
cmd.Stdin = in
|
|
||||||
cmd.Stdout = writer
|
|
||||||
|
|
||||||
var errBuf bytes.Buffer
|
|
||||||
cmd.Stderr = &errBuf
|
|
||||||
|
|
||||||
if err := cmd.Start(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
if err := cmd.Wait(); err != nil {
|
|
||||||
writer.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String()))
|
|
||||||
} else {
|
|
||||||
writer.Close()
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
return reader, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func detectPigz() string {
	path, err := exec.LookPath("unpigz")
	if err != nil {
		log.L.WithError(err).Debug("unpigz not found, falling back to go gzip")
		return ""
	}

	// Check if pigz disabled via CONTAINERD_DISABLE_PIGZ env variable
	value := os.Getenv(disablePigzEnv)
	if value == "" {
		return path
	}

	disable, err := strconv.ParseBool(value)
	if err != nil {
		log.L.WithError(err).Warnf("could not parse %s: %s", disablePigzEnv, value)
		return path
	}

	if disable {
		return ""
	}

	return path
}
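The unpigz path above funnels the stream through an external process via cmdStream. A rough standalone sketch of that io.Pipe pattern, using a plain `gzip -d -c` command as a stand-in for unpigz (assumes a gzip binary on PATH; error handling trimmed):

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"io/ioutil"
	"os/exec"
)

// pipeThrough streams `in` through an external command and returns its stdout,
// mirroring cmdStream above: stderr is captured and reported if the command fails.
func pipeThrough(cmd *exec.Cmd, in io.Reader) (io.ReadCloser, error) {
	reader, writer := io.Pipe()
	var errBuf bytes.Buffer
	cmd.Stdin = in
	cmd.Stdout = writer
	cmd.Stderr = &errBuf
	if err := cmd.Start(); err != nil {
		return nil, err
	}
	go func() {
		if err := cmd.Wait(); err != nil {
			writer.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String()))
		} else {
			writer.Close()
		}
	}()
	return reader, nil
}

func main() {
	// Build a small gzip payload in memory.
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	zw.Write([]byte("streamed through an external decompressor"))
	zw.Close()

	// "gzip -d -c" stands in for "unpigz -d -c"; both read stdin and write stdout.
	out, err := pipeThrough(exec.Command("gzip", "-d", "-c"), &buf)
	if err != nil {
		panic(err)
	}
	defer out.Close()
	data, _ := ioutil.ReadAll(out)
	fmt.Println(string(data))
}
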
@ -1,68 +0,0 @@
// +build windows

/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package archive

import (
	"strconv"
	"strings"
	"time"

	"archive/tar"
)

// Forked from https://github.com/golang/go/blob/master/src/archive/tar/strconv.go
// as archive/tar doesn't support CreationTime, but does handle PAX time parsing,
// and there's no need to re-invent the wheel.

// parsePAXTime takes a string of the form %d.%d as described in the PAX
// specification. Note that this implementation allows for negative timestamps,
// which is allowed for by the PAX specification, but not always portable.
func parsePAXTime(s string) (time.Time, error) {
	const maxNanoSecondDigits = 9

	// Split string into seconds and sub-seconds parts.
	ss, sn := s, ""
	if pos := strings.IndexByte(s, '.'); pos >= 0 {
		ss, sn = s[:pos], s[pos+1:]
	}

	// Parse the seconds.
	secs, err := strconv.ParseInt(ss, 10, 64)
	if err != nil {
		return time.Time{}, tar.ErrHeader
	}
	if len(sn) == 0 {
		return time.Unix(secs, 0), nil // No sub-second values
	}

	// Parse the nanoseconds.
	if strings.Trim(sn, "0123456789") != "" {
		return time.Time{}, tar.ErrHeader
	}
	if len(sn) < maxNanoSecondDigits {
		sn += strings.Repeat("0", maxNanoSecondDigits-len(sn)) // Right pad
	} else {
		sn = sn[:maxNanoSecondDigits] // Right truncate
	}
	nsecs, _ := strconv.ParseInt(sn, 10, 64) // Must succeed
	if len(ss) > 0 && ss[0] == '-' {
		return time.Unix(secs, -nsecs), nil // Negative correction
	}
	return time.Unix(secs, nsecs), nil
}
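Roughly how the fractional-second handling above behaves, as a standalone sketch (helper name invented; negative-timestamp handling omitted):

package main

import (
	"fmt"
	"strconv"
	"strings"
	"time"
)

// paxTime mirrors parsePAXTime's handling of "<seconds>.<fraction>" strings:
// the fraction is right-padded or truncated to exactly 9 digits (nanoseconds).
func paxTime(s string) (time.Time, error) {
	const maxNanoSecondDigits = 9
	ss, sn := s, ""
	if pos := strings.IndexByte(s, '.'); pos >= 0 {
		ss, sn = s[:pos], s[pos+1:]
	}
	secs, err := strconv.ParseInt(ss, 10, 64)
	if err != nil {
		return time.Time{}, err
	}
	if sn == "" {
		return time.Unix(secs, 0), nil
	}
	if len(sn) < maxNanoSecondDigits {
		sn += strings.Repeat("0", maxNanoSecondDigits-len(sn)) // pad short fractions
	} else {
		sn = sn[:maxNanoSecondDigits] // truncate long fractions
	}
	nsecs, _ := strconv.ParseInt(sn, 10, 64)
	return time.Unix(secs, nsecs), nil
}

func main() {
	t1, _ := paxTime("1350244992.023960108") // full nanosecond precision
	t2, _ := paxTime("1350244992.3")         // "3" is padded to 300000000 ns
	fmt.Println(t1.UnixNano(), t2.UnixNano())
}
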
@ -1,686 +0,0 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package archive

import (
	"archive/tar"
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"runtime"
	"strings"
	"sync"
	"syscall"
	"time"

	"github.com/containerd/containerd/log"
	"github.com/containerd/continuity/fs"
	"github.com/pkg/errors"
)

var bufPool = &sync.Pool{
	New: func() interface{} {
		buffer := make([]byte, 32*1024)
		return &buffer
	},
}

var errInvalidArchive = errors.New("invalid archive")

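As an aside, bufPool is the usual sync.Pool idiom for reusing 32 KiB copy buffers; a small standalone sketch of borrowing and returning one, in the spirit of copyBuffered further down:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
	"sync"
)

var bufPool = &sync.Pool{
	New: func() interface{} {
		buffer := make([]byte, 32*1024)
		return &buffer
	},
}

func main() {
	// Borrow a pooled buffer, use it for a copy, and return it when done.
	buf := bufPool.Get().(*[]byte)
	defer bufPool.Put(buf)

	var dst bytes.Buffer
	n, err := io.CopyBuffer(&dst, strings.NewReader("pooled copy"), *buf)
	fmt.Println(n, err, dst.String())
}
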
// Diff returns a tar stream of the computed filesystem
// difference between the provided directories.
//
// Produces a tar using OCI style file markers for deletions. Deleted
// files will be prepended with the prefix ".wh.". This style is
// based on AUFS whiteouts.
// See https://github.com/opencontainers/image-spec/blob/master/layer.md
func Diff(ctx context.Context, a, b string) io.ReadCloser {
	r, w := io.Pipe()

	go func() {
		err := WriteDiff(ctx, w, a, b)
		if err = w.CloseWithError(err); err != nil {
			log.G(ctx).WithError(err).Debugf("closing tar pipe failed")
		}
	}()

	return r
}

// WriteDiff writes a tar stream of the computed difference between the
// provided directories.
//
// Produces a tar using OCI style file markers for deletions. Deleted
// files will be prepended with the prefix ".wh.". This style is
// based on AUFS whiteouts.
// See https://github.com/opencontainers/image-spec/blob/master/layer.md
func WriteDiff(ctx context.Context, w io.Writer, a, b string) error {
	cw := newChangeWriter(w, b)
	err := fs.Changes(ctx, a, b, cw.HandleChange)
	if err != nil {
		return errors.Wrap(err, "failed to create diff tar stream")
	}
	return cw.Close()
}

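For orientation, a sketch of how Diff is typically driven from caller code, assuming the package is imported under its containerd path as vendored here (directory paths are illustrative):

package main

import (
	"context"
	"io"
	"os"

	"github.com/containerd/containerd/archive"
)

func main() {
	ctx := context.Background()

	// Stream the changes between a lower ("a") and upper ("b") snapshot
	// directory into layer.tar; deletions show up as ".wh." whiteout entries.
	out, err := os.Create("layer.tar")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	rc := archive.Diff(ctx, "/tmp/snapshot-a", "/tmp/snapshot-b")
	defer rc.Close()

	if _, err := io.Copy(out, rc); err != nil {
		panic(err)
	}
}
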
const (
	// whiteoutPrefix prefix means file is a whiteout. If this is followed by a
	// filename this means that file has been removed from the base layer.
	// See https://github.com/opencontainers/image-spec/blob/master/layer.md#whiteouts
	whiteoutPrefix = ".wh."

	// whiteoutMetaPrefix prefix means whiteout has a special meaning and is not
	// for removing an actual file. Normally these files are excluded from exported
	// archives.
	whiteoutMetaPrefix = whiteoutPrefix + whiteoutPrefix

	// whiteoutLinkDir is a directory AUFS uses for storing hardlink links to other
	// layers. Normally these should not go into exported archives and all changed
	// hardlinks should be copied to the top layer.
	whiteoutLinkDir = whiteoutMetaPrefix + "plnk"

	// whiteoutOpaqueDir file means directory has been made opaque - meaning
	// readdir calls to this directory do not follow to lower layers.
	whiteoutOpaqueDir = whiteoutMetaPrefix + ".opq"

	paxSchilyXattr = "SCHILY.xattr."
)

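To make the markers concrete, a small standalone sketch (constants repeated locally) of the names these prefixes produce for a deleted file and an opaque directory:

package main

import (
	"fmt"
	"path/filepath"
)

const (
	whiteoutPrefix    = ".wh."
	whiteoutOpaqueDir = ".wh..wh..opq"
)

func main() {
	// Deleting a/b/file is recorded as an empty tar entry named a/b/.wh.file,
	// which is what HandleChange emits for fs.ChangeKindDelete below.
	deleted := "a/b/file"
	whiteout := filepath.Join(filepath.Dir(deleted), whiteoutPrefix+filepath.Base(deleted))
	fmt.Println(whiteout) // a/b/.wh.file

	// Making a/b opaque is recorded as a marker entry inside the directory.
	fmt.Println(filepath.Join("a/b", whiteoutOpaqueDir)) // a/b/.wh..wh..opq
}
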
// Apply applies a tar stream of an OCI style diff tar.
// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets
func Apply(ctx context.Context, root string, r io.Reader, opts ...ApplyOpt) (int64, error) {
	root = filepath.Clean(root)

	var options ApplyOptions
	for _, opt := range opts {
		if err := opt(&options); err != nil {
			return 0, errors.Wrap(err, "failed to apply option")
		}
	}
	if options.Filter == nil {
		options.Filter = all
	}

	return apply(ctx, root, tar.NewReader(r), options)
}

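A sketch of the consuming side, under the same import-path assumption as the Diff example above (paths illustrative):

package main

import (
	"context"
	"fmt"
	"os"

	"github.com/containerd/containerd/archive"
)

func main() {
	ctx := context.Background()

	layer, err := os.Open("layer.tar")
	if err != nil {
		panic(err)
	}
	defer layer.Close()

	// Unpack the layer (including whiteout handling) under an existing root.
	n, err := archive.Apply(ctx, "/tmp/rootfs", layer)
	if err != nil {
		panic(err)
	}
	fmt.Println("bytes applied:", n)
}
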
// applyNaive applies a tar stream of an OCI style diff tar.
// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets
func applyNaive(ctx context.Context, root string, tr *tar.Reader, options ApplyOptions) (size int64, err error) {
	var (
		dirs []*tar.Header

		// Used for handling opaque directory markers which
		// may occur out of order
		unpackedPaths = make(map[string]struct{})

		// Used for aufs plink directory
		aufsTempdir   = ""
		aufsHardlinks = make(map[string]*tar.Header)
	)

	// Iterate through the files in the archive.
	for {
		select {
		case <-ctx.Done():
			return 0, ctx.Err()
		default:
		}

		hdr, err := tr.Next()
		if err == io.EOF {
			// end of tar archive
			break
		}
		if err != nil {
			return 0, err
		}

		size += hdr.Size

		// Normalize name, for safety and for a simple is-root check
		hdr.Name = filepath.Clean(hdr.Name)

		accept, err := options.Filter(hdr)
		if err != nil {
			return 0, err
		}
		if !accept {
			continue
		}

		if skipFile(hdr) {
			log.G(ctx).Warnf("file %q ignored: archive may not be supported on system", hdr.Name)
			continue
		}

		// Split name and resolve symlinks for root directory.
		ppath, base := filepath.Split(hdr.Name)
		ppath, err = fs.RootPath(root, ppath)
		if err != nil {
			return 0, errors.Wrap(err, "failed to get root path")
		}

		// Join to root before joining to parent path to ensure relative links are
		// already resolved based on the root before adding to parent.
		path := filepath.Join(ppath, filepath.Join("/", base))
		if path == root {
			log.G(ctx).Debugf("file %q ignored: resolved to root", hdr.Name)
			continue
		}

		// If file is not directly under root, ensure parent directory
		// exists or is created.
		if ppath != root {
			parentPath := ppath
			if base == "" {
				parentPath = filepath.Dir(path)
			}
			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
				err = mkdirAll(parentPath, 0755)
				if err != nil {
					return 0, err
				}
			}
		}

		// Skip AUFS metadata dirs
		if strings.HasPrefix(hdr.Name, whiteoutMetaPrefix) {
			// Regular files inside /.wh..wh.plnk can be used as hardlink targets
			// We don't want this directory, but we need the files in them so that
			// such hardlinks can be resolved.
			if strings.HasPrefix(hdr.Name, whiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
				basename := filepath.Base(hdr.Name)
				aufsHardlinks[basename] = hdr
				if aufsTempdir == "" {
					if aufsTempdir, err = ioutil.TempDir(os.Getenv("XDG_RUNTIME_DIR"), "dockerplnk"); err != nil {
						return 0, err
					}
					defer os.RemoveAll(aufsTempdir)
				}
				p, err := fs.RootPath(aufsTempdir, basename)
				if err != nil {
					return 0, err
				}
				if err := createTarFile(ctx, p, root, hdr, tr); err != nil {
					return 0, err
				}
			}

			if hdr.Name != whiteoutOpaqueDir {
				continue
			}
		}

		if strings.HasPrefix(base, whiteoutPrefix) {
			dir := filepath.Dir(path)
			if base == whiteoutOpaqueDir {
				_, err := os.Lstat(dir)
				if err != nil {
					return 0, err
				}
				err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
					if err != nil {
						if os.IsNotExist(err) {
							err = nil // parent was deleted
						}
						return err
					}
					if path == dir {
						return nil
					}
					if _, exists := unpackedPaths[path]; !exists {
						err := os.RemoveAll(path)
						return err
					}
					return nil
				})
				if err != nil {
					return 0, err
				}
				continue
			}

			originalBase := base[len(whiteoutPrefix):]
			originalPath := filepath.Join(dir, originalBase)

			// Ensure originalPath is under dir
			if dir[len(dir)-1] != filepath.Separator {
				dir += string(filepath.Separator)
			}
			if !strings.HasPrefix(originalPath, dir) {
				return 0, errors.Wrapf(errInvalidArchive, "invalid whiteout name: %v", base)
			}

			if err := os.RemoveAll(originalPath); err != nil {
				return 0, err
			}
			continue
		}
		// If path exists we almost always just want to remove and replace it.
		// The only exception is when it is a directory *and* the file from
		// the layer is also a directory. Then we want to merge them (i.e.
		// just apply the metadata from the layer).
		if fi, err := os.Lstat(path); err == nil {
			if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
				if err := os.RemoveAll(path); err != nil {
					return 0, err
				}
			}
		}

		srcData := io.Reader(tr)
		srcHdr := hdr

		// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
		// we manually retarget these into the temporary files we extracted them into
		if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), whiteoutLinkDir) {
			linkBasename := filepath.Base(hdr.Linkname)
			srcHdr = aufsHardlinks[linkBasename]
			if srcHdr == nil {
				return 0, fmt.Errorf("invalid aufs hardlink")
			}
			p, err := fs.RootPath(aufsTempdir, linkBasename)
			if err != nil {
				return 0, err
			}
			tmpFile, err := os.Open(p)
			if err != nil {
				return 0, err
			}
			defer tmpFile.Close()
			srcData = tmpFile
		}

		if err := createTarFile(ctx, path, root, srcHdr, srcData); err != nil {
			return 0, err
		}

		// Directory mtimes must be handled at the end to avoid further
		// file creation in them from modifying the directory mtime
		if hdr.Typeflag == tar.TypeDir {
			dirs = append(dirs, hdr)
		}
		unpackedPaths[path] = struct{}{}
	}

	for _, hdr := range dirs {
		path, err := fs.RootPath(root, hdr.Name)
		if err != nil {
			return 0, err
		}
		if err := chtimes(path, boundTime(latestTime(hdr.AccessTime, hdr.ModTime)), boundTime(hdr.ModTime)); err != nil {
			return 0, err
		}
	}

	return size, nil
}

func createTarFile(ctx context.Context, path, extractDir string, hdr *tar.Header, reader io.Reader) error {
	// hdr.Mode is in linux format, which we can use for syscalls,
	// but for os.Foo() calls we need the mode converted to os.FileMode,
	// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
	hdrInfo := hdr.FileInfo()

	switch hdr.Typeflag {
	case tar.TypeDir:
		// Create directory unless it exists as a directory already.
		// In that case we just want to merge the two
		if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
			if err := mkdir(path, hdrInfo.Mode()); err != nil {
				return err
			}
		}

	case tar.TypeReg, tar.TypeRegA:
		file, err := openFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, hdrInfo.Mode())
		if err != nil {
			return err
		}

		_, err = copyBuffered(ctx, file, reader)
		if err1 := file.Close(); err == nil {
			err = err1
		}
		if err != nil {
			return err
		}

	case tar.TypeBlock, tar.TypeChar:
		// Handle this in an OS-specific way
		if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
			return err
		}

	case tar.TypeFifo:
		// Handle this in an OS-specific way
		if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
			return err
		}

	case tar.TypeLink:
		targetPath, err := hardlinkRootPath(extractDir, hdr.Linkname)
		if err != nil {
			return err
		}

		if err := os.Link(targetPath, path); err != nil {
			return err
		}

	case tar.TypeSymlink:
		if err := os.Symlink(hdr.Linkname, path); err != nil {
			return err
		}

	case tar.TypeXGlobalHeader:
		log.G(ctx).Debug("PAX Global Extended Headers found and ignored")
		return nil

	default:
		return errors.Errorf("unhandled tar header type %d\n", hdr.Typeflag)
	}

	// Lchown is not supported on Windows.
	if runtime.GOOS != "windows" {
		if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil {
			return err
		}
	}

	for key, value := range hdr.PAXRecords {
		if strings.HasPrefix(key, paxSchilyXattr) {
			key = key[len(paxSchilyXattr):]
			if err := setxattr(path, key, value); err != nil {
				if errors.Cause(err) == syscall.ENOTSUP {
					log.G(ctx).WithError(err).Warnf("ignored xattr %s in archive", key)
					continue
				}
				return err
			}
		}
	}

	// There is no LChmod, so ignore mode for symlink. Also, this
	// must happen after chown, as that can modify the file mode
	if err := handleLChmod(hdr, path, hdrInfo); err != nil {
		return err
	}

	return chtimes(path, boundTime(latestTime(hdr.AccessTime, hdr.ModTime)), boundTime(hdr.ModTime))
}

type changeWriter struct {
	tw        *tar.Writer
	source    string
	whiteoutT time.Time
	inodeSrc  map[uint64]string
	inodeRefs map[uint64][]string
	addedDirs map[string]struct{}
}

func newChangeWriter(w io.Writer, source string) *changeWriter {
	return &changeWriter{
		tw:        tar.NewWriter(w),
		source:    source,
		whiteoutT: time.Now(),
		inodeSrc:  map[uint64]string{},
		inodeRefs: map[uint64][]string{},
		addedDirs: map[string]struct{}{},
	}
}

func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, err error) error {
	if err != nil {
		return err
	}
	if k == fs.ChangeKindDelete {
		whiteOutDir := filepath.Dir(p)
		whiteOutBase := filepath.Base(p)
		whiteOut := filepath.Join(whiteOutDir, whiteoutPrefix+whiteOutBase)
		hdr := &tar.Header{
			Typeflag:   tar.TypeReg,
			Name:       whiteOut[1:],
			Size:       0,
			ModTime:    cw.whiteoutT,
			AccessTime: cw.whiteoutT,
			ChangeTime: cw.whiteoutT,
		}
		if err := cw.includeParents(hdr); err != nil {
			return err
		}
		if err := cw.tw.WriteHeader(hdr); err != nil {
			return errors.Wrap(err, "failed to write whiteout header")
		}
	} else {
		var (
			link   string
			err    error
			source = filepath.Join(cw.source, p)
		)

		switch {
		case f.Mode()&os.ModeSocket != 0:
			return nil // ignore sockets
		case f.Mode()&os.ModeSymlink != 0:
			if link, err = os.Readlink(source); err != nil {
				return err
			}
		}

		hdr, err := tar.FileInfoHeader(f, link)
		if err != nil {
			return err
		}

		hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))

		name := p
		if strings.HasPrefix(name, string(filepath.Separator)) {
			name, err = filepath.Rel(string(filepath.Separator), name)
			if err != nil {
				return errors.Wrap(err, "failed to make path relative")
			}
		}
		name, err = tarName(name)
		if err != nil {
			return errors.Wrap(err, "cannot canonicalize path")
		}
		// suffix with '/' for directories
		if f.IsDir() && !strings.HasSuffix(name, "/") {
			name += "/"
		}
		hdr.Name = name

		if err := setHeaderForSpecialDevice(hdr, name, f); err != nil {
			return errors.Wrap(err, "failed to set device headers")
		}

		// additionalLinks stores file names which must be linked to
		// this file when this file is added
		var additionalLinks []string
		inode, isHardlink := fs.GetLinkInfo(f)
		if isHardlink {
			// If the inode has a source, always link to it
			if source, ok := cw.inodeSrc[inode]; ok {
				hdr.Typeflag = tar.TypeLink
				hdr.Linkname = source
				hdr.Size = 0
			} else {
				if k == fs.ChangeKindUnmodified {
					cw.inodeRefs[inode] = append(cw.inodeRefs[inode], name)
					return nil
				}
				cw.inodeSrc[inode] = name
				additionalLinks = cw.inodeRefs[inode]
				delete(cw.inodeRefs, inode)
			}
		} else if k == fs.ChangeKindUnmodified {
			// Nothing to write to diff
			return nil
		}

		if capability, err := getxattr(source, "security.capability"); err != nil {
			return errors.Wrap(err, "failed to get capabilities xattr")
		} else if capability != nil {
			if hdr.PAXRecords == nil {
				hdr.PAXRecords = map[string]string{}
			}
			hdr.PAXRecords[paxSchilyXattr+"security.capability"] = string(capability)
		}

		if err := cw.includeParents(hdr); err != nil {
			return err
		}
		if err := cw.tw.WriteHeader(hdr); err != nil {
			return errors.Wrap(err, "failed to write file header")
		}

		if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
			file, err := open(source)
			if err != nil {
				return errors.Wrapf(err, "failed to open path: %v", source)
			}
			defer file.Close()

			n, err := copyBuffered(context.TODO(), cw.tw, file)
			if err != nil {
				return errors.Wrap(err, "failed to copy")
			}
			if n != hdr.Size {
				return errors.New("short write copying file")
			}
		}

		if additionalLinks != nil {
			source = hdr.Name
			for _, extra := range additionalLinks {
				hdr.Name = extra
				hdr.Typeflag = tar.TypeLink
				hdr.Linkname = source
				hdr.Size = 0

				if err := cw.includeParents(hdr); err != nil {
					return err
				}
				if err := cw.tw.WriteHeader(hdr); err != nil {
					return errors.Wrap(err, "failed to write file header")
				}
			}
		}
	}
	return nil
}

func (cw *changeWriter) Close() error {
	if err := cw.tw.Close(); err != nil {
		return errors.Wrap(err, "failed to close tar writer")
	}
	return nil
}

func (cw *changeWriter) includeParents(hdr *tar.Header) error {
	name := strings.TrimRight(hdr.Name, "/")
	fname := filepath.Join(cw.source, name)
	parent := filepath.Dir(name)
	pname := filepath.Join(cw.source, parent)

	// Do not include root directory as parent
	if fname != cw.source && pname != cw.source {
		_, ok := cw.addedDirs[parent]
		if !ok {
			cw.addedDirs[parent] = struct{}{}
			fi, err := os.Stat(pname)
			if err != nil {
				return err
			}
			if err := cw.HandleChange(fs.ChangeKindModify, parent, fi, nil); err != nil {
				return err
			}
		}
	}
	if hdr.Typeflag == tar.TypeDir {
		cw.addedDirs[name] = struct{}{}
	}
	return nil
}

func copyBuffered(ctx context.Context, dst io.Writer, src io.Reader) (written int64, err error) {
	buf := bufPool.Get().(*[]byte)
	defer bufPool.Put(buf)

	for {
		select {
		case <-ctx.Done():
			err = ctx.Err()
			return
		default:
		}

		nr, er := src.Read(*buf)
		if nr > 0 {
			nw, ew := dst.Write((*buf)[0:nr])
			if nw > 0 {
				written += int64(nw)
			}
			if ew != nil {
				err = ew
				break
			}
			if nr != nw {
				err = io.ErrShortWrite
				break
			}
		}
		if er != nil {
			if er != io.EOF {
				err = er
			}
			break
		}
	}
	return written, err
}

// hardlinkRootPath returns target linkname, evaluating and bounding any
// symlink to the parent directory.
//
// NOTE: Allow hardlink to the softlink, not the real one. For example,
//
//	touch /tmp/zzz
//	ln -s /tmp/zzz /tmp/xxx
//	ln /tmp/xxx /tmp/yyy
//
// /tmp/yyy should link to the symlink /tmp/xxx itself, not to /tmp/zzz.
func hardlinkRootPath(root, linkname string) (string, error) {
	ppath, base := filepath.Split(linkname)
	ppath, err := fs.RootPath(root, ppath)
	if err != nil {
		return "", err
	}

	targetPath := filepath.Join(ppath, base)
	if !strings.HasPrefix(targetPath, root) {
		targetPath = root
	}
	return targetPath, nil
}
@ -1,38 +0,0 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package archive

import "archive/tar"

// ApplyOpt allows setting mutable archive apply properties on creation
type ApplyOpt func(options *ApplyOptions) error

// Filter specific files from the archive
type Filter func(*tar.Header) (bool, error)

// all allows all files
func all(_ *tar.Header) (bool, error) {
	return true, nil
}

// WithFilter uses the filter to select which files are to be extracted.
func WithFilter(f Filter) ApplyOpt {
	return func(options *ApplyOptions) error {
		options.Filter = f
		return nil
	}
}
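Since ApplyOpt and Filter are the extension points here, a sketch of packaging a reusable option on top of WithFilter (the helper name is invented for illustration; same import-path assumption as the earlier examples):

package main

import (
	"archive/tar"
	"fmt"
	"strings"

	"github.com/containerd/containerd/archive"
)

// withoutPrefixes builds an ApplyOpt whose Filter rejects any entry whose
// name falls under one of the given prefixes.
func withoutPrefixes(prefixes ...string) archive.ApplyOpt {
	return archive.WithFilter(func(hdr *tar.Header) (bool, error) {
		for _, p := range prefixes {
			if strings.HasPrefix(hdr.Name, p) {
				return false, nil
			}
		}
		return true, nil
	})
}

func main() {
	// The option would be passed to archive.Apply alongside the layer stream.
	opt := withoutPrefixes("etc/", "var/")
	fmt.Printf("%T\n", opt) // archive.ApplyOpt
}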