mirror of https://github.com/docker/cli.git
Merge pull request #3139 from ndeloof/drop_kubernetes_support
Drop support for (archived) Compose-on-Kubernetes
commit aa75635eea
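In short: this merge removes Compose-on-Kubernetes end to end. The StackOrchestrator concept disappears from the Cli interface and from context metadata, the kubernetes endpoint is dropped from docker context create/update/export/ls, and docker stack always targets Swarm. The --kubernetes, --default-stack-orchestrator, --kubeconfig, and --orchestrator flags are kept so existing scripts still parse, but they are marked deprecated and their values are ignored. A minimal standalone sketch of that keep-but-ignore pattern, using the same spf13/pflag calls that appear in the hunks below (illustrative example, not code from this PR):

package main

import (
	"fmt"
	"os"

	"github.com/spf13/pflag"
)

func main() {
	flags := pflag.NewFlagSet("example", pflag.ContinueOnError)
	// The flag still exists, so `--orchestrator=kubernetes` keeps parsing...
	orchestrator := flags.String("orchestrator", "", "Orchestrator to use (swarm|all)")
	// ...but pflag prints a deprecation warning whenever it is set.
	flags.MarkDeprecated("orchestrator", "option will be ignored")

	if err := flags.Parse([]string{"--orchestrator=kubernetes"}); err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	_ = orchestrator // the parsed value is intentionally never read
	fmt.Println("orchestrator flag accepted but ignored")
}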
.golangci.yml
@@ -28,9 +28,6 @@ linters:
 run:
   timeout: 5m
-  skip-dirs:
-    - cli/command/stack/kubernetes/api/openapi
-    - cli/command/stack/kubernetes/api/client
   skip-files:
     - cli/compose/schema/bindata.go
     - .*generated.*
cli/command/cli.go
@@ -60,7 +60,6 @@ type Cli interface {
 	ContentTrustEnabled() bool
 	ContextStore() store.Store
 	CurrentContext() string
-	StackOrchestrator(flagValue string) (Orchestrator, error)
 	DockerEndpoint() docker.Endpoint
 }
@@ -367,25 +366,6 @@ func (cli *DockerCli) CurrentContext() string {
 	return cli.currentContext
 }
 
-// StackOrchestrator resolves which stack orchestrator is in use
-func (cli *DockerCli) StackOrchestrator(flagValue string) (Orchestrator, error) {
-	currentContext := cli.CurrentContext()
-	ctxRaw, err := cli.ContextStore().GetMetadata(currentContext)
-	if store.IsErrContextDoesNotExist(err) {
-		// case where the currentContext has been removed (CLI behavior is to fallback to using DOCKER_HOST based resolution)
-		return GetStackOrchestrator(flagValue, "", cli.ConfigFile().StackOrchestrator, cli.Err())
-	}
-	if err != nil {
-		return "", err
-	}
-	ctxMeta, err := GetDockerContext(ctxRaw)
-	if err != nil {
-		return "", err
-	}
-	ctxOrchestrator := string(ctxMeta.StackOrchestrator)
-	return GetStackOrchestrator(flagValue, ctxOrchestrator, cli.ConfigFile().StackOrchestrator, cli.Err())
-}
-
 // DockerEndpoint returns the current docker endpoint
 func (cli *DockerCli) DockerEndpoint() docker.Endpoint {
 	return cli.dockerEndpoint
cli/command/context.go
@@ -9,9 +9,8 @@ import (
 // DockerContext is a typed representation of what we put in Context metadata
 type DockerContext struct {
-	Description       string
-	StackOrchestrator Orchestrator
-	AdditionalFields  map[string]interface{}
+	Description      string
+	AdditionalFields map[string]interface{}
 }
 
 // MarshalJSON implements custom JSON marshalling
@@ -20,9 +19,6 @@ func (dc DockerContext) MarshalJSON() ([]byte, error) {
 	if dc.Description != "" {
 		s["Description"] = dc.Description
 	}
-	if dc.StackOrchestrator != "" {
-		s["StackOrchestrator"] = dc.StackOrchestrator
-	}
 	if dc.AdditionalFields != nil {
 		for k, v := range dc.AdditionalFields {
 			s[k] = v
@@ -41,8 +37,6 @@ func (dc *DockerContext) UnmarshalJSON(payload []byte) error {
 		switch k {
 		case "Description":
 			dc.Description = v.(string)
-		case "StackOrchestrator":
-			dc.StackOrchestrator = Orchestrator(v.(string))
 		default:
 			if dc.AdditionalFields == nil {
 				dc.AdditionalFields = make(map[string]interface{})
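An aside on the hunk above: DockerContext keeps custom (un)marshalling so that unknown metadata keys survive a round trip through AdditionalFields. That is also what makes dropping StackOrchestrator safe for existing context files; the now-unknown key simply lands in AdditionalFields. A self-contained sketch of the same pattern (the type name here is illustrative, not the CLI's):

package main

import (
	"encoding/json"
	"fmt"
)

// Ctx mimics DockerContext: one typed field, everything else preserved.
type Ctx struct {
	Description      string
	AdditionalFields map[string]interface{}
}

func (c Ctx) MarshalJSON() ([]byte, error) {
	s := map[string]interface{}{}
	if c.Description != "" {
		s["Description"] = c.Description
	}
	for k, v := range c.AdditionalFields {
		s[k] = v // unknown keys are flattened back to the top level
	}
	return json.Marshal(s)
}

func (c *Ctx) UnmarshalJSON(payload []byte) error {
	var raw map[string]interface{}
	if err := json.Unmarshal(payload, &raw); err != nil {
		return err
	}
	for k, v := range raw {
		switch k {
		case "Description":
			c.Description = v.(string)
		default:
			// Any key without a typed field (e.g. an old StackOrchestrator
			// entry) is kept here instead of being discarded.
			if c.AdditionalFields == nil {
				c.AdditionalFields = make(map[string]interface{})
			}
			c.AdditionalFields[k] = v
		}
	}
	return nil
}

func main() {
	in := []byte(`{"Description":"test","foo":"bar"}`)
	var c Ctx
	_ = json.Unmarshal(in, &c)
	out, _ := json.Marshal(c)
	fmt.Println(string(out)) // {"Description":"test","foo":"bar"}
}

This matches the expectation in the updated TestDockerContextMetadataKeepAdditionalFields further down.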
cli/command/context/create.go
@@ -8,7 +8,6 @@ import (
 	"github.com/docker/cli/cli"
 	"github.com/docker/cli/cli/command"
 	"github.com/docker/cli/cli/context/docker"
-	"github.com/docker/cli/cli/context/kubernetes"
 	"github.com/docker/cli/cli/context/store"
 	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
@@ -16,12 +15,15 @@ import (
 // CreateOptions are the options used for creating a context
 type CreateOptions struct {
-	Name                     string
-	Description              string
+	Name        string
+	Description string
+	Docker      map[string]string
+	From        string
+
+	// Deprecated
 	DefaultStackOrchestrator string
-	Docker                   map[string]string
-	Kubernetes               map[string]string
-	From                     string
+	// Deprecated
+	Kubernetes map[string]string
 }
 
 func longCreateDescription() string {
@@ -33,13 +35,6 @@ func longCreateDescription() string {
 		fmt.Fprintf(tw, "%s\t%s\n", d.name, d.description)
 	}
 	tw.Flush()
-	buf.WriteString("\nKubernetes endpoint config:\n\n")
-	tw = tabwriter.NewWriter(buf, 20, 1, 3, ' ', 0)
-	fmt.Fprintln(tw, "NAME\tDESCRIPTION")
-	for _, d := range kubernetesConfigKeysDescriptions {
-		fmt.Fprintf(tw, "%s\t%s\n", d.name, d.description)
-	}
-	tw.Flush()
 	buf.WriteString("\nExample:\n\n$ docker context create my-context --description \"some description\" --docker \"host=tcp://myserver:2376,ca=~/ca-file,cert=~/cert-file,key=~/key-file\"\n")
 	return buf.String()
 }
@@ -63,10 +58,12 @@ func newCreateCommand(dockerCli command.Cli) *cobra.Command {
 		"default-stack-orchestrator", "",
 		"Default orchestrator for stack operations to use with this context (swarm|kubernetes|all)")
+	flags.SetAnnotation("default-stack-orchestrator", "deprecated", nil)
+	flags.MarkDeprecated("default-stack-orchestrator", "option will be ignored")
 	flags.StringToStringVar(&opts.Docker, "docker", nil, "set the docker endpoint")
 	flags.StringToStringVar(&opts.Kubernetes, "kubernetes", nil, "set the kubernetes endpoint")
 	flags.SetAnnotation("kubernetes", "kubernetes", nil)
+	flags.SetAnnotation("kubernetes", "deprecated", nil)
+	flags.MarkDeprecated("kubernetes", "option will be ignored")
 	flags.StringVar(&opts.From, "from", "", "create context from a named context")
 	return cmd
 }
@@ -74,20 +71,17 @@ func newCreateCommand(dockerCli command.Cli) *cobra.Command {
 // RunCreate creates a Docker context
 func RunCreate(cli command.Cli, o *CreateOptions) error {
 	s := cli.ContextStore()
-	if err := checkContextNameForCreation(s, o.Name); err != nil {
-		return err
-	}
-	stackOrchestrator, err := command.NormalizeOrchestrator(o.DefaultStackOrchestrator)
+	err := checkContextNameForCreation(s, o.Name)
 	if err != nil {
-		return errors.Wrap(err, "unable to parse default-stack-orchestrator")
+		return err
 	}
 	switch {
 	case o.From == "" && o.Docker == nil && o.Kubernetes == nil:
-		err = createFromExistingContext(s, cli.CurrentContext(), stackOrchestrator, o)
+		err = createFromExistingContext(s, cli.CurrentContext(), o)
 	case o.From != "":
-		err = createFromExistingContext(s, o.From, stackOrchestrator, o)
+		err = createFromExistingContext(s, o.From, o)
 	default:
-		err = createNewContext(o, stackOrchestrator, cli, s)
+		err = createNewContext(o, cli, s)
 	}
 	if err == nil {
 		fmt.Fprintln(cli.Out(), o.Name)
@@ -96,11 +90,11 @@ func RunCreate(cli command.Cli, o *CreateOptions) error {
 	return err
 }
 
-func createNewContext(o *CreateOptions, stackOrchestrator command.Orchestrator, cli command.Cli, s store.Writer) error {
+func createNewContext(o *CreateOptions, cli command.Cli, s store.Writer) error {
 	if o.Docker == nil {
 		return errors.New("docker endpoint configuration is required")
 	}
-	contextMetadata := newContextMetadata(stackOrchestrator, o)
+	contextMetadata := newContextMetadata(o)
 	contextTLSData := store.ContextTLSData{
 		Endpoints: make(map[string]store.EndpointTLSData),
 	}
@@ -112,22 +106,7 @@ func createNewContext(o *CreateOptions, stackOrchestrator command.Orchestrator,
 	if dockerTLS != nil {
 		contextTLSData.Endpoints[docker.DockerEndpoint] = *dockerTLS
 	}
-	if o.Kubernetes != nil {
-		kubernetesEP, kubernetesTLS, err := getKubernetesEndpointMetadataAndTLS(cli, o.Kubernetes)
-		if err != nil {
-			return errors.Wrap(err, "unable to create kubernetes endpoint config")
-		}
-		if kubernetesEP == nil && stackOrchestrator.HasKubernetes() {
-			return errors.Errorf("cannot specify orchestrator %q without configuring a Kubernetes endpoint", stackOrchestrator)
-		}
-		if kubernetesEP != nil {
-			contextMetadata.Endpoints[kubernetes.KubernetesEndpoint] = kubernetesEP
-		}
-		if kubernetesTLS != nil {
-			contextTLSData.Endpoints[kubernetes.KubernetesEndpoint] = *kubernetesTLS
-		}
-	}
-	if err := validateEndpointsAndOrchestrator(contextMetadata); err != nil {
+	if err := validateEndpoints(contextMetadata); err != nil {
 		return err
 	}
 	if err := s.CreateOrUpdate(contextMetadata); err != nil {
@@ -152,26 +131,24 @@ func checkContextNameForCreation(s store.Reader, name string) error {
 	return nil
 }
 
-func createFromExistingContext(s store.ReaderWriter, fromContextName string, stackOrchestrator command.Orchestrator, o *CreateOptions) error {
+func createFromExistingContext(s store.ReaderWriter, fromContextName string, o *CreateOptions) error {
 	if len(o.Docker) != 0 || len(o.Kubernetes) != 0 {
 		return errors.New("cannot use --docker or --kubernetes flags when --from is set")
 	}
-	reader := store.Export(fromContextName, &descriptionAndOrchestratorStoreDecorator{
-		Reader:       s,
-		description:  o.Description,
-		orchestrator: stackOrchestrator,
+	reader := store.Export(fromContextName, &descriptionDecorator{
+		Reader:      s,
+		description: o.Description,
 	})
 	defer reader.Close()
 	return store.Import(o.Name, s, reader)
 }
 
-type descriptionAndOrchestratorStoreDecorator struct {
+type descriptionDecorator struct {
 	store.Reader
-	description  string
-	orchestrator command.Orchestrator
+	description string
 }
 
-func (d *descriptionAndOrchestratorStoreDecorator) GetMetadata(name string) (store.Metadata, error) {
+func (d *descriptionDecorator) GetMetadata(name string) (store.Metadata, error) {
 	c, err := d.Reader.GetMetadata(name)
 	if err != nil {
 		return c, err
@@ -183,19 +160,15 @@ func (d *descriptionAndOrchestratorStoreDecorator) GetMetadata(name string) (store.Metadata, error) {
 	if d.description != "" {
 		typedContext.Description = d.description
 	}
-	if d.orchestrator != command.Orchestrator("") {
-		typedContext.StackOrchestrator = d.orchestrator
-	}
 	c.Metadata = typedContext
 	return c, nil
 }
 
-func newContextMetadata(stackOrchestrator command.Orchestrator, o *CreateOptions) store.Metadata {
+func newContextMetadata(o *CreateOptions) store.Metadata {
 	return store.Metadata{
 		Endpoints: make(map[string]interface{}),
 		Metadata: command.DockerContext{
-			Description:       o.Description,
-			StackOrchestrator: stackOrchestrator,
+			Description: o.Description,
 		},
 		Name: o.Name,
 	}
cli/command/context/create_test.go
@@ -9,11 +9,9 @@ import (
 	"github.com/docker/cli/cli/command"
 	"github.com/docker/cli/cli/config/configfile"
 	"github.com/docker/cli/cli/context/docker"
-	"github.com/docker/cli/cli/context/kubernetes"
 	"github.com/docker/cli/cli/context/store"
 	"github.com/docker/cli/internal/test"
 	"gotest.tools/v3/assert"
-	"gotest.tools/v3/env"
 )
 
 func makeFakeCli(t *testing.T, opts ...func(*test.FakeCli)) (*test.FakeCli, func()) {
@@ -22,7 +20,6 @@ func makeFakeCli(t *testing.T, opts ...func(*test.FakeCli)) (*test.FakeCli, func()) {
 	storeConfig := store.NewConfig(
 		func() interface{} { return &command.DockerContext{} },
 		store.EndpointTypeGetter(docker.DockerEndpoint, func() interface{} { return &docker.EndpointMeta{} }),
-		store.EndpointTypeGetter(kubernetes.KubernetesEndpoint, func() interface{} { return &kubernetes.EndpointMeta{} }),
 	)
 	store := &command.ContextStoreWithDefault{
 		Store: store.New(dir, storeConfig),
@@ -35,8 +32,7 @@ func makeFakeCli(t *testing.T, opts ...func(*test.FakeCli)) (*test.FakeCli, func()) {
 			},
 		},
 		Metadata: command.DockerContext{
-			Description:       "",
-			StackOrchestrator: command.OrchestratorSwarm,
+			Description: "",
 		},
 		Name: command.DefaultContextName,
 	},
@@ -61,7 +57,7 @@ func withCliConfig(configFile *configfile.ConfigFile) func(*test.FakeCli) {
 	}
 }
 
-func TestCreateInvalids(t *testing.T) {
+func TestCreate(t *testing.T) {
 	cli, cleanup := makeFakeCli(t)
 	defer cleanup()
 	assert.NilError(t, cli.ContextStore().CreateOrUpdate(store.Metadata{Name: "existing-context"}))
@@ -104,15 +100,7 @@ func TestCreateInvalids(t *testing.T) {
-				Name:                     "invalid-orchestrator",
-				DefaultStackOrchestrator: "invalid",
-			},
-			expecterErr: `specified orchestrator "invalid" is invalid, please use either kubernetes, swarm or all`,
-		},
-		{
-			options: CreateOptions{
 				Name:                     "orchestrator-kubernetes-no-endpoint",
 				DefaultStackOrchestrator: "kubernetes",
 				Docker:                   map[string]string{},
 			},
-			expecterErr: `cannot specify orchestrator "kubernetes" without configuring a Kubernetes endpoint`,
+			expecterErr: "",
 		},
 		{
 			options: CreateOptions{
@@ -120,14 +108,18 @@ func TestCreateInvalids(t *testing.T) {
 				DefaultStackOrchestrator: "all",
 				Docker:                   map[string]string{},
 			},
-			expecterErr: `cannot specify orchestrator "all" without configuring a Kubernetes endpoint`,
+			expecterErr: "",
 		},
 	}
 	for _, tc := range tests {
 		tc := tc
 		t.Run(tc.options.Name, func(t *testing.T) {
 			err := RunCreate(cli, &tc.options)
-			assert.ErrorContains(t, err, tc.expecterErr)
+			if tc.expecterErr == "" {
+				assert.NilError(t, err)
+			} else {
+				assert.ErrorContains(t, err, tc.expecterErr)
+			}
 		})
 	}
 }
@@ -162,76 +154,27 @@ func TestCreateOrchestratorEmpty(t *testing.T) {
 	assertContextCreateLogging(t, cli, "test")
 }
 
-func validateTestKubeEndpoint(t *testing.T, s store.Reader, name string) {
-	t.Helper()
-	ctxMetadata, err := s.GetMetadata(name)
-	assert.NilError(t, err)
-	kubeMeta := ctxMetadata.Endpoints[kubernetes.KubernetesEndpoint].(kubernetes.EndpointMeta)
-	kubeEP, err := kubeMeta.WithTLSData(s, name)
-	assert.NilError(t, err)
-	assert.Equal(t, "https://someserver.example.com", kubeEP.Host)
-	assert.Equal(t, "the-ca", string(kubeEP.TLSData.CA))
-	assert.Equal(t, "the-cert", string(kubeEP.TLSData.Cert))
-	assert.Equal(t, "the-key", string(kubeEP.TLSData.Key))
-}
-
-func createTestContextWithKube(t *testing.T, cli command.Cli) {
-	t.Helper()
-	revert := env.Patch(t, "KUBECONFIG", "./testdata/test-kubeconfig")
-	defer revert()
-
-	err := RunCreate(cli, &CreateOptions{
-		Name:                     "test",
-		DefaultStackOrchestrator: "all",
-		Kubernetes: map[string]string{
-			keyFrom: "default",
-		},
-		Docker: map[string]string{},
-	})
-	assert.NilError(t, err)
-}
-
-func TestCreateOrchestratorAllKubernetesEndpointFromCurrent(t *testing.T) {
-	cli, cleanup := makeFakeCli(t)
-	defer cleanup()
-	createTestContextWithKube(t, cli)
-	assertContextCreateLogging(t, cli, "test")
-	validateTestKubeEndpoint(t, cli.ContextStore(), "test")
-}
-
 func TestCreateFromContext(t *testing.T) {
 	cases := []struct {
-		name                 string
-		description          string
-		orchestrator         string
-		expectedDescription  string
-		docker               map[string]string
-		kubernetes           map[string]string
-		expectedOrchestrator command.Orchestrator
+		name                string
+		description         string
+		expectedDescription string
+		docker              map[string]string
+		kubernetes          map[string]string
 	}{
 		{
-			name:                 "no-override",
-			expectedDescription:  "original description",
-			expectedOrchestrator: command.OrchestratorSwarm,
+			name:                "no-override",
+			expectedDescription: "original description",
 		},
 		{
-			name:                 "override-description",
-			description:          "new description",
-			expectedDescription:  "new description",
-			expectedOrchestrator: command.OrchestratorSwarm,
-		},
-		{
-			name:                 "override-orchestrator",
-			orchestrator:         "kubernetes",
-			expectedDescription:  "original description",
-			expectedOrchestrator: command.OrchestratorKubernetes,
+			name:                "override-description",
+			description:         "new description",
+			expectedDescription: "new description",
 		},
 	}
 
 	cli, cleanup := makeFakeCli(t)
 	defer cleanup()
-	revert := env.Patch(t, "KUBECONFIG", "./testdata/test-kubeconfig")
-	defer revert()
 	cli.ResetOutputBuffers()
 	assert.NilError(t, RunCreate(cli, &CreateOptions{
 		Name: "original",
@@ -239,10 +182,6 @@ func TestCreateFromContext(t *testing.T) {
 		Docker: map[string]string{
 			keyHost: "tcp://42.42.42.42:2375",
 		},
-		Kubernetes: map[string]string{
-			keyFrom: "default",
-		},
-		DefaultStackOrchestrator: "swarm",
 	}))
 	assertContextCreateLogging(t, cli, "original")
@@ -253,10 +192,6 @@ func TestCreateFromContext(t *testing.T) {
 		Docker: map[string]string{
 			keyHost: "tcp://24.24.24.24:2375",
 		},
-		Kubernetes: map[string]string{
-			keyFrom: "default",
-		},
-		DefaultStackOrchestrator: "swarm",
 	}))
 	assertContextCreateLogging(t, cli, "dummy")
@@ -267,12 +202,10 @@ func TestCreateFromContext(t *testing.T) {
 		t.Run(c.name, func(t *testing.T) {
 			cli.ResetOutputBuffers()
 			err := RunCreate(cli, &CreateOptions{
-				From:                     "original",
-				Name:                     c.name,
-				Description:              c.description,
-				DefaultStackOrchestrator: c.orchestrator,
-				Docker:                   c.docker,
-				Kubernetes:               c.kubernetes,
+				From:        "original",
+				Name:        c.name,
+				Description: c.description,
+				Docker:      c.docker,
 			})
 			assert.NilError(t, err)
 			assertContextCreateLogging(t, cli, c.name)
@@ -282,47 +215,32 @@ func TestCreateFromContext(t *testing.T) {
 			assert.NilError(t, err)
 			dockerEndpoint, err := docker.EndpointFromContext(newContext)
 			assert.NilError(t, err)
-			kubeEndpoint := kubernetes.EndpointFromContext(newContext)
-			assert.Check(t, kubeEndpoint != nil)
 			assert.Equal(t, newContextTyped.Description, c.expectedDescription)
-			assert.Equal(t, newContextTyped.StackOrchestrator, c.expectedOrchestrator)
 			assert.Equal(t, dockerEndpoint.Host, "tcp://42.42.42.42:2375")
-			assert.Equal(t, kubeEndpoint.Host, "https://someserver.example.com")
 		})
 	}
 }
 
 func TestCreateFromCurrent(t *testing.T) {
 	cases := []struct {
-		name                 string
-		description          string
-		orchestrator         string
-		expectedDescription  string
-		expectedOrchestrator command.Orchestrator
+		name                string
+		description         string
+		orchestrator        string
+		expectedDescription string
 	}{
 		{
-			name:                 "no-override",
-			expectedDescription:  "original description",
-			expectedOrchestrator: command.OrchestratorSwarm,
+			name:                "no-override",
+			expectedDescription: "original description",
 		},
 		{
-			name:                 "override-description",
-			description:          "new description",
-			expectedDescription:  "new description",
-			expectedOrchestrator: command.OrchestratorSwarm,
-		},
-		{
-			name:                 "override-orchestrator",
-			orchestrator:         "kubernetes",
-			expectedDescription:  "original description",
-			expectedOrchestrator: command.OrchestratorKubernetes,
+			name:                "override-description",
+			description:         "new description",
+			expectedDescription: "new description",
 		},
 	}
 
 	cli, cleanup := makeFakeCli(t)
 	defer cleanup()
-	revert := env.Patch(t, "KUBECONFIG", "./testdata/test-kubeconfig")
-	defer revert()
 	cli.ResetOutputBuffers()
 	assert.NilError(t, RunCreate(cli, &CreateOptions{
 		Name: "original",
@@ -330,10 +248,6 @@ func TestCreateFromCurrent(t *testing.T) {
 		Docker: map[string]string{
 			keyHost: "tcp://42.42.42.42:2375",
 		},
-		Kubernetes: map[string]string{
-			keyFrom: "default",
-		},
-		DefaultStackOrchestrator: "swarm",
 	}))
 	assertContextCreateLogging(t, cli, "original")
@@ -344,9 +258,8 @@ func TestCreateFromCurrent(t *testing.T) {
 		t.Run(c.name, func(t *testing.T) {
 			cli.ResetOutputBuffers()
 			err := RunCreate(cli, &CreateOptions{
-				Name:                     c.name,
-				Description:              c.description,
-				DefaultStackOrchestrator: c.orchestrator,
+				Name:        c.name,
+				Description: c.description,
 			})
 			assert.NilError(t, err)
 			assertContextCreateLogging(t, cli, c.name)
@@ -356,12 +269,8 @@ func TestCreateFromCurrent(t *testing.T) {
 			assert.NilError(t, err)
 			dockerEndpoint, err := docker.EndpointFromContext(newContext)
 			assert.NilError(t, err)
-			kubeEndpoint := kubernetes.EndpointFromContext(newContext)
-			assert.Check(t, kubeEndpoint != nil)
 			assert.Equal(t, newContextTyped.Description, c.expectedDescription)
-			assert.Equal(t, newContextTyped.StackOrchestrator, c.expectedOrchestrator)
 			assert.Equal(t, dockerEndpoint.Host, "tcp://42.42.42.42:2375")
-			assert.Equal(t, kubeEndpoint.Host, "https://someserver.example.com")
 		})
 	}
 }
cli/command/context/export_import_test.go
@@ -8,6 +8,7 @@ import (
 	"path/filepath"
 	"testing"
 
+	"github.com/docker/cli/cli/command"
 	"github.com/docker/cli/cli/streams"
 	"gotest.tools/v3/assert"
 )
@@ -19,7 +20,7 @@ func TestExportImportWithFile(t *testing.T) {
 	contextFile := filepath.Join(contextDir, "exported")
 	cli, cleanup := makeFakeCli(t)
 	defer cleanup()
-	createTestContextWithKube(t, cli)
+	createTestContext(t, cli)
 	cli.ErrBuffer().Reset()
 	assert.NilError(t, RunExport(cli, &ExportOptions{
 		ContextName: "test",
@@ -45,7 +46,7 @@
 func TestExportImportPipe(t *testing.T) {
 	cli, cleanup := makeFakeCli(t)
 	defer cleanup()
-	createTestContextWithKube(t, cli)
+	createTestContext(t, cli)
 	cli.ErrBuffer().Reset()
 	cli.OutBuffer().Reset()
 	assert.NilError(t, RunExport(cli, &ExportOptions{
@@ -70,31 +71,6 @@ func TestExportImportPipe(t *testing.T) {
 	assert.Equal(t, "Successfully imported context \"test2\"\n", cli.ErrBuffer().String())
 }
 
-func TestExportKubeconfig(t *testing.T) {
-	contextDir, err := ioutil.TempDir("", t.Name()+"context")
-	assert.NilError(t, err)
-	defer os.RemoveAll(contextDir)
-	contextFile := filepath.Join(contextDir, "exported")
-	cli, cleanup := makeFakeCli(t)
-	defer cleanup()
-	createTestContextWithKube(t, cli)
-	cli.ErrBuffer().Reset()
-	assert.NilError(t, RunExport(cli, &ExportOptions{
-		ContextName: "test",
-		Dest:        contextFile,
-		Kubeconfig:  true,
-	}))
-	assert.Equal(t, cli.ErrBuffer().String(), fmt.Sprintf("Written file %q\n", contextFile))
-	assert.NilError(t, RunCreate(cli, &CreateOptions{
-		Name: "test2",
-		Kubernetes: map[string]string{
-			keyKubeconfig: contextFile,
-		},
-		Docker: map[string]string{},
-	}))
-	validateTestKubeEndpoint(t, cli.ContextStore(), "test2")
-}
-
 func TestExportExistingFile(t *testing.T) {
 	contextDir, err := ioutil.TempDir("", t.Name()+"context")
 	assert.NilError(t, err)
@@ -102,9 +78,18 @@ func TestExportExistingFile(t *testing.T) {
 	contextFile := filepath.Join(contextDir, "exported")
 	cli, cleanup := makeFakeCli(t)
 	defer cleanup()
-	createTestContextWithKube(t, cli)
+	createTestContext(t, cli)
 	cli.ErrBuffer().Reset()
 	assert.NilError(t, ioutil.WriteFile(contextFile, []byte{}, 0644))
 	err = RunExport(cli, &ExportOptions{ContextName: "test", Dest: contextFile})
 	assert.Assert(t, os.IsExist(err))
 }
+
+func createTestContext(t *testing.T, cli command.Cli) {
+	t.Helper()
+
+	err := RunCreate(cli, &CreateOptions{
+		Name:   "test",
+		Docker: map[string]string{},
+	})
+	assert.NilError(t, err)
+}
cli/command/context/export.go
@@ -1,7 +1,6 @@
 package context
 
 import (
-	"bytes"
 	"errors"
 	"fmt"
 	"io"
@@ -9,10 +8,8 @@ import (
 	"github.com/docker/cli/cli"
 	"github.com/docker/cli/cli/command"
-	"github.com/docker/cli/cli/context/kubernetes"
 	"github.com/docker/cli/cli/context/store"
 	"github.com/spf13/cobra"
-	"k8s.io/client-go/tools/clientcmd"
 )
 
 // ExportOptions are the options used for exporting a context
@@ -82,31 +79,7 @@ func RunExport(dockerCli command.Cli, opts *ExportOptions) error {
 	if err := store.ValidateContextName(opts.ContextName); err != nil && opts.ContextName != command.DefaultContextName {
 		return err
 	}
-	ctxMeta, err := dockerCli.ContextStore().GetMetadata(opts.ContextName)
-	if err != nil {
-		return err
-	}
-	if !opts.Kubeconfig {
-		reader := store.Export(opts.ContextName, dockerCli.ContextStore())
-		defer reader.Close()
-		return writeTo(dockerCli, reader, opts.Dest)
-	}
-	kubernetesEndpointMeta := kubernetes.EndpointFromContext(ctxMeta)
-	if kubernetesEndpointMeta == nil {
-		return fmt.Errorf("context %q has no kubernetes endpoint", opts.ContextName)
-	}
-	kubernetesEndpoint, err := kubernetesEndpointMeta.WithTLSData(dockerCli.ContextStore(), opts.ContextName)
-	if err != nil {
-		return err
-	}
-	kubeConfig := kubernetesEndpoint.KubernetesConfig()
-	rawCfg, err := kubeConfig.RawConfig()
-	if err != nil {
-		return err
-	}
-	data, err := clientcmd.Write(rawCfg)
-	if err != nil {
-		return err
-	}
-	return writeTo(dockerCli, bytes.NewBuffer(data), opts.Dest)
+	reader := store.Export(opts.ContextName, dockerCli.ContextStore())
+	defer reader.Close()
+	return writeTo(dockerCli, reader, opts.Dest)
 }
cli/command/context/list.go
@@ -9,7 +9,6 @@ import (
 	"github.com/docker/cli/cli/command"
 	"github.com/docker/cli/cli/command/formatter"
 	"github.com/docker/cli/cli/context/docker"
-	kubecontext "github.com/docker/cli/cli/context/kubernetes"
 	"github.com/fvbommel/sortorder"
 	"github.com/spf13/cobra"
 )
@@ -56,21 +55,14 @@ func runList(dockerCli command.Cli, opts *listOptions) error {
 		if err != nil {
 			return err
 		}
-		kubernetesEndpoint := kubecontext.EndpointFromContext(rawMeta)
-		kubEndpointText := ""
-		if kubernetesEndpoint != nil {
-			kubEndpointText = fmt.Sprintf("%s (%s)", kubernetesEndpoint.Host, kubernetesEndpoint.DefaultNamespace)
-		}
 		if rawMeta.Name == command.DefaultContextName {
 			meta.Description = "Current DOCKER_HOST based configuration"
 		}
 		desc := formatter.ClientContext{
-			Name:               rawMeta.Name,
-			Current:            rawMeta.Name == curContext,
-			Description:        meta.Description,
-			StackOrchestrator:  string(meta.StackOrchestrator),
-			DockerEndpoint:     dockerEndpoint.Host,
-			KubernetesEndpoint: kubEndpointText,
+			Name:           rawMeta.Name,
+			Current:        rawMeta.Name == curContext,
+			Description:    meta.Description,
+			DockerEndpoint: dockerEndpoint.Host,
 		}
 		contexts = append(contexts, &desc)
 	}
cli/command/context/options.go
@@ -2,18 +2,14 @@ package context
 
 import (
 	"fmt"
-	"os"
-	"path/filepath"
 	"strconv"
 	"strings"
 
 	"github.com/docker/cli/cli/command"
 	"github.com/docker/cli/cli/context"
 	"github.com/docker/cli/cli/context/docker"
-	"github.com/docker/cli/cli/context/kubernetes"
 	"github.com/docker/cli/cli/context/store"
 	"github.com/docker/docker/client"
-	"github.com/docker/docker/pkg/homedir"
 	"github.com/pkg/errors"
 )
 
@@ -24,9 +20,6 @@ const (
 	keyCert          = "cert"
 	keyKey           = "key"
 	keySkipTLSVerify = "skip-tls-verify"
-	keyKubeconfig    = "config-file"
-	keyKubecontext   = "context-override"
-	keyKubenamespace = "namespace-override"
 )
 
 type configKeyDescription struct {
@@ -43,12 +36,6 @@ var (
 		keyKey:           {},
 		keySkipTLSVerify: {},
 	}
-	allowedKubernetesConfigKeys = map[string]struct{}{
-		keyFrom:          {},
-		keyKubeconfig:    {},
-		keyKubecontext:   {},
-		keyKubenamespace: {},
-	}
 	dockerConfigKeysDescriptions = []configKeyDescription{
 		{
 			name: keyFrom,
@@ -75,24 +62,6 @@ var (
 			description: "Skip TLS certificate validation",
 		},
 	}
-	kubernetesConfigKeysDescriptions = []configKeyDescription{
-		{
-			name:        keyFrom,
-			description: "Copy named context's Kubernetes endpoint configuration",
-		},
-		{
-			name:        keyKubeconfig,
-			description: "Path to a Kubernetes config file",
-		},
-		{
-			name:        keyKubecontext,
-			description: "Overrides the context set in the kubernetes config file",
-		},
-		{
-			name:        keyKubenamespace,
-			description: "Overrides the namespace set in the kubernetes config file",
-		},
-	}
 )
 
 func parseBool(config map[string]string, name string) (bool, error) {
@@ -164,56 +133,3 @@ func getDockerEndpointMetadataAndTLS(dockerCli command.Cli, config map[string]string) (docker.EndpointMeta, *store.EndpointTLSData, error) {
 	}
 	return ep.EndpointMeta, ep.TLSData.ToStoreTLSData(), nil
 }
-
-func getKubernetesEndpoint(dockerCli command.Cli, config map[string]string) (*kubernetes.Endpoint, error) {
-	if err := validateConfig(config, allowedKubernetesConfigKeys); err != nil {
-		return nil, err
-	}
-	if len(config) == 0 {
-		return nil, nil
-	}
-	if contextName, ok := config[keyFrom]; ok {
-		ctxMeta, err := dockerCli.ContextStore().GetMetadata(contextName)
-		if err != nil {
-			return nil, err
-		}
-		endpointMeta := kubernetes.EndpointFromContext(ctxMeta)
-		if endpointMeta != nil {
-			res, err := endpointMeta.WithTLSData(dockerCli.ContextStore(), dockerCli.CurrentContext())
-			if err != nil {
-				return nil, err
-			}
-			return &res, nil
-		}
-
-		// fallback to env-based kubeconfig
-		kubeconfig := os.Getenv("KUBECONFIG")
-		if kubeconfig == "" {
-			kubeconfig = filepath.Join(homedir.Get(), ".kube/config")
-		}
-		ep, err := kubernetes.FromKubeConfig(kubeconfig, "", "")
-		if err != nil {
-			return nil, err
-		}
-		return &ep, nil
-	}
-	if config[keyKubeconfig] != "" {
-		ep, err := kubernetes.FromKubeConfig(config[keyKubeconfig], config[keyKubecontext], config[keyKubenamespace])
-		if err != nil {
-			return nil, err
-		}
-		return &ep, nil
-	}
-	return nil, nil
-}
-
-func getKubernetesEndpointMetadataAndTLS(dockerCli command.Cli, config map[string]string) (*kubernetes.EndpointMeta, *store.EndpointTLSData, error) {
-	ep, err := getKubernetesEndpoint(dockerCli, config)
-	if err != nil {
-		return nil, nil, err
-	}
-	if ep == nil {
-		return nil, nil, err
-	}
-	return &ep.EndpointMeta, ep.TLSData.ToStoreTLSData(), nil
-}
cli/command/context/testdata/inspect.golden
@@ -2,27 +2,15 @@
 {
     "Name": "current",
     "Metadata": {
-        "Description": "description of current",
-        "StackOrchestrator": "all"
+        "Description": "description of current"
     },
     "Endpoints": {
         "docker": {
             "Host": "https://someswarmserver.example.com",
             "SkipTLSVerify": false
-        },
-        "kubernetes": {
-            "Host": "https://someserver.example.com",
-            "SkipTLSVerify": false,
-            "DefaultNamespace": "default"
         }
     },
-    "TLSMaterial": {
-        "kubernetes": [
-            "ca.pem",
-            "cert.pem",
-            "key.pem"
-        ]
-    },
+    "TLSMaterial": {},
     "Storage": {
         "MetadataPath": "<METADATA_PATH>",
         "TLSPath": "<TLS_PATH>"
cli/command/context/testdata/list.golden
@@ -1,5 +1,5 @@
-NAME        DESCRIPTION                               DOCKER ENDPOINT                       KUBERNETES ENDPOINT                        ORCHESTRATOR
-current *   description of current                    https://someswarmserver.example.com   https://someserver.example.com (default)   all
-default     Current DOCKER_HOST based configuration   unix:///var/run/docker.sock                                                      swarm
-other       description of other                      https://someswarmserver.example.com   https://someserver.example.com (default)   all
-unset       description of unset                      https://someswarmserver.example.com   https://someserver.example.com (default)
+NAME        DESCRIPTION                               DOCKER ENDPOINT
+current *   description of current                    https://someswarmserver.example.com
+default     Current DOCKER_HOST based configuration   unix:///var/run/docker.sock
+other       description of other                      https://someswarmserver.example.com
+unset       description of unset                      https://someswarmserver.example.com
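The trimmed golden file pairs with the new ClientContextTableFormat in cli/command/formatter/context.go further down. As a rough illustration of how a "table ..." format string becomes aligned output like the above, here is a standalone approximation using text/template and text/tabwriter; the real CLI renders through its formatter package (which treats the "table " prefix as a mode switch), so treat this only as a sketch:

package main

import (
	"fmt"
	"os"
	"text/tabwriter"
	"text/template"
)

type clientContext struct {
	Name, Description, DockerEndpoint string
	Current                           bool
}

func main() {
	// Row template from the new ClientContextTableFormat, minus the
	// leading "table " marker.
	row := template.Must(template.New("row").Parse(
		"{{.Name}}{{if .Current}} *{{end}}\t{{.Description}}\t{{.DockerEndpoint}}\n"))

	w := tabwriter.NewWriter(os.Stdout, 10, 1, 3, ' ', 0)
	fmt.Fprintln(w, "NAME\tDESCRIPTION\tDOCKER ENDPOINT")
	for _, c := range []clientContext{
		{"current", "description of current", "https://someswarmserver.example.com", true},
		{"default", "Current DOCKER_HOST based configuration", "unix:///var/run/docker.sock", false},
	} {
		_ = row.Execute(w, c)
	}
	w.Flush() // tabwriter aligns the tab-separated columns on flush
}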
cli/command/context/update.go
@@ -8,7 +8,6 @@ import (
 	"github.com/docker/cli/cli"
 	"github.com/docker/cli/cli/command"
 	"github.com/docker/cli/cli/context/docker"
-	"github.com/docker/cli/cli/context/kubernetes"
 	"github.com/docker/cli/cli/context/store"
 	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
@@ -16,11 +15,14 @@ import (
 // UpdateOptions are the options used to update a context
 type UpdateOptions struct {
-	Name                     string
-	Description              string
+	Name        string
+	Description string
+	Docker      map[string]string
+
+	// Deprecated
 	DefaultStackOrchestrator string
-	Docker                   map[string]string
-	Kubernetes               map[string]string
+	// Deprecated
+	Kubernetes map[string]string
 }
 
 func longUpdateDescription() string {
@@ -32,13 +34,6 @@ func longUpdateDescription() string {
 		fmt.Fprintf(tw, "%s\t%s\n", d.name, d.description)
 	}
 	tw.Flush()
-	buf.WriteString("\nKubernetes endpoint config:\n\n")
-	tw = tabwriter.NewWriter(buf, 20, 1, 3, ' ', 0)
-	fmt.Fprintln(tw, "NAME\tDESCRIPTION")
-	for _, d := range kubernetesConfigKeysDescriptions {
-		fmt.Fprintf(tw, "%s\t%s\n", d.name, d.description)
-	}
-	tw.Flush()
 	buf.WriteString("\nExample:\n\n$ docker context update my-context --description \"some description\" --docker \"host=tcp://myserver:2376,ca=~/ca-file,cert=~/cert-file,key=~/key-file\"\n")
 	return buf.String()
 }
@@ -62,10 +57,12 @@ func newUpdateCommand(dockerCli command.Cli) *cobra.Command {
 		"default-stack-orchestrator", "",
 		"Default orchestrator for stack operations to use with this context (swarm|kubernetes|all)")
+	flags.SetAnnotation("default-stack-orchestrator", "deprecated", nil)
+	flags.MarkDeprecated("default-stack-orchestrator", "option will be ignored")
 	flags.StringToStringVar(&opts.Docker, "docker", nil, "set the docker endpoint")
 	flags.StringToStringVar(&opts.Kubernetes, "kubernetes", nil, "set the kubernetes endpoint")
 	flags.SetAnnotation("kubernetes", "kubernetes", nil)
+	flags.SetAnnotation("kubernetes", "deprecated", nil)
+	flags.MarkDeprecated("kubernetes", "option will be ignored")
 	return cmd
 }
 
@@ -83,13 +80,6 @@ func RunUpdate(cli command.Cli, o *UpdateOptions) error {
 	if err != nil {
 		return err
 	}
-	if o.DefaultStackOrchestrator != "" {
-		stackOrchestrator, err := command.NormalizeOrchestrator(o.DefaultStackOrchestrator)
-		if err != nil {
-			return errors.Wrap(err, "unable to parse default-stack-orchestrator")
-		}
-		dockerContext.StackOrchestrator = stackOrchestrator
-	}
 	if o.Description != "" {
 		dockerContext.Description = o.Description
 	}
@@ -106,19 +96,7 @@ func RunUpdate(cli command.Cli, o *UpdateOptions) error {
 		c.Endpoints[docker.DockerEndpoint] = dockerEP
 		tlsDataToReset[docker.DockerEndpoint] = dockerTLS
 	}
-	if o.Kubernetes != nil {
-		kubernetesEP, kubernetesTLS, err := getKubernetesEndpointMetadataAndTLS(cli, o.Kubernetes)
-		if err != nil {
-			return errors.Wrap(err, "unable to create kubernetes endpoint config")
-		}
-		if kubernetesEP == nil {
-			delete(c.Endpoints, kubernetes.KubernetesEndpoint)
-		} else {
-			c.Endpoints[kubernetes.KubernetesEndpoint] = kubernetesEP
-			tlsDataToReset[kubernetes.KubernetesEndpoint] = kubernetesTLS
-		}
-	}
-	if err := validateEndpointsAndOrchestrator(c); err != nil {
+	if err := validateEndpoints(c); err != nil {
 		return err
 	}
 	if err := s.CreateOrUpdate(c); err != nil {
@@ -135,13 +113,7 @@ func RunUpdate(cli command.Cli, o *UpdateOptions) error {
 	return nil
 }
 
-func validateEndpointsAndOrchestrator(c store.Metadata) error {
-	dockerContext, err := command.GetDockerContext(c)
-	if err != nil {
-		return err
-	}
-	if _, ok := c.Endpoints[kubernetes.KubernetesEndpoint]; !ok && dockerContext.StackOrchestrator.HasKubernetes() {
-		return errors.Errorf("cannot specify orchestrator %q without configuring a Kubernetes endpoint", dockerContext.StackOrchestrator)
-	}
-	return nil
+func validateEndpoints(c store.Metadata) error {
+	_, err := command.GetDockerContext(c)
+	return err
 }
cli/command/context/update_test.go
@@ -5,7 +5,6 @@ import (
 	"github.com/docker/cli/cli/command"
 	"github.com/docker/cli/cli/context/docker"
-	"github.com/docker/cli/cli/context/kubernetes"
 	"gotest.tools/v3/assert"
 	"gotest.tools/v3/assert/cmp"
 )
@@ -29,7 +28,6 @@ func TestUpdateDescriptionOnly(t *testing.T) {
 	assert.NilError(t, err)
 	dc, err := command.GetDockerContext(c)
 	assert.NilError(t, err)
-	assert.Equal(t, dc.StackOrchestrator, command.OrchestratorSwarm)
 	assert.Equal(t, dc.Description, "description")
 
 	assert.Equal(t, "test\n", cli.OutBuffer().String())
@@ -50,40 +48,11 @@ func TestUpdateDockerOnly(t *testing.T) {
 	assert.NilError(t, err)
 	dc, err := command.GetDockerContext(c)
 	assert.NilError(t, err)
-	assert.Equal(t, dc.StackOrchestrator, command.OrchestratorSwarm)
 	assert.Equal(t, dc.Description, "description of test")
-	assert.Check(t, cmp.Contains(c.Endpoints, kubernetes.KubernetesEndpoint))
 	assert.Check(t, cmp.Contains(c.Endpoints, docker.DockerEndpoint))
 	assert.Equal(t, c.Endpoints[docker.DockerEndpoint].(docker.EndpointMeta).Host, "tcp://some-host")
 }
 
-func TestUpdateStackOrchestratorStrategy(t *testing.T) {
-	cli, cleanup := makeFakeCli(t)
-	defer cleanup()
-	err := RunCreate(cli, &CreateOptions{
-		Name:                     "test",
-		DefaultStackOrchestrator: "swarm",
-		Docker:                   map[string]string{},
-	})
-	assert.NilError(t, err)
-	err = RunUpdate(cli, &UpdateOptions{
-		Name:                     "test",
-		DefaultStackOrchestrator: "kubernetes",
-	})
-	assert.ErrorContains(t, err, `cannot specify orchestrator "kubernetes" without configuring a Kubernetes endpoint`)
-}
-
-func TestUpdateStackOrchestratorStrategyRemoveKubeEndpoint(t *testing.T) {
-	cli, cleanup := makeFakeCli(t)
-	defer cleanup()
-	createTestContextWithKubeAndSwarm(t, cli, "test", "kubernetes")
-	err := RunUpdate(cli, &UpdateOptions{
-		Name:       "test",
-		Kubernetes: map[string]string{},
-	})
-	assert.ErrorContains(t, err, `cannot specify orchestrator "kubernetes" without configuring a Kubernetes endpoint`)
-}
-
 func TestUpdateInvalidDockerHost(t *testing.T) {
 	cli, cleanup := makeFakeCli(t)
 	defer cleanup()
cli/command/context_test.go
@@ -9,19 +9,17 @@ import (
 func TestDockerContextMetadataKeepAdditionalFields(t *testing.T) {
 	c := DockerContext{
-		Description:       "test",
-		StackOrchestrator: OrchestratorSwarm,
+		Description: "test",
 		AdditionalFields: map[string]interface{}{
 			"foo": "bar",
 		},
 	}
 	jsonBytes, err := json.Marshal(c)
 	assert.NilError(t, err)
-	assert.Equal(t, `{"Description":"test","StackOrchestrator":"swarm","foo":"bar"}`, string(jsonBytes))
+	assert.Equal(t, `{"Description":"test","foo":"bar"}`, string(jsonBytes))
 
 	var c2 DockerContext
 	assert.NilError(t, json.Unmarshal(jsonBytes, &c2))
 	assert.Equal(t, c2.AdditionalFields["foo"], "bar")
-	assert.Equal(t, c2.StackOrchestrator, OrchestratorSwarm)
 	assert.Equal(t, c2.Description, "test")
 }
cli/command/defaultcontextstore.go
@@ -41,23 +41,18 @@ type EndpointDefaultResolver interface {
 	// the lack of a default (e.g. because the config file which
 	// would contain it is missing). If there is no default then
 	// returns nil, nil, nil.
-	ResolveDefault(Orchestrator) (interface{}, *store.EndpointTLSData, error)
+	ResolveDefault() (interface{}, *store.EndpointTLSData, error)
 }
 
 // ResolveDefaultContext creates a Metadata for the current CLI invocation parameters
 func ResolveDefaultContext(opts *cliflags.CommonOptions, config *configfile.ConfigFile, storeconfig store.Config, stderr io.Writer) (*DefaultContext, error) {
-	stackOrchestrator, err := GetStackOrchestrator("", "", config.StackOrchestrator, stderr)
-	if err != nil {
-		return nil, err
-	}
 	contextTLSData := store.ContextTLSData{
 		Endpoints: make(map[string]store.EndpointTLSData),
 	}
 	contextMetadata := store.Metadata{
 		Endpoints: make(map[string]interface{}),
 		Metadata: DockerContext{
-			Description:       "",
-			StackOrchestrator: stackOrchestrator,
+			Description: "",
 		},
 		Name: DefaultContextName,
 	}
@@ -77,7 +72,7 @@ func ResolveDefaultContext(opts *cliflags.CommonOptions, config *configfile.ConfigFile, storeconfig store.Config, stderr io.Writer) (*DefaultContext, error) {
 	}
 	ep := get()
 	if i, ok := ep.(EndpointDefaultResolver); ok {
-		meta, tls, err := i.ResolveDefault(stackOrchestrator)
+		meta, tls, err := i.ResolveDefault()
 		if err != nil {
 			return err
 		}
cli/command/defaultcontextstore_test.go
@@ -71,7 +71,6 @@ func TestDefaultContextInitializer(t *testing.T) {
 	}, cli.ConfigFile(), DefaultContextStoreConfig(), cli.Err())
 	assert.NilError(t, err)
 	assert.Equal(t, "default", ctx.Meta.Name)
-	assert.Equal(t, OrchestratorSwarm, ctx.Meta.Metadata.(DockerContext).StackOrchestrator)
 	assert.DeepEqual(t, "ssh://someswarmserver", ctx.Meta.Endpoints[docker.DockerEndpoint].(docker.EndpointMeta).Host)
 	golden.Assert(t, string(ctx.TLS.Endpoints[docker.DockerEndpoint].Files["ca.pem"]), "ca.pem")
 }
cli/command/formatter/context.go
@@ -2,12 +2,10 @@ package formatter
 
 const (
 	// ClientContextTableFormat is the default client context format
-	ClientContextTableFormat = "table {{.Name}}{{if .Current}} *{{end}}\t{{.Description}}\t{{.DockerEndpoint}}\t{{.KubernetesEndpoint}}\t{{.StackOrchestrator}}"
+	ClientContextTableFormat = "table {{.Name}}{{if .Current}} *{{end}}\t{{.Description}}\t{{.DockerEndpoint}}"
 
-	dockerEndpointHeader     = "DOCKER ENDPOINT"
-	kubernetesEndpointHeader = "KUBERNETES ENDPOINT"
-	stackOrchestrastorHeader = "ORCHESTRATOR"
-	quietContextFormat       = "{{.Name}}"
+	dockerEndpointHeader = "DOCKER ENDPOINT"
+	quietContextFormat   = "{{.Name}}"
 )
 
 // NewClientContextFormat returns a Format for rendering using a Context
@@ -23,12 +21,10 @@ func NewClientContextFormat(source string, quiet bool) Format {
 
 // ClientContext is a context for display
 type ClientContext struct {
-	Name               string
-	Description        string
-	DockerEndpoint     string
-	KubernetesEndpoint string
-	StackOrchestrator  string
-	Current            bool
+	Name           string
+	Description    string
+	DockerEndpoint string
+	Current        bool
 }
 
 // ClientContextWrite writes formatted contexts using the Context
@@ -52,11 +48,9 @@ type clientContextContext struct {
 func newClientContextContext() *clientContextContext {
 	ctx := clientContextContext{}
 	ctx.Header = SubHeaderContext{
-		"Name":               NameHeader,
-		"Description":        DescriptionHeader,
-		"DockerEndpoint":     dockerEndpointHeader,
-		"KubernetesEndpoint": kubernetesEndpointHeader,
-		"StackOrchestrator":  stackOrchestrastorHeader,
+		"Name":           NameHeader,
+		"Description":    DescriptionHeader,
+		"DockerEndpoint": dockerEndpointHeader,
 	}
 	return &ctx
 }
@@ -82,9 +76,5 @@ func (c *clientContextContext) DockerEndpoint() string {
 }
 
-func (c *clientContextContext) KubernetesEndpoint() string {
-	return c.c.KubernetesEndpoint
-}
-
 func (c *clientContextContext) StackOrchestrator() string {
-	return c.c.StackOrchestrator
+	return ""
 }
cli/command/orchestrator.go (deleted)
@@ -1,84 +0,0 @@
-package command
-
-import (
-	"fmt"
-	"io"
-	"os"
-)
-
-// Orchestrator type acts as an enum describing supported orchestrators.
-type Orchestrator string
-
-const (
-	// OrchestratorKubernetes orchestrator
-	OrchestratorKubernetes = Orchestrator("kubernetes")
-	// OrchestratorSwarm orchestrator
-	OrchestratorSwarm = Orchestrator("swarm")
-	// OrchestratorAll orchestrator
-	OrchestratorAll = Orchestrator("all")
-	orchestratorUnset = Orchestrator("")
-
-	defaultOrchestrator           = OrchestratorSwarm
-	envVarDockerStackOrchestrator = "DOCKER_STACK_ORCHESTRATOR"
-	envVarDockerOrchestrator      = "DOCKER_ORCHESTRATOR"
-)
-
-// HasKubernetes returns true if defined orchestrator has Kubernetes capabilities.
-func (o Orchestrator) HasKubernetes() bool {
-	return o == OrchestratorKubernetes || o == OrchestratorAll
-}
-
-// HasSwarm returns true if defined orchestrator has Swarm capabilities.
-func (o Orchestrator) HasSwarm() bool {
-	return o == OrchestratorSwarm || o == OrchestratorAll
-}
-
-// HasAll returns true if defined orchestrator has both Swarm and Kubernetes capabilities.
-func (o Orchestrator) HasAll() bool {
-	return o == OrchestratorAll
-}
-
-func normalize(value string) (Orchestrator, error) {
-	switch value {
-	case "kubernetes":
-		return OrchestratorKubernetes, nil
-	case "swarm":
-		return OrchestratorSwarm, nil
-	case "", "unset": // unset is the old value for orchestratorUnset. Keep accepting this for backward compat
-		return orchestratorUnset, nil
-	case "all":
-		return OrchestratorAll, nil
-	default:
-		return defaultOrchestrator, fmt.Errorf("specified orchestrator %q is invalid, please use either kubernetes, swarm or all", value)
-	}
-}
-
-// NormalizeOrchestrator parses an orchestrator value and checks if it is valid
-func NormalizeOrchestrator(value string) (Orchestrator, error) {
-	return normalize(value)
-}
-
-// GetStackOrchestrator checks DOCKER_STACK_ORCHESTRATOR environment variable and configuration file
-// orchestrator value and returns user defined Orchestrator.
-func GetStackOrchestrator(flagValue, contextValue, globalDefault string, stderr io.Writer) (Orchestrator, error) {
-	// Check flag
-	if o, err := normalize(flagValue); o != orchestratorUnset {
-		return o, err
-	}
-	// Check environment variable
-	env := os.Getenv(envVarDockerStackOrchestrator)
-	if env == "" && os.Getenv(envVarDockerOrchestrator) != "" {
-		fmt.Fprintf(stderr, "WARNING: experimental environment variable %s is set. Please use %s instead\n", envVarDockerOrchestrator, envVarDockerStackOrchestrator)
-	}
-	if o, err := normalize(env); o != orchestratorUnset {
-		return o, err
-	}
-	if o, err := normalize(contextValue); o != orchestratorUnset {
-		return o, err
-	}
-	if o, err := normalize(globalDefault); o != orchestratorUnset {
-		return o, err
-	}
-	// Nothing set, use default orchestrator
-	return defaultOrchestrator, nil
-}
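For reference, the file deleted above held the whole orchestrator-selection machinery. GetStackOrchestrator resolved the orchestrator in this order: the --orchestrator flag, then the DOCKER_STACK_ORCHESTRATOR environment variable, then the context's metadata, then the config-file default, falling back to swarm. A condensed sketch of that now-removed precedence (validation of invalid values omitted; this is an illustration, not the deleted code itself):

package main

import "fmt"

// pick mirrors the selection order of the deleted GetStackOrchestrator:
// first non-empty value wins, "unset" counts as empty, default is swarm.
func pick(flag, env, context, config string) string {
	for _, v := range []string{flag, env, context, config} {
		if v != "" && v != "unset" {
			return v
		}
	}
	return "swarm"
}

func main() {
	fmt.Println(pick("", "kubernetes", "swarm", "")) // kubernetes: env beats context
	fmt.Println(pick("swarm", "kubernetes", "", "")) // swarm: flag beats everything
	fmt.Println(pick("", "", "", ""))                // swarm: built-in default
}

After this commit every stack command behaves as if the result were always swarm, which is why the test file deleted next is also obsolete.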
cli/command/orchestrator_test.go (deleted)
@@ -1,101 +0,0 @@
-package command
-
-import (
-	"io/ioutil"
-	"testing"
-
-	"gotest.tools/v3/assert"
-	is "gotest.tools/v3/assert/cmp"
-	"gotest.tools/v3/env"
-)
-
-func TestOrchestratorSwitch(t *testing.T) {
-	var testcases = []struct {
-		doc                  string
-		globalOrchestrator   string
-		envOrchestrator      string
-		flagOrchestrator     string
-		contextOrchestrator  string
-		expectedOrchestrator string
-		expectedKubernetes   bool
-		expectedSwarm        bool
-	}{
-		{
-			doc:                  "default",
-			expectedOrchestrator: "swarm",
-			expectedKubernetes:   false,
-			expectedSwarm:        true,
-		},
-		{
-			doc:                  "kubernetesConfigFile",
-			globalOrchestrator:   "kubernetes",
-			expectedOrchestrator: "kubernetes",
-			expectedKubernetes:   true,
-			expectedSwarm:        false,
-		},
-		{
-			doc:                  "kubernetesEnv",
-			envOrchestrator:      "kubernetes",
-			expectedOrchestrator: "kubernetes",
-			expectedKubernetes:   true,
-			expectedSwarm:        false,
-		},
-		{
-			doc:                  "kubernetesFlag",
-			flagOrchestrator:     "kubernetes",
-			expectedOrchestrator: "kubernetes",
-			expectedKubernetes:   true,
-			expectedSwarm:        false,
-		},
-		{
-			doc:                  "allOrchestratorFlag",
-			flagOrchestrator:     "all",
-			expectedOrchestrator: "all",
-			expectedKubernetes:   true,
-			expectedSwarm:        true,
-		},
-		{
-			doc:                  "kubernetesContext",
-			contextOrchestrator:  "kubernetes",
-			expectedOrchestrator: "kubernetes",
-			expectedKubernetes:   true,
-		},
-		{
-			doc:                  "contextOverridesConfigFile",
-			globalOrchestrator:   "kubernetes",
-			contextOrchestrator:  "swarm",
-			expectedOrchestrator: "swarm",
-			expectedKubernetes:   false,
-			expectedSwarm:        true,
-		},
-		{
-			doc:                  "envOverridesConfigFile",
-			globalOrchestrator:   "kubernetes",
-			envOrchestrator:      "swarm",
-			expectedOrchestrator: "swarm",
-			expectedKubernetes:   false,
-			expectedSwarm:        true,
-		},
-		{
-			doc:                  "flagOverridesEnv",
-			envOrchestrator:      "kubernetes",
-			flagOrchestrator:     "swarm",
-			expectedOrchestrator: "swarm",
-			expectedKubernetes:   false,
-			expectedSwarm:        true,
-		},
-	}
-
-	for _, testcase := range testcases {
-		t.Run(testcase.doc, func(t *testing.T) {
-			if testcase.envOrchestrator != "" {
-				defer env.Patch(t, "DOCKER_STACK_ORCHESTRATOR", testcase.envOrchestrator)()
-			}
-			orchestrator, err := GetStackOrchestrator(testcase.flagOrchestrator, testcase.contextOrchestrator, testcase.globalOrchestrator, ioutil.Discard)
-			assert.NilError(t, err)
-			assert.Check(t, is.Equal(testcase.expectedKubernetes, orchestrator.HasKubernetes()))
-			assert.Check(t, is.Equal(testcase.expectedSwarm, orchestrator.HasSwarm()))
-			assert.Check(t, is.Equal(testcase.expectedOrchestrator, string(orchestrator)))
-		})
-	}
-}
cli/command/stack/cmd.go
@@ -1,47 +1,20 @@
 package stack
 
 import (
-	"errors"
 	"fmt"
-	"strings"
 
 	"github.com/docker/cli/cli"
 	"github.com/docker/cli/cli/command"
 	"github.com/spf13/cobra"
-	"github.com/spf13/pflag"
 )
 
-var errUnsupportedAllOrchestrator = fmt.Errorf(`no orchestrator specified: use either "kubernetes" or "swarm"`)
-
-type commonOptions struct {
-	orchestrator command.Orchestrator
-}
-
-func (o *commonOptions) Orchestrator() command.Orchestrator {
-	if o == nil {
-		return command.OrchestratorSwarm
-	}
-	return o.orchestrator
-}
-
 // NewStackCommand returns a cobra command for `stack` subcommands
 func NewStackCommand(dockerCli command.Cli) *cobra.Command {
-	var opts commonOptions
 	cmd := &cobra.Command{
 		Use:   "stack [OPTIONS]",
 		Short: "Manage Docker stacks",
 		Args:  cli.NoArgs,
-		PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
-			orchestrator, err := getOrchestrator(dockerCli, cmd)
-			if err != nil {
-				return err
-			}
-			opts.orchestrator = orchestrator
-			hideOrchestrationFlags(cmd, orchestrator)
-			return checkSupportedFlag(cmd, orchestrator)
-		},
-
-		RunE: command.ShowHelp(dockerCli.Err()),
+		RunE:  command.ShowHelp(dockerCli.Err()),
 		Annotations: map[string]string{
 			"version": "1.25",
 		},
@@ -52,71 +25,18 @@ func NewStackCommand(dockerCli command.Cli) *cobra.Command {
 			fmt.Fprintln(dockerCli.Err(), err)
 			return
 		}
-		if err := cmd.PersistentPreRunE(c, args); err != nil {
-			fmt.Fprintln(dockerCli.Err(), err)
-			return
-		}
-		hideOrchestrationFlags(c, opts.orchestrator)
 		defaultHelpFunc(c, args)
 	})
 	cmd.AddCommand(
-		newDeployCommand(dockerCli, &opts),
-		newListCommand(dockerCli, &opts),
-		newPsCommand(dockerCli, &opts),
-		newRemoveCommand(dockerCli, &opts),
-		newServicesCommand(dockerCli, &opts),
+		newDeployCommand(dockerCli),
+		newListCommand(dockerCli),
+		newPsCommand(dockerCli),
+		newRemoveCommand(dockerCli),
+		newServicesCommand(dockerCli),
 	)
 	flags := cmd.PersistentFlags()
 	flags.String("kubeconfig", "", "Kubernetes config file")
 	flags.SetAnnotation("kubeconfig", "kubernetes", nil)
+	flags.SetAnnotation("kubeconfig", "deprecated", nil)
-	flags.String("orchestrator", "", "Orchestrator to use (swarm|kubernetes|all)")
+	flags.String("orchestrator", "", "Orchestrator to use (swarm|all)")
 	flags.SetAnnotation("orchestrator", "deprecated", nil)
 	flags.MarkDeprecated("orchestrator", "option will be ignored")
 	return cmd
 }
-
-func getOrchestrator(dockerCli command.Cli, cmd *cobra.Command) (command.Orchestrator, error) {
-	var orchestratorFlag string
-	if o, err := cmd.Flags().GetString("orchestrator"); err == nil {
-		orchestratorFlag = o
-	}
-	return dockerCli.StackOrchestrator(orchestratorFlag)
-}
-
-func hideOrchestrationFlags(cmd *cobra.Command, orchestrator command.Orchestrator) {
-	cmd.Flags().VisitAll(func(f *pflag.Flag) {
-		if _, ok := f.Annotations["kubernetes"]; ok && !orchestrator.HasKubernetes() {
-			f.Hidden = true
-		}
-		if _, ok := f.Annotations["swarm"]; ok && !orchestrator.HasSwarm() {
-			f.Hidden = true
-		}
-	})
-	for _, subcmd := range cmd.Commands() {
-		hideOrchestrationFlags(subcmd, orchestrator)
-	}
-}
-
-func checkSupportedFlag(cmd *cobra.Command, orchestrator command.Orchestrator) error {
-	errs := []string{}
-	cmd.Flags().VisitAll(func(f *pflag.Flag) {
-		if !f.Changed {
-			return
-		}
-		if _, ok := f.Annotations["kubernetes"]; ok && !orchestrator.HasKubernetes() {
-			errs = append(errs, fmt.Sprintf(`"--%s" is only supported on a Docker cli with kubernetes features enabled`, f.Name))
-		}
-		if _, ok := f.Annotations["swarm"]; ok && !orchestrator.HasSwarm() {
-			errs = append(errs, fmt.Sprintf(`"--%s" is only supported on a Docker cli with swarm features enabled`, f.Name))
-		}
-	})
-	for _, subcmd := range cmd.Commands() {
-		if err := checkSupportedFlag(subcmd, orchestrator); err != nil {
-			errs = append(errs, err.Error())
-		}
-	}
-	if len(errs) > 0 {
-		return errors.New(strings.Join(errs, "\n"))
-	}
-	return nil
-}
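Besides the PersistentPreRunE hook, the deletion above also removes the annotation-driven flag handling (hideOrchestrationFlags and checkSupportedFlag), which tagged flags with "kubernetes"/"swarm" annotations and hid or rejected them depending on the active orchestrator. A compact sketch of that now-removed pattern, assuming spf13/pflag (flag name and values are illustrative):

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	flags := pflag.NewFlagSet("stack", pflag.ContinueOnError)
	flags.String("namespace", "", "Kubernetes namespace to use")
	// Tag the flag the same way the deleted hideOrchestrationFlags expected.
	flags.SetAnnotation("namespace", "kubernetes", nil)

	hasKubernetes := false // after this PR the answer is effectively always "no"
	flags.VisitAll(func(f *pflag.Flag) {
		if _, ok := f.Annotations["kubernetes"]; ok && !hasKubernetes {
			f.Hidden = true // hidden flags are left out of help output
		}
	})
	fmt.Print(flags.FlagUsages()) // prints nothing: the only flag is hidden
}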
@ -4,10 +4,6 @@ import (
|
|||
"fmt"
|
||||
"strings"
|
||||
"unicode"
|
||||
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/cli/cli/command/stack/kubernetes"
|
||||
"github.com/spf13/pflag"
|
||||
)
|
||||
|
||||
// validateStackName checks if the provided string is a valid stack name (namespace).
|
||||
|
@@ -33,18 +29,3 @@ func validateStackNames(namespaces []string) error {
func quotesOrWhitespace(r rune) bool {
	return unicode.IsSpace(r) || r == '"' || r == '\''
}

func runOrchestratedCommand(dockerCli command.Cli, flags *pflag.FlagSet, commonOrchestrator command.Orchestrator, swarmCmd func() error, kubernetesCmd func(*kubernetes.KubeCli) error) error {
	switch {
	case commonOrchestrator.HasAll():
		return errUnsupportedAllOrchestrator
	case commonOrchestrator.HasKubernetes():
		kli, err := kubernetes.WrapCli(dockerCli, kubernetes.NewOptions(flags, commonOrchestrator))
		if err != nil {
			return err
		}
		return kubernetesCmd(kli)
	default:
		return swarmCmd()
	}
}
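The retained quotesOrWhitespace predicate is the kind of rune test that pairs naturally with strings.IndexFunc for name validation. A small, runnable usage sketch (the loop and example names are illustrative, not the repository's validation code):

package main

import (
	"fmt"
	"strings"
	"unicode"
)

// quotesOrWhitespace mirrors the predicate kept in the stack package.
func quotesOrWhitespace(r rune) bool {
	return unicode.IsSpace(r) || r == '"' || r == '\''
}

func main() {
	for _, name := range []string{"web", "my stack", `"quoted"`} {
		// IndexFunc returns -1 only when no rune matches the predicate.
		if strings.IndexFunc(name, quotesOrWhitespace) != -1 {
			fmt.Printf("%q is not a valid stack name\n", name)
			continue
		}
		fmt.Printf("%q is valid\n", name)
	}
}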
@@ -3,7 +3,6 @@ package stack
import (
	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/cli/command/stack/kubernetes"
	"github.com/docker/cli/cli/command/stack/loader"
	"github.com/docker/cli/cli/command/stack/options"
	"github.com/docker/cli/cli/command/stack/swarm"

@@ -12,7 +11,7 @@ import (
	"github.com/spf13/pflag"
)

func newDeployCommand(dockerCli command.Cli, common *commonOptions) *cobra.Command {
func newDeployCommand(dockerCli command.Cli) *cobra.Command {
	var opts options.Deploy

	cmd := &cobra.Command{

@@ -29,7 +28,7 @@ func newDeployCommand(dockerCli command.Cli, common *commonOptions) *cobra.Command {
			if err != nil {
				return err
			}
			return RunDeploy(dockerCli, cmd.Flags(), config, common.Orchestrator(), opts)
			return RunDeploy(dockerCli, cmd.Flags(), config, opts)
		},
	}

@@ -45,13 +44,10 @@ func newDeployCommand(dockerCli command.Cli, common *commonOptions) *cobra.Command {
		`Query the registry to resolve image digest and supported platforms ("`+swarm.ResolveImageAlways+`"|"`+swarm.ResolveImageChanged+`"|"`+swarm.ResolveImageNever+`")`)
	flags.SetAnnotation("resolve-image", "version", []string{"1.30"})
	flags.SetAnnotation("resolve-image", "swarm", nil)
	kubernetes.AddNamespaceFlag(flags)
	return cmd
}

// RunDeploy performs a stack deploy against the specified orchestrator
func RunDeploy(dockerCli command.Cli, flags *pflag.FlagSet, config *composetypes.Config, commonOrchestrator command.Orchestrator, opts options.Deploy) error {
	return runOrchestratedCommand(dockerCli, flags, commonOrchestrator,
		func() error { return swarm.RunDeploy(dockerCli, opts, config) },
		func(kli *kubernetes.KubeCli) error { return kubernetes.RunDeploy(kli, opts, config) })
// RunDeploy performs a stack deploy against the specified swarm cluster
func RunDeploy(dockerCli command.Cli, flags *pflag.FlagSet, config *composetypes.Config, opts options.Deploy) error {
	return swarm.RunDeploy(dockerCli, opts, config)
}
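With the kubernetes branch gone, RunDeploy keeps its exported name but becomes a plain delegation to the swarm backend. A hedged sketch of that thin-wrapper pattern (the names below are illustrative stand-ins, not the real packages):

package main

import "fmt"

// deployToSwarm is a hypothetical stand-in for swarm.RunDeploy.
func deployToSwarm(stack string) error {
	fmt.Println("deploying", stack, "to swarm")
	return nil
}

// RunDeploy keeps its exported name so external callers are unaffected;
// it now simply delegates instead of dispatching on an orchestrator value.
func RunDeploy(stack string) error {
	return deployToSwarm(stack)
}

func main() {
	if err := RunDeploy("web"); err != nil {
		fmt.Println("deploy failed:", err)
	}
}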
@@ -9,7 +9,7 @@ import (
)

func TestDeployWithEmptyName(t *testing.T) {
	cmd := newDeployCommand(test.NewFakeCli(&fakeClient{}), nil)
	cmd := newDeployCommand(test.NewFakeCli(&fakeClient{}))
	cmd.SetArgs([]string{"' '"})
	cmd.SetOut(ioutil.Discard)
@@ -7,15 +7,10 @@ import (
)

const (
	// KubernetesStackTableFormat is the default Kubernetes stack format
	KubernetesStackTableFormat formatter.Format = "table {{.Name}}\t{{.Services}}\t{{.Orchestrator}}\t{{.Namespace}}"

	// SwarmStackTableFormat is the default Swarm stack format
	SwarmStackTableFormat formatter.Format = "table {{.Name}}\t{{.Services}}\t{{.Orchestrator}}"
	SwarmStackTableFormat formatter.Format = "table {{.Name}}\t{{.Services}}"

	stackServicesHeader      = "SERVICES"
	stackOrchestrastorHeader = "ORCHESTRATOR"
	stackNamespaceHeader     = "NAMESPACE"
	stackServicesHeader = "SERVICES"

	// TableFormatKey is an alias for formatter.TableFormatKey
	TableFormatKey = formatter.TableFormatKey

@@ -33,10 +28,6 @@ type Stack struct {
	Name string
	// Services is the number of the services
	Services int
	// Orchestrator is the platform where the stack is deployed
	Orchestrator string
	// Namespace is the Kubernetes namespace assigned to the stack
	Namespace string
}

// StackWrite writes formatted stacks using the Context

@@ -60,10 +51,8 @@ type stackContext struct {
func newStackContext() *stackContext {
	stackCtx := stackContext{}
	stackCtx.Header = formatter.SubHeaderContext{
		"Name":         formatter.NameHeader,
		"Services":     stackServicesHeader,
		"Orchestrator": stackOrchestrastorHeader,
		"Namespace":    stackNamespaceHeader,
		"Name":     formatter.NameHeader,
		"Services": stackServicesHeader,
	}
	return &stackCtx
}

@@ -79,11 +68,3 @@ func (s *stackContext) Name() string {
func (s *stackContext) Services() string {
	return strconv.Itoa(s.s.Services)
}

func (s *stackContext) Orchestrator() string {
	return s.s.Orchestrator
}

func (s *stackContext) Namespace() string {
	return s.s.Namespace
}
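The "table ..." format strings above are Go text templates rendered once per row under a tab-based column writer. An approximate, stdlib-only sketch of how such a format produces the NAME/SERVICES table (this mimics, and is not, the CLI's formatter package):

package main

import (
	"fmt"
	"os"
	"text/tabwriter"
	"text/template"
)

type stack struct {
	Name     string
	Services int
}

func main() {
	// The "table " prefix is stripped by the CLI formatter; the remainder is
	// a plain Go template executed once per row.
	row := template.Must(template.New("row").Parse("{{.Name}}\t{{.Services}}\n"))

	w := tabwriter.NewWriter(os.Stdout, 10, 1, 3, ' ', 0)
	fmt.Fprintln(w, "NAME\tSERVICES")
	for _, s := range []stack{{"baz", 2}, {"bar", 1}} {
		if err := row.Execute(w, s); err != nil {
			fmt.Fprintln(os.Stderr, err)
		}
	}
	w.Flush() // tabwriter aligns the columns only on flush
}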
@@ -27,17 +27,9 @@ func TestStackContextWrite(t *testing.T) {
		// Table format
		{
			formatter.Context{Format: SwarmStackTableFormat},
			`NAME SERVICES ORCHESTRATOR
baz 2 orchestrator1
bar 1 orchestrator2
`,
		},
		// Kubernetes table format adds Namespace column
		{
			formatter.Context{Format: KubernetesStackTableFormat},
			`NAME SERVICES ORCHESTRATOR NAMESPACE
baz 2 orchestrator1 namespace1
bar 1 orchestrator2 namespace2
			`NAME SERVICES
baz 2
bar 1
`,
		},
		{

@@ -57,8 +49,8 @@ bar
	}

	stacks := []*Stack{
		{Name: "baz", Services: 2, Orchestrator: "orchestrator1", Namespace: "namespace1"},
		{Name: "bar", Services: 1, Orchestrator: "orchestrator2", Namespace: "namespace2"},
		{Name: "baz", Services: 2},
		{Name: "bar", Services: 1},
	}
	for _, tc := range cases {
		tc := tc
@@ -1,145 +0,0 @@
package kubernetes

import (
	"fmt"
	"net"
	"net/url"
	"os"

	"github.com/docker/cli/cli/command"
	kubecontext "github.com/docker/cli/cli/context/kubernetes"
	kubernetes "github.com/docker/compose-on-kubernetes/api"
	cliv1beta1 "github.com/docker/compose-on-kubernetes/api/client/clientset/typed/compose/v1beta1"
	"github.com/pkg/errors"
	flag "github.com/spf13/pflag"
	kubeclient "k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

// KubeCli holds kubernetes specifics (client, namespace) with the command.Cli
type KubeCli struct {
	command.Cli
	kubeConfig    *restclient.Config
	kubeNamespace string
	clientSet     *kubeclient.Clientset
}

// Options contains resolved parameters to initialize kubernetes clients
type Options struct {
	Namespace    string
	Config       string
	Orchestrator command.Orchestrator
}

// NewOptions returns an Options initialized with command line flags
func NewOptions(flags *flag.FlagSet, orchestrator command.Orchestrator) Options {
	opts := Options{
		Orchestrator: orchestrator,
	}
	if namespace, err := flags.GetString("namespace"); err == nil {
		opts.Namespace = namespace
	}
	if kubeConfig, err := flags.GetString("kubeconfig"); err == nil {
		opts.Config = kubeConfig
	}
	return opts
}

// AddNamespaceFlag adds the namespace flag to the given flag set
func AddNamespaceFlag(flags *flag.FlagSet) {
	flags.String("namespace", "", "Kubernetes namespace to use")
	flags.SetAnnotation("namespace", "kubernetes", nil)
	flags.SetAnnotation("namespace", "deprecated", nil)
}

// WrapCli wraps command.Cli with kubernetes specifics
func WrapCli(dockerCli command.Cli, opts Options) (*KubeCli, error) {
	cli := &KubeCli{
		Cli: dockerCli,
	}
	var (
		clientConfig clientcmd.ClientConfig
		err          error
	)
	if dockerCli.CurrentContext() == "" {
		clientConfig = kubernetes.NewKubernetesConfig(opts.Config)
	} else {
		clientConfig, err = kubecontext.ConfigFromContext(dockerCli.CurrentContext(), dockerCli.ContextStore())
	}
	if err != nil {
		return nil, err
	}

	cli.kubeNamespace = opts.Namespace
	if opts.Namespace == "" {
		configNamespace, _, err := clientConfig.Namespace()
		switch {
		case os.IsNotExist(err), os.IsPermission(err):
			return nil, errors.Wrap(err, "unable to load configuration file")
		case err != nil:
			return nil, err
		}
		cli.kubeNamespace = configNamespace
	}

	config, err := clientConfig.ClientConfig()
	if err != nil {
		return nil, err
	}
	cli.kubeConfig = config

	clientSet, err := kubeclient.NewForConfig(config)
	if err != nil {
		return nil, err
	}
	cli.clientSet = clientSet

	if opts.Orchestrator.HasAll() {
		if err := cli.checkHostsMatch(); err != nil {
			return nil, err
		}
	}
	return cli, nil
}

func (c *KubeCli) composeClient() (*Factory, error) {
	return NewFactory(c.kubeNamespace, c.kubeConfig, c.clientSet)
}

func (c *KubeCli) checkHostsMatch() error {
	daemonEndpoint, err := url.Parse(c.Client().DaemonHost())
	if err != nil {
		return err
	}
	kubeEndpoint, err := url.Parse(c.kubeConfig.Host)
	if err != nil {
		return err
	}
	if daemonEndpoint.Hostname() == kubeEndpoint.Hostname() {
		return nil
	}
	// The daemon can be local in Docker for Desktop, e.g. "npipe", "unix", ...
	if daemonEndpoint.Scheme != "tcp" {
		ips, err := net.LookupIP(kubeEndpoint.Hostname())
		if err != nil {
			return err
		}
		for _, ip := range ips {
			if ip.IsLoopback() {
				return nil
			}
		}
	}
	fmt.Fprintf(c.Err(), "WARNING: Swarm and Kubernetes hosts do not match (docker host=%s, kubernetes host=%s).\n"+
		" Update $DOCKER_HOST (or pass -H), or use 'kubectl config use-context' to match.\n", daemonEndpoint.Hostname(), kubeEndpoint.Hostname())
	return nil
}

func (c *KubeCli) stacksv1beta1() (cliv1beta1.StackInterface, error) {
	raw, err := newStackV1Beta1(c.kubeConfig, c.kubeNamespace)
	if err != nil {
		return nil, err
	}
	return raw.stacks, nil
}
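checkHostsMatch reduces to comparing the hostname components of two parsed endpoint URLs. A minimal, self-contained sketch of just that comparison (the endpoint strings are made-up values, and the loopback fallback the full function performs is omitted):

package main

import (
	"fmt"
	"net/url"
)

// sameHost reports whether two endpoint URLs point at the same hostname.
func sameHost(dockerHost, kubeHost string) (bool, error) {
	d, err := url.Parse(dockerHost)
	if err != nil {
		return false, err
	}
	k, err := url.Parse(kubeHost)
	if err != nil {
		return false, err
	}
	return d.Hostname() == k.Hostname(), nil
}

func main() {
	ok, err := sameHost("tcp://10.0.0.5:2376", "https://10.0.0.5:6443")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("hosts match:", ok) // true: both parse to hostname 10.0.0.5
}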
@@ -1,105 +0,0 @@
package kubernetes

import (
	"github.com/docker/cli/kubernetes"
	"github.com/pkg/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	kubeclient "k8s.io/client-go/kubernetes"
	appsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2"
	typesappsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2"
	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	restclient "k8s.io/client-go/rest"
)

// Factory is the kubernetes client factory
type Factory struct {
	namespace     string
	config        *restclient.Config
	coreClientSet corev1.CoreV1Interface
	appsClientSet appsv1beta2.AppsV1beta2Interface
	clientSet     *kubeclient.Clientset
}

// NewFactory creates a kubernetes client factory
func NewFactory(namespace string, config *restclient.Config, clientSet *kubeclient.Clientset) (*Factory, error) {
	coreClientSet, err := corev1.NewForConfig(config)
	if err != nil {
		return nil, err
	}

	appsClientSet, err := appsv1beta2.NewForConfig(config)
	if err != nil {
		return nil, err
	}

	return &Factory{
		namespace:     namespace,
		config:        config,
		coreClientSet: coreClientSet,
		appsClientSet: appsClientSet,
		clientSet:     clientSet,
	}, nil
}

// ConfigMaps returns a client for kubernetes's config maps
func (s *Factory) ConfigMaps() corev1.ConfigMapInterface {
	return s.coreClientSet.ConfigMaps(s.namespace)
}

// Secrets returns a client for kubernetes's secrets
func (s *Factory) Secrets() corev1.SecretInterface {
	return s.coreClientSet.Secrets(s.namespace)
}

// Services returns a client for kubernetes's services
func (s *Factory) Services() corev1.ServiceInterface {
	return s.coreClientSet.Services(s.namespace)
}

// Pods returns a client for kubernetes's pods
func (s *Factory) Pods() corev1.PodInterface {
	return s.coreClientSet.Pods(s.namespace)
}

// Nodes returns a client for kubernetes's nodes
func (s *Factory) Nodes() corev1.NodeInterface {
	return s.coreClientSet.Nodes()
}

// ReplicationControllers returns a client for kubernetes replication controllers
func (s *Factory) ReplicationControllers() corev1.ReplicationControllerInterface {
	return s.coreClientSet.ReplicationControllers(s.namespace)
}

// ReplicaSets returns a client for kubernetes replica sets
func (s *Factory) ReplicaSets() typesappsv1beta2.ReplicaSetInterface {
	return s.appsClientSet.ReplicaSets(s.namespace)
}

// DaemonSets returns a client for kubernetes daemon sets
func (s *Factory) DaemonSets() typesappsv1beta2.DaemonSetInterface {
	return s.appsClientSet.DaemonSets(s.namespace)
}

// Stacks returns a client for Docker's Stack on Kubernetes
func (s *Factory) Stacks(allNamespaces bool) (StackClient, error) {
	version, err := kubernetes.GetStackAPIVersion(s.clientSet.Discovery())
	if err != nil {
		return nil, err
	}
	namespace := s.namespace
	if allNamespaces {
		namespace = metav1.NamespaceAll
	}

	switch version {
	case kubernetes.StackAPIV1Beta1:
		return newStackV1Beta1(s.config, namespace)
	case kubernetes.StackAPIV1Beta2:
		return newStackV1Beta2(s.config, namespace)
	case kubernetes.StackAPIV1Alpha3:
		return newStackV1Alpha3(s.config, namespace)
	default:
		return nil, errors.Errorf("unsupported stack API version: %q", version)
	}
}
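The Factory above centralizes construction of several typed clients from one rest config and namespace. A generic, hypothetical sketch of that shape (stdlib only; the accessor names and string "clients" are placeholders, not client-go types):

package main

import "fmt"

// factory validates shared inputs once in its constructor, then hands out
// namespaced accessors, mirroring the structure of the Factory above.
type factory struct {
	namespace string
}

func newFactory(namespace string) (*factory, error) {
	if namespace == "" {
		return nil, fmt.Errorf("namespace must not be empty")
	}
	return &factory{namespace: namespace}, nil
}

// Each accessor scopes its result to the factory's namespace.
func (f *factory) Pods() string    { return f.namespace + "/pods" }
func (f *factory) Secrets() string { return f.namespace + "/secrets" }

func main() {
	f, err := newFactory("default")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(f.Pods(), f.Secrets())
}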
@@ -1,269 +0,0 @@
package kubernetes

import (
	"fmt"
	"sort"
	"strings"
	"time"

	"github.com/docker/compose-on-kubernetes/api/labels"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/swarm"
	appsv1beta2 "k8s.io/api/apps/v1beta2"
	apiv1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
)

// Pod conversion
func podToTask(pod apiv1.Pod) swarm.Task {
	var startTime time.Time
	if pod.Status.StartTime != nil {
		startTime = (*pod.Status.StartTime).Time
	}
	task := swarm.Task{
		ID:     string(pod.UID),
		NodeID: pod.Spec.NodeName,
		Spec: swarm.TaskSpec{
			ContainerSpec: &swarm.ContainerSpec{
				Image: getContainerImage(pod.Spec.Containers),
			},
		},
		DesiredState: podPhaseToState(pod.Status.Phase),
		Status: swarm.TaskStatus{
			State:     podPhaseToState(pod.Status.Phase),
			Timestamp: startTime,
			PortStatus: swarm.PortStatus{
				Ports: getPorts(pod.Spec.Containers),
			},
		},
	}

	return task
}

func podPhaseToState(phase apiv1.PodPhase) swarm.TaskState {
	switch phase {
	case apiv1.PodPending:
		return swarm.TaskStatePending
	case apiv1.PodRunning:
		return swarm.TaskStateRunning
	case apiv1.PodSucceeded:
		return swarm.TaskStateComplete
	case apiv1.PodFailed:
		return swarm.TaskStateFailed
	default:
		return swarm.TaskState("unknown")
	}
}

func toSwarmProtocol(protocol apiv1.Protocol) swarm.PortConfigProtocol {
	switch protocol {
	case apiv1.ProtocolTCP:
		return swarm.PortConfigProtocolTCP
	case apiv1.ProtocolUDP:
		return swarm.PortConfigProtocolUDP
	}
	return swarm.PortConfigProtocol("unknown")
}

func fetchPods(stackName string, pods corev1.PodInterface, f filters.Args) ([]apiv1.Pod, error) {
	services := f.Get("service")
	// for existing script compatibility, support either <servicename> or <stackname>_<servicename> format
	stackNamePrefix := stackName + "_"
	for _, s := range services {
		if strings.HasPrefix(s, stackNamePrefix) {
			services = append(services, strings.TrimPrefix(s, stackNamePrefix))
		}
	}
	listOpts := metav1.ListOptions{LabelSelector: labels.SelectorForStack(stackName, services...)}
	var result []apiv1.Pod
	podsList, err := pods.List(listOpts)
	if err != nil {
		return nil, err
	}
	nodes := f.Get("node")
	for _, pod := range podsList.Items {
		if filterPod(pod, nodes) &&
			// name filter is done client side for matching partials
			f.FuzzyMatch("name", stackNamePrefix+pod.Name) {

			result = append(result, pod)
		}
	}
	return result, nil
}

func filterPod(pod apiv1.Pod, nodes []string) bool {
	if len(nodes) == 0 {
		return true
	}
	for _, name := range nodes {
		if pod.Spec.NodeName == name {
			return true
		}
	}
	return false
}

func getContainerImage(containers []apiv1.Container) string {
	if len(containers) == 0 {
		return ""
	}
	return containers[0].Image
}

func getPorts(containers []apiv1.Container) []swarm.PortConfig {
	if len(containers) == 0 || len(containers[0].Ports) == 0 {
		return nil
	}
	ports := make([]swarm.PortConfig, len(containers[0].Ports))
	for i, port := range containers[0].Ports {
		ports[i] = swarm.PortConfig{
			PublishedPort: uint32(port.HostPort),
			TargetPort:    uint32(port.ContainerPort),
			Protocol:      toSwarmProtocol(port.Protocol),
		}
	}
	return ports
}

type tasksBySlot []swarm.Task

func (t tasksBySlot) Len() int {
	return len(t)
}

func (t tasksBySlot) Swap(i, j int) {
	t[i], t[j] = t[j], t[i]
}

func (t tasksBySlot) Less(i, j int) bool {
	// Sort by slot.
	if t[i].Slot != t[j].Slot {
		return t[i].Slot < t[j].Slot
	}

	// If same slot, sort by most recent.
	return t[j].Meta.CreatedAt.Before(t[i].CreatedAt)
}

const (
	publishedServiceSuffix      = "-published"
	publishedOnRandomPortSuffix = "-random-ports"
)

func convertToServices(replicas *appsv1beta2.ReplicaSetList, daemons *appsv1beta2.DaemonSetList, services *apiv1.ServiceList) ([]swarm.Service, error) {
	result := make([]swarm.Service, len(replicas.Items))

	for i, r := range replicas.Items {
		s, err := replicatedService(r, services)
		if err != nil {
			return nil, err
		}
		result[i] = *s
	}
	for _, d := range daemons.Items {
		s, err := globalService(d, services)
		if err != nil {
			return nil, err
		}
		result = append(result, *s)
	}
	sort.Slice(result, func(i, j int) bool {
		return result[i].ID < result[j].ID
	})
	return result, nil
}

func uint64ptr(i int32) *uint64 {
	var o uint64
	if i > 0 {
		o = uint64(i)
	}
	return &o
}

func replicatedService(r appsv1beta2.ReplicaSet, services *apiv1.ServiceList) (*swarm.Service, error) {
	s, err := convertToService(r.Labels[labels.ForServiceName], services, r.Spec.Template.Spec.Containers)
	if err != nil {
		return nil, err
	}
	s.Spec.Mode = swarm.ServiceMode{
		Replicated: &swarm.ReplicatedService{Replicas: uint64ptr(r.Status.Replicas)},
	}
	s.ServiceStatus = &swarm.ServiceStatus{
		RunningTasks: uint64(r.Status.AvailableReplicas),
		DesiredTasks: uint64(r.Status.Replicas),
	}
	return s, nil
}

func globalService(d appsv1beta2.DaemonSet, services *apiv1.ServiceList) (*swarm.Service, error) {
	s, err := convertToService(d.Labels[labels.ForServiceName], services, d.Spec.Template.Spec.Containers)
	if err != nil {
		return nil, err
	}
	s.Spec.Mode = swarm.ServiceMode{
		Global: &swarm.GlobalService{},
	}
	s.ServiceStatus = &swarm.ServiceStatus{
		RunningTasks: uint64(d.Status.NumberReady),
		DesiredTasks: uint64(d.Status.DesiredNumberScheduled),
	}
	return s, nil
}

func convertToService(serviceName string, services *apiv1.ServiceList, containers []apiv1.Container) (*swarm.Service, error) {
	serviceHeadless, err := findService(services, serviceName)
	if err != nil {
		return nil, err
	}
	stack, ok := serviceHeadless.Labels[labels.ForStackName]
	if ok {
		stack += "_"
	}
	uid := string(serviceHeadless.UID)
	s := &swarm.Service{
		ID: uid,
		Spec: swarm.ServiceSpec{
			Annotations: swarm.Annotations{
				Name: stack + serviceHeadless.Name,
			},
			TaskTemplate: swarm.TaskSpec{
				ContainerSpec: &swarm.ContainerSpec{
					Image: getContainerImage(containers),
				},
			},
		},
	}
	if serviceNodePort, err := findService(services, serviceName+publishedOnRandomPortSuffix); err == nil && serviceNodePort.Spec.Type == apiv1.ServiceTypeNodePort {
		s.Endpoint = serviceEndpoint(serviceNodePort, swarm.PortConfigPublishModeHost)
	}
	if serviceLoadBalancer, err := findService(services, serviceName+publishedServiceSuffix); err == nil && serviceLoadBalancer.Spec.Type == apiv1.ServiceTypeLoadBalancer {
		s.Endpoint = serviceEndpoint(serviceLoadBalancer, swarm.PortConfigPublishModeIngress)
	}
	return s, nil
}

func findService(services *apiv1.ServiceList, name string) (apiv1.Service, error) {
	for _, s := range services.Items {
		if s.Name == name {
			return s, nil
		}
	}
	return apiv1.Service{}, fmt.Errorf("could not find service '%s'", name)
}

func serviceEndpoint(service apiv1.Service, publishMode swarm.PortConfigPublishMode) swarm.Endpoint {
	configs := make([]swarm.PortConfig, len(service.Spec.Ports))
	for i, p := range service.Spec.Ports {
		configs[i] = swarm.PortConfig{
			PublishMode:   publishMode,
			PublishedPort: uint32(p.Port),
			TargetPort:    uint32(p.TargetPort.IntValue()),
			Protocol:      toSwarmProtocol(p.Protocol),
		}
	}
	return swarm.Endpoint{Ports: configs}
}
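The tasksBySlot ordering (by slot, then most recent first) is easy to replicate with sort.Slice. A runnable sketch over a trimmed-down task type (a stand-in for swarm.Task, keeping only the fields the comparison reads):

package main

import (
	"fmt"
	"sort"
	"time"
)

// task is a hypothetical stand-in for swarm.Task with only Slot and CreatedAt.
type task struct {
	ID        string
	Slot      int
	CreatedAt time.Time
}

func main() {
	now := time.Now()
	tasks := []task{
		{ID: "c", Slot: 2, CreatedAt: now},
		{ID: "a", Slot: 1, CreatedAt: now.Add(-time.Hour)},
		{ID: "b", Slot: 1, CreatedAt: now}, // same slot as "a", but newer
	}
	// Same rule as tasksBySlot.Less: ascending slot, then most recent first.
	sort.Slice(tasks, func(i, j int) bool {
		if tasks[i].Slot != tasks[j].Slot {
			return tasks[i].Slot < tasks[j].Slot
		}
		return tasks[j].CreatedAt.Before(tasks[i].CreatedAt)
	})
	for _, t := range tasks {
		fmt.Println(t.ID, t.Slot) // prints: b 1, a 1, c 2
	}
}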
@@ -1,185 +0,0 @@
package kubernetes

import (
	"testing"

	. "github.com/docker/cli/internal/test/builders" // Import builders to get the builder function as package function
	"github.com/docker/compose-on-kubernetes/api/labels"
	"github.com/docker/docker/api/types/swarm"
	"gotest.tools/v3/assert"
	appsv1beta2 "k8s.io/api/apps/v1beta2"
	apiv1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	apimachineryTypes "k8s.io/apimachinery/pkg/types"
	apimachineryUtil "k8s.io/apimachinery/pkg/util/intstr"
)

func TestReplicasConversionNeedsAService(t *testing.T) {
	replicas := appsv1beta2.ReplicaSetList{
		Items: []appsv1beta2.ReplicaSet{makeReplicaSet("unknown", 0, 0)},
	}
	services := apiv1.ServiceList{}
	_, err := convertToServices(&replicas, &appsv1beta2.DaemonSetList{}, &services)
	assert.ErrorContains(t, err, "could not find service")
}

func TestKubernetesServiceToSwarmServiceConversion(t *testing.T) {
	testCases := []struct {
		doc              string
		replicas         *appsv1beta2.ReplicaSetList
		services         *apiv1.ServiceList
		expectedServices []swarm.Service
	}{
		{
			doc: "Match replicas with headless stack services",
			replicas: &appsv1beta2.ReplicaSetList{
				Items: []appsv1beta2.ReplicaSet{
					makeReplicaSet("service1", 2, 5),
					makeReplicaSet("service2", 3, 3),
				},
			},
			services: &apiv1.ServiceList{
				Items: []apiv1.Service{
					makeKubeService("service1", "stack", "uid1", apiv1.ServiceTypeClusterIP, nil),
					makeKubeService("service2", "stack", "uid2", apiv1.ServiceTypeClusterIP, nil),
					makeKubeService("service3", "other-stack", "uid2", apiv1.ServiceTypeClusterIP, nil),
				},
			},
			expectedServices: []swarm.Service{
				makeSwarmService(t, "stack_service1", "uid1", ReplicatedService(5), ServiceStatus(5, 2)),
				makeSwarmService(t, "stack_service2", "uid2", ReplicatedService(3), ServiceStatus(3, 3)),
			},
		},
		{
			doc: "Headless service and LoadBalancer Service are tied to the same Swarm service",
			replicas: &appsv1beta2.ReplicaSetList{
				Items: []appsv1beta2.ReplicaSet{
					makeReplicaSet("service", 1, 1),
				},
			},
			services: &apiv1.ServiceList{
				Items: []apiv1.Service{
					makeKubeService("service", "stack", "uid1", apiv1.ServiceTypeClusterIP, nil),
					makeKubeService("service-published", "stack", "uid2", apiv1.ServiceTypeLoadBalancer, []apiv1.ServicePort{
						{
							Port:       80,
							TargetPort: apimachineryUtil.FromInt(80),
							Protocol:   apiv1.ProtocolTCP,
						},
					}),
				},
			},
			expectedServices: []swarm.Service{
				makeSwarmService(t, "stack_service", "uid1",
					ReplicatedService(1),
					ServiceStatus(1, 1),
					withPort(swarm.PortConfig{
						PublishMode:   swarm.PortConfigPublishModeIngress,
						PublishedPort: 80,
						TargetPort:    80,
						Protocol:      swarm.PortConfigProtocolTCP,
					}),
				),
			},
		},
		{
			doc: "Headless service and NodePort Service are tied to the same Swarm service",
			replicas: &appsv1beta2.ReplicaSetList{
				Items: []appsv1beta2.ReplicaSet{
					makeReplicaSet("service", 1, 1),
				},
			},
			services: &apiv1.ServiceList{
				Items: []apiv1.Service{
					makeKubeService("service", "stack", "uid1", apiv1.ServiceTypeClusterIP, nil),
					makeKubeService("service-random-ports", "stack", "uid2", apiv1.ServiceTypeNodePort, []apiv1.ServicePort{
						{
							Port:       35666,
							TargetPort: apimachineryUtil.FromInt(80),
							Protocol:   apiv1.ProtocolTCP,
						},
					}),
				},
			},
			expectedServices: []swarm.Service{
				makeSwarmService(t, "stack_service", "uid1",
					ReplicatedService(1),
					ServiceStatus(1, 1),
					withPort(swarm.PortConfig{
						PublishMode:   swarm.PortConfigPublishModeHost,
						PublishedPort: 35666,
						TargetPort:    80,
						Protocol:      swarm.PortConfigProtocolTCP,
					}),
				),
			},
		},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.doc, func(t *testing.T) {
			swarmServices, err := convertToServices(tc.replicas, &appsv1beta2.DaemonSetList{}, tc.services)
			assert.NilError(t, err)
			assert.DeepEqual(t, tc.expectedServices, swarmServices)
		})
	}
}

func makeReplicaSet(service string, available, replicas int32) appsv1beta2.ReplicaSet {
	return appsv1beta2.ReplicaSet{
		ObjectMeta: metav1.ObjectMeta{
			Labels: map[string]string{
				labels.ForServiceName: service,
			},
		},
		Spec: appsv1beta2.ReplicaSetSpec{
			Template: apiv1.PodTemplateSpec{
				Spec: apiv1.PodSpec{
					Containers: []apiv1.Container{
						{
							Image: "image",
						},
					},
				},
			},
		},
		Status: appsv1beta2.ReplicaSetStatus{
			AvailableReplicas: available,
			Replicas:          replicas,
		},
	}
}

func makeKubeService(service, stack, uid string, serviceType apiv1.ServiceType, ports []apiv1.ServicePort) apiv1.Service {
	return apiv1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Labels: map[string]string{
				labels.ForStackName: stack,
			},
			Name: service,
			UID:  apimachineryTypes.UID(uid),
		},
		Spec: apiv1.ServiceSpec{
			Type:  serviceType,
			Ports: ports,
		},
	}
}

// TODO convertToServices currently doesn't set swarm.EndpointSpec.Ports
func withPort(port swarm.PortConfig) func(*swarm.Service) {
	return func(service *swarm.Service) {
		if service.Endpoint.Ports == nil {
			service.Endpoint.Ports = make([]swarm.PortConfig, 0)
		}
		service.Endpoint.Ports = append(service.Endpoint.Ports, port)
	}
}

func makeSwarmService(t *testing.T, name, id string, opts ...func(*swarm.Service)) swarm.Service {
	t.Helper()
	options := []func(*swarm.Service){ServiceID(id), ServiceName(name), ServiceImage("image")}
	options = append(options, opts...)
	return *Service(options...)
}
@@ -1,578 +0,0 @@
package kubernetes

import (
	"io"
	"io/ioutil"
	"regexp"
	"strconv"
	"strings"

	"github.com/docker/cli/cli/compose/loader"
	"github.com/docker/cli/cli/compose/schema"
	composeTypes "github.com/docker/cli/cli/compose/types"
	composetypes "github.com/docker/cli/cli/compose/types"
	latest "github.com/docker/compose-on-kubernetes/api/compose/v1alpha3"
	"github.com/docker/compose-on-kubernetes/api/compose/v1beta1"
	"github.com/docker/compose-on-kubernetes/api/compose/v1beta2"
	"github.com/docker/go-connections/nat"
	"github.com/mitchellh/mapstructure"
	"github.com/pkg/errors"
	yaml "gopkg.in/yaml.v2"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const (
	// kubernatesExtraField is an extra field on ServiceConfigs containing kubernetes-specific extensions to compose format
	kubernatesExtraField = "x-kubernetes"
)

// NewStackConverter returns a converter from types.Config (compose) to the specified
// stack version, or errors out if the version is unsupported or nonexistent.
func NewStackConverter(version string) (StackConverter, error) {
	switch version {
	case "v1beta1":
		return stackV1Beta1Converter{}, nil
	case "v1beta2":
		return stackV1Beta2Converter{}, nil
	case "v1alpha3":
		return stackV1Alpha3Converter{}, nil
	default:
		return nil, errors.Errorf("stack version %s unsupported", version)
	}
}

// StackConverter converts a compose types.Config to a Stack
type StackConverter interface {
	FromCompose(stderr io.Writer, name string, cfg *composetypes.Config) (Stack, error)
}

type stackV1Beta1Converter struct{}

func (s stackV1Beta1Converter) FromCompose(stderr io.Writer, name string, cfg *composetypes.Config) (Stack, error) {
	cfg.Version = v1beta1.MaxComposeVersion
	st, err := fromCompose(stderr, name, cfg, v1beta1Capabilities)
	if err != nil {
		return Stack{}, err
	}
	res, err := yaml.Marshal(cfg)
	if err != nil {
		return Stack{}, err
	}
	// reload the result to check that it produced a valid 3.5 compose file
	resparsedConfig, err := loader.ParseYAML(res)
	if err != nil {
		return Stack{}, err
	}
	if err = schema.Validate(resparsedConfig, v1beta1.MaxComposeVersion); err != nil {
		return Stack{}, errors.Wrapf(err, "the compose yaml file is invalid with v%s", v1beta1.MaxComposeVersion)
	}

	st.ComposeFile = string(res)
	return st, nil
}

type stackV1Beta2Converter struct{}

func (s stackV1Beta2Converter) FromCompose(stderr io.Writer, name string, cfg *composetypes.Config) (Stack, error) {
	return fromCompose(stderr, name, cfg, v1beta2Capabilities)
}

type stackV1Alpha3Converter struct{}

func (s stackV1Alpha3Converter) FromCompose(stderr io.Writer, name string, cfg *composetypes.Config) (Stack, error) {
	return fromCompose(stderr, name, cfg, v1alpha3Capabilities)
}

func fromCompose(stderr io.Writer, name string, cfg *composetypes.Config, capabilities composeCapabilities) (Stack, error) {
	spec, err := fromComposeConfig(stderr, cfg, capabilities)
	if err != nil {
		return Stack{}, err
	}
	return Stack{
		Name: name,
		Spec: spec,
	}, nil
}

func loadStackData(composefile string) (*composetypes.Config, error) {
	parsed, err := loader.ParseYAML([]byte(composefile))
	if err != nil {
		return nil, err
	}
	return loader.Load(composetypes.ConfigDetails{
		ConfigFiles: []composetypes.ConfigFile{
			{
				Config: parsed,
			},
		},
	})
}

// Conversions from internal stack to different stack compose component versions.
func stackFromV1beta1(in *v1beta1.Stack) (Stack, error) {
	cfg, err := loadStackData(in.Spec.ComposeFile)
	if err != nil {
		return Stack{}, err
	}
	spec, err := fromComposeConfig(ioutil.Discard, cfg, v1beta1Capabilities)
	if err != nil {
		return Stack{}, err
	}
	return Stack{
		Name:        in.ObjectMeta.Name,
		Namespace:   in.ObjectMeta.Namespace,
		ComposeFile: in.Spec.ComposeFile,
		Spec:        spec,
	}, nil
}

func stackToV1beta1(s Stack) *v1beta1.Stack {
	return &v1beta1.Stack{
		ObjectMeta: metav1.ObjectMeta{
			Name: s.Name,
		},
		Spec: v1beta1.StackSpec{
			ComposeFile: s.ComposeFile,
		},
	}
}

func stackFromV1beta2(in *v1beta2.Stack) (Stack, error) {
	var spec *latest.StackSpec
	if in.Spec != nil {
		spec = &latest.StackSpec{}
		if err := latest.Convert_v1beta2_StackSpec_To_v1alpha3_StackSpec(in.Spec, spec, nil); err != nil {
			return Stack{}, err
		}
	}
	return Stack{
		Name:      in.ObjectMeta.Name,
		Namespace: in.ObjectMeta.Namespace,
		Spec:      spec,
	}, nil
}

func stackToV1beta2(s Stack) (*v1beta2.Stack, error) {
	var spec *v1beta2.StackSpec
	if s.Spec != nil {
		spec = &v1beta2.StackSpec{}
		if err := latest.Convert_v1alpha3_StackSpec_To_v1beta2_StackSpec(s.Spec, spec, nil); err != nil {
			return nil, err
		}
	}
	return &v1beta2.Stack{
		ObjectMeta: metav1.ObjectMeta{
			Name: s.Name,
		},
		Spec: spec,
	}, nil
}

func stackFromV1alpha3(in *latest.Stack) Stack {
	return Stack{
		Name:      in.ObjectMeta.Name,
		Namespace: in.ObjectMeta.Namespace,
		Spec:      in.Spec,
	}
}

func stackToV1alpha3(s Stack) *latest.Stack {
	return &latest.Stack{
		ObjectMeta: metav1.ObjectMeta{
			Name: s.Name,
		},
		Spec: s.Spec,
	}
}

func fromComposeConfig(stderr io.Writer, c *composeTypes.Config, capabilities composeCapabilities) (*latest.StackSpec, error) {
	if c == nil {
		return nil, nil
	}
	warnUnsupportedFeatures(stderr, c)
	serviceConfigs := make([]latest.ServiceConfig, len(c.Services))
	for i, s := range c.Services {
		svc, err := fromComposeServiceConfig(s, capabilities)
		if err != nil {
			return nil, err
		}
		serviceConfigs[i] = svc
	}
	return &latest.StackSpec{
		Services: serviceConfigs,
		Secrets:  fromComposeSecrets(c.Secrets),
		Configs:  fromComposeConfigs(c.Configs),
	}, nil
}

func fromComposeSecrets(s map[string]composeTypes.SecretConfig) map[string]latest.SecretConfig {
	if s == nil {
		return nil
	}
	m := map[string]latest.SecretConfig{}
	for key, value := range s {
		m[key] = latest.SecretConfig{
			Name: value.Name,
			File: value.File,
			External: latest.External{
				Name:     value.External.Name,
				External: value.External.External,
			},
			Labels: value.Labels,
		}
	}
	return m
}

func fromComposeConfigs(s map[string]composeTypes.ConfigObjConfig) map[string]latest.ConfigObjConfig {
	if s == nil {
		return nil
	}
	m := map[string]latest.ConfigObjConfig{}
	for key, value := range s {
		m[key] = latest.ConfigObjConfig{
			Name: value.Name,
			File: value.File,
			External: latest.External{
				Name:     value.External.Name,
				External: value.External.External,
			},
			Labels: value.Labels,
		}
	}
	return m
}

func fromComposeServiceConfig(s composeTypes.ServiceConfig, capabilities composeCapabilities) (latest.ServiceConfig, error) {
	var (
		userID *int64
		err    error
	)
	if s.User != "" {
		numerical, err := strconv.Atoi(s.User)
		if err == nil {
			unixUserID := int64(numerical)
			userID = &unixUserID
		}
	}
	kubeExtra, err := resolveServiceExtra(s)
	if err != nil {
		return latest.ServiceConfig{}, err
	}
	if kubeExtra.PullSecret != "" && !capabilities.hasPullSecrets {
		return latest.ServiceConfig{}, errors.Errorf(`stack API version %s does not support pull secrets (field "x-kubernetes.pull_secret"), please use version v1alpha3 or higher`, capabilities.apiVersion)
	}
	if kubeExtra.PullPolicy != "" && !capabilities.hasPullPolicies {
		return latest.ServiceConfig{}, errors.Errorf(`stack API version %s does not support pull policies (field "x-kubernetes.pull_policy"), please use version v1alpha3 or higher`, capabilities.apiVersion)
	}

	internalPorts, err := setupIntraStackNetworking(s, kubeExtra, capabilities)
	if err != nil {
		return latest.ServiceConfig{}, err
	}

	return latest.ServiceConfig{
		Name:    s.Name,
		CapAdd:  s.CapAdd,
		CapDrop: s.CapDrop,
		Command: s.Command,
		Configs: fromComposeServiceConfigs(s.Configs),
		Deploy: latest.DeployConfig{
			Mode:          s.Deploy.Mode,
			Replicas:      s.Deploy.Replicas,
			Labels:        s.Deploy.Labels,
			UpdateConfig:  fromComposeUpdateConfig(s.Deploy.UpdateConfig),
			Resources:     fromComposeResources(s.Deploy.Resources),
			RestartPolicy: fromComposeRestartPolicy(s.Deploy.RestartPolicy),
			Placement:     fromComposePlacement(s.Deploy.Placement),
		},
		Entrypoint:          s.Entrypoint,
		Environment:         s.Environment,
		ExtraHosts:          s.ExtraHosts,
		Hostname:            s.Hostname,
		HealthCheck:         fromComposeHealthcheck(s.HealthCheck),
		Image:               s.Image,
		Ipc:                 s.Ipc,
		Labels:              s.Labels,
		Pid:                 s.Pid,
		Ports:               fromComposePorts(s.Ports),
		Privileged:          s.Privileged,
		ReadOnly:            s.ReadOnly,
		Secrets:             fromComposeServiceSecrets(s.Secrets),
		StdinOpen:           s.StdinOpen,
		StopGracePeriod:     composetypes.ConvertDurationPtr(s.StopGracePeriod),
		Tmpfs:               s.Tmpfs,
		Tty:                 s.Tty,
		User:                userID,
		Volumes:             fromComposeServiceVolumeConfig(s.Volumes),
		WorkingDir:          s.WorkingDir,
		PullSecret:          kubeExtra.PullSecret,
		PullPolicy:          kubeExtra.PullPolicy,
		InternalServiceType: kubeExtra.InternalServiceType,
		InternalPorts:       internalPorts,
	}, nil
}

func setupIntraStackNetworking(s composeTypes.ServiceConfig, kubeExtra kubernetesExtra, capabilities composeCapabilities) ([]latest.InternalPort, error) {
	if kubeExtra.InternalServiceType != latest.InternalServiceTypeAuto && !capabilities.hasIntraStackLoadBalancing {
		return nil,
			errors.Errorf(`stack API version %s does not support intra-stack load balancing (field "x-kubernetes.internal_service_type"), please use version v1alpha3 or higher`,
				capabilities.apiVersion)
	}
	if !capabilities.hasIntraStackLoadBalancing {
		return nil, nil
	}
	if err := validateInternalServiceType(kubeExtra.InternalServiceType); err != nil {
		return nil, err
	}
	internalPorts, err := toInternalPorts(s.Expose)
	if err != nil {
		return nil, err
	}
	return internalPorts, nil
}

func validateInternalServiceType(internalServiceType latest.InternalServiceType) error {
	switch internalServiceType {
	case latest.InternalServiceTypeAuto, latest.InternalServiceTypeClusterIP, latest.InternalServiceTypeHeadless:
	default:
		return errors.Errorf(`invalid value %q for field "x-kubernetes.internal_service_type", valid values are %q or %q`, internalServiceType,
			latest.InternalServiceTypeClusterIP,
			latest.InternalServiceTypeHeadless)
	}
	return nil
}

func toInternalPorts(expose []string) ([]latest.InternalPort, error) {
	var internalPorts []latest.InternalPort
	for _, sourcePort := range expose {
		proto, port := nat.SplitProtoPort(sourcePort)
		start, end, err := nat.ParsePortRange(port)
		if err != nil {
			return nil, errors.Errorf("invalid format for expose: %q, error: %s", sourcePort, err)
		}
		for i := start; i <= end; i++ {
			k8sProto := v1.Protocol(strings.ToUpper(proto))
			switch k8sProto {
			case v1.ProtocolSCTP, v1.ProtocolTCP, v1.ProtocolUDP:
			default:
				return nil, errors.Errorf("invalid protocol for expose: %q, supported values are %q, %q and %q", sourcePort, v1.ProtocolSCTP, v1.ProtocolTCP, v1.ProtocolUDP)
			}
			internalPorts = append(internalPorts, latest.InternalPort{
				Port:     int32(i),
				Protocol: k8sProto,
			})
		}
	}
	return internalPorts, nil
}

func resolveServiceExtra(s composeTypes.ServiceConfig) (kubernetesExtra, error) {
	if iface, ok := s.Extras[kubernatesExtraField]; ok {
		var result kubernetesExtra
		if err := mapstructure.Decode(iface, &result); err != nil {
			return kubernetesExtra{}, err
		}
		return result, nil
	}
	return kubernetesExtra{}, nil
}

func fromComposePorts(ports []composeTypes.ServicePortConfig) []latest.ServicePortConfig {
	if ports == nil {
		return nil
	}
	p := make([]latest.ServicePortConfig, len(ports))
	for i, port := range ports {
		p[i] = latest.ServicePortConfig{
			Mode:      port.Mode,
			Target:    port.Target,
			Published: port.Published,
			Protocol:  port.Protocol,
		}
	}
	return p
}

func fromComposeServiceSecrets(secrets []composeTypes.ServiceSecretConfig) []latest.ServiceSecretConfig {
	if secrets == nil {
		return nil
	}
	c := make([]latest.ServiceSecretConfig, len(secrets))
	for i, secret := range secrets {
		c[i] = latest.ServiceSecretConfig{
			Source: secret.Source,
			Target: secret.Target,
			UID:    secret.UID,
			Mode:   secret.Mode,
		}
	}
	return c
}

func fromComposeServiceConfigs(configs []composeTypes.ServiceConfigObjConfig) []latest.ServiceConfigObjConfig {
	if configs == nil {
		return nil
	}
	c := make([]latest.ServiceConfigObjConfig, len(configs))
	for i, config := range configs {
		c[i] = latest.ServiceConfigObjConfig{
			Source: config.Source,
			Target: config.Target,
			UID:    config.UID,
			Mode:   config.Mode,
		}
	}
	return c
}

func fromComposeHealthcheck(h *composeTypes.HealthCheckConfig) *latest.HealthCheckConfig {
	if h == nil {
		return nil
	}
	return &latest.HealthCheckConfig{
		Test:     h.Test,
		Timeout:  composetypes.ConvertDurationPtr(h.Timeout),
		Interval: composetypes.ConvertDurationPtr(h.Interval),
		Retries:  h.Retries,
	}
}

func fromComposePlacement(p composeTypes.Placement) latest.Placement {
	return latest.Placement{
		Constraints: fromComposeConstraints(p.Constraints),
	}
}

var constraintEquals = regexp.MustCompile(`([\w\.]*)\W*(==|!=)\W*([\w\.]*)`)

const (
	swarmOs          = "node.platform.os"
	swarmArch        = "node.platform.arch"
	swarmHostname    = "node.hostname"
	swarmLabelPrefix = "node.labels."
)

func fromComposeConstraints(s []string) *latest.Constraints {
	if len(s) == 0 {
		return nil
	}
	constraints := &latest.Constraints{}
	for _, constraint := range s {
		matches := constraintEquals.FindStringSubmatch(constraint)
		if len(matches) == 4 {
			key := matches[1]
			operator := matches[2]
			value := matches[3]
			constraint := &latest.Constraint{
				Operator: operator,
				Value:    value,
			}
			switch {
			case key == swarmOs:
				constraints.OperatingSystem = constraint
			case key == swarmArch:
				constraints.Architecture = constraint
			case key == swarmHostname:
				constraints.Hostname = constraint
			case strings.HasPrefix(key, swarmLabelPrefix):
				if constraints.MatchLabels == nil {
					constraints.MatchLabels = map[string]latest.Constraint{}
				}
				constraints.MatchLabels[strings.TrimPrefix(key, swarmLabelPrefix)] = *constraint
			}
		}
	}
	return constraints
}

func fromComposeResources(r composeTypes.Resources) latest.Resources {
	return latest.Resources{
		Limits:       fromComposeResourcesResourceLimit(r.Limits),
		Reservations: fromComposeResourcesResource(r.Reservations),
	}
}

// TODO create ResourceLimit type and support for limiting Pids on k8s
func fromComposeResourcesResourceLimit(r *composeTypes.ResourceLimit) *latest.Resource {
	if r == nil {
		return nil
	}
	return &latest.Resource{
		MemoryBytes: int64(r.MemoryBytes),
		NanoCPUs:    r.NanoCPUs,
	}
}

func fromComposeResourcesResource(r *composeTypes.Resource) *latest.Resource {
	if r == nil {
		return nil
	}
	return &latest.Resource{
		MemoryBytes: int64(r.MemoryBytes),
		NanoCPUs:    r.NanoCPUs,
	}
}

func fromComposeUpdateConfig(u *composeTypes.UpdateConfig) *latest.UpdateConfig {
	if u == nil {
		return nil
	}
	return &latest.UpdateConfig{
		Parallelism: u.Parallelism,
	}
}

func fromComposeRestartPolicy(r *composeTypes.RestartPolicy) *latest.RestartPolicy {
	if r == nil {
		return nil
	}
	return &latest.RestartPolicy{
		Condition: r.Condition,
	}
}

func fromComposeServiceVolumeConfig(vs []composeTypes.ServiceVolumeConfig) []latest.ServiceVolumeConfig {
	if vs == nil {
		return nil
	}
	volumes := []latest.ServiceVolumeConfig{}
	for _, v := range vs {
		volumes = append(volumes, latest.ServiceVolumeConfig{
			Type:     v.Type,
			Source:   v.Source,
			Target:   v.Target,
			ReadOnly: v.ReadOnly,
		})
	}
	return volumes
}

var (
	v1beta1Capabilities = composeCapabilities{
		apiVersion: "v1beta1",
	}
	v1beta2Capabilities = composeCapabilities{
		apiVersion: "v1beta2",
	}
	v1alpha3Capabilities = composeCapabilities{
		apiVersion:                 "v1alpha3",
		hasPullSecrets:             true,
		hasPullPolicies:            true,
		hasIntraStackLoadBalancing: true,
	}
)

type composeCapabilities struct {
	apiVersion                 string
	hasPullSecrets             bool
	hasPullPolicies            bool
	hasIntraStackLoadBalancing bool
}

type kubernetesExtra struct {
	PullSecret          string                     `mapstructure:"pull_secret"`
	PullPolicy          string                     `mapstructure:"pull_policy"`
	InternalServiceType latest.InternalServiceType `mapstructure:"internal_service_type"`
}
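toInternalPorts leans on two helpers from github.com/docker/go-connections/nat: SplitProtoPort separates "8080-8082/udp" into a protocol and a port range, and ParsePortRange expands the range. A small runnable sketch of just that parsing step (the expose values are examples):

package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

func main() {
	for _, expose := range []string{"80", "8080-8082/udp"} {
		// SplitProtoPort defaults the protocol to "tcp" when none is given.
		proto, port := nat.SplitProtoPort(expose)
		start, end, err := nat.ParsePortRange(port)
		if err != nil {
			fmt.Println("invalid expose entry:", expose, err)
			continue
		}
		for p := start; p <= end; p++ {
			fmt.Printf("port %d proto %s\n", p, proto)
		}
	}
}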
@ -1,349 +0,0 @@
|
|||
package kubernetes
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/cli/cli/compose/loader"
|
||||
composetypes "github.com/docker/cli/cli/compose/types"
|
||||
"github.com/docker/compose-on-kubernetes/api/compose/v1alpha3"
|
||||
"github.com/docker/compose-on-kubernetes/api/compose/v1beta1"
|
||||
"github.com/docker/compose-on-kubernetes/api/compose/v1beta2"
|
||||
"gotest.tools/v3/assert"
|
||||
is "gotest.tools/v3/assert/cmp"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
func TestNewStackConverter(t *testing.T) {
|
||||
_, err := NewStackConverter("v1alpha1")
|
||||
assert.Check(t, is.ErrorContains(err, "stack version v1alpha1 unsupported"))
|
||||
|
||||
_, err = NewStackConverter("v1beta1")
|
||||
assert.NilError(t, err)
|
||||
_, err = NewStackConverter("v1beta2")
|
||||
assert.NilError(t, err)
|
||||
_, err = NewStackConverter("v1alpha3")
|
||||
assert.NilError(t, err)
|
||||
}
|
||||
|
||||
func TestConvertFromToV1beta1(t *testing.T) {
|
||||
composefile := `version: "3.3"
|
||||
services:
|
||||
test:
|
||||
image: nginx
|
||||
secrets:
|
||||
test:
|
||||
file: testdata/secret
|
||||
configs:
|
||||
test:
|
||||
file: testdata/config
|
||||
`
|
||||
stackv1beta1 := &v1beta1.Stack{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test",
|
||||
},
|
||||
Spec: v1beta1.StackSpec{
|
||||
ComposeFile: composefile,
|
||||
},
|
||||
}
|
||||
|
||||
result, err := stackFromV1beta1(stackv1beta1)
|
||||
assert.NilError(t, err)
|
||||
expected := Stack{
|
||||
Name: "test",
|
||||
ComposeFile: composefile,
|
||||
Spec: &v1alpha3.StackSpec{
|
||||
Services: []v1alpha3.ServiceConfig{
|
||||
{
|
||||
Name: "test",
|
||||
Image: "nginx",
|
||||
Environment: make(map[string]*string),
|
||||
},
|
||||
},
|
||||
Secrets: map[string]v1alpha3.SecretConfig{
|
||||
"test": {File: filepath.FromSlash("testdata/secret")},
|
||||
},
|
||||
Configs: map[string]v1alpha3.ConfigObjConfig{
|
||||
"test": {File: filepath.FromSlash("testdata/config")},
|
||||
},
|
||||
},
|
||||
}
|
||||
assert.DeepEqual(t, expected, result)
|
||||
assert.DeepEqual(t, stackv1beta1, stackToV1beta1(result))
|
||||
}
|
||||
|
||||
func TestConvertFromToV1beta2(t *testing.T) {
|
||||
stackv1beta2 := &v1beta2.Stack{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test",
|
||||
},
|
||||
Spec: &v1beta2.StackSpec{
|
||||
Services: []v1beta2.ServiceConfig{
|
||||
{
|
||||
Name: "test",
|
||||
Image: "nginx",
|
||||
Environment: make(map[string]*string),
|
||||
},
|
||||
},
|
||||
Secrets: map[string]v1beta2.SecretConfig{
|
||||
"test": {File: filepath.FromSlash("testdata/secret")},
|
||||
},
|
||||
Configs: map[string]v1beta2.ConfigObjConfig{
|
||||
"test": {File: filepath.FromSlash("testdata/config")},
|
||||
},
|
||||
},
|
||||
}
|
||||
expected := Stack{
|
||||
Name: "test",
|
||||
Spec: &v1alpha3.StackSpec{
|
||||
Services: []v1alpha3.ServiceConfig{
|
||||
{
|
||||
Name: "test",
|
||||
Image: "nginx",
|
||||
Environment: make(map[string]*string),
|
||||
},
|
||||
},
|
||||
Secrets: map[string]v1alpha3.SecretConfig{
|
||||
"test": {File: filepath.FromSlash("testdata/secret")},
|
||||
},
|
||||
Configs: map[string]v1alpha3.ConfigObjConfig{
|
||||
"test": {File: filepath.FromSlash("testdata/config")},
|
||||
},
|
||||
},
|
||||
}
|
||||
result, err := stackFromV1beta2(stackv1beta2)
|
||||
assert.NilError(t, err)
|
||||
assert.DeepEqual(t, expected, result)
|
||||
gotBack, err := stackToV1beta2(result)
|
||||
assert.NilError(t, err)
|
||||
assert.DeepEqual(t, stackv1beta2, gotBack)
|
||||
}
|
||||
|
||||
func TestConvertFromToV1alpha3(t *testing.T) {
|
||||
stackv1alpha3 := &v1alpha3.Stack{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test",
|
||||
},
|
||||
Spec: &v1alpha3.StackSpec{
|
||||
Services: []v1alpha3.ServiceConfig{
|
||||
{
|
||||
Name: "test",
|
||||
                    Image:       "nginx",
                    Environment: make(map[string]*string),
                },
            },
            Secrets: map[string]v1alpha3.SecretConfig{
                "test": {File: filepath.FromSlash("testdata/secret")},
            },
            Configs: map[string]v1alpha3.ConfigObjConfig{
                "test": {File: filepath.FromSlash("testdata/config")},
            },
        },
    }
    expected := Stack{
        Name: "test",
        Spec: &v1alpha3.StackSpec{
            Services: []v1alpha3.ServiceConfig{
                {
                    Name:        "test",
                    Image:       "nginx",
                    Environment: make(map[string]*string),
                },
            },
            Secrets: map[string]v1alpha3.SecretConfig{
                "test": {File: filepath.FromSlash("testdata/secret")},
            },
            Configs: map[string]v1alpha3.ConfigObjConfig{
                "test": {File: filepath.FromSlash("testdata/config")},
            },
        },
    }
    result := stackFromV1alpha3(stackv1alpha3)
    assert.DeepEqual(t, expected, result)
    gotBack := stackToV1alpha3(result)
    assert.DeepEqual(t, stackv1alpha3, gotBack)
}

func loadTestStackWith(t *testing.T, with string) *composetypes.Config {
    t.Helper()
    filePath := fmt.Sprintf("testdata/compose-with-%s.yml", with)
    data, err := ioutil.ReadFile(filePath)
    assert.NilError(t, err)
    yamlData, err := loader.ParseYAML(data)
    assert.NilError(t, err)
    cfg, err := loader.Load(composetypes.ConfigDetails{
        ConfigFiles: []composetypes.ConfigFile{
            {Config: yamlData, Filename: filePath},
        },
    })
    assert.NilError(t, err)
    return cfg
}

func TestHandlePullSecret(t *testing.T) {
    testData := loadTestStackWith(t, "pull-secret")
    cases := []struct {
        version string
        err     string
    }{
        {version: "v1beta1", err: `stack API version v1beta1 does not support pull secrets (field "x-kubernetes.pull_secret"), please use version v1alpha3 or higher`},
        {version: "v1beta2", err: `stack API version v1beta2 does not support pull secrets (field "x-kubernetes.pull_secret"), please use version v1alpha3 or higher`},
        {version: "v1alpha3"},
    }

    for _, c := range cases {
        c := c
        t.Run(c.version, func(t *testing.T) {
            conv, err := NewStackConverter(c.version)
            assert.NilError(t, err)
            s, err := conv.FromCompose(ioutil.Discard, "test", testData)
            if c.err != "" {
                assert.Error(t, err, c.err)
            } else {
                assert.NilError(t, err)
                assert.Equal(t, s.Spec.Services[0].PullSecret, "some-secret")
            }
        })
    }
}

func TestHandlePullPolicy(t *testing.T) {
    testData := loadTestStackWith(t, "pull-policy")
    cases := []struct {
        version string
        err     string
    }{
        {version: "v1beta1", err: `stack API version v1beta1 does not support pull policies (field "x-kubernetes.pull_policy"), please use version v1alpha3 or higher`},
        {version: "v1beta2", err: `stack API version v1beta2 does not support pull policies (field "x-kubernetes.pull_policy"), please use version v1alpha3 or higher`},
        {version: "v1alpha3"},
    }

    for _, c := range cases {
        c := c
        t.Run(c.version, func(t *testing.T) {
            conv, err := NewStackConverter(c.version)
            assert.NilError(t, err)
            s, err := conv.FromCompose(ioutil.Discard, "test", testData)
            if c.err != "" {
                assert.Error(t, err, c.err)
            } else {
                assert.NilError(t, err)
                assert.Equal(t, s.Spec.Services[0].PullPolicy, "Never")
            }
        })
    }
}

func TestHandleInternalServiceType(t *testing.T) {
    cases := []struct {
        name     string
        value    string
        caps     composeCapabilities
        err      string
        expected v1alpha3.InternalServiceType
    }{
        {
            name:  "v1beta1",
            value: "ClusterIP",
            caps:  v1beta1Capabilities,
            err:   `stack API version v1beta1 does not support intra-stack load balancing (field "x-kubernetes.internal_service_type"), please use version v1alpha3 or higher`,
        },
        {
            name:  "v1beta2",
            value: "ClusterIP",
            caps:  v1beta2Capabilities,
            err:   `stack API version v1beta2 does not support intra-stack load balancing (field "x-kubernetes.internal_service_type"), please use version v1alpha3 or higher`,
        },
        {
            name:     "v1alpha3",
            value:    "ClusterIP",
            caps:     v1alpha3Capabilities,
            expected: v1alpha3.InternalServiceTypeClusterIP,
        },
        {
            name:  "v1alpha3-invalid",
            value: "invalid",
            caps:  v1alpha3Capabilities,
            err:   `invalid value "invalid" for field "x-kubernetes.internal_service_type", valid values are "ClusterIP" or "Headless"`,
        },
    }
    for _, c := range cases {
        c := c
        t.Run(c.name, func(t *testing.T) {
            res, err := fromComposeServiceConfig(composetypes.ServiceConfig{
                Name:  "test",
                Image: "test",
                Extras: map[string]interface{}{
                    "x-kubernetes": map[string]interface{}{
                        "internal_service_type": c.value,
                    },
                },
            }, c.caps)
            if c.err == "" {
                assert.NilError(t, err)
                assert.Equal(t, res.InternalServiceType, c.expected)
            } else {
                assert.ErrorContains(t, err, c.err)
            }
        })
    }
}

func TestIgnoreExpose(t *testing.T) {
    testData := loadTestStackWith(t, "expose")
    for _, version := range []string{"v1beta1", "v1beta2"} {
        conv, err := NewStackConverter(version)
        assert.NilError(t, err)
        s, err := conv.FromCompose(ioutil.Discard, "test", testData)
        assert.NilError(t, err)
        assert.Equal(t, len(s.Spec.Services[0].InternalPorts), 0)
    }
}

func TestParseExpose(t *testing.T) {
    testData := loadTestStackWith(t, "expose")
    conv, err := NewStackConverter("v1alpha3")
    assert.NilError(t, err)
    s, err := conv.FromCompose(ioutil.Discard, "test", testData)
    assert.NilError(t, err)
    expected := []v1alpha3.InternalPort{
        {
            Port:     1,
            Protocol: v1.ProtocolTCP,
        },
        {
            Port:     2,
            Protocol: v1.ProtocolTCP,
        },
        {
            Port:     3,
            Protocol: v1.ProtocolTCP,
        },
        {
            Port:     4,
            Protocol: v1.ProtocolTCP,
        },
        {
            Port:     5,
            Protocol: v1.ProtocolUDP,
        },
        {
            Port:     6,
            Protocol: v1.ProtocolUDP,
        },
        {
            Port:     7,
            Protocol: v1.ProtocolUDP,
        },
        {
            Port:     8,
            Protocol: v1.ProtocolUDP,
        },
    }
    assert.DeepEqual(t, s.Spec.Services[0].InternalPorts, expected)
}
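TestParseExpose above expects each expose entry, including ranges like "2-4" and "6-8/udp", to expand into one InternalPort per port. A minimal standalone sketch of that expansion, assuming a helper named expandExpose (illustrative only, not the converter's real code):

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// expandExpose turns a compose expose entry ("1", "2-4", "5/udp", "6-8/udp")
// into one (port, protocol) pair per port, defaulting to tcp.
func expandExpose(entry string) ([][2]string, error) {
    proto := "tcp"
    if i := strings.IndexByte(entry, '/'); i >= 0 {
        proto = entry[i+1:]
        entry = entry[:i]
    }
    lo, hi := entry, entry
    if i := strings.IndexByte(entry, '-'); i >= 0 {
        lo, hi = entry[:i], entry[i+1:]
    }
    start, err := strconv.Atoi(lo)
    if err != nil {
        return nil, err
    }
    end, err := strconv.Atoi(hi)
    if err != nil {
        return nil, err
    }
    var out [][2]string
    for p := start; p <= end; p++ {
        out = append(out, [2]string{strconv.Itoa(p), proto})
    }
    return out, nil
}

func main() {
    for _, e := range []string{"1", "2-4", "5/udp", "6-8/udp"} {
        ports, _ := expandExpose(e)
        fmt.Println(e, "->", ports)
    }
}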

@@ -1,171 +0,0 @@
package kubernetes

import (
    "fmt"
    "io"

    "github.com/docker/cli/cli/command/stack/options"
    composetypes "github.com/docker/cli/cli/compose/types"
    "github.com/docker/cli/cli/streams"
    "github.com/morikuni/aec"
    corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
)

// RunDeploy is the kubernetes implementation of docker stack deploy
func RunDeploy(dockerCli *KubeCli, opts options.Deploy, cfg *composetypes.Config) error {
    cmdOut := dockerCli.Out()

    // Initialize clients
    composeClient, err := dockerCli.composeClient()
    if err != nil {
        return err
    }
    stacks, err := composeClient.Stacks(false)
    if err != nil {
        return err
    }

    stack, err := stacks.FromCompose(dockerCli.Err(), opts.Namespace, cfg)
    if err != nil {
        return err
    }

    configMaps := composeClient.ConfigMaps()
    secrets := composeClient.Secrets()
    services := composeClient.Services()

    if err := stacks.IsColliding(services, stack); err != nil {
        return err
    }

    if err := createResources(stack, stacks, configMaps, secrets); err != nil {
        return err
    }

    fmt.Fprintln(cmdOut, "Waiting for the stack to be stable and running...")
    v1beta1Cli, err := dockerCli.stacksv1beta1()
    if err != nil {
        return err
    }

    pods := composeClient.Pods()
    watcher := &deployWatcher{
        stacks: v1beta1Cli,
        pods:   pods,
    }
    statusUpdates := make(chan serviceStatus)
    displayDone := make(chan struct{})
    go func() {
        defer close(displayDone)
        display := newStatusDisplay(dockerCli.Out())
        for status := range statusUpdates {
            display.OnStatus(status)
        }
    }()

    err = watcher.Watch(stack.Name, stack.getServices(), statusUpdates)
    close(statusUpdates)
    <-displayDone
    if err != nil {
        return err
    }
    fmt.Fprintf(cmdOut, "\nStack %s is stable and running\n\n", stack.Name)
    return nil
}

func createResources(stack Stack, stacks StackClient, configMaps corev1.ConfigMapInterface, secrets corev1.SecretInterface) error {
    var childResources []childResource

    cr, err := stack.createFileBasedConfigMaps(configMaps)
    childResources = append(childResources, cr...) // make sure we collect child resources already created, in case of failure
    if err != nil {
        deleteChildResources(childResources)
        return err
    }

    cr, err = stack.createFileBasedSecrets(secrets)
    childResources = append(childResources, cr...) // make sure we collect child resources already created, in case of failure
    if err != nil {
        deleteChildResources(childResources)
        return err
    }

    return stacks.CreateOrUpdate(stack, childResources)
}
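createResources above follows a collect-then-rollback pattern: child resources created so far are accumulated even when a step fails, so the failure path can delete all of them before returning. A self-contained sketch of the same pattern (the resource type and step names are illustrative assumptions):

package main

import (
    "errors"
    "fmt"
)

type resource string

func create(n string, fail bool) (resource, error) {
    if fail {
        return "", errors.New("create failed: " + n)
    }
    fmt.Println("created", n)
    return resource(n), nil
}

// cleanup deletes best-effort, ignoring errors, mirroring deleteChildResources.
func cleanup(created []resource) {
    for _, r := range created {
        fmt.Println("deleting", r)
    }
}

func main() {
    var created []resource
    for i, n := range []string{"configmap", "secret", "stack"} {
        r, err := create(n, i == 2) // simulate a failure on the last step
        if err != nil {
            cleanup(created) // roll back everything created so far
            fmt.Println("aborted:", err)
            return
        }
        created = append(created, r)
    }
}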

type statusDisplay interface {
    OnStatus(serviceStatus)
}

type metaServiceState string

const (
    metaServiceStateReady   = metaServiceState("Ready")
    metaServiceStatePending = metaServiceState("Pending")
    metaServiceStateFailed  = metaServiceState("Failed")
)

func metaStateFromStatus(status serviceStatus) metaServiceState {
    switch {
    case status.podsReady > 0:
        return metaServiceStateReady
    case status.podsPending > 0:
        return metaServiceStatePending
    default:
        return metaServiceStateFailed
    }
}
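metaStateFromStatus collapses pod counts into a single state with fixed precedence: any ready pod wins, then any pending pod, otherwise the service counts as failed. A runnable sketch of that precedence:

package main

import "fmt"

type status struct{ ready, pending int }

func metaState(s status) string {
    switch {
    case s.ready > 0:
        return "Ready"
    case s.pending > 0:
        return "Pending"
    default:
        return "Failed"
    }
}

func main() {
    fmt.Println(metaState(status{ready: 1, pending: 3})) // Ready: ready takes precedence
    fmt.Println(metaState(status{pending: 2}))           // Pending
    fmt.Println(metaState(status{}))                     // Failed
}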

type forwardOnlyStatusDisplay struct {
    o      *streams.Out
    states map[string]metaServiceState
}

func (d *forwardOnlyStatusDisplay) OnStatus(status serviceStatus) {
    state := metaStateFromStatus(status)
    if d.states[status.name] != state {
        d.states[status.name] = state
        fmt.Fprintf(d.o, "%s: %s\n", status.name, state)
    }
}

type interactiveStatusDisplay struct {
    o        *streams.Out
    statuses []serviceStatus
}

func (d *interactiveStatusDisplay) OnStatus(status serviceStatus) {
    b := aec.EmptyBuilder
    for ix := 0; ix < len(d.statuses); ix++ {
        b = b.Up(1).EraseLine(aec.EraseModes.All)
    }
    b = b.Column(0)
    fmt.Fprint(d.o, b.ANSI)
    updated := false
    for ix, s := range d.statuses {
        if s.name == status.name {
            d.statuses[ix] = status
            s = status
            updated = true
        }
        displayInteractiveServiceStatus(s, d.o)
    }
    if !updated {
        d.statuses = append(d.statuses, status)
        displayInteractiveServiceStatus(status, d.o)
    }
}

func displayInteractiveServiceStatus(status serviceStatus, o io.Writer) {
    state := metaStateFromStatus(status)
    totalFailed := status.podsFailed + status.podsSucceeded + status.podsUnknown
    fmt.Fprintf(o, "%[1]s: %[2]s\t\t[pod status: %[3]d/%[6]d ready, %[4]d/%[6]d pending, %[5]d/%[6]d failed]\n", status.name, state,
        status.podsReady, status.podsPending, totalFailed, status.podsTotal)
}

func newStatusDisplay(o *streams.Out) statusDisplay {
    if !o.IsTerminal() {
        return &forwardOnlyStatusDisplay{o: o, states: map[string]metaServiceState{}}
    }
    return &interactiveStatusDisplay{o: o}
}
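RunDeploy streams watcher updates to a display goroutine over a channel; closing the channel and then waiting on a done channel guarantees all status lines are flushed before the final message prints. The same pattern in miniature (names are illustrative):

package main

import "fmt"

func main() {
    updates := make(chan string)
    done := make(chan struct{})
    go func() {
        defer close(done)
        for u := range updates { // drain until the channel is closed
            fmt.Println("status:", u)
        }
    }()
    for _, u := range []string{"pending", "pending", "ready"} {
        updates <- u
    }
    close(updates) // no more updates will be sent
    <-done         // wait for the display goroutine to finish
    fmt.Println("stack is stable and running")
}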

@@ -1,299 +0,0 @@
package kubernetes

import (
    "errors"
    "testing"

    composev1alpha3 "github.com/docker/compose-on-kubernetes/api/client/clientset/typed/compose/v1alpha3"
    composev1beta1 "github.com/docker/compose-on-kubernetes/api/client/clientset/typed/compose/v1beta1"
    composev1beta2 "github.com/docker/compose-on-kubernetes/api/client/clientset/typed/compose/v1beta2"
    "github.com/docker/compose-on-kubernetes/api/compose/v1alpha3"
    "github.com/docker/compose-on-kubernetes/api/compose/v1beta1"
    "github.com/docker/compose-on-kubernetes/api/compose/v1beta2"
    "gotest.tools/v3/assert"
    kerrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    types "k8s.io/apimachinery/pkg/types"
    watch "k8s.io/apimachinery/pkg/watch"
    "k8s.io/client-go/kubernetes/fake"
)

func testStack() Stack {
    return Stack{
        Name:      "test",
        Namespace: "test",
        ComposeFile: `version: "3.3"
services:
  test:
    image: nginx
secrets:
  test:
    file: testdata/secret
configs:
  test:
    file: testdata/config
`,
        Spec: &v1alpha3.StackSpec{
            Configs: map[string]v1alpha3.ConfigObjConfig{
                "test": {Name: "test", File: "testdata/config"},
            },
            Secrets: map[string]v1alpha3.SecretConfig{
                "test": {Name: "test", File: "testdata/secret"},
            },
        },
    }
}

func TestCreateChildResourcesV1Beta1(t *testing.T) {
    k8sclientSet := fake.NewSimpleClientset()
    stack := testStack()
    configs := k8sclientSet.CoreV1().ConfigMaps("test")
    secrets := k8sclientSet.CoreV1().Secrets("test")
    assert.NilError(t, createResources(
        stack,
        &stackV1Beta1{stacks: &fakeV1beta1Client{}},
        configs,
        secrets))
    c, err := configs.Get("test", metav1.GetOptions{})
    assert.NilError(t, err)
    checkOwnerReferences(t, c.ObjectMeta, v1beta1.SchemeGroupVersion.String())
    s, err := secrets.Get("test", metav1.GetOptions{})
    assert.NilError(t, err)
    checkOwnerReferences(t, s.ObjectMeta, v1beta1.SchemeGroupVersion.String())
}

func checkOwnerReferences(t *testing.T, objMeta metav1.ObjectMeta, stackVersion string) {
    t.Helper()
    assert.Equal(t, len(objMeta.OwnerReferences), 1)
    assert.Equal(t, objMeta.OwnerReferences[0].Name, "test")
    assert.Equal(t, objMeta.OwnerReferences[0].Kind, "Stack")
    assert.Equal(t, objMeta.OwnerReferences[0].APIVersion, stackVersion)
}

func TestCreateChildResourcesV1Beta2(t *testing.T) {
    k8sclientSet := fake.NewSimpleClientset()
    stack := testStack()
    configs := k8sclientSet.CoreV1().ConfigMaps("test")
    secrets := k8sclientSet.CoreV1().Secrets("test")
    assert.NilError(t, createResources(
        stack,
        &stackV1Beta2{stacks: &fakeV1beta2Client{}},
        configs,
        secrets))
    c, err := configs.Get("test", metav1.GetOptions{})
    assert.NilError(t, err)
    checkOwnerReferences(t, c.ObjectMeta, v1beta2.SchemeGroupVersion.String())
    s, err := secrets.Get("test", metav1.GetOptions{})
    assert.NilError(t, err)
    checkOwnerReferences(t, s.ObjectMeta, v1beta2.SchemeGroupVersion.String())
}

func TestCreateChildResourcesV1Alpha3(t *testing.T) {
    k8sclientSet := fake.NewSimpleClientset()
    stack := testStack()
    configs := k8sclientSet.CoreV1().ConfigMaps("test")
    secrets := k8sclientSet.CoreV1().Secrets("test")
    assert.NilError(t, createResources(
        stack,
        &stackV1Alpha3{stacks: &fakeV1alpha3Client{}},
        configs,
        secrets))
    c, err := configs.Get("test", metav1.GetOptions{})
    assert.NilError(t, err)
    checkOwnerReferences(t, c.ObjectMeta, v1alpha3.SchemeGroupVersion.String())
    s, err := secrets.Get("test", metav1.GetOptions{})
    assert.NilError(t, err)
    checkOwnerReferences(t, s.ObjectMeta, v1alpha3.SchemeGroupVersion.String())
}

func TestCreateChildResourcesWithStackCreationErrorV1Beta1(t *testing.T) {
    k8sclientSet := fake.NewSimpleClientset()
    stack := testStack()
    configs := k8sclientSet.CoreV1().ConfigMaps("test")
    secrets := k8sclientSet.CoreV1().Secrets("test")
    err := createResources(
        stack,
        &stackV1Beta1{stacks: &fakeV1beta1Client{errorOnCreate: true}},
        configs,
        secrets)
    assert.Error(t, err, "some error")
    _, err = configs.Get("test", metav1.GetOptions{})
    assert.Check(t, kerrors.IsNotFound(err))
    _, err = secrets.Get("test", metav1.GetOptions{})
    assert.Check(t, kerrors.IsNotFound(err))
}

func TestCreateChildResourcesWithStackCreationErrorV1Beta2(t *testing.T) {
    k8sclientSet := fake.NewSimpleClientset()
    stack := testStack()
    configs := k8sclientSet.CoreV1().ConfigMaps("test")
    secrets := k8sclientSet.CoreV1().Secrets("test")
    err := createResources(
        stack,
        &stackV1Beta2{stacks: &fakeV1beta2Client{errorOnCreate: true}},
        configs,
        secrets)
    assert.Error(t, err, "some error")
    _, err = configs.Get("test", metav1.GetOptions{})
    assert.Check(t, kerrors.IsNotFound(err))
    _, err = secrets.Get("test", metav1.GetOptions{})
    assert.Check(t, kerrors.IsNotFound(err))
}

func TestCreateChildResourcesWithStackCreationErrorV1Alpha3(t *testing.T) {
    k8sclientSet := fake.NewSimpleClientset()
    stack := testStack()
    configs := k8sclientSet.CoreV1().ConfigMaps("test")
    secrets := k8sclientSet.CoreV1().Secrets("test")
    err := createResources(
        stack,
        &stackV1Alpha3{stacks: &fakeV1alpha3Client{errorOnCreate: true}},
        configs,
        secrets)
    assert.Error(t, err, "some error")
    _, err = configs.Get("test", metav1.GetOptions{})
    assert.Check(t, kerrors.IsNotFound(err))
    _, err = secrets.Get("test", metav1.GetOptions{})
    assert.Check(t, kerrors.IsNotFound(err))
}

type fakeV1beta1Client struct {
    errorOnCreate bool
}

func (c *fakeV1beta1Client) Create(s *v1beta1.Stack) (*v1beta1.Stack, error) {
    if c.errorOnCreate {
        return nil, errors.New("some error")
    }
    return s, nil
}

func (c *fakeV1beta1Client) Update(*v1beta1.Stack) (*v1beta1.Stack, error) {
    return nil, nil
}

func (c *fakeV1beta1Client) UpdateStatus(*v1beta1.Stack) (*v1beta1.Stack, error) {
    return nil, nil
}

func (c *fakeV1beta1Client) Delete(name string, options *metav1.DeleteOptions) error {
    return nil
}

func (c *fakeV1beta1Client) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
    return nil
}

func (c *fakeV1beta1Client) Get(name string, options metav1.GetOptions) (*v1beta1.Stack, error) {
    return nil, kerrors.NewNotFound(v1beta1.SchemeGroupVersion.WithResource("stacks").GroupResource(), name)
}

func (c *fakeV1beta1Client) List(opts metav1.ListOptions) (*v1beta1.StackList, error) {
    return nil, nil
}

func (c *fakeV1beta1Client) Watch(opts metav1.ListOptions) (watch.Interface, error) {
    return nil, nil
}

func (c *fakeV1beta1Client) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (*v1beta1.Stack, error) {
    return nil, nil
}

func (c *fakeV1beta1Client) WithSkipValidation() composev1beta1.StackInterface {
    return c
}

type fakeV1beta2Client struct {
    errorOnCreate bool
}

func (c *fakeV1beta2Client) Create(s *v1beta2.Stack) (*v1beta2.Stack, error) {
    if c.errorOnCreate {
        return nil, errors.New("some error")
    }
    return s, nil
}

func (c *fakeV1beta2Client) Update(*v1beta2.Stack) (*v1beta2.Stack, error) {
    return nil, nil
}

func (c *fakeV1beta2Client) UpdateStatus(*v1beta2.Stack) (*v1beta2.Stack, error) {
    return nil, nil
}

func (c *fakeV1beta2Client) Delete(name string, options *metav1.DeleteOptions) error {
    return nil
}

func (c *fakeV1beta2Client) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
    return nil
}

func (c *fakeV1beta2Client) Get(name string, options metav1.GetOptions) (*v1beta2.Stack, error) {
    return nil, kerrors.NewNotFound(v1beta1.SchemeGroupVersion.WithResource("stacks").GroupResource(), name)
}

func (c *fakeV1beta2Client) List(opts metav1.ListOptions) (*v1beta2.StackList, error) {
    return nil, nil
}

func (c *fakeV1beta2Client) Watch(opts metav1.ListOptions) (watch.Interface, error) {
    return nil, nil
}

func (c *fakeV1beta2Client) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (*v1beta2.Stack, error) {
    return nil, nil
}

func (c *fakeV1beta2Client) WithSkipValidation() composev1beta2.StackInterface {
    return c
}

type fakeV1alpha3Client struct {
    errorOnCreate bool
}

func (c *fakeV1alpha3Client) Create(s *v1alpha3.Stack) (*v1alpha3.Stack, error) {
    if c.errorOnCreate {
        return nil, errors.New("some error")
    }
    return s, nil
}

func (c *fakeV1alpha3Client) Update(*v1alpha3.Stack) (*v1alpha3.Stack, error) {
    return nil, nil
}

func (c *fakeV1alpha3Client) UpdateStatus(*v1alpha3.Stack) (*v1alpha3.Stack, error) {
    return nil, nil
}

func (c *fakeV1alpha3Client) Delete(name string, options *metav1.DeleteOptions) error {
    return nil
}

func (c *fakeV1alpha3Client) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
    return nil
}

func (c *fakeV1alpha3Client) Get(name string, options metav1.GetOptions) (*v1alpha3.Stack, error) {
    return nil, kerrors.NewNotFound(v1beta1.SchemeGroupVersion.WithResource("stacks").GroupResource(), name)
}

func (c *fakeV1alpha3Client) List(opts metav1.ListOptions) (*v1alpha3.StackList, error) {
    return nil, nil
}

func (c *fakeV1alpha3Client) Watch(opts metav1.ListOptions) (watch.Interface, error) {
    return nil, nil
}

func (c *fakeV1alpha3Client) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (*v1alpha3.Stack, error) {
    return nil, nil
}

func (c *fakeV1alpha3Client) WithSkipValidation() composev1alpha3.StackInterface {
    return c
}

@@ -1,136 +0,0 @@
package kubernetes

import (
    "encoding/json"
    "fmt"
    "io/ioutil"
    "net/http"
    "net/url"

    "github.com/docker/cli/cli/command"
    "github.com/docker/cli/cli/command/stack/formatter"
    "github.com/docker/cli/cli/command/stack/options"
    "github.com/docker/cli/cli/config/configfile"
    "github.com/pkg/errors"
    core_v1 "k8s.io/api/core/v1"
    apierrs "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// GetStacks lists the kubernetes stacks
func GetStacks(kubeCli *KubeCli, opts options.List) ([]*formatter.Stack, error) {
    if opts.AllNamespaces || len(opts.Namespaces) == 0 {
        if isAllNamespacesDisabled(kubeCli.ConfigFile().Kubernetes) {
            opts.AllNamespaces = true
        }
        return getStacksWithAllNamespaces(kubeCli, opts)
    }
    return getStacksWithNamespaces(kubeCli, opts, removeDuplicates(opts.Namespaces))
}

// Note: despite its name, this returns true unless kubernetes.allNamespaces is
// explicitly set to "disabled" in the CLI config file.
func isAllNamespacesDisabled(kubeCliConfig *configfile.KubernetesConfig) bool {
    return kubeCliConfig == nil || kubeCliConfig.AllNamespaces != "disabled"
}

func getStacks(kubeCli *KubeCli, opts options.List) ([]*formatter.Stack, error) {
    composeClient, err := kubeCli.composeClient()
    if err != nil {
        return nil, err
    }
    stackSvc, err := composeClient.Stacks(opts.AllNamespaces)
    if err != nil {
        return nil, err
    }
    stacks, err := stackSvc.List(metav1.ListOptions{})
    if err != nil {
        return nil, err
    }
    var formattedStacks []*formatter.Stack
    for _, stack := range stacks {
        formattedStacks = append(formattedStacks, &formatter.Stack{
            Name:         stack.Name,
            Services:     len(stack.getServices()),
            Orchestrator: "Kubernetes",
            Namespace:    stack.Namespace,
        })
    }
    return formattedStacks, nil
}

func getStacksWithAllNamespaces(kubeCli *KubeCli, opts options.List) ([]*formatter.Stack, error) {
    stacks, err := getStacks(kubeCli, opts)
    if !apierrs.IsForbidden(err) {
        return stacks, err
    }
    namespaces, err2 := getUserVisibleNamespaces(*kubeCli)
    if err2 != nil {
        return nil, errors.Wrap(err2, "failed to query user visible namespaces")
    }
    if namespaces == nil {
        // UCP API not present, fall back to Kubernetes error
        return nil, err
    }
    opts.AllNamespaces = false
    return getStacksWithNamespaces(kubeCli, opts, namespaces)
}

func getUserVisibleNamespaces(dockerCli command.Cli) ([]string, error) {
    host := dockerCli.Client().DaemonHost()
    endpoint, err := url.Parse(host)
    if err != nil {
        return nil, err
    }
    endpoint.Scheme = "https"
    endpoint.Path = "/kubernetesNamespaces"
    resp, err := dockerCli.Client().HTTPClient().Get(endpoint.String())
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()
    body, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        return nil, errors.Wrapf(err, "received %d status and unable to read response", resp.StatusCode)
    }
    switch resp.StatusCode {
    case http.StatusOK:
        nms := &core_v1.NamespaceList{}
        if err := json.Unmarshal(body, nms); err != nil {
            return nil, errors.Wrapf(err, "unmarshal failed: %s", string(body))
        }
        namespaces := make([]string, len(nms.Items))
        for i, namespace := range nms.Items {
            namespaces[i] = namespace.Name
        }
        return namespaces, nil
    case http.StatusNotFound:
        // UCP API not present
        return nil, nil
    default:
        return nil, fmt.Errorf("received %d status while retrieving namespaces: %s", resp.StatusCode, string(body))
    }
}
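getUserVisibleNamespaces treats the three response classes differently: 200 yields a namespace list, 404 means the UCP endpoint is absent (nil, nil), and anything else is an error. A sketch of that three-way handling against a stand-in HTTP server (the endpoint shape and field names here are assumptions for illustration):

package main

import (
    "encoding/json"
    "fmt"
    "io/ioutil"
    "net/http"
    "net/http/httptest"
)

func main() {
    // stand-in server that answers like a UCP /kubernetesNamespaces endpoint
    srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        json.NewEncoder(w).Encode(map[string][]map[string]string{
            "items": {{"name": "default"}, {"name": "kube-system"}},
        })
    }))
    defer srv.Close()

    resp, err := http.Get(srv.URL + "/kubernetesNamespaces")
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    body, _ := ioutil.ReadAll(resp.Body)
    switch resp.StatusCode {
    case http.StatusOK:
        var nms struct {
            Items []struct {
                Name string `json:"name"`
            } `json:"items"`
        }
        if err := json.Unmarshal(body, &nms); err != nil {
            panic(err)
        }
        for _, ns := range nms.Items {
            fmt.Println(ns.Name)
        }
    case http.StatusNotFound:
        fmt.Println("UCP API not present") // the feature is absent, not an error
    default:
        fmt.Printf("received %d status\n", resp.StatusCode)
    }
}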

func getStacksWithNamespaces(kubeCli *KubeCli, opts options.List, namespaces []string) ([]*formatter.Stack, error) {
    stacks := []*formatter.Stack{}
    for _, namespace := range namespaces {
        kubeCli.kubeNamespace = namespace
        ss, err := getStacks(kubeCli, opts)
        if err != nil {
            return nil, err
        }
        stacks = append(stacks, ss...)
    }
    return stacks, nil
}

func removeDuplicates(namespaces []string) []string {
    found := make(map[string]bool)
    results := namespaces[:0]
    for _, n := range namespaces {
        if !found[n] {
            results = append(results, n)
            found[n] = true
        }
    }
    return results
}
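removeDuplicates deduplicates while preserving order, reusing the input slice's backing array via namespaces[:0]. A quick usage sketch:

package main

import "fmt"

func removeDuplicates(namespaces []string) []string {
    found := make(map[string]bool)
    results := namespaces[:0] // reuse the input's backing array
    for _, n := range namespaces {
        if !found[n] {
            results = append(results, n)
            found[n] = true
        }
    }
    return results
}

func main() {
    fmt.Println(removeDuplicates([]string{"default", "kube-system", "default"}))
    // prints: [default kube-system]
}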

@@ -1,112 +0,0 @@
package kubernetes

import (
    "fmt"
    "sort"

    "github.com/docker/cli/cli/command"
    "github.com/docker/cli/cli/command/stack/formatter"
    "github.com/docker/cli/cli/command/stack/options"
    "github.com/docker/cli/cli/command/task"
    "github.com/docker/docker/api/types/swarm"
    apiv1 "k8s.io/api/core/v1"
    apierrs "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
)

var supportedPSFilters = map[string]bool{
    "name":    true,
    "service": true,
    "node":    true,
}

// RunPS is the kubernetes implementation of docker stack ps
func RunPS(dockerCli *KubeCli, options options.PS) error {
    filters := options.Filter.Value()
    if err := filters.Validate(supportedPSFilters); err != nil {
        return err
    }
    client, err := dockerCli.composeClient()
    if err != nil {
        return err
    }
    stacks, err := client.Stacks(false)
    if err != nil {
        return err
    }
    stackName := options.Namespace
    _, err = stacks.Get(stackName)
    if apierrs.IsNotFound(err) {
        return fmt.Errorf("nothing found in stack: %s", stackName)
    }
    if err != nil {
        return err
    }
    pods, err := fetchPods(stackName, client.Pods(), filters)
    if err != nil {
        return err
    }
    if len(pods) == 0 {
        return fmt.Errorf("nothing found in stack: %s", stackName)
    }
    return printTasks(dockerCli, options, stackName, client, pods)
}

func printTasks(dockerCli command.Cli, options options.PS, namespace string, client corev1.NodesGetter, pods []apiv1.Pod) error {
    format := options.Format
    if format == "" {
        format = task.DefaultFormat(dockerCli.ConfigFile(), options.Quiet)
    }

    tasks := make([]swarm.Task, len(pods))
    for i, pod := range pods {
        tasks[i] = podToTask(pod)
    }
    sort.Stable(tasksBySlot(tasks))

    names := map[string]string{}
    nodes := map[string]string{}

    n, err := listNodes(client, options.NoResolve)
    if err != nil {
        return err
    }
    for i, task := range tasks {
        nodeValue, err := resolveNode(pods[i].Spec.NodeName, n, options.NoResolve)
        if err != nil {
            return err
        }
        names[task.ID] = fmt.Sprintf("%s_%s", namespace, pods[i].Name)
        nodes[task.ID] = nodeValue
    }

    tasksCtx := formatter.Context{
        Output: dockerCli.Out(),
        Format: task.NewTaskFormat(format, options.Quiet),
        Trunc:  !options.NoTrunc,
    }

    return task.FormatWrite(tasksCtx, tasks, names, nodes)
}

func resolveNode(name string, nodes *apiv1.NodeList, noResolve bool) (string, error) {
    // Here we have a name and we need to resolve its identifier. To mimic swarm behavior
    // we need to resolve to the id when noResolve is set, otherwise we return the name.
    if noResolve {
        for _, node := range nodes.Items {
            if node.Name == name {
                return string(node.UID), nil
            }
        }
        return "", fmt.Errorf("could not find node '%s'", name)
    }
    return name, nil
}

func listNodes(client corev1.NodesGetter, noResolve bool) (*apiv1.NodeList, error) {
    if noResolve {
        return client.Nodes().List(metav1.ListOptions{})
    }
    return nil, nil
}
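resolveNode mirrors swarm semantics: with no-resolve set the node name is mapped to its UID, otherwise the name passes through unchanged. A standalone sketch using a plain node type rather than the k8s NodeList (illustrative only):

package main

import "fmt"

type node struct{ Name, UID string }

func resolveNode(name string, nodes []node, noResolve bool) (string, error) {
    if noResolve {
        for _, n := range nodes {
            if n.Name == name {
                return n.UID, nil // resolve to the id, as swarm does
            }
        }
        return "", fmt.Errorf("could not find node '%s'", name)
    }
    return name, nil // otherwise the name is returned untouched
}

func main() {
    nodes := []node{{Name: "worker-1", UID: "uid-123"}}
    fmt.Println(resolveNode("worker-1", nodes, true))  // uid-123 <nil>
    fmt.Println(resolveNode("worker-1", nodes, false)) // worker-1 <nil>
}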

@@ -1,27 +0,0 @@
package kubernetes

import (
    "fmt"

    "github.com/docker/cli/cli/command/stack/options"
    "github.com/pkg/errors"
)

// RunRemove is the kubernetes implementation of docker stack remove
func RunRemove(dockerCli *KubeCli, opts options.Remove) error {
    composeClient, err := dockerCli.composeClient()
    if err != nil {
        return err
    }
    stacks, err := composeClient.Stacks(false)
    if err != nil {
        return err
    }
    for _, stack := range opts.Namespaces {
        fmt.Fprintf(dockerCli.Out(), "Removing stack: %s\n", stack)
        if err := stacks.Delete(stack); err != nil {
            return errors.Wrapf(err, "Failed to remove stack %s", stack)
        }
    }
    return nil
}

@@ -1,140 +0,0 @@
package kubernetes

import (
    "fmt"
    "strings"

    "github.com/docker/cli/cli/command/stack/options"
    "github.com/docker/compose-on-kubernetes/api/labels"
    "github.com/docker/docker/api/types/filters"
    "github.com/docker/docker/api/types/swarm"
    appsv1beta2 "k8s.io/api/apps/v1beta2"
    corev1 "k8s.io/api/core/v1"
    apierrs "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

var supportedServicesFilters = map[string]bool{
    "mode":  true,
    "name":  true,
    "label": true,
}

func generateSelector(labels map[string][]string) []string {
    var result []string
    for k, v := range labels {
        for _, val := range v {
            result = append(result, fmt.Sprintf("%s=%s", k, val))
        }
        if len(v) == 0 {
            result = append(result, k)
        }
    }
    return result
}

func parseLabelFilters(rawFilters []string) map[string][]string {
    labels := map[string][]string{}
    for _, rawLabel := range rawFilters {
        v := strings.SplitN(rawLabel, "=", 2)
        key := v[0]
        if len(v) > 1 {
            labels[key] = append(labels[key], v[1])
        } else if _, ok := labels[key]; !ok {
            labels[key] = []string{}
        }
    }
    return labels
}

func generateLabelSelector(f filters.Args, stackName string) string {
    selectors := append(generateSelector(parseLabelFilters(f.Get("label"))), labels.SelectorForStack(stackName))
    return strings.Join(selectors, ",")
}
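Together, parseLabelFilters and generateSelector turn `--filter label=...` arguments into a Kubernetes label selector string that is joined with the stack's own selector. A runnable sketch of the same transformation (the stack selector literal is an assumption taken from the tests further below):

package main

import (
    "fmt"
    "strings"
)

func main() {
    raw := []string{"label1=test", "label1=test2", "label-is-present"}
    // parse "k=v" filters into a multimap; bare keys mean "label present"
    labels := map[string][]string{}
    for _, r := range raw {
        v := strings.SplitN(r, "=", 2)
        if len(v) > 1 {
            labels[v[0]] = append(labels[v[0]], v[1])
        } else if _, ok := labels[v[0]]; !ok {
            labels[v[0]] = []string{}
        }
    }
    // render back to selector parts (map iteration order is random)
    var parts []string
    for k, vals := range labels {
        for _, v := range vals {
            parts = append(parts, fmt.Sprintf("%s=%s", k, v))
        }
        if len(vals) == 0 {
            parts = append(parts, k)
        }
    }
    // join with the stack's own selector, as generateLabelSelector does
    parts = append(parts, "com.docker.stack.namespace=test")
    fmt.Println(strings.Join(parts, ","))
}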

func getResourcesForServiceList(dockerCli *KubeCli, filters filters.Args, labelSelector string) (*appsv1beta2.ReplicaSetList, *appsv1beta2.DaemonSetList, *corev1.ServiceList, error) {
    client, err := dockerCli.composeClient()
    if err != nil {
        return nil, nil, nil, err
    }
    modes := filters.Get("mode")
    replicas := &appsv1beta2.ReplicaSetList{}
    if len(modes) == 0 || filters.ExactMatch("mode", "replicated") {
        if replicas, err = client.ReplicaSets().List(metav1.ListOptions{LabelSelector: labelSelector}); err != nil {
            return nil, nil, nil, err
        }
    }
    daemons := &appsv1beta2.DaemonSetList{}
    if len(modes) == 0 || filters.ExactMatch("mode", "global") {
        if daemons, err = client.DaemonSets().List(metav1.ListOptions{LabelSelector: labelSelector}); err != nil {
            return nil, nil, nil, err
        }
    }
    services, err := client.Services().List(metav1.ListOptions{LabelSelector: labelSelector})
    if err != nil {
        return nil, nil, nil, err
    }
    return replicas, daemons, services, nil
}

// GetServices is the kubernetes implementation of listing stack services
func GetServices(dockerCli *KubeCli, opts options.Services) ([]swarm.Service, error) {
    filters := opts.Filter.Value()
    if err := filters.Validate(supportedServicesFilters); err != nil {
        return nil, err
    }
    client, err := dockerCli.composeClient()
    if err != nil {
        return nil, err
    }
    stacks, err := client.Stacks(false)
    if err != nil {
        return nil, err
    }
    stackName := opts.Namespace
    _, err = stacks.Get(stackName)
    if apierrs.IsNotFound(err) {
        return []swarm.Service{}, nil
    }
    if err != nil {
        return nil, err
    }

    labelSelector := generateLabelSelector(filters, stackName)
    replicasList, daemonsList, servicesList, err := getResourcesForServiceList(dockerCli, filters, labelSelector)
    if err != nil {
        return nil, err
    }

    // Convert replica sets, daemon sets and kubernetes services to swarm services and formatter information
    services, err := convertToServices(replicasList, daemonsList, servicesList)
    if err != nil {
        return nil, err
    }
    services = filterServicesByName(services, filters.Get("name"), stackName)

    return services, nil
}

func filterServicesByName(services []swarm.Service, names []string, stackName string) []swarm.Service {
    if len(names) == 0 {
        return services
    }
    prefix := stackName + "_"
    // Accept unprefixed service names (for compatibility with existing swarm scripts where service names are prefixed by stack names)
    for i, n := range names {
        if !strings.HasPrefix(n, prefix) {
            names[i] = stackName + "_" + n
        }
    }
    // Filter services
    result := []swarm.Service{}
    for _, s := range services {
        for _, n := range names {
            if strings.HasPrefix(s.Spec.Name, n) {
                result = append(result, s)
            }
        }
    }
    return result
}

@@ -1,138 +0,0 @@
package kubernetes

import (
    "testing"

    "github.com/docker/docker/api/types/filters"
    "github.com/docker/docker/api/types/swarm"
    "gotest.tools/v3/assert"
    "gotest.tools/v3/assert/cmp"
)

func TestServiceFiltersLabelSelectorGen(t *testing.T) {
    cases := []struct {
        name                  string
        stackName             string
        filters               filters.Args
        expectedSelectorParts []string
    }{
        {
            name:      "no-filter",
            stackName: "test",
            filters:   filters.NewArgs(),
            expectedSelectorParts: []string{
                "com.docker.stack.namespace=test",
            },
        },
        {
            name:      "label present filter",
            stackName: "test",
            filters: filters.NewArgs(
                filters.KeyValuePair{Key: "label", Value: "label-is-present"},
            ),
            expectedSelectorParts: []string{
                "com.docker.stack.namespace=test",
                "label-is-present",
            },
        },
        {
            name:      "single value label filter",
            stackName: "test",
            filters: filters.NewArgs(
                filters.KeyValuePair{Key: "label", Value: "label1=test"},
            ),
            expectedSelectorParts: []string{
                "com.docker.stack.namespace=test",
                "label1=test",
            },
        },
        {
            name:      "multi value label filter",
            stackName: "test",
            filters: filters.NewArgs(
                filters.KeyValuePair{Key: "label", Value: "label1=test"},
                filters.KeyValuePair{Key: "label", Value: "label1=test2"},
            ),
            expectedSelectorParts: []string{
                "com.docker.stack.namespace=test",
                "label1=test",
                "label1=test2",
            },
        },
        {
            name:      "2 different labels filter",
            stackName: "test",
            filters: filters.NewArgs(
                filters.KeyValuePair{Key: "label", Value: "label1=test"},
                filters.KeyValuePair{Key: "label", Value: "label2=test2"},
            ),
            expectedSelectorParts: []string{
                "com.docker.stack.namespace=test",
                "label1=test",
                "label2=test2",
            },
        },
    }

    for _, c := range cases {
        t.Run(c.name, func(t *testing.T) {
            result := generateLabelSelector(c.filters, c.stackName)
            for _, toFind := range c.expectedSelectorParts {
                assert.Assert(t, cmp.Contains(result, toFind))
            }
        })
    }
}

func TestServiceFiltersServiceByName(t *testing.T) {
    cases := []struct {
        name             string
        filters          []string
        services         []swarm.Service
        expectedServices []swarm.Service
    }{
        {
            name:             "no filter",
            filters:          []string{},
            services:         makeServices("s1", "s2"),
            expectedServices: makeServices("s1", "s2"),
        },
        {
            name:             "single-name filter",
            filters:          []string{"s1"},
            services:         makeServices("s1", "s2"),
            expectedServices: makeServices("s1"),
        },
        {
            name:             "filter by prefix",
            filters:          []string{"prefix"},
            services:         makeServices("prefix-s1", "prefix-s2", "s2"),
            expectedServices: makeServices("prefix-s1", "prefix-s2"),
        },
        {
            name:             "multi-name filter",
            filters:          []string{"s1", "s2"},
            services:         makeServices("s1", "s2", "s3"),
            expectedServices: makeServices("s1", "s2"),
        },
        {
            name:             "stack name prefix is valid",
            filters:          []string{"stack_s1"},
            services:         makeServices("s1", "s11", "s2"),
            expectedServices: makeServices("s1", "s11"),
        },
    }
    for _, c := range cases {
        t.Run(c.name, func(t *testing.T) {
            result := filterServicesByName(c.services, c.filters, "stack")
            assert.DeepEqual(t, c.expectedServices, result)
        })
    }
}

func makeServices(names ...string) []swarm.Service {
    result := make([]swarm.Service, len(names))
    for i, n := range names {
        result[i] = swarm.Service{Spec: swarm.ServiceSpec{Annotations: swarm.Annotations{Name: "stack_" + n}}}
    }
    return result
}

@@ -1,161 +0,0 @@
package kubernetes

import (
    "io/ioutil"
    "path/filepath"
    "sort"

    latest "github.com/docker/compose-on-kubernetes/api/compose/v1alpha3"
    "github.com/docker/compose-on-kubernetes/api/labels"
    apiv1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
)

// Stack is the main type used by stack commands, so that they remain independent of the kubernetes compose component version.
type Stack struct {
    Name        string
    Namespace   string
    ComposeFile string
    Spec        *latest.StackSpec
}

type childResource interface {
    setOwner(metav1.OwnerReference) error
    delete() // does not report an error: if a deletion fails, we still want to continue deleting the other child resources
}

func deleteChildResources(childResources []childResource) {
    for _, cr := range childResources {
        cr.delete()
    }
}

func setChildResourcesOwner(childResources []childResource, owner metav1.OwnerReference) error {
    for _, cr := range childResources {
        if err := cr.setOwner(owner); err != nil {
            return err
        }
    }
    return nil
}

// getServices returns all the stack service names, sorted lexicographically
func (s *Stack) getServices() []string {
    services := make([]string, len(s.Spec.Services))
    for i, service := range s.Spec.Services {
        services[i] = service.Name
    }
    sort.Strings(services)
    return services
}

// createFileBasedConfigMaps creates a Kubernetes ConfigMap for each Compose global file-based config.
func (s *Stack) createFileBasedConfigMaps(configMaps corev1.ConfigMapInterface) ([]childResource, error) {
    var resources []childResource
    for name, config := range s.Spec.Configs {
        if config.File == "" {
            continue
        }

        fileName := filepath.Base(config.File)
        content, err := ioutil.ReadFile(config.File)
        if err != nil {
            return resources, err
        }

        configMap, err := configMaps.Create(toConfigMap(s.Name, name, fileName, content))
        if err != nil {
            return resources, err
        }
        resources = append(resources, &configMapChildResource{client: configMaps, configMap: configMap})
    }
    return resources, nil
}

type configMapChildResource struct {
    client    corev1.ConfigMapInterface
    configMap *apiv1.ConfigMap
}

func (r *configMapChildResource) setOwner(ref metav1.OwnerReference) error {
    r.configMap.OwnerReferences = append(r.configMap.OwnerReferences, ref)
    _, err := r.client.Update(r.configMap)
    return err
}

func (r *configMapChildResource) delete() {
    r.client.Delete(r.configMap.Name, nil)
}

// toConfigMap converts a Compose Config to a Kube ConfigMap.
func toConfigMap(stackName, name, key string, content []byte) *apiv1.ConfigMap {
    return &apiv1.ConfigMap{
        TypeMeta: metav1.TypeMeta{
            Kind:       "ConfigMap",
            APIVersion: "v1",
        },
        ObjectMeta: metav1.ObjectMeta{
            Name: name,
            Labels: map[string]string{
                labels.ForStackName: stackName,
            },
        },
        Data: map[string]string{
            key: string(content),
        },
    }
}

// createFileBasedSecrets creates a Kubernetes Secret for each Compose global file-based secret.
func (s *Stack) createFileBasedSecrets(secrets corev1.SecretInterface) ([]childResource, error) {
    var resources []childResource
    for name, secret := range s.Spec.Secrets {
        if secret.File == "" {
            continue
        }

        fileName := filepath.Base(secret.File)
        content, err := ioutil.ReadFile(secret.File)
        if err != nil {
            return resources, err
        }

        secret, err := secrets.Create(toSecret(s.Name, name, fileName, content))
        if err != nil {
            return resources, err
        }
        resources = append(resources, &secretChildResource{client: secrets, secret: secret})
    }
    return resources, nil
}

type secretChildResource struct {
    client corev1.SecretInterface
    secret *apiv1.Secret
}

func (r *secretChildResource) setOwner(ref metav1.OwnerReference) error {
    r.secret.OwnerReferences = append(r.secret.OwnerReferences, ref)
    _, err := r.client.Update(r.secret)
    return err
}

func (r *secretChildResource) delete() {
    r.client.Delete(r.secret.Name, nil)
}

// toSecret converts a Compose Secret to a Kube Secret.
func toSecret(stackName, name, key string, content []byte) *apiv1.Secret {
    return &apiv1.Secret{
        ObjectMeta: metav1.ObjectMeta{
            Name: name,
            Labels: map[string]string{
                labels.ForStackName: stackName,
            },
        },
        Data: map[string][]byte{
            key: content,
        },
    }
}
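The file-based config and secret conversions above reduce to: the base file name becomes the data key, the file content the value, and the stack name a label. The same shape with plain maps instead of Kubernetes types (purely illustrative):

package main

import (
    "fmt"
    "path/filepath"
)

func main() {
    stackName, name, file := "test", "app-secret", "testdata/secret"
    content := []byte("this is a secret") // stands in for ioutil.ReadFile(file)
    secret := map[string]interface{}{
        "name":   name,
        "labels": map[string]string{"com.docker.stack.namespace": stackName},
        "data":   map[string][]byte{filepath.Base(file): content},
    }
    fmt.Printf("%+v\n", secret)
}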

@@ -1,274 +0,0 @@
package kubernetes

import (
    "fmt"

    composev1alpha3 "github.com/docker/compose-on-kubernetes/api/client/clientset/typed/compose/v1alpha3"
    composev1beta1 "github.com/docker/compose-on-kubernetes/api/client/clientset/typed/compose/v1beta1"
    composev1beta2 "github.com/docker/compose-on-kubernetes/api/client/clientset/typed/compose/v1beta2"
    "github.com/docker/compose-on-kubernetes/api/compose/v1alpha3"
    "github.com/docker/compose-on-kubernetes/api/compose/v1beta1"
    "github.com/docker/compose-on-kubernetes/api/compose/v1beta2"
    "github.com/docker/compose-on-kubernetes/api/labels"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
    "k8s.io/client-go/rest"
)

// StackClient talks to a kubernetes compose component.
type StackClient interface {
    StackConverter
    CreateOrUpdate(s Stack, childResources []childResource) error
    Delete(name string) error
    Get(name string) (Stack, error)
    List(opts metav1.ListOptions) ([]Stack, error)
    IsColliding(servicesClient corev1.ServiceInterface, s Stack) error
}

// stackV1Beta1 implements the stackClient interface and talks to compose component v1beta1.
type stackV1Beta1 struct {
    stackV1Beta1Converter
    stacks composev1beta1.StackInterface
}

func newStackV1Beta1(config *rest.Config, namespace string) (*stackV1Beta1, error) {
    client, err := composev1beta1.NewForConfig(config)
    if err != nil {
        return nil, err
    }
    return &stackV1Beta1{stacks: client.Stacks(namespace)}, nil
}

func (s *stackV1Beta1) CreateOrUpdate(internalStack Stack, childResources []childResource) error {
    // If it already exists, update the stack
    var (
        stack *v1beta1.Stack
        err   error
    )
    if stack, err = s.stacks.Get(internalStack.Name, metav1.GetOptions{}); err == nil {
        stack.Spec.ComposeFile = internalStack.ComposeFile
        stack, err = s.stacks.Update(stack)
    } else {
        // Or create it
        stack, err = s.stacks.Create(stackToV1beta1(internalStack))
    }
    if err != nil {
        deleteChildResources(childResources)
        return err
    }
    blockOwnerDeletion := true
    isController := true
    return setChildResourcesOwner(childResources, metav1.OwnerReference{
        APIVersion:         v1beta1.SchemeGroupVersion.String(),
        Kind:               "Stack",
        Name:               stack.Name,
        UID:                stack.UID,
        BlockOwnerDeletion: &blockOwnerDeletion,
        Controller:         &isController,
    })
}

func (s *stackV1Beta1) Delete(name string) error {
    return s.stacks.Delete(name, &metav1.DeleteOptions{})
}

func (s *stackV1Beta1) Get(name string) (Stack, error) {
    stackBeta1, err := s.stacks.Get(name, metav1.GetOptions{})
    if err != nil {
        return Stack{}, err
    }
    return stackFromV1beta1(stackBeta1)
}

func (s *stackV1Beta1) List(opts metav1.ListOptions) ([]Stack, error) {
    list, err := s.stacks.List(opts)
    if err != nil {
        return nil, err
    }
    stacks := make([]Stack, len(list.Items))
    for i := range list.Items {
        stack, err := stackFromV1beta1(&list.Items[i])
        if err != nil {
            return nil, err
        }
        stacks[i] = stack
    }
    return stacks, nil
}

// IsColliding verifies whether services defined in the stack collide with already deployed services
func (s *stackV1Beta1) IsColliding(servicesClient corev1.ServiceInterface, st Stack) error {
    for _, srv := range st.getServices() {
        if err := verify(servicesClient, st.Name, srv); err != nil {
            return err
        }
    }
    return nil
}

// verify checks whether the service is already present in kubernetes.
// If we find the service by name but it doesn't have our label, or it has a different value
// than the stack name for the label, we fail (i.e. it will collide)
func verify(services corev1.ServiceInterface, stackName string, service string) error {
    svc, err := services.Get(service, metav1.GetOptions{})
    if err == nil {
        if key, ok := svc.ObjectMeta.Labels[labels.ForStackName]; ok {
            if key != stackName {
                return fmt.Errorf("service %s already present in stack named %s", service, key)
            }
            return nil
        }
        return fmt.Errorf("service %s already present in the cluster", service)
    }
    return nil
}

// stackV1Beta2 implements the stackClient interface and talks to compose component v1beta2.
type stackV1Beta2 struct {
    stackV1Beta2Converter
    stacks composev1beta2.StackInterface
}

func newStackV1Beta2(config *rest.Config, namespace string) (*stackV1Beta2, error) {
    client, err := composev1beta2.NewForConfig(config)
    if err != nil {
        return nil, err
    }
    return &stackV1Beta2{stacks: client.Stacks(namespace)}, nil
}

func (s *stackV1Beta2) CreateOrUpdate(internalStack Stack, childResources []childResource) error {
    var (
        stack *v1beta2.Stack
        err   error
    )
    resolved, err := stackToV1beta2(internalStack)
    if err != nil {
        deleteChildResources(childResources)
        return err
    }
    if stack, err = s.stacks.Get(internalStack.Name, metav1.GetOptions{}); err == nil {
        stack.Spec = resolved.Spec
        stack, err = s.stacks.Update(stack)
    } else {
        // Or create it
        stack, err = s.stacks.Create(resolved)
    }
    if err != nil {
        deleteChildResources(childResources)
        return err
    }
    blockOwnerDeletion := true
    isController := true
    return setChildResourcesOwner(childResources, metav1.OwnerReference{
        APIVersion:         v1beta2.SchemeGroupVersion.String(),
        Kind:               "Stack",
        Name:               stack.Name,
        UID:                stack.UID,
        BlockOwnerDeletion: &blockOwnerDeletion,
        Controller:         &isController,
    })
}

func (s *stackV1Beta2) Delete(name string) error {
    return s.stacks.Delete(name, &metav1.DeleteOptions{})
}

func (s *stackV1Beta2) Get(name string) (Stack, error) {
    stackBeta2, err := s.stacks.Get(name, metav1.GetOptions{})
    if err != nil {
        return Stack{}, err
    }
    return stackFromV1beta2(stackBeta2)
}

func (s *stackV1Beta2) List(opts metav1.ListOptions) ([]Stack, error) {
    list, err := s.stacks.List(opts)
    if err != nil {
        return nil, err
    }
    stacks := make([]Stack, len(list.Items))
    for i := range list.Items {
        if stacks[i], err = stackFromV1beta2(&list.Items[i]); err != nil {
            return nil, err
        }
    }
    return stacks, nil
}

// IsColliding is handled server-side with the compose api v1beta2, so there is nothing to do here
func (s *stackV1Beta2) IsColliding(servicesClient corev1.ServiceInterface, st Stack) error {
    return nil
}

// stackV1Alpha3 implements the stackClient interface and talks to compose component v1alpha3.
type stackV1Alpha3 struct {
    stackV1Alpha3Converter
    stacks composev1alpha3.StackInterface
}

func newStackV1Alpha3(config *rest.Config, namespace string) (*stackV1Alpha3, error) {
    client, err := composev1alpha3.NewForConfig(config)
    if err != nil {
        return nil, err
    }
    return &stackV1Alpha3{stacks: client.Stacks(namespace)}, nil
}

func (s *stackV1Alpha3) CreateOrUpdate(internalStack Stack, childResources []childResource) error {
    var (
        stack *v1alpha3.Stack
        err   error
    )
    resolved := stackToV1alpha3(internalStack)
    if stack, err = s.stacks.Get(internalStack.Name, metav1.GetOptions{}); err == nil {
        stack.Spec = resolved.Spec
        stack, err = s.stacks.Update(stack)
    } else {
        // Or create it
        stack, err = s.stacks.Create(resolved)
    }
    if err != nil {
        deleteChildResources(childResources)
        return err
    }
    blockOwnerDeletion := true
    isController := true
    return setChildResourcesOwner(childResources, metav1.OwnerReference{
        APIVersion:         v1alpha3.SchemeGroupVersion.String(),
        Kind:               "Stack",
        Name:               stack.Name,
        UID:                stack.UID,
        BlockOwnerDeletion: &blockOwnerDeletion,
        Controller:         &isController,
    })
}

func (s *stackV1Alpha3) Delete(name string) error {
    return s.stacks.Delete(name, &metav1.DeleteOptions{})
}

func (s *stackV1Alpha3) Get(name string) (Stack, error) {
    stackAlpha3, err := s.stacks.Get(name, metav1.GetOptions{})
    if err != nil {
        return Stack{}, err
    }
    return stackFromV1alpha3(stackAlpha3), nil
}

func (s *stackV1Alpha3) List(opts metav1.ListOptions) ([]Stack, error) {
    list, err := s.stacks.List(opts)
    if err != nil {
        return nil, err
    }
    stacks := make([]Stack, len(list.Items))
    for i := range list.Items {
        stacks[i] = stackFromV1alpha3(&list.Items[i])
    }
    return stacks, nil
}

// IsColliding is handled server-side with the compose api v1alpha3, so there is nothing to do here
func (s *stackV1Alpha3) IsColliding(servicesClient corev1.ServiceInterface, st Stack) error {
    return nil
}
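All three versioned clients above share one CreateOrUpdate shape: Get first, Update the existing object on success, Create on not-found, and roll back child resources when a write fails. A toy sketch of the get-update-or-create core (the store type is an illustrative stand-in, not a Kubernetes client):

package main

import (
    "errors"
    "fmt"
)

var errNotFound = errors.New("not found")

type store map[string]string

func (s store) get(name string) (string, error) {
    if _, ok := s[name]; !ok {
        return "", errNotFound
    }
    return s[name], nil
}

func createOrUpdate(s store, name, spec string) string {
    if _, err := s.get(name); err == nil {
        s[name] = spec // the object exists: update it in place
        return "updated"
    }
    s[name] = spec // or create it
    return "created"
}

func main() {
    s := store{}
    fmt.Println(createOrUpdate(s, "test", "v1")) // created
    fmt.Println(createOrUpdate(s, "test", "v2")) // updated
}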

@@ -1,60 +0,0 @@
package kubernetes

import (
    "io/ioutil"
    "testing"

    composetypes "github.com/docker/cli/cli/compose/types"
    "gotest.tools/v3/assert"
)

func TestFromCompose(t *testing.T) {
    stackClient := &stackV1Beta1{}
    s, err := stackClient.FromCompose(ioutil.Discard, "foo", &composetypes.Config{
        Version:  "3.1",
        Filename: "banana",
        Services: []composetypes.ServiceConfig{
            {
                Name:  "foo",
                Image: "foo",
            },
            {
                Name:  "bar",
                Image: "bar",
            },
        },
    })
    assert.NilError(t, err)
    assert.Equal(t, "foo", s.Name)
    assert.Equal(t, string(`version: "3.5"
services:
  bar:
    image: bar
  foo:
    image: foo
`), s.ComposeFile)
}

func TestFromComposeUnsupportedVersion(t *testing.T) {
    stackClient := &stackV1Beta1{}
    _, err := stackClient.FromCompose(ioutil.Discard, "foo", &composetypes.Config{
        Version:  "3.6",
        Filename: "banana",
        Services: []composetypes.ServiceConfig{
            {
                Name:  "foo",
                Image: "foo",
                Volumes: []composetypes.ServiceVolumeConfig{
                    {
                        Type:   "tmpfs",
                        Target: "/app",
                        Tmpfs: &composetypes.ServiceVolumeTmpfs{
                            Size: 10000,
                        },
                    },
                },
            },
        },
    })
    assert.ErrorContains(t, err, "the compose yaml file is invalid with v3.5: services.foo.volumes.0 Additional property tmpfs is not allowed")
}

@@ -1,9 +0,0 @@
version: "3.7"
services:
  test:
    image: "some-image"
    expose:
      - "1" # default protocol, single port
      - "2-4" # default protocol, port range
      - "5/udp" # specific protocol, single port
      - "6-8/udp" # specific protocol, port range

@@ -1,6 +0,0 @@
version: "3.7"
services:
  test:
    image: "some-image"
    x-kubernetes:
      pull_policy: "Never"

@@ -1,6 +0,0 @@
version: "3.7"
services:
  test:
    image: "some-private-image"
    x-kubernetes:
      pull_secret: "some-secret"

@@ -1 +0,0 @@
this is a config

@@ -1 +0,0 @@
this is a secret

@@ -1,31 +0,0 @@
top-level network "global" is ignored
service "front": network "private" is ignored
service "front": update_config.delay is not supported
service "front": update_config.failure_action is not supported
service "front": update_config.monitor is not supported
service "front": update_config.max_failure_ratio is not supported
service "front": restart_policy.delay is ignored
service "front": restart_policy.max_attempts is ignored
service "front": restart_policy.window is ignored
service "front": container_name is deprecated
service "front": expose is deprecated
service "front": build is ignored
service "front": cgroup_parent is ignored
service "front": devices are ignored
service "front": domainname is ignored
service "front": external_links are ignored
service "front": links are ignored
service "front": mac_address is ignored
service "front": network_mode is ignored
service "front": restart is ignored
service "front": security_opt are ignored
service "front": ulimits are ignored
service "front": depends_on are ignored
service "front": credential_spec is ignored
service "front": dns are ignored
service "front": dns_search are ignored
service "front": env_file are ignored
service "front": stop_signal is ignored
service "front": logging is ignored
service "front": volume.propagation is ignored
service "front": volume.nocopy is ignored
|
|
@@ -1,145 +0,0 @@
package kubernetes

import (
    "fmt"
    "io"

    composetypes "github.com/docker/cli/cli/compose/types"
)

func warnUnsupportedFeatures(stderr io.Writer, cfg *composetypes.Config) {
    warnForGlobalNetworks(stderr, cfg)
    for _, s := range cfg.Services {
        warnForServiceNetworks(stderr, s)
        warnForUnsupportedDeploymentStrategy(stderr, s)
        warnForUnsupportedRestartPolicy(stderr, s)
        warnForDeprecatedProperties(stderr, s)
        warnForUnsupportedProperties(stderr, s)
    }
}

func warnForGlobalNetworks(stderr io.Writer, config *composetypes.Config) {
    for network := range config.Networks {
        fmt.Fprintf(stderr, "top-level network %q is ignored\n", network)
    }
}

func warnServicef(stderr io.Writer, service, format string, args ...interface{}) {
    fmt.Fprintf(stderr, "service \"%s\": %s\n", service, fmt.Sprintf(format, args...))
}

func warnForServiceNetworks(stderr io.Writer, s composetypes.ServiceConfig) {
    for network := range s.Networks {
        warnServicef(stderr, s.Name, "network %q is ignored", network)
    }
}

func warnForDeprecatedProperties(stderr io.Writer, s composetypes.ServiceConfig) {
    if s.ContainerName != "" {
        warnServicef(stderr, s.Name, "container_name is deprecated")
    }
    if len(s.Expose) > 0 {
        warnServicef(stderr, s.Name, "expose is deprecated")
    }
}

func warnForUnsupportedDeploymentStrategy(stderr io.Writer, s composetypes.ServiceConfig) {
    config := s.Deploy.UpdateConfig
    if config == nil {
        return
    }
    if config.Delay != 0 {
        warnServicef(stderr, s.Name, "update_config.delay is not supported")
    }
    if config.FailureAction != "" {
        warnServicef(stderr, s.Name, "update_config.failure_action is not supported")
    }
    if config.Monitor != 0 {
        warnServicef(stderr, s.Name, "update_config.monitor is not supported")
    }
    if config.MaxFailureRatio != 0 {
        warnServicef(stderr, s.Name, "update_config.max_failure_ratio is not supported")
    }
}

func warnForUnsupportedRestartPolicy(stderr io.Writer, s composetypes.ServiceConfig) {
    policy := s.Deploy.RestartPolicy
    if policy == nil {
        return
    }

    if policy.Delay != nil {
        warnServicef(stderr, s.Name, "restart_policy.delay is ignored")
    }
    if policy.MaxAttempts != nil {
        warnServicef(stderr, s.Name, "restart_policy.max_attempts is ignored")
    }
    if policy.Window != nil {
        warnServicef(stderr, s.Name, "restart_policy.window is ignored")
    }
}

func warnForUnsupportedProperties(stderr io.Writer, s composetypes.ServiceConfig) { // nolint: gocyclo
    if build := s.Build; build.Context != "" || build.Dockerfile != "" || len(build.Args) > 0 || len(build.Labels) > 0 || len(build.CacheFrom) > 0 || build.Network != "" || build.Target != "" {
        warnServicef(stderr, s.Name, "build is ignored")
    }
    if s.CgroupParent != "" {
        warnServicef(stderr, s.Name, "cgroup_parent is ignored")
    }
    if len(s.Devices) > 0 {
        warnServicef(stderr, s.Name, "devices are ignored")
    }
    if s.DomainName != "" {
        warnServicef(stderr, s.Name, "domainname is ignored")
    }
    if len(s.ExternalLinks) > 0 {
        warnServicef(stderr, s.Name, "external_links are ignored")
    }
    if len(s.Links) > 0 {
        warnServicef(stderr, s.Name, "links are ignored")
    }
    if s.MacAddress != "" {
        warnServicef(stderr, s.Name, "mac_address is ignored")
    }
    if s.NetworkMode != "" {
        warnServicef(stderr, s.Name, "network_mode is ignored")
    }
    if s.Restart != "" {
        warnServicef(stderr, s.Name, "restart is ignored")
    }
    if len(s.SecurityOpt) > 0 {
        warnServicef(stderr, s.Name, "security_opt are ignored")
    }
    if len(s.Ulimits) > 0 {
        warnServicef(stderr, s.Name, "ulimits are ignored")
    }
    if len(s.DependsOn) > 0 {
        warnServicef(stderr, s.Name, "depends_on are ignored")
    }
    if s.CredentialSpec.File != "" {
        warnServicef(stderr, s.Name, "credential_spec is ignored")
    }
    if len(s.DNS) > 0 {
        warnServicef(stderr, s.Name, "dns are ignored")
    }
    if len(s.DNSSearch) > 0 {
        warnServicef(stderr, s.Name, "dns_search are ignored")
    }
    if len(s.EnvFile) > 0 {
        warnServicef(stderr, s.Name, "env_file are ignored")
    }
    if s.StopSignal != "" {
        warnServicef(stderr, s.Name, "stop_signal is ignored")
    }
    if s.Logging != nil {
        warnServicef(stderr, s.Name, "logging is ignored")
    }
    for _, m := range s.Volumes {
        if m.Volume != nil && m.Volume.NoCopy {
            warnServicef(stderr, s.Name, "volume.nocopy is ignored")
        }
        if m.Bind != nil && m.Bind.Propagation != "" {
            warnServicef(stderr, s.Name, "volume.propagation is ignored")
        }
    }
}

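For a sense of how this removed entry point behaved, here is a minimal, hypothetical driver (os.Stderr and the service values are illustration only; composetypes is the import alias used above):

cfg := &composetypes.Config{
    Services: []composetypes.ServiceConfig{
        {Name: "front", Restart: "always", Expose: []string{"80"}},
    },
}
warnUnsupportedFeatures(os.Stderr, cfg)
// output, given the checks above:
//   service "front": expose is deprecated
//   service "front": restart is ignored
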
@@ -1,78 +0,0 @@
package kubernetes

import (
    "bytes"
    "testing"
    "time"

    composetypes "github.com/docker/cli/cli/compose/types"
    "gotest.tools/v3/golden"
)

func TestWarnings(t *testing.T) {
    duration := composetypes.Duration(5 * time.Second)
    attempts := uint64(3)
    config := &composetypes.Config{
        Version: "3.4",
        Services: []composetypes.ServiceConfig{
            {
                Name: "front",
                Build: composetypes.BuildConfig{
                    Context: "ignored",
                },
                ContainerName:  "ignored",
                CgroupParent:   "ignored",
                CredentialSpec: composetypes.CredentialSpecConfig{File: "ignored"},
                DependsOn:      []string{"ignored"},
                Deploy: composetypes.DeployConfig{
                    UpdateConfig: &composetypes.UpdateConfig{
                        Delay:           composetypes.Duration(5 * time.Second),
                        FailureAction:   "rollback",
                        Monitor:         composetypes.Duration(10 * time.Second),
                        MaxFailureRatio: 0.5,
                    },
                    RestartPolicy: &composetypes.RestartPolicy{
                        Delay:       &duration,
                        MaxAttempts: &attempts,
                        Window:      &duration,
                    },
                },
                Devices:       []string{"ignored"},
                DNSSearch:     []string{"ignored"},
                DNS:           []string{"ignored"},
                DomainName:    "ignored",
                EnvFile:       []string{"ignored"},
                Expose:        []string{"80"},
                ExternalLinks: []string{"ignored"},
                Image:         "dockerdemos/front",
                Links:         []string{"ignored"},
                Logging:       &composetypes.LoggingConfig{Driver: "syslog"},
                MacAddress:    "ignored",
                Networks:      map[string]*composetypes.ServiceNetworkConfig{"private": {}},
                NetworkMode:   "ignored",
                Restart:       "ignored",
                SecurityOpt:   []string{"ignored"},
                StopSignal:    "ignored",
                Ulimits:       map[string]*composetypes.UlimitsConfig{"nproc": {Hard: 65535}},
                User:          "ignored",
                Volumes: []composetypes.ServiceVolumeConfig{
                    {
                        Type: "bind",
                        Bind: &composetypes.ServiceVolumeBind{Propagation: "ignored"},
                    },
                    {
                        Type:   "volume",
                        Volume: &composetypes.ServiceVolumeVolume{NoCopy: true},
                    },
                },
            },
        },
        Networks: map[string]composetypes.NetworkConfig{
            "global": {},
        },
    }
    var buf bytes.Buffer
    warnUnsupportedFeatures(&buf, config)
    warnings := buf.String()
    golden.Assert(t, warnings, "warnings.golden")
}

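For readers unfamiliar with the gotest.tools golden pattern used here: golden.Assert resolves its file argument under the package's testdata/ directory, so the final assertion is roughly equivalent to this sketch (not the library's actual implementation):

want, err := ioutil.ReadFile("testdata/warnings.golden")
assert.NilError(t, err)
assert.Equal(t, string(want), warnings)
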
@@ -1,262 +0,0 @@
package kubernetes

import (
    "context"
    "sync"
    "time"

    apiv1beta1 "github.com/docker/compose-on-kubernetes/api/compose/v1beta1"
    "github.com/docker/compose-on-kubernetes/api/labels"
    "github.com/pkg/errors"
    apiv1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/fields"
    "k8s.io/apimachinery/pkg/runtime"
    runtimeutil "k8s.io/apimachinery/pkg/util/runtime"
    "k8s.io/apimachinery/pkg/watch"
    cache "k8s.io/client-go/tools/cache"
)

type stackListWatch interface {
    List(opts metav1.ListOptions) (*apiv1beta1.StackList, error)
    Watch(opts metav1.ListOptions) (watch.Interface, error)
}

type podListWatch interface {
    List(opts metav1.ListOptions) (*apiv1.PodList, error)
    Watch(opts metav1.ListOptions) (watch.Interface, error)
}

// DeployWatcher watches a stack deployment
type deployWatcher struct {
    pods   podListWatch
    stacks stackListWatch
}

// Watch watches a stack deployment and returns a chan that will hold the state of the stack
func (w *deployWatcher) Watch(name string, serviceNames []string, statusUpdates chan serviceStatus) error {
    errC := make(chan error, 1)
    defer close(errC)

    handlers := runtimeutil.ErrorHandlers

    // informer errors are reported using global error handlers
    runtimeutil.ErrorHandlers = append(handlers, func(err error) {
        errC <- err
    })
    defer func() {
        runtimeutil.ErrorHandlers = handlers
    }()

    ctx, cancel := context.WithCancel(context.Background())
    wg := sync.WaitGroup{}
    defer func() {
        cancel()
        wg.Wait()
    }()
    wg.Add(2)
    go func() {
        defer wg.Done()
        w.watchStackStatus(ctx, name, errC)
    }()
    go func() {
        defer wg.Done()
        w.waitForPods(ctx, name, serviceNames, errC, statusUpdates)
    }()

    return <-errC
}

type stackWatcher struct {
    resultChan chan error
    stackName  string
}

var _ cache.ResourceEventHandler = &stackWatcher{}

func (sw *stackWatcher) OnAdd(obj interface{}) {
    stack, ok := obj.(*apiv1beta1.Stack)
    switch {
    case !ok:
        sw.resultChan <- errors.Errorf("stack %s has incorrect type", sw.stackName)
    case stack.Status.Phase == apiv1beta1.StackFailure:
        sw.resultChan <- errors.Errorf("stack %s failed with status %s: %s", sw.stackName, stack.Status.Phase, stack.Status.Message)
    }
}

func (sw *stackWatcher) OnUpdate(oldObj, newObj interface{}) {
    sw.OnAdd(newObj)
}

func (sw *stackWatcher) OnDelete(obj interface{}) {
}

func (w *deployWatcher) watchStackStatus(ctx context.Context, stackname string, e chan error) {
    informer := newStackInformer(w.stacks, stackname)
    sw := &stackWatcher{
        resultChan: e,
    }
    informer.AddEventHandler(sw)
    informer.Run(ctx.Done())
}

type serviceStatus struct {
    name          string
    podsPending   int
    podsRunning   int
    podsSucceeded int
    podsFailed    int
    podsUnknown   int
    podsReady     int
    podsTotal     int
}

type podWatcher struct {
    stackName     string
    services      map[string]serviceStatus
    resultChan    chan error
    starts        map[string]int32
    indexer       cache.Indexer
    statusUpdates chan serviceStatus
}

var _ cache.ResourceEventHandler = &podWatcher{}

func (pw *podWatcher) handlePod(obj interface{}) {
    pod, ok := obj.(*apiv1.Pod)
    if !ok {
        pw.resultChan <- errors.Errorf("Pod has incorrect type in stack %s", pw.stackName)
        return
    }
    serviceName := pod.Labels[labels.ForServiceName]
    pw.updateServiceStatus(serviceName)
    if pw.allReady() {
        select {
        case pw.resultChan <- nil:
        default:
            // result has already been reported, just don't block
        }
    }
}

func (pw *podWatcher) updateServiceStatus(serviceName string) {
    pods, _ := pw.indexer.ByIndex("byservice", serviceName)
    status := serviceStatus{name: serviceName}
    for _, obj := range pods {
        if pod, ok := obj.(*apiv1.Pod); ok {
            switch pod.Status.Phase {
            case apiv1.PodPending:
                status.podsPending++
            case apiv1.PodRunning:
                status.podsRunning++
            case apiv1.PodSucceeded:
                status.podsSucceeded++
            case apiv1.PodFailed:
                status.podsFailed++
            case apiv1.PodUnknown:
                status.podsUnknown++
            }
            if pw.isPodReady(pod) {
                status.podsReady++
            }
        }
    }
    status.podsTotal = len(pods)
    oldStatus := pw.services[serviceName]
    if oldStatus != status {
        pw.statusUpdates <- status
    }
    pw.services[serviceName] = status
}

func (pw *podWatcher) isPodReady(pod *apiv1.Pod) bool {
    for _, condition := range pod.Status.Conditions {
        if condition.Status == apiv1.ConditionTrue && condition.Type == apiv1.PodReady {
            return true
        }
    }
    return false
}

func (pw *podWatcher) allReady() bool {
    for _, status := range pw.services {
        if status.podsReady == 0 {
            return false
        }
    }
    return true
}

func (pw *podWatcher) OnAdd(obj interface{}) {
    pw.handlePod(obj)
}

func (pw *podWatcher) OnUpdate(oldObj, newObj interface{}) {
    pw.handlePod(newObj)
}

func (pw *podWatcher) OnDelete(obj interface{}) {
    pw.handlePod(obj)
}

func (w *deployWatcher) waitForPods(ctx context.Context, stackName string, serviceNames []string, e chan error, statusUpdates chan serviceStatus) {
    informer := newPodInformer(w.pods, stackName, cache.Indexers{
        "byservice": func(obj interface{}) ([]string, error) {
            pod, ok := obj.(*apiv1.Pod)
            if !ok {
                return nil, errors.Errorf("Pod has incorrect type in stack %s", stackName)
            }
            return []string{pod.Labels[labels.ForServiceName]}, nil
        }})
    services := map[string]serviceStatus{}
    for _, name := range serviceNames {
        services[name] = serviceStatus{name: name}
    }
    pw := &podWatcher{
        stackName:     stackName,
        services:      services,
        resultChan:    e,
        starts:        map[string]int32{},
        indexer:       informer.GetIndexer(),
        statusUpdates: statusUpdates,
    }
    informer.AddEventHandler(pw)
    informer.Run(ctx.Done())
}

func newPodInformer(podsClient podListWatch, stackName string, indexers cache.Indexers) cache.SharedIndexInformer {
    return cache.NewSharedIndexInformer(
        &cache.ListWatch{
            ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
                options.LabelSelector = labels.SelectorForStack(stackName)
                return podsClient.List(options)
            },

            WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
                options.LabelSelector = labels.SelectorForStack(stackName)
                return podsClient.Watch(options)
            },
        },
        &apiv1.Pod{},
        time.Second*5,
        indexers,
    )
}

func newStackInformer(stacksClient stackListWatch, stackName string) cache.SharedInformer {
    return cache.NewSharedInformer(
        &cache.ListWatch{
            ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
                options.FieldSelector = fields.OneTermEqualSelector("metadata.name", stackName).String()
                return stacksClient.List(options)
            },

            WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
                options.FieldSelector = fields.OneTermEqualSelector("metadata.name", stackName).String()
                return stacksClient.Watch(options)
            },
        },
        &apiv1beta1.Stack{},
        time.Second*5,
    )
}

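To make the removed watcher's contract concrete, a rough usage sketch follows; stacksClient and podsClient are assumed implementations of the stackListWatch and podListWatch interfaces above, not names from this codebase:

statusUpdates := make(chan serviceStatus)
go func() {
    for s := range statusUpdates {
        fmt.Printf("%s: %d/%d pods ready\n", s.name, s.podsReady, s.podsTotal)
    }
}()
w := &deployWatcher{stacks: stacksClient, pods: podsClient}
// Blocks until every service reports at least one ready pod, or until a
// stack failure / informer error is delivered on the internal error channel.
err := w.Watch("mystack", []string{"front", "back"}, statusUpdates)
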
@@ -1,220 +0,0 @@
package kubernetes

import (
    "testing"

    apiv1beta1 "github.com/docker/compose-on-kubernetes/api/compose/v1beta1"
    composelabels "github.com/docker/compose-on-kubernetes/api/labels"
    "gotest.tools/v3/assert"
    apiv1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apimachinery/pkg/runtime/serializer"
    "k8s.io/apimachinery/pkg/watch"
    k8stesting "k8s.io/client-go/testing"
)

var podsResource = apiv1.SchemeGroupVersion.WithResource("pods")
var podKind = apiv1.SchemeGroupVersion.WithKind("Pod")
var stacksResource = apiv1beta1.SchemeGroupVersion.WithResource("stacks")
var stackKind = apiv1beta1.SchemeGroupVersion.WithKind("Stack")

type testPodAndStackRepository struct {
    fake *k8stesting.Fake
}

func (r *testPodAndStackRepository) stackListWatchForNamespace(ns string) *testStackListWatch {
    return &testStackListWatch{fake: r.fake, ns: ns}
}
func (r *testPodAndStackRepository) podListWatchForNamespace(ns string) *testPodListWatch {
    return &testPodListWatch{fake: r.fake, ns: ns}
}

func newTestPodAndStackRepository(initialPods []apiv1.Pod, initialStacks []apiv1beta1.Stack, podWatchHandler, stackWatchHandler k8stesting.WatchReactionFunc) *testPodAndStackRepository {
    var scheme = runtime.NewScheme()
    var codecs = serializer.NewCodecFactory(scheme)
    metav1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
    apiv1.AddToScheme(scheme)
    apiv1beta1.AddToScheme(scheme)

    o := k8stesting.NewObjectTracker(scheme, codecs.UniversalDecoder())
    for _, obj := range initialPods {
        obj := obj
        if err := o.Add(&obj); err != nil {
            panic(err)
        }
    }
    for _, obj := range initialStacks {
        obj := obj
        if err := o.Add(&obj); err != nil {
            panic(err)
        }
    }
    fakePtr := &k8stesting.Fake{}
    fakePtr.AddReactor("*", "*", k8stesting.ObjectReaction(o))
    if podWatchHandler != nil {
        fakePtr.AddWatchReactor(podsResource.Resource, podWatchHandler)
    }
    if stackWatchHandler != nil {
        fakePtr.AddWatchReactor(stacksResource.Resource, stackWatchHandler)
    }
    fakePtr.AddWatchReactor("*", k8stesting.DefaultWatchReactor(watch.NewFake(), nil))
    return &testPodAndStackRepository{fake: fakePtr}
}

type testStackListWatch struct {
    fake *k8stesting.Fake
    ns   string
}

func (s *testStackListWatch) List(opts metav1.ListOptions) (*apiv1beta1.StackList, error) {
    obj, err := s.fake.Invokes(k8stesting.NewListAction(stacksResource, stackKind, s.ns, opts), &apiv1beta1.StackList{})

    if obj == nil {
        return nil, err
    }

    label, _, _ := k8stesting.ExtractFromListOptions(opts)
    if label == nil {
        label = labels.Everything()
    }
    list := &apiv1beta1.StackList{}
    for _, item := range obj.(*apiv1beta1.StackList).Items {
        if label.Matches(labels.Set(item.Labels)) {
            list.Items = append(list.Items, item)
        }
    }
    return list, err
}
func (s *testStackListWatch) Watch(opts metav1.ListOptions) (watch.Interface, error) {
    return s.fake.InvokesWatch(k8stesting.NewWatchAction(stacksResource, s.ns, opts))
}

type testPodListWatch struct {
    fake *k8stesting.Fake
    ns   string
}

func (p *testPodListWatch) List(opts metav1.ListOptions) (*apiv1.PodList, error) {
    obj, err := p.fake.Invokes(k8stesting.NewListAction(podsResource, podKind, p.ns, opts), &apiv1.PodList{})

    if obj == nil {
        return nil, err
    }

    label, _, _ := k8stesting.ExtractFromListOptions(opts)
    if label == nil {
        label = labels.Everything()
    }
    list := &apiv1.PodList{}
    for _, item := range obj.(*apiv1.PodList).Items {
        if label.Matches(labels.Set(item.Labels)) {
            list.Items = append(list.Items, item)
        }
    }
    return list, err

}
func (p *testPodListWatch) Watch(opts metav1.ListOptions) (watch.Interface, error) {
    return p.fake.InvokesWatch(k8stesting.NewWatchAction(podsResource, p.ns, opts))
}

func TestDeployWatchOk(t *testing.T) {
    stack := apiv1beta1.Stack{
        ObjectMeta: metav1.ObjectMeta{Name: "test-stack", Namespace: "test-ns"},
    }

    serviceNames := []string{"svc1", "svc2"}
    testRepo := newTestPodAndStackRepository(nil, []apiv1beta1.Stack{stack}, func(action k8stesting.Action) (handled bool, ret watch.Interface, err error) {
        res := watch.NewFake()
        go func() {
            pod1 := &apiv1.Pod{
                ObjectMeta: metav1.ObjectMeta{
                    Name:      "test1",
                    Namespace: "test-ns",
                    Labels:    composelabels.ForService("test-stack", "svc1"),
                },
                Status: apiv1.PodStatus{
                    Phase: apiv1.PodRunning,
                    Conditions: []apiv1.PodCondition{
                        {
                            Type:   apiv1.PodReady,
                            Status: apiv1.ConditionTrue,
                        },
                    },
                },
            }
            pod2 := &apiv1.Pod{
                ObjectMeta: metav1.ObjectMeta{
                    Name:      "test2",
                    Namespace: "test-ns",
                    Labels:    composelabels.ForService("test-stack", "svc2"),
                },
                Status: apiv1.PodStatus{
                    Phase: apiv1.PodRunning,
                    Conditions: []apiv1.PodCondition{
                        {
                            Type:   apiv1.PodReady,
                            Status: apiv1.ConditionTrue,
                        },
                    },
                },
            }
            res.Add(pod1)
            res.Add(pod2)
        }()

        return true, res, nil
    }, nil)

    testee := &deployWatcher{
        stacks: testRepo.stackListWatchForNamespace("test-ns"),
        pods:   testRepo.podListWatchForNamespace("test-ns"),
    }

    statusUpdates := make(chan serviceStatus)
    go func() {
        for range statusUpdates {
        }
    }()
    defer close(statusUpdates)
    err := testee.Watch(stack.Name, serviceNames, statusUpdates)
    assert.NilError(t, err)
}

func TestDeployReconcileFailure(t *testing.T) {
    stack := apiv1beta1.Stack{
        ObjectMeta: metav1.ObjectMeta{Name: "test-stack", Namespace: "test-ns"},
    }

    serviceNames := []string{"svc1", "svc2"}
    testRepo := newTestPodAndStackRepository(nil, []apiv1beta1.Stack{stack}, nil, func(action k8stesting.Action) (handled bool, ret watch.Interface, err error) {
        res := watch.NewFake()
        go func() {
            sfailed := stack
            sfailed.Status = apiv1beta1.StackStatus{
                Phase:   apiv1beta1.StackFailure,
                Message: "test error",
            }
            res.Modify(&sfailed)
        }()

        return true, res, nil
    })

    testee := &deployWatcher{
        stacks: testRepo.stackListWatchForNamespace("test-ns"),
        pods:   testRepo.podListWatchForNamespace("test-ns"),
    }

    statusUpdates := make(chan serviceStatus)
    go func() {
        for range statusUpdates {
        }
    }()
    defer close(statusUpdates)
    err := testee.Watch(stack.Name, serviceNames, statusUpdates)
    assert.ErrorContains(t, err, "Failure: test error")
}

@@ -6,14 +6,13 @@ import (
 	"github.com/docker/cli/cli"
 	"github.com/docker/cli/cli/command"
 	"github.com/docker/cli/cli/command/stack/formatter"
-	"github.com/docker/cli/cli/command/stack/kubernetes"
 	"github.com/docker/cli/cli/command/stack/options"
 	"github.com/docker/cli/cli/command/stack/swarm"
 	"github.com/fvbommel/sortorder"
 	"github.com/spf13/cobra"
 )
 
-func newListCommand(dockerCli command.Cli, common *commonOptions) *cobra.Command {
+func newListCommand(dockerCli command.Cli) *cobra.Command {
 	opts := options.List{}
 
 	cmd := &cobra.Command{

@@ -22,52 +21,30 @@ func newListCommand(dockerCli command.Cli, common *commonOptions) *cobra.Command
 		Short: "List stacks",
 		Args:  cli.NoArgs,
 		RunE: func(cmd *cobra.Command, args []string) error {
-			return RunList(cmd, dockerCli, opts, common.orchestrator)
+			return RunList(cmd, dockerCli, opts)
 		},
 	}
 
 	flags := cmd.Flags()
 	flags.StringVar(&opts.Format, "format", "", "Pretty-print stacks using a Go template")
-	flags.StringSliceVar(&opts.Namespaces, "namespace", []string{}, "Kubernetes namespaces to use")
-	flags.SetAnnotation("namespace", "kubernetes", nil)
-	flags.SetAnnotation("namespace", "deprecated", nil)
-	flags.BoolVarP(&opts.AllNamespaces, "all-namespaces", "", false, "List stacks from all Kubernetes namespaces")
-	flags.SetAnnotation("all-namespaces", "kubernetes", nil)
-	flags.SetAnnotation("all-namespaces", "deprecated", nil)
 	return cmd
 }
 
-// RunList performs a stack list against the specified orchestrator
-func RunList(cmd *cobra.Command, dockerCli command.Cli, opts options.List, orchestrator command.Orchestrator) error {
+// RunList performs a stack list against the specified swarm cluster
+func RunList(cmd *cobra.Command, dockerCli command.Cli, opts options.List) error {
 	stacks := []*formatter.Stack{}
-	if orchestrator.HasSwarm() {
-		ss, err := swarm.GetStacks(dockerCli)
-		if err != nil {
-			return err
-		}
-		stacks = append(stacks, ss...)
+	ss, err := swarm.GetStacks(dockerCli)
+	if err != nil {
+		return err
 	}
-	if orchestrator.HasKubernetes() {
-		kubeCli, err := kubernetes.WrapCli(dockerCli, kubernetes.NewOptions(cmd.Flags(), orchestrator))
-		if err != nil {
-			return err
-		}
-		ss, err := kubernetes.GetStacks(kubeCli, opts)
-		if err != nil {
-			return err
-		}
-		stacks = append(stacks, ss...)
-	}
-	return format(dockerCli, opts, orchestrator, stacks)
+	stacks = append(stacks, ss...)
+	return format(dockerCli, opts, stacks)
 }
 
-func format(dockerCli command.Cli, opts options.List, orchestrator command.Orchestrator, stacks []*formatter.Stack) error {
+func format(dockerCli command.Cli, opts options.List, stacks []*formatter.Stack) error {
 	format := formatter.Format(opts.Format)
 	if format == "" || format == formatter.TableFormatKey {
 		format = formatter.SwarmStackTableFormat
-		if orchestrator.HasKubernetes() {
-			format = formatter.KubernetesStackTableFormat
-		}
 	}
 	stackCtx := formatter.Context{
 		Output: dockerCli.Out(),

@@ -75,8 +52,7 @@ func format(dockerCli command.Cli, opts options.List, orchestrator command.Orche
 	}
 	sort.Slice(stacks, func(i, j int) bool {
 		return sortorder.NaturalLess(stacks[i].Name, stacks[j].Name) ||
-			!sortorder.NaturalLess(stacks[j].Name, stacks[i].Name) &&
-				sortorder.NaturalLess(stacks[j].Namespace, stacks[i].Namespace)
+			!sortorder.NaturalLess(stacks[j].Name, stacks[i].Name)
 	})
 	return formatter.StackWrite(stackCtx, stacks)
 }

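The simplified comparator keeps the natural ordering on the stack name alone. A self-contained check of what sortorder.NaturalLess yields, consistent with the stack ls golden files further down, where service-name-10-foo sorts after service-name-2-foo:

package main

import (
    "fmt"
    "sort"

    "github.com/fvbommel/sortorder"
)

func main() {
    names := []string{"service-name-10-foo", "service-name-2-foo", "service-name-1-foo"}
    sort.Slice(names, func(i, j int) bool { return sortorder.NaturalLess(names[i], names[j]) })
    fmt.Println(names) // [service-name-1-foo service-name-2-foo service-name-10-foo]
}
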
@@ -4,7 +4,6 @@ import (
 	"io/ioutil"
 	"testing"
 
-	"github.com/docker/cli/cli/command"
 	"github.com/docker/cli/internal/test"
 	. "github.com/docker/cli/internal/test/builders" // Import builders to get the builder function as package function
 	"github.com/docker/docker/api/types"

@@ -14,10 +13,6 @@ import (
 	"gotest.tools/v3/golden"
 )
 
-var (
-	orchestrator = commonOptions{orchestrator: command.OrchestratorSwarm}
-)
-
 func TestListErrors(t *testing.T) {
 	testCases := []struct {
 		args            []string

@@ -52,7 +47,7 @@ func TestListErrors(t *testing.T) {
 	for _, tc := range testCases {
 		cmd := newListCommand(test.NewFakeCli(&fakeClient{
 			serviceListFunc: tc.serviceListFunc,
-		}), &orchestrator)
+		}))
 		cmd.SetArgs(tc.args)
 		cmd.SetOut(ioutil.Discard)
 		for key, value := range tc.flags {

@@ -118,7 +113,7 @@ func TestStackList(t *testing.T) {
 			return services, nil
 		},
 	})
-	cmd := newListCommand(cli, &orchestrator)
+	cmd := newListCommand(cli)
 	for key, value := range tc.flags {
 		cmd.Flags().Set(key, value)
 	}

@@ -15,7 +15,6 @@ type Deploy struct {
type List struct {
	Format        string
	AllNamespaces bool
	Namespaces    []string
}

// PS holds docker stack ps options

@@ -3,7 +3,6 @@ package stack
 import (
 	"github.com/docker/cli/cli"
 	"github.com/docker/cli/cli/command"
-	"github.com/docker/cli/cli/command/stack/kubernetes"
 	"github.com/docker/cli/cli/command/stack/options"
 	"github.com/docker/cli/cli/command/stack/swarm"
 	cliopts "github.com/docker/cli/opts"

@@ -11,7 +10,7 @@ import (
 	"github.com/spf13/pflag"
 )
 
-func newPsCommand(dockerCli command.Cli, common *commonOptions) *cobra.Command {
+func newPsCommand(dockerCli command.Cli) *cobra.Command {
 	opts := options.PS{Filter: cliopts.NewFilterOpt()}
 
 	cmd := &cobra.Command{

@@ -23,7 +22,7 @@ func newPsCommand(dockerCli command.Cli, common *commonOptions) *cobra.Command {
 			if err := validateStackName(opts.Namespace); err != nil {
 				return err
 			}
-			return RunPs(dockerCli, cmd.Flags(), common.Orchestrator(), opts)
+			return RunPs(dockerCli, cmd.Flags(), opts)
 		},
 	}
 	flags := cmd.Flags()

@@ -32,13 +31,10 @@ func newPsCommand(dockerCli command.Cli, common *commonOptions) *cobra.Command {
 	flags.VarP(&opts.Filter, "filter", "f", "Filter output based on conditions provided")
 	flags.BoolVarP(&opts.Quiet, "quiet", "q", false, "Only display task IDs")
 	flags.StringVar(&opts.Format, "format", "", "Pretty-print tasks using a Go template")
-	kubernetes.AddNamespaceFlag(flags)
 	return cmd
 }
 
-// RunPs performs a stack ps against the specified orchestrator
-func RunPs(dockerCli command.Cli, flags *pflag.FlagSet, commonOrchestrator command.Orchestrator, opts options.PS) error {
-	return runOrchestratedCommand(dockerCli, flags, commonOrchestrator,
-		func() error { return swarm.RunPS(dockerCli, opts) },
-		func(kli *kubernetes.KubeCli) error { return kubernetes.RunPS(kli, opts) })
+// RunPs performs a stack ps against the specified swarm cluster
+func RunPs(dockerCli command.Cli, flags *pflag.FlagSet, opts options.PS) error {
+	return swarm.RunPS(dockerCli, opts)
 }

@@ -43,7 +43,7 @@ func TestStackPsErrors(t *testing.T) {
 	for _, tc := range testCases {
 		cmd := newPsCommand(test.NewFakeCli(&fakeClient{
 			taskListFunc: tc.taskListFunc,
-		}), &orchestrator)
+		}))
 		cmd.SetArgs(tc.args)
 		cmd.SetOut(ioutil.Discard)
 		assert.ErrorContains(t, cmd.Execute(), tc.expectedError)

@@ -164,7 +164,7 @@ func TestStackPs(t *testing.T) {
 	})
 	cli.SetConfigFile(&tc.config)
 
-	cmd := newPsCommand(cli, &orchestrator)
+	cmd := newPsCommand(cli)
 	cmd.SetArgs(tc.args)
 	for key, value := range tc.flags {
 		cmd.Flags().Set(key, value)

@@ -3,14 +3,13 @@ package stack
 import (
 	"github.com/docker/cli/cli"
 	"github.com/docker/cli/cli/command"
-	"github.com/docker/cli/cli/command/stack/kubernetes"
 	"github.com/docker/cli/cli/command/stack/options"
 	"github.com/docker/cli/cli/command/stack/swarm"
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"
 )
 
-func newRemoveCommand(dockerCli command.Cli, common *commonOptions) *cobra.Command {
+func newRemoveCommand(dockerCli command.Cli) *cobra.Command {
 	var opts options.Remove
 
 	cmd := &cobra.Command{

@@ -23,17 +22,13 @@ func newRemoveCommand(dockerCli command.Cli, common *commonOptions) *cobra.Comma
 			if err := validateStackNames(opts.Namespaces); err != nil {
 				return err
 			}
-			return RunRemove(dockerCli, cmd.Flags(), common.Orchestrator(), opts)
+			return RunRemove(dockerCli, cmd.Flags(), opts)
 		},
 	}
 	flags := cmd.Flags()
-	kubernetes.AddNamespaceFlag(flags)
 	return cmd
 }
 
-// RunRemove performs a stack remove against the specified orchestrator
-func RunRemove(dockerCli command.Cli, flags *pflag.FlagSet, commonOrchestrator command.Orchestrator, opts options.Remove) error {
-	return runOrchestratedCommand(dockerCli, flags, commonOrchestrator,
-		func() error { return swarm.RunRemove(dockerCli, opts) },
-		func(kli *kubernetes.KubeCli) error { return kubernetes.RunRemove(kli, opts) })
+// RunRemove performs a stack remove against the specified swarm cluster
+func RunRemove(dockerCli command.Cli, flags *pflag.FlagSet, opts options.Remove) error {
+	return swarm.RunRemove(dockerCli, opts)
 }

@@ -42,7 +42,7 @@ func fakeClientForRemoveStackTest(version string) *fakeClient {
 }
 
 func TestRemoveWithEmptyName(t *testing.T) {
-	cmd := newRemoveCommand(test.NewFakeCli(&fakeClient{}), &orchestrator)
+	cmd := newRemoveCommand(test.NewFakeCli(&fakeClient{}))
 	cmd.SetArgs([]string{"good", "' '", "alsogood"})
 	cmd.SetOut(ioutil.Discard)
 

@@ -51,7 +51,7 @@ func TestRemoveWithEmptyName(t *testing.T) {
 
 func TestRemoveStackVersion124DoesNotRemoveConfigsOrSecrets(t *testing.T) {
 	client := fakeClientForRemoveStackTest("1.24")
-	cmd := newRemoveCommand(test.NewFakeCli(client), &orchestrator)
+	cmd := newRemoveCommand(test.NewFakeCli(client))
 	cmd.SetArgs([]string{"foo", "bar"})
 
 	assert.NilError(t, cmd.Execute())

@@ -63,7 +63,7 @@ func TestRemoveStackVersion124DoesNotRemoveConfigsOrSecrets(t *testing.T) {
 
 func TestRemoveStackVersion125DoesNotRemoveConfigs(t *testing.T) {
 	client := fakeClientForRemoveStackTest("1.25")
-	cmd := newRemoveCommand(test.NewFakeCli(client), &orchestrator)
+	cmd := newRemoveCommand(test.NewFakeCli(client))
 	cmd.SetArgs([]string{"foo", "bar"})
 
 	assert.NilError(t, cmd.Execute())

@@ -75,7 +75,7 @@ func TestRemoveStackVersion125DoesNotRemoveConfigs(t *testing.T) {
 
 func TestRemoveStackVersion130RemovesEverything(t *testing.T) {
 	client := fakeClientForRemoveStackTest("1.30")
-	cmd := newRemoveCommand(test.NewFakeCli(client), &orchestrator)
+	cmd := newRemoveCommand(test.NewFakeCli(client))
 	cmd.SetArgs([]string{"foo", "bar"})
 
 	assert.NilError(t, cmd.Execute())

@@ -106,7 +106,7 @@ func TestRemoveStackSkipEmpty(t *testing.T) {
 		configs: allConfigs,
 	}
 	fakeCli := test.NewFakeCli(fakeClient)
-	cmd := newRemoveCommand(fakeCli, &orchestrator)
+	cmd := newRemoveCommand(fakeCli)
 	cmd.SetArgs([]string{"foo", "bar"})
 
 	assert.NilError(t, cmd.Execute())

@@ -154,7 +154,7 @@ func TestRemoveContinueAfterError(t *testing.T) {
 			return nil
 		},
 	}
-	cmd := newRemoveCommand(test.NewFakeCli(cli), &orchestrator)
+	cmd := newRemoveCommand(test.NewFakeCli(cli))
 	cmd.SetOut(ioutil.Discard)
 	cmd.SetArgs([]string{"foo", "bar"})
 

@@ -8,7 +8,6 @@ import (
 	"github.com/docker/cli/cli/command"
 	"github.com/docker/cli/cli/command/service"
 	"github.com/docker/cli/cli/command/stack/formatter"
-	"github.com/docker/cli/cli/command/stack/kubernetes"
 	"github.com/docker/cli/cli/command/stack/options"
 	"github.com/docker/cli/cli/command/stack/swarm"
 	cliopts "github.com/docker/cli/opts"

@@ -18,7 +17,7 @@ import (
 	"github.com/spf13/pflag"
 )
 
-func newServicesCommand(dockerCli command.Cli, common *commonOptions) *cobra.Command {
+func newServicesCommand(dockerCli command.Cli) *cobra.Command {
 	opts := options.Services{Filter: cliopts.NewFilterOpt()}
 
 	cmd := &cobra.Command{

@@ -30,40 +29,28 @@ func newServicesCommand(dockerCli command.Cli, common *commonOptions) *cobra.Com
 			if err := validateStackName(opts.Namespace); err != nil {
 				return err
 			}
-			return RunServices(dockerCli, cmd.Flags(), common.Orchestrator(), opts)
+			return RunServices(dockerCli, cmd.Flags(), opts)
 		},
 	}
 	flags := cmd.Flags()
 	flags.BoolVarP(&opts.Quiet, "quiet", "q", false, "Only display IDs")
 	flags.StringVar(&opts.Format, "format", "", "Pretty-print services using a Go template")
 	flags.VarP(&opts.Filter, "filter", "f", "Filter output based on conditions provided")
-	kubernetes.AddNamespaceFlag(flags)
 	return cmd
 }
 
-// RunServices performs a stack services against the specified orchestrator
-func RunServices(dockerCli command.Cli, flags *pflag.FlagSet, commonOrchestrator command.Orchestrator, opts options.Services) error {
-	services, err := GetServices(dockerCli, flags, commonOrchestrator, opts)
+// RunServices performs a stack services against the specified swarm cluster
+func RunServices(dockerCli command.Cli, flags *pflag.FlagSet, opts options.Services) error {
+	services, err := GetServices(dockerCli, flags, opts)
 	if err != nil {
 		return err
 	}
 	return formatWrite(dockerCli, services, opts)
 }
 
-// GetServices returns the services for the specified orchestrator
-func GetServices(dockerCli command.Cli, flags *pflag.FlagSet, commonOrchestrator command.Orchestrator, opts options.Services) ([]swarmtypes.Service, error) {
-	switch {
-	case commonOrchestrator.HasAll():
-		return nil, errUnsupportedAllOrchestrator
-	case commonOrchestrator.HasKubernetes():
-		kli, err := kubernetes.WrapCli(dockerCli, kubernetes.NewOptions(flags, commonOrchestrator))
-		if err != nil {
-			return nil, err
-		}
-		return kubernetes.GetServices(kli, opts)
-	default:
-		return swarm.GetServices(dockerCli, opts)
-	}
+// GetServices returns the services for the specified swarm cluster
+func GetServices(dockerCli command.Cli, flags *pflag.FlagSet, opts options.Services) ([]swarmtypes.Service, error) {
+	return swarm.GetServices(dockerCli, opts)
 }
 
 func formatWrite(dockerCli command.Cli, services []swarmtypes.Service, opts options.Services) error {

@@ -74,7 +74,7 @@ func TestStackServicesErrors(t *testing.T) {
 			nodeListFunc: tc.nodeListFunc,
 			taskListFunc: tc.taskListFunc,
 		})
-		cmd := newServicesCommand(cli, &orchestrator)
+		cmd := newServicesCommand(cli)
 		cmd.SetArgs(tc.args)
 		for key, value := range tc.flags {
 			cmd.Flags().Set(key, value)

@@ -86,7 +86,7 @@ func TestStackServicesErrors(t *testing.T) {
 }
 
 func TestRunServicesWithEmptyName(t *testing.T) {
-	cmd := newServicesCommand(test.NewFakeCli(&fakeClient{}), &orchestrator)
+	cmd := newServicesCommand(test.NewFakeCli(&fakeClient{}))
 	cmd.SetArgs([]string{"' '"})
 	cmd.SetOut(ioutil.Discard)
 

@@ -99,7 +99,7 @@ func TestStackServicesEmptyServiceList(t *testing.T) {
 			return []swarm.Service{}, nil
 		},
 	})
-	cmd := newServicesCommand(fakeCli, &orchestrator)
+	cmd := newServicesCommand(fakeCli)
 	cmd.SetArgs([]string{"foo"})
 	assert.NilError(t, cmd.Execute())
 	assert.Check(t, is.Equal("", fakeCli.OutBuffer().String()))

@@ -112,7 +112,7 @@ func TestStackServicesWithQuietOption(t *testing.T) {
 			return []swarm.Service{*Service(ServiceID("id-foo"))}, nil
 		},
 	})
-	cmd := newServicesCommand(cli, &orchestrator)
+	cmd := newServicesCommand(cli)
 	cmd.Flags().Set("quiet", "true")
 	cmd.SetArgs([]string{"foo"})
 	assert.NilError(t, cmd.Execute())

@@ -127,7 +127,7 @@ func TestStackServicesWithFormat(t *testing.T) {
 			}, nil
 		},
 	})
-	cmd := newServicesCommand(cli, &orchestrator)
+	cmd := newServicesCommand(cli)
 	cmd.SetArgs([]string{"foo"})
 	cmd.Flags().Set("format", "{{ .Name }}")
 	assert.NilError(t, cmd.Execute())

@@ -145,7 +145,7 @@ func TestStackServicesWithConfigFormat(t *testing.T) {
 	cli.SetConfigFile(&configfile.ConfigFile{
 		ServicesFormat: "{{ .Name }}",
 	})
-	cmd := newServicesCommand(cli, &orchestrator)
+	cmd := newServicesCommand(cli)
 	cmd.SetArgs([]string{"foo"})
 	assert.NilError(t, cmd.Execute())
 	golden.Assert(t, cli.OutBuffer().String(), "stack-services-with-config-format.golden")

@@ -168,7 +168,7 @@ func TestStackServicesWithoutFormat(t *testing.T) {
 		)}, nil
 		},
 	})
-	cmd := newServicesCommand(cli, &orchestrator)
+	cmd := newServicesCommand(cli)
 	cmd.SetArgs([]string{"foo"})
 	assert.NilError(t, cmd.Execute())
 	golden.Assert(t, cli.OutBuffer().String(), "stack-services-without-format.golden")

@@ -29,9 +29,8 @@ func GetStacks(dockerCli command.Cli) ([]*formatter.Stack, error) {
 		ztack, ok := m[name]
 		if !ok {
 			m[name] = &formatter.Stack{
-				Name:         name,
-				Services:     1,
-				Orchestrator: "Swarm",
+				Name:     name,
+				Services: 1,
 			}
 		} else {
 			ztack.Services++

@@ -1,4 +1,4 @@
-NAME                  SERVICES            ORCHESTRATOR
-service-name-1-foo    1                   Swarm
-service-name-2-foo    1                   Swarm
-service-name-10-foo   1                   Swarm
+NAME                  SERVICES
+service-name-1-foo    1
+service-name-2-foo    1
+service-name-10-foo   1

@@ -1,3 +1,3 @@
-NAME               SERVICES            ORCHESTRATOR
-service-name-bar   1                   Swarm
-service-name-foo   1                   Swarm
+NAME               SERVICES
+service-name-bar   1
+service-name-foo   1

@@ -1,2 +1,2 @@
-NAME               SERVICES            ORCHESTRATOR
-service-name-foo   1                   Swarm
+NAME               SERVICES
+service-name-foo   1

@@ -11,18 +11,12 @@ import (
 
 	"github.com/docker/cli/cli"
 	"github.com/docker/cli/cli/command"
-	kubecontext "github.com/docker/cli/cli/context/kubernetes"
 	"github.com/docker/cli/cli/version"
-	"github.com/docker/cli/kubernetes"
 	"github.com/docker/cli/templates"
-	kubeapi "github.com/docker/compose-on-kubernetes/api"
 	"github.com/docker/docker/api/types"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 	"github.com/tonistiigi/go-rosetta"
-	kubernetesClient "k8s.io/client-go/kubernetes"
-	"k8s.io/client-go/tools/clientcmd"
 )
 
 var versionTemplate = `{{with .Client -}}

@@ -61,8 +55,7 @@ Server:{{if ne .Platform.Name ""}} {{.Platform.Name}}{{end}}
 {{- end}}{{- end}}`
 
 type versionOptions struct {
-	format     string
-	kubeConfig string
+	format string
 }
 
 // versionInfo contains version information of both the Client, and Server

@@ -86,11 +79,6 @@ type clientVersion struct {
 	Experimental bool `json:",omitempty"` // Deprecated: experimental CLI features always enabled. This field is kept for backward-compatibility, and is always "true"
 }
 
-type kubernetesVersion struct {
-	Kubernetes string
-	StackAPI   string
-}
-
 // ServerOK returns true when the client could connect to the docker server
 // and parse the information received. It returns false otherwise.
 func (v versionInfo) ServerOK() bool {

@@ -112,9 +100,6 @@ func NewVersionCommand(dockerCli command.Cli) *cobra.Command {
 
 	flags := cmd.Flags()
 	flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template")
-	flags.StringVar(&opts.kubeConfig, "kubeconfig", "", "Kubernetes config file")
-	flags.SetAnnotation("kubeconfig", "kubernetes", nil)
-	flags.SetAnnotation("kubeconfig", "deprecated", nil)
 
 	return cmd
 }

@@ -142,10 +127,14 @@ func runVersion(dockerCli command.Cli, opts *versionOptions) error {
 		return cli.StatusError{StatusCode: 64, Status: err.Error()}
 	}
 
-	orchestrator, err := dockerCli.StackOrchestrator("")
-	if err != nil {
-		return cli.StatusError{StatusCode: 64, Status: err.Error()}
-	}
+	// TODO print error if kubernetes is used?
+	// orchestrator, err := dockerCli.StackOrchestrator("")
+	// if err != nil {
+	// 	return cli.StatusError{StatusCode: 64, Status: err.Error()}
+	// }
+	// if orchestrator.HasKubernetes() {
+	// 	// TODO print error if kubernetes is used?
+	// }
 
 	vd := versionInfo{
 		Client: clientVersion{

@@ -166,12 +155,7 @@ func runVersion(dockerCli command.Cli, opts *versionOptions) error {
 	sv, err := dockerCli.Client().ServerVersion(context.Background())
 	if err == nil {
 		vd.Server = &sv
-		var kubeVersion *kubernetesVersion
-		if orchestrator.HasKubernetes() {
-			kubeVersion = getKubernetesVersion(dockerCli, opts.kubeConfig)
-		}
 		foundEngine := false
-		foundKubernetes := false
 		for _, component := range sv.Components {
 			switch component.Name {
 			case "Engine":

@@ -180,11 +164,6 @@ func runVersion(dockerCli command.Cli, opts *versionOptions) error {
 				if ok {
 					component.Details["BuildTime"] = reformatDate(buildTime)
 				}
-			case "Kubernetes":
-				foundKubernetes = true
-				if _, ok := component.Details["StackAPI"]; !ok && kubeVersion != nil {
-					component.Details["StackAPI"] = kubeVersion.StackAPI
-				}
 			}
 		}
 

@@ -204,15 +183,6 @@ func runVersion(dockerCli command.Cli, opts *versionOptions) error {
 				},
 			})
 		}
-		if !foundKubernetes && kubeVersion != nil {
-			vd.Server.Components = append(vd.Server.Components, types.ComponentVersion{
-				Name:    "Kubernetes",
-				Version: kubeVersion.Kubernetes,
-				Details: map[string]string{
-					"StackAPI": kubeVersion.StackAPI,
-				},
-			})
-		}
 	}
 	if err2 := prettyPrintVersion(dockerCli, vd, tmpl); err2 != nil && err == nil {
 		err = err2

@@ -246,54 +216,3 @@ func getDetailsOrder(v types.ComponentVersion) []string {
 	sort.Strings(out)
 	return out
 }
-
-func getKubernetesVersion(dockerCli command.Cli, kubeConfig string) *kubernetesVersion {
-	version := kubernetesVersion{
-		Kubernetes: "Unknown",
-		StackAPI:   "Unknown",
-	}
-	var (
-		clientConfig clientcmd.ClientConfig
-		err          error
-	)
-	if dockerCli.CurrentContext() == "" {
-		clientConfig = kubeapi.NewKubernetesConfig(kubeConfig)
-	} else {
-		clientConfig, err = kubecontext.ConfigFromContext(dockerCli.CurrentContext(), dockerCli.ContextStore())
-	}
-	if err != nil {
-		logrus.Debugf("failed to get Kubernetes configuration: %s", err)
-		return &version
-	}
-	config, err := clientConfig.ClientConfig()
-	if err != nil {
-		logrus.Debugf("failed to get Kubernetes client config: %s", err)
-		return &version
-	}
-	kubeClient, err := kubernetesClient.NewForConfig(config)
-	if err != nil {
-		logrus.Debugf("failed to get Kubernetes client: %s", err)
-		return &version
-	}
-	version.StackAPI = getStackVersion(kubeClient)
-	version.Kubernetes = getKubernetesServerVersion(kubeClient)
-	return &version
-}
-
-func getStackVersion(client *kubernetesClient.Clientset) string {
-	apiVersion, err := kubernetes.GetStackAPIVersion(client)
-	if err != nil {
-		logrus.Debugf("failed to get Stack API version: %s", err)
-		return "Unknown"
-	}
-	return string(apiVersion)
-}
-
-func getKubernetesServerVersion(client *kubernetesClient.Clientset) string {
-	kubeVersion, err := client.DiscoveryClient.ServerVersion()
-	if err != nil {
-		logrus.Debugf("failed to get Kubernetes server version: %s", err)
-		return "Unknown"
-	}
-	return kubeVersion.String()
-}

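What disappears from docker version output is the server-side Kubernetes component. The deleted probe amounted to a discovery round-trip; roughly (a sketch of the code removed above, with config a resolved client-go rest config):

kubeClient, err := kubernetesClient.NewForConfig(config)
if err != nil {
    return &version // degrade to "Unknown" fields rather than failing the command
}
if sv, err := kubeClient.DiscoveryClient.ServerVersion(); err == nil {
    version.Kubernetes = sv.String()
}
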
@@ -3,7 +3,6 @@ package configfile
 import (
 	"encoding/base64"
 	"encoding/json"
-	"fmt"
 	"io"
 	"io/ioutil"
 	"os"

@@ -47,7 +46,6 @@ type ConfigFile struct {
 	Proxies             map[string]ProxyConfig       `json:"proxies,omitempty"`
 	Experimental        string                       `json:"experimental,omitempty"`
-	StackOrchestrator   string                       `json:"stackOrchestrator,omitempty"`
 	Kubernetes          *KubernetesConfig            `json:"kubernetes,omitempty"`
 	CurrentContext      string                       `json:"currentContext,omitempty"`
 	CLIPluginsExtraDirs []string                     `json:"cliPluginsExtraDirs,omitempty"`
 	Plugins             map[string]map[string]string `json:"plugins,omitempty"`

@@ -63,11 +61,6 @@ type ProxyConfig struct {
 	AllProxy string `json:"allProxy,omitempty"`
 }
 
-// KubernetesConfig contains Kubernetes orchestrator settings
-type KubernetesConfig struct {
-	AllNamespaces string `json:"allNamespaces,omitempty"`
-}
-
 // New initializes an empty configuration file for the given filename 'fn'
 func New(fn string) *ConfigFile {
 	return &ConfigFile{

@@ -135,7 +128,7 @@ func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error {
 		ac.ServerAddress = addr
 		configFile.AuthConfigs[addr] = ac
 	}
-	return checkKubernetesConfiguration(configFile.Kubernetes)
+	return nil
 }
 
 // ContainsAuth returns whether there is authentication configured

@@ -401,17 +394,3 @@ func (configFile *ConfigFile) SetPluginConfig(pluginname, option, value string)
 		delete(configFile.Plugins, pluginname)
 	}
 }
-
-func checkKubernetesConfiguration(kubeConfig *KubernetesConfig) error {
-	if kubeConfig == nil {
-		return nil
-	}
-	switch kubeConfig.AllNamespaces {
-	case "":
-	case "enabled":
-	case "disabled":
-	default:
-		return fmt.Errorf("invalid 'kubernetes.allNamespaces' value, should be 'enabled' or 'disabled': %s", kubeConfig.AllNamespaces)
-	}
-	return nil
-}

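One observable effect of the configfile change: LoadFromReader no longer validates the kubernetes.allNamespaces key, so a value it used to reject now loads cleanly. A minimal illustration (hypothetical snippet; the strings and configfile imports are assumed):

cfg := configfile.New("config.json")
err := cfg.LoadFromReader(strings.NewReader(`{"kubernetes": {"allNamespaces": "bogus"}}`))
// before this commit: invalid 'kubernetes.allNamespaces' value, should be 'enabled' or 'disabled': bogus
// after this commit:  err == nil; the value is parsed but no longer checked
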
@@ -438,46 +438,6 @@ func TestLoadFromReaderWithUsernamePassword(t *testing.T) {
 	}
 }
 
-func TestCheckKubernetesConfigurationRaiseAnErrorOnInvalidValue(t *testing.T) {
-	testCases := []struct {
-		name        string
-		config      *KubernetesConfig
-		expectError bool
-	}{
-		{
-			name: "no kubernetes config is valid",
-		},
-		{
-			name:   "enabled is valid",
-			config: &KubernetesConfig{AllNamespaces: "enabled"},
-		},
-		{
-			name:   "disabled is valid",
-			config: &KubernetesConfig{AllNamespaces: "disabled"},
-		},
-		{
-			name:   "empty string is valid",
-			config: &KubernetesConfig{AllNamespaces: ""},
-		},
-		{
-			name:        "other value is invalid",
-			config:      &KubernetesConfig{AllNamespaces: "unknown"},
-			expectError: true,
-		},
-	}
-	for _, tc := range testCases {
-		test := tc
-		t.Run(test.name, func(t *testing.T) {
-			err := checkKubernetesConfiguration(test.config)
-			if test.expectError {
-				assert.Assert(t, err != nil, test.name)
-			} else {
-				assert.NilError(t, err, test.name)
-			}
-		})
-	}
-}
-
 func TestSave(t *testing.T) {
 	configFile := New("test-save")
 	defer os.Remove("test-save")

@@ -1,6 +0,0 @@
package kubernetes

const (
    // KubernetesEndpoint is the kubernetes endpoint name in a stored context
    KubernetesEndpoint = "kubernetes"
)

@@ -1,224 +0,0 @@
package kubernetes

import (
    "io/ioutil"
    "os"
    "testing"

    "github.com/docker/cli/cli/context"
    "github.com/docker/cli/cli/context/store"
    "gotest.tools/v3/assert"
    "k8s.io/client-go/tools/clientcmd"
    clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

func testEndpoint(server, defaultNamespace string, ca, cert, key []byte, skipTLSVerify bool) Endpoint {
    var tlsData *context.TLSData
    if ca != nil || cert != nil || key != nil {
        tlsData = &context.TLSData{
            CA:   ca,
            Cert: cert,
            Key:  key,
        }
    }
    return Endpoint{
        EndpointMeta: EndpointMeta{
            EndpointMetaBase: context.EndpointMetaBase{
                Host:          server,
                SkipTLSVerify: skipTLSVerify,
            },
            DefaultNamespace: defaultNamespace,
        },
        TLSData: tlsData,
    }
}

var testStoreCfg = store.NewConfig(
    func() interface{} {
        return &map[string]interface{}{}
    },
    store.EndpointTypeGetter(KubernetesEndpoint, func() interface{} { return &EndpointMeta{} }),
)

func TestSaveLoadContexts(t *testing.T) {
    storeDir, err := ioutil.TempDir("", "test-load-save-k8-context")
    assert.NilError(t, err)
    defer os.RemoveAll(storeDir)
    store := store.New(storeDir, testStoreCfg)
    assert.NilError(t, save(store, testEndpoint("https://test", "test", nil, nil, nil, false), "raw-notls"))
    assert.NilError(t, save(store, testEndpoint("https://test", "test", nil, nil, nil, true), "raw-notls-skip"))
    assert.NilError(t, save(store, testEndpoint("https://test", "test", []byte("ca"), []byte("cert"), []byte("key"), true), "raw-tls"))

    kcFile, err := ioutil.TempFile(os.TempDir(), "test-load-save-k8-context")
    assert.NilError(t, err)
    defer os.Remove(kcFile.Name())
    defer kcFile.Close()
    cfg := clientcmdapi.NewConfig()
    cfg.AuthInfos["user"] = clientcmdapi.NewAuthInfo()
    cfg.Contexts["context1"] = clientcmdapi.NewContext()
    cfg.Clusters["cluster1"] = clientcmdapi.NewCluster()
    cfg.Contexts["context2"] = clientcmdapi.NewContext()
    cfg.Clusters["cluster2"] = clientcmdapi.NewCluster()
    cfg.AuthInfos["user"].ClientCertificateData = []byte("cert")
    cfg.AuthInfos["user"].ClientKeyData = []byte("key")
    cfg.Clusters["cluster1"].Server = "https://server1"
    cfg.Clusters["cluster1"].InsecureSkipTLSVerify = true
    cfg.Clusters["cluster2"].Server = "https://server2"
    cfg.Clusters["cluster2"].CertificateAuthorityData = []byte("ca")
    cfg.Contexts["context1"].AuthInfo = "user"
    cfg.Contexts["context1"].Cluster = "cluster1"
    cfg.Contexts["context1"].Namespace = "namespace1"
    cfg.Contexts["context2"].AuthInfo = "user"
    cfg.Contexts["context2"].Cluster = "cluster2"
    cfg.Contexts["context2"].Namespace = "namespace2"
    cfg.CurrentContext = "context1"
    cfgData, err := clientcmd.Write(*cfg)
    assert.NilError(t, err)
    _, err = kcFile.Write(cfgData)
    assert.NilError(t, err)
    kcFile.Close()

    epDefault, err := FromKubeConfig(kcFile.Name(), "", "")
    assert.NilError(t, err)
    epContext2, err := FromKubeConfig(kcFile.Name(), "context2", "namespace-override")
    assert.NilError(t, err)
    assert.NilError(t, save(store, epDefault, "embed-default-context"))
    assert.NilError(t, save(store, epContext2, "embed-context2"))

    rawNoTLSMeta, err := store.GetMetadata("raw-notls")
    assert.NilError(t, err)
    rawNoTLSSkipMeta, err := store.GetMetadata("raw-notls-skip")
    assert.NilError(t, err)
    rawTLSMeta, err := store.GetMetadata("raw-tls")
    assert.NilError(t, err)
    embededDefaultMeta, err := store.GetMetadata("embed-default-context")
    assert.NilError(t, err)
    embededContext2Meta, err := store.GetMetadata("embed-context2")
    assert.NilError(t, err)

    rawNoTLS := EndpointFromContext(rawNoTLSMeta)
    rawNoTLSSkip := EndpointFromContext(rawNoTLSSkipMeta)
    rawTLS := EndpointFromContext(rawTLSMeta)
    embededDefault := EndpointFromContext(embededDefaultMeta)
    embededContext2 := EndpointFromContext(embededContext2Meta)

    rawNoTLSEP, err := rawNoTLS.WithTLSData(store, "raw-notls")
    assert.NilError(t, err)
    checkClientConfig(t, rawNoTLSEP, "https://test", "test", nil, nil, nil, false)
    rawNoTLSSkipEP, err := rawNoTLSSkip.WithTLSData(store, "raw-notls-skip")
    assert.NilError(t, err)
    checkClientConfig(t, rawNoTLSSkipEP, "https://test", "test", nil, nil, nil, true)
    rawTLSEP, err := rawTLS.WithTLSData(store, "raw-tls")
    assert.NilError(t, err)
    checkClientConfig(t, rawTLSEP, "https://test", "test", []byte("ca"), []byte("cert"), []byte("key"), true)
    embededDefaultEP, err := embededDefault.WithTLSData(store, "embed-default-context")
    assert.NilError(t, err)
    checkClientConfig(t, embededDefaultEP, "https://server1", "namespace1", nil, []byte("cert"), []byte("key"), true)
    embededContext2EP, err := embededContext2.WithTLSData(store, "embed-context2")
    assert.NilError(t, err)
    checkClientConfig(t, embededContext2EP, "https://server2", "namespace-override", []byte("ca"), []byte("cert"), []byte("key"), false)
}

func checkClientConfig(t *testing.T, ep Endpoint, server, namespace string, ca, cert, key []byte, skipTLSVerify bool) {
    config := ep.KubernetesConfig()
    cfg, err := config.ClientConfig()
    assert.NilError(t, err)
    ns, _, _ := config.Namespace()
    assert.Equal(t, server, cfg.Host)
    assert.Equal(t, namespace, ns)
    assert.DeepEqual(t, ca, cfg.CAData)
    assert.DeepEqual(t, cert, cfg.CertData)
    assert.DeepEqual(t, key, cfg.KeyData)
    assert.Equal(t, skipTLSVerify, cfg.Insecure)
}

func save(s store.Writer, ep Endpoint, name string) error {
    meta := store.Metadata{
        Endpoints: map[string]interface{}{
            KubernetesEndpoint: ep.EndpointMeta,
        },
        Name: name,
    }
    if err := s.CreateOrUpdate(meta); err != nil {
        return err
    }
    return s.ResetEndpointTLSMaterial(name, KubernetesEndpoint, ep.TLSData.ToStoreTLSData())
}

func TestSaveLoadGKEConfig(t *testing.T) {
    storeDir, err := ioutil.TempDir("", t.Name())
    assert.NilError(t, err)
    defer os.RemoveAll(storeDir)
    store := store.New(storeDir, testStoreCfg)
    cfg, err := clientcmd.LoadFromFile("testdata/gke-kubeconfig")
    assert.NilError(t, err)
    clientCfg := clientcmd.NewDefaultClientConfig(*cfg, &clientcmd.ConfigOverrides{})
    expectedCfg, err := clientCfg.ClientConfig()
    assert.NilError(t, err)
    ep, err := FromKubeConfig("testdata/gke-kubeconfig", "", "")
    assert.NilError(t, err)
    assert.NilError(t, save(store, ep, "gke-context"))
    persistedMetadata, err := store.GetMetadata("gke-context")
    assert.NilError(t, err)
    persistedEPMeta := EndpointFromContext(persistedMetadata)
    assert.Check(t, persistedEPMeta != nil)
    persistedEP, err := persistedEPMeta.WithTLSData(store, "gke-context")
    assert.NilError(t, err)
    persistedCfg := persistedEP.KubernetesConfig()
    actualCfg, err := persistedCfg.ClientConfig()
    assert.NilError(t, err)
    assert.DeepEqual(t, expectedCfg.AuthProvider, actualCfg.AuthProvider)
}

func TestSaveLoadEKSConfig(t *testing.T) {
    storeDir, err := ioutil.TempDir("", t.Name())
    assert.NilError(t, err)
    defer os.RemoveAll(storeDir)
    store := store.New(storeDir, testStoreCfg)
    cfg, err := clientcmd.LoadFromFile("testdata/eks-kubeconfig")
    assert.NilError(t, err)
    clientCfg := clientcmd.NewDefaultClientConfig(*cfg, &clientcmd.ConfigOverrides{})
|
||||
expectedCfg, err := clientCfg.ClientConfig()
|
||||
assert.NilError(t, err)
|
||||
ep, err := FromKubeConfig("testdata/eks-kubeconfig", "", "")
|
||||
assert.NilError(t, err)
|
||||
assert.NilError(t, save(store, ep, "eks-context"))
|
||||
persistedMetadata, err := store.GetMetadata("eks-context")
|
||||
assert.NilError(t, err)
|
||||
persistedEPMeta := EndpointFromContext(persistedMetadata)
|
||||
assert.Check(t, persistedEPMeta != nil)
|
||||
persistedEP, err := persistedEPMeta.WithTLSData(store, "eks-context")
|
||||
assert.NilError(t, err)
|
||||
persistedCfg := persistedEP.KubernetesConfig()
|
||||
actualCfg, err := persistedCfg.ClientConfig()
|
||||
assert.NilError(t, err)
|
||||
assert.DeepEqual(t, expectedCfg.ExecProvider, actualCfg.ExecProvider)
|
||||
}
|
||||
|
||||
func TestSaveLoadK3SConfig(t *testing.T) {
|
||||
storeDir, err := ioutil.TempDir("", t.Name())
|
||||
assert.NilError(t, err)
|
||||
defer os.RemoveAll(storeDir)
|
||||
store := store.New(storeDir, testStoreCfg)
|
||||
cfg, err := clientcmd.LoadFromFile("testdata/k3s-kubeconfig")
|
||||
assert.NilError(t, err)
|
||||
clientCfg := clientcmd.NewDefaultClientConfig(*cfg, &clientcmd.ConfigOverrides{})
|
||||
expectedCfg, err := clientCfg.ClientConfig()
|
||||
assert.NilError(t, err)
|
||||
ep, err := FromKubeConfig("testdata/k3s-kubeconfig", "", "")
|
||||
assert.NilError(t, err)
|
||||
assert.NilError(t, save(store, ep, "k3s-context"))
|
||||
persistedMetadata, err := store.GetMetadata("k3s-context")
|
||||
assert.NilError(t, err)
|
||||
persistedEPMeta := EndpointFromContext(persistedMetadata)
|
||||
assert.Check(t, persistedEPMeta != nil)
|
||||
persistedEP, err := persistedEPMeta.WithTLSData(store, "k3s-context")
|
||||
assert.NilError(t, err)
|
||||
persistedCfg := persistedEP.KubernetesConfig()
|
||||
actualCfg, err := persistedCfg.ClientConfig()
|
||||
assert.NilError(t, err)
|
||||
assert.Check(t, len(actualCfg.Username) > 0)
|
||||
assert.Check(t, len(actualCfg.Password) > 0)
|
||||
assert.Equal(t, expectedCfg.Username, actualCfg.Username)
|
||||
assert.Equal(t, expectedCfg.Password, actualCfg.Password)
|
||||
}
|
|
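For orientation, the round-trip these tests exercise can be condensed as follows. This is a minimal sketch written as if it lived in the same package as the helpers above (FromKubeConfig, save, EndpointFromContext); the kubeconfig path, the context name, and the error text are illustrative, and imports are elided.

```go
// Round-trip sketch: build an endpoint from a kubeconfig, persist it in a
// context store, then load it back and turn it into a client-go config.
func roundTrip(s store.Store) (clientcmd.ClientConfig, error) {
	// Illustrative path and context name.
	ep, err := FromKubeConfig("/home/user/.kube/config", "", "")
	if err != nil {
		return nil, err
	}
	// Persist metadata, then the TLS material (the two-step sequence save wraps).
	if err := save(s, ep, "example"); err != nil {
		return nil, err
	}
	// Load back: metadata first, then the TLS files.
	meta, err := s.GetMetadata("example")
	if err != nil {
		return nil, err
	}
	epMeta := EndpointFromContext(meta)
	if epMeta == nil {
		return nil, fmt.Errorf("context %q has no kubernetes endpoint", "example")
	}
	loaded, err := epMeta.WithTLSData(s, "example")
	if err != nil {
		return nil, err
	}
	return loaded.KubernetesConfig(), nil
}
```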
@@ -1,146 +0,0 @@
-package kubernetes
-
-import (
-	"os"
-	"path/filepath"
-
-	"github.com/docker/cli/cli/command"
-	"github.com/docker/cli/cli/context"
-	"github.com/docker/cli/cli/context/store"
-	api "github.com/docker/compose-on-kubernetes/api"
-	"github.com/docker/docker/pkg/homedir"
-	"github.com/pkg/errors"
-	"k8s.io/client-go/tools/clientcmd"
-	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
-)
-
-// EndpointMeta is a typed wrapper around a context-store generic endpoint describing
-// a Kubernetes endpoint, without TLS data
-type EndpointMeta struct {
-	context.EndpointMetaBase
-	DefaultNamespace string                           `json:",omitempty"`
-	AuthProvider     *clientcmdapi.AuthProviderConfig `json:",omitempty"`
-	Exec             *clientcmdapi.ExecConfig         `json:",omitempty"`
-	UsernamePassword *UsernamePassword                `json:"usernamePassword,omitempty"`
-}
-
-// UsernamePassword contains username/password auth info
-type UsernamePassword struct {
-	Username string `json:"username,omitempty"`
-	Password string `json:"password,omitempty"`
-}
-
-var _ command.EndpointDefaultResolver = &EndpointMeta{}
-
-// Endpoint is a typed wrapper around a context-store generic endpoint describing
-// a Kubernetes endpoint, with TLS data
-type Endpoint struct {
-	EndpointMeta
-	TLSData *context.TLSData
-}
-
-func init() {
-	command.RegisterDefaultStoreEndpoints(
-		store.EndpointTypeGetter(KubernetesEndpoint, func() interface{} { return &EndpointMeta{} }),
-	)
-}
-
-// WithTLSData loads TLS materials for the endpoint
-func (c *EndpointMeta) WithTLSData(s store.Reader, contextName string) (Endpoint, error) {
-	tlsData, err := context.LoadTLSData(s, contextName, KubernetesEndpoint)
-	if err != nil {
-		return Endpoint{}, err
-	}
-	return Endpoint{
-		EndpointMeta: *c,
-		TLSData:      tlsData,
-	}, nil
-}
-
-// KubernetesConfig creates the kubernetes client config from the endpoint
-func (c *Endpoint) KubernetesConfig() clientcmd.ClientConfig {
-	cfg := clientcmdapi.NewConfig()
-	cluster := clientcmdapi.NewCluster()
-	cluster.Server = c.Host
-	cluster.InsecureSkipTLSVerify = c.SkipTLSVerify
-	authInfo := clientcmdapi.NewAuthInfo()
-	if c.TLSData != nil {
-		cluster.CertificateAuthorityData = c.TLSData.CA
-		authInfo.ClientCertificateData = c.TLSData.Cert
-		authInfo.ClientKeyData = c.TLSData.Key
-	}
-	if c.UsernamePassword != nil {
-		authInfo.Username = c.UsernamePassword.Username
-		authInfo.Password = c.UsernamePassword.Password
-	}
-	authInfo.AuthProvider = c.AuthProvider
-	authInfo.Exec = c.Exec
-	cfg.Clusters["cluster"] = cluster
-	cfg.AuthInfos["authInfo"] = authInfo
-	ctx := clientcmdapi.NewContext()
-	ctx.AuthInfo = "authInfo"
-	ctx.Cluster = "cluster"
-	ctx.Namespace = c.DefaultNamespace
-	cfg.Contexts["context"] = ctx
-	cfg.CurrentContext = "context"
-	return clientcmd.NewDefaultClientConfig(*cfg, &clientcmd.ConfigOverrides{})
-}
-
-// ResolveDefault returns endpoint metadata for the default Kubernetes
-// endpoint, which is derived from the env-based kubeconfig.
-func (c *EndpointMeta) ResolveDefault(stackOrchestrator command.Orchestrator) (interface{}, *store.EndpointTLSData, error) {
-	kubeconfig := os.Getenv("KUBECONFIG")
-	if kubeconfig == "" {
-		kubeconfig = filepath.Join(homedir.Get(), ".kube/config")
-	}
-	kubeEP, err := FromKubeConfig(kubeconfig, "", "")
-	if err != nil {
-		if stackOrchestrator == command.OrchestratorKubernetes || stackOrchestrator == command.OrchestratorAll {
-			return nil, nil, errors.Wrapf(err, "default orchestrator is %s but unable to resolve kubernetes endpoint", stackOrchestrator)
-		}
-
-		// We deliberately quash the error here: returning nil
-		// for the first argument is sufficient to indicate we weren't able to
-		// provide a default.
-		return nil, nil, nil
-	}
-
-	var tls *store.EndpointTLSData
-	if kubeEP.TLSData != nil {
-		tls = kubeEP.TLSData.ToStoreTLSData()
-	}
-	return kubeEP.EndpointMeta, tls, nil
-}
-
-// EndpointFromContext extracts kubernetes endpoint info from current context
-func EndpointFromContext(metadata store.Metadata) *EndpointMeta {
-	ep, ok := metadata.Endpoints[KubernetesEndpoint]
-	if !ok {
-		return nil
-	}
-	typed, ok := ep.(EndpointMeta)
-	if !ok {
-		return nil
-	}
-	return &typed
-}
-
-// ConfigFromContext resolves a kubernetes client config for the specified context.
-// If kubeconfigOverride is specified, use this config file instead of the context defaults.
-// If command.ContextDockerHost is specified as the context name, falls back to the default user's kubeconfig file.
-func ConfigFromContext(name string, s store.Reader) (clientcmd.ClientConfig, error) {
-	ctxMeta, err := s.GetMetadata(name)
-	if err != nil {
-		return nil, err
-	}
-	epMeta := EndpointFromContext(ctxMeta)
-	if epMeta != nil {
-		ep, err := epMeta.WithTLSData(s, name)
-		if err != nil {
-			return nil, err
-		}
-		return ep.KubernetesConfig(), nil
-	}
-	// context has no kubernetes endpoint
-	return api.NewKubernetesConfig(""), nil
-}
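A usage note, as a sketch rather than code from this repository: the clientcmd.ClientConfig returned by ConfigFromContext still has to be flattened into a *rest.Config before client-go can build a typed clientset. The helper name below is hypothetical; it assumes a populated store.Reader.

```go
import (
	"github.com/docker/cli/cli/context/store"
	"k8s.io/client-go/kubernetes"
)

// clientsetForContext shows the hand-off from the CLI's context store
// to a client-go clientset (hypothetical helper, error handling kept).
func clientsetForContext(name string, s store.Reader) (*kubernetes.Clientset, error) {
	clientConfig, err := ConfigFromContext(name, s)
	if err != nil {
		return nil, err
	}
	restCfg, err := clientConfig.ClientConfig() // resolve to a concrete *rest.Config
	if err != nil {
		return nil, err
	}
	return kubernetes.NewForConfig(restCfg)
}
```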
@@ -1,25 +0,0 @@
-package kubernetes
-
-import (
-	"testing"
-
-	"github.com/docker/cli/cli/command"
-	"github.com/docker/cli/cli/config/configfile"
-	cliflags "github.com/docker/cli/cli/flags"
-	"gotest.tools/v3/assert"
-	"gotest.tools/v3/env"
-)
-
-func TestDefaultContextInitializer(t *testing.T) {
-	cli, err := command.NewDockerCli()
-	assert.NilError(t, err)
-	defer env.Patch(t, "KUBECONFIG", "./testdata/test-kubeconfig")()
-	configFile := &configfile.ConfigFile{
-		StackOrchestrator: "all",
-	}
-	ctx, err := command.ResolveDefaultContext(&cliflags.CommonOptions{}, configFile, command.DefaultContextStoreConfig(), cli.Err())
-	assert.NilError(t, err)
-	assert.Equal(t, "default", ctx.Meta.Name)
-	assert.Equal(t, command.OrchestratorAll, ctx.Meta.Metadata.(command.DockerContext).StackOrchestrator)
-	assert.DeepEqual(t, "zoinx", ctx.Meta.Endpoints[KubernetesEndpoint].(EndpointMeta).DefaultNamespace)
-}
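The assertion `var _ command.EndpointDefaultResolver = &EndpointMeta{}` in the deleted endpoint.go is what this test exercises: any endpoint type implementing ResolveDefault gets a chance to contribute to the synthesized "default" context. A toy resolver, illustrative only and matching the signature as it existed here:

```go
import (
	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/cli/context/store"
)

// toyEndpoint is a hypothetical endpoint type; returning a nil first value
// from ResolveDefault means "nothing to contribute to the default context".
type toyEndpoint struct {
	Host string `json:",omitempty"`
}

func (e toyEndpoint) ResolveDefault(_ command.Orchestrator) (interface{}, *store.EndpointTLSData, error) {
	if e.Host == "" {
		return nil, nil, nil // no default available; deliberately not an error
	}
	return e, nil, nil // metadata only, no TLS material
}
```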
@@ -1,69 +0,0 @@
-package kubernetes
-
-import (
-	"io/ioutil"
-
-	"github.com/docker/cli/cli/context"
-	"k8s.io/client-go/tools/clientcmd"
-	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
-)
-
-// FromKubeConfig creates a Kubernetes endpoint from a Kubeconfig file
-func FromKubeConfig(kubeconfig, kubeContext, namespaceOverride string) (Endpoint, error) {
-	cfg := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
-		&clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfig},
-		&clientcmd.ConfigOverrides{CurrentContext: kubeContext, Context: clientcmdapi.Context{Namespace: namespaceOverride}})
-	ns, _, err := cfg.Namespace()
-	if err != nil {
-		return Endpoint{}, err
-	}
-	clientcfg, err := cfg.ClientConfig()
-	if err != nil {
-		return Endpoint{}, err
-	}
-	var ca, key, cert []byte
-	if ca, err = readFileOrDefault(clientcfg.CAFile, clientcfg.CAData); err != nil {
-		return Endpoint{}, err
-	}
-	if key, err = readFileOrDefault(clientcfg.KeyFile, clientcfg.KeyData); err != nil {
-		return Endpoint{}, err
-	}
-	if cert, err = readFileOrDefault(clientcfg.CertFile, clientcfg.CertData); err != nil {
-		return Endpoint{}, err
-	}
-	var tlsData *context.TLSData
-	if ca != nil || cert != nil || key != nil {
-		tlsData = &context.TLSData{
-			CA:   ca,
-			Cert: cert,
-			Key:  key,
-		}
-	}
-	var usernamePassword *UsernamePassword
-	if clientcfg.Username != "" || clientcfg.Password != "" {
-		usernamePassword = &UsernamePassword{
-			Username: clientcfg.Username,
-			Password: clientcfg.Password,
-		}
-	}
-	return Endpoint{
-		EndpointMeta: EndpointMeta{
-			EndpointMetaBase: context.EndpointMetaBase{
-				Host:          clientcfg.Host,
-				SkipTLSVerify: clientcfg.Insecure,
-			},
-			DefaultNamespace: ns,
-			AuthProvider:     clientcfg.AuthProvider,
-			Exec:             clientcfg.ExecProvider,
-			UsernamePassword: usernamePassword,
-		},
-		TLSData: tlsData,
-	}, nil
-}
-
-func readFileOrDefault(path string, defaultValue []byte) ([]byte, error) {
-	if path != "" {
-		return ioutil.ReadFile(path)
-	}
-	return defaultValue, nil
-}
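FromKubeConfig delegates the precedence rules to client-go's deferred loader. Isolated, the override behaviour it relies on looks roughly like this (the function name and arguments are illustrative):

```go
import (
	"k8s.io/client-go/tools/clientcmd"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

// namespaceFor resolves the effective namespace the way FromKubeConfig does:
// explicit override > namespace of the selected context > "default".
func namespaceFor(kubeconfig, kubeContext, nsOverride string) (string, error) {
	cfg := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfig},
		&clientcmd.ConfigOverrides{
			CurrentContext: kubeContext, // empty string keeps the file's current-context
			Context:        clientcmdapi.Context{Namespace: nsOverride},
		})
	ns, _, err := cfg.Namespace()
	return ns, err
}
```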
@@ -1,23 +0,0 @@
-apiVersion: v1
-clusters:
-- cluster:
-    server: https://some-server
-  name: kubernetes
-contexts:
-- context:
-    cluster: kubernetes
-    user: aws
-  name: aws
-current-context: aws
-kind: Config
-preferences: {}
-users:
-- name: aws
-  user:
-    exec:
-      apiVersion: client.authentication.k8s.io/v1alpha1
-      command: heptio-authenticator-aws
-      args:
-        - "token"
-        - "-i"
-        - "eks-cf"
@@ -1,23 +0,0 @@
-apiVersion: v1
-clusters:
-- cluster:
-    server: https://some-server
-  name: gke_sample
-contexts:
-- context:
-    cluster: gke_sample
-    user: gke_sample
-  name: gke_sample
-current-context: gke_sample
-kind: Config
-preferences: {}
-users:
-- name: gke_sample
-  user:
-    auth-provider:
-      config:
-        cmd-args: config config-helper --format=json
-        cmd-path: /google/google-cloud-sdk/bin/gcloud
-        expiry-key: '{.credential.token_expiry}'
-        token-key: '{.credential.access_token}'
-      name: gcp
@@ -1,20 +0,0 @@
-apiVersion: v1
-clusters:
-- cluster:
-    certificate-authority-data: dGhlLWNh
-    server: https://someserver
-  name: test-cluster
-contexts:
-- context:
-    cluster: test-cluster
-    user: test-user
-    namespace: zoinx
-  name: test
-current-context: test
-kind: Config
-preferences: {}
-users:
-- name: test-user
-  user:
-    username: admin
-    password: testpwd
@@ -1,20 +0,0 @@
-apiVersion: v1
-clusters:
-- cluster:
-    certificate-authority-data: dGhlLWNh
-    server: https://someserver
-  name: test-cluster
-contexts:
-- context:
-    cluster: test-cluster
-    user: test-user
-    namespace: zoinx
-  name: test
-current-context: test
-kind: Config
-preferences: {}
-users:
-- name: test-user
-  user:
-    client-certificate-data: dGhlLWNlcnQ=
-    client-key-data: dGhlLWtleQ==
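These fixtures cover the main kubeconfig authentication shapes the endpoint code had to round-trip: an exec plugin (EKS), an auth provider (GKE), basic auth (k3s), and client certificates. Loading one by hand with client-go, as the tests above do (a sketch; the printed values come straight from the fixture):

```go
import (
	"fmt"

	"k8s.io/client-go/tools/clientcmd"
)

// loadK3SFixture loads the k3s fixture and flattens it; Host and Username
// come directly from the YAML above.
func loadK3SFixture() error {
	cfg, err := clientcmd.LoadFromFile("testdata/k3s-kubeconfig")
	if err != nil {
		return err
	}
	restCfg, err := clientcmd.NewDefaultClientConfig(*cfg, &clientcmd.ConfigOverrides{}).ClientConfig()
	if err != nil {
		return err
	}
	fmt.Println(restCfg.Host, restCfg.Username) // https://someserver admin
	return nil
}
```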
@@ -12,7 +12,7 @@
 // - tls/
 //   - <context id>/endpoint1/: directory containing TLS data for the endpoint1 in the corresponding context
 //
-// The context store itself has absolutely no knowledge about what a docker or a kubernetes endpoint should contain in terms of metadata or TLS config.
+// The context store itself has absolutely no knowledge about what a docker endpoint should contain in terms of metadata or TLS config.
 // Client code is responsible for generating and parsing endpoint metadata and TLS files.
 // The multi-endpoints approach of this package allows combining many different endpoints in the same "context" (e.g., the Docker CLI
 // is able for a single context to define both a docker endpoint and a Kubernetes endpoint for the same cluster, and also specify which
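What "client code is responsible" means concretely: a consumer of this package supplies a factory for the context metadata plus one typed factory per endpoint name, exactly as the kubernetes test configuration earlier in this diff does. A sketch with an invented endpoint type and name:

```go
import "github.com/docker/cli/cli/context/store"

// myEndpointMeta is a hypothetical endpoint payload; the store treats it as
// an opaque value (de)serialized through the registered getter.
type myEndpointMeta struct {
	Host string `json:",omitempty"`
}

var myStoreCfg = store.NewConfig(
	// Shape used to unmarshal per-context metadata.
	func() interface{} { return &map[string]interface{}{} },
	// Typed factory for endpoints stored under the name "my-endpoint".
	store.EndpointTypeGetter("my-endpoint", func() interface{} { return &myEndpointMeta{} }),
)
```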
@@ -38,7 +38,6 @@ func TestValidFilePaths(t *testing.T) {
 		"/tls/absolute/unix/path":      false,
 		`C:\tls\absolute\windows\path`: false,
 		"C:/tls/absolute/windows/path": false,
-		"tls/kubernetes/key.pem":       true,
 	}
 	for p, expectedValid := range paths {
 		err := isValidFilePath(p)
@@ -44,7 +44,7 @@ func (s *tlsStore) getData(contextID contextdir, endpointName, filename string)
 	return data, nil
 }
 
-func (s *tlsStore) remove(contextID contextdir, endpointName, filename string) error {
+func (s *tlsStore) remove(contextID contextdir, endpointName, filename string) error { // nolint:unused
 	err := os.Remove(s.filePath(contextID, endpointName, filename))
 	if os.IsNotExist(err) {
 		return nil
@@ -17,6 +17,4 @@ ignore:
   - "**/internal/test"
   - "vendor/*"
   - "cli/compose/schema/bindata.go"
-  - "cli/command/stack/kubernetes/api/openapi"
-  - "cli/command/stack/kubernetes/api/client"
   - ".*generated.*"
@@ -53,7 +53,7 @@ Status | Feature
 Deprecated | [Legacy builder for Linux images](#legacy-builder-for-linux-images) | v21.xx | -
 Deprecated | [Legacy builder fallback](#legacy-builder-fallback) | v21.xx | -
 Removed | [Support for encrypted TLS private keys](#support-for-encrypted-tls-private-keys) | v20.10 | v21.xx
-Deprecated | [Kubernetes stack and context support](#kubernetes-stack-and-context-support) | v20.10 | -
+Removed | [Kubernetes stack and context support](#kubernetes-stack-and-context-support) | v20.10 | v21.xx
 Deprecated | [Pulling images from non-compliant image registries](#pulling-images-from-non-compliant-image-registries) | v20.10 | -
 Removed | [Linux containers on Windows (LCOW)](#linux-containers-on-windows-lcow-experimental) | v20.10 | v21.xx
 Deprecated | [BLKIO weight options with cgroups v1](#blkio-weight-options-with-cgroups-v1) | v20.10 | -
@@ -28,8 +28,8 @@ indicated with an `*`:
 ```console
 $ docker context ls
 
-NAME                DESCRIPTION                               DOCKER ENDPOINT                      KUBERNETES ENDPOINT   ORCHESTRATOR
-default *           Current DOCKER_HOST based configuration   unix:///var/run/docker.sock                                swarm
+NAME                DESCRIPTION                               DOCKER ENDPOINT                      ORCHESTRATOR
+default *           Current DOCKER_HOST based configuration   unix:///var/run/docker.sock          swarm
 production                                                    tcp:///prod.corp.example.com:2376
 staging                                                       tcp:///stage.corp.example.com:2376
 ```
@@ -1,3 +1,3 @@
-NAME                DESCRIPTION                               DOCKER ENDPOINT               KUBERNETES ENDPOINT            ORCHESTRATOR
-default *           Current DOCKER_HOST based configuration   unix:///var/run/docker.sock                                  swarm
-remote              my remote cluster                         ssh://someserver              https://someserver (default)   kubernetes
+NAME                DESCRIPTION                               DOCKER ENDPOINT
+default *           Current DOCKER_HOST based configuration   unix:///var/run/docker.sock
+remote              my remote cluster                         ssh://someserver
@@ -1,3 +1,3 @@
-NAME                DESCRIPTION                               DOCKER ENDPOINT               KUBERNETES ENDPOINT                                 ORCHESTRATOR
-default *           Current DOCKER_HOST based configuration   unix:///var/run/docker.sock                                                       swarm
-test                                                          unix:///var/run/docker.sock   https://kubernetes.docker.internal:6443 (default)   swarm
+NAME                DESCRIPTION                               DOCKER ENDPOINT
+default *           Current DOCKER_HOST based configuration   unix:///var/run/docker.sock
+test                                                          unix:///var/run/docker.sock
@@ -1,3 +1,3 @@
-NAME                DESCRIPTION                               DOCKER ENDPOINT               KUBERNETES ENDPOINT            ORCHESTRATOR
-default *           Current DOCKER_HOST based configuration   unix:///var/run/docker.sock   https://someserver (zoinx)     swarm
-remote              my remote cluster                         ssh://someserver              https://someserver (default)   kubernetes
+NAME                DESCRIPTION                               DOCKER ENDPOINT
+default *           Current DOCKER_HOST based configuration   unix:///var/run/docker.sock
+remote              my remote cluster                         ssh://someserver
@@ -1,43 +1,26 @@
 package stack
 
 import (
-	"fmt"
 	"sort"
 	"strings"
 	"testing"
 
-	"github.com/docker/cli/internal/test/environment"
 	"gotest.tools/v3/assert"
 	"gotest.tools/v3/golden"
 	"gotest.tools/v3/icmd"
-	"gotest.tools/v3/skip"
 )
 
 func TestDeployWithNamedResources(t *testing.T) {
-	t.Run("Swarm", func(t *testing.T) {
-		testDeployWithNamedResources(t, "swarm")
-	})
-	t.Run("Kubernetes", func(t *testing.T) {
-		// FIXME(chris-crone): currently does not work with compose for kubernetes.
-		t.Skip("FIXME(chris-crone): currently does not work with compose for kubernetes.")
-		skip.If(t, !environment.KubernetesEnabled())
-
-		testDeployWithNamedResources(t, "kubernetes")
-	})
-}
-
-func testDeployWithNamedResources(t *testing.T, orchestrator string) {
-	stackname := fmt.Sprintf("test-stack-deploy-with-names-%s", orchestrator)
+	stackname := "test-stack-deploy-with-names"
 	composefile := golden.Path("stack-with-named-resources.yml")
 
 	result := icmd.RunCommand("docker", "stack", "deploy",
-		"-c", composefile, stackname, "--orchestrator", orchestrator)
-	defer icmd.RunCommand("docker", "stack", "rm",
-		"--orchestrator", orchestrator, stackname)
+		"-c", composefile, stackname)
	defer icmd.RunCommand("docker", "stack", "rm", stackname)
 
 	result.Assert(t, icmd.Success)
 	stdout := strings.Split(result.Stdout(), "\n")
-	expected := strings.Split(string(golden.Get(t, fmt.Sprintf("stack-deploy-with-names-%s.golden", orchestrator))), "\n")
+	expected := strings.Split(string(golden.Get(t, "stack-deploy-with-names.golden")), "\n")
 	sort.Strings(stdout)
 	sort.Strings(expected)
 	assert.DeepEqual(t, stdout, expected)
@@ -1,7 +1,6 @@
 package stack
 
 import (
-	"fmt"
 	"testing"
 
 	"gotest.tools/v3/golden"
@@ -9,16 +8,7 @@ import (
 )
 
 func TestStackDeployHelp(t *testing.T) {
-	t.Run("Swarm", func(t *testing.T) {
-		testStackDeployHelp(t, "swarm")
-	})
-	t.Run("Kubernetes", func(t *testing.T) {
-		testStackDeployHelp(t, "kubernetes")
-	})
-}
-
-func testStackDeployHelp(t *testing.T, orchestrator string) {
-	result := icmd.RunCommand("docker", "stack", "deploy", "--orchestrator", orchestrator, "--help")
+	result := icmd.RunCommand("docker", "stack", "deploy", "--help")
 	result.Assert(t, icmd.Success)
-	golden.Assert(t, result.Stdout(), fmt.Sprintf("stack-deploy-help-%s.golden", orchestrator))
+	golden.Assert(t, result.Stdout(), "stack-deploy-help.golden")
 }
@@ -1,7 +1,6 @@
 package stack
 
 import (
-	"fmt"
 	"strings"
 	"testing"
 
@@ -9,51 +8,37 @@ import (
 	"gotest.tools/v3/golden"
 	"gotest.tools/v3/icmd"
 	"gotest.tools/v3/poll"
-	"gotest.tools/v3/skip"
 )
 
 var pollSettings = environment.DefaultPollSettings
 
 func TestRemove(t *testing.T) {
-	t.Run("Swarm", func(t *testing.T) {
-		testRemove(t, "swarm")
-	})
-	t.Run("Kubernetes", func(t *testing.T) {
-		skip.If(t, !environment.KubernetesEnabled())
-
-		testRemove(t, "kubernetes")
-	})
-}
-
-func testRemove(t *testing.T, orchestrator string) {
-	stackname := "test-stack-remove-" + orchestrator
-	deployFullStack(t, orchestrator, stackname)
-	defer cleanupFullStack(t, orchestrator, stackname)
-	result := icmd.RunCommand("docker", "stack", "rm",
-		stackname, "--orchestrator", orchestrator)
+	stackname := "test-stack-remove"
+	deployFullStack(t, stackname)
+	defer cleanupFullStack(t, stackname)
+	result := icmd.RunCommand("docker", "stack", "rm", stackname)
 	result.Assert(t, icmd.Expected{Err: icmd.None})
-	golden.Assert(t, result.Stdout(),
-		fmt.Sprintf("stack-remove-%s-success.golden", orchestrator))
+	golden.Assert(t, result.Stdout(), "stack-remove-success.golden")
 }
 
-func deployFullStack(t *testing.T, orchestrator, stackname string) {
+func deployFullStack(t *testing.T, stackname string) {
 	// TODO: this stack should have full options not minimal options
 	result := icmd.RunCommand("docker", "stack", "deploy",
-		"--compose-file=./testdata/full-stack.yml", stackname, "--orchestrator", orchestrator)
+		"--compose-file=./testdata/full-stack.yml", stackname)
 	result.Assert(t, icmd.Success)
 
-	poll.WaitOn(t, taskCount(orchestrator, stackname, 2), pollSettings)
+	poll.WaitOn(t, taskCount(stackname, 2), pollSettings)
 }
 
-func cleanupFullStack(t *testing.T, orchestrator, stackname string) {
+func cleanupFullStack(t *testing.T, stackname string) {
 	// FIXME(vdemeester) we shouldn't have to do that. it is hiding a race on docker stack rm
-	poll.WaitOn(t, stackRm(orchestrator, stackname), pollSettings)
-	poll.WaitOn(t, taskCount(orchestrator, stackname, 0), pollSettings)
+	poll.WaitOn(t, stackRm(stackname), pollSettings)
+	poll.WaitOn(t, taskCount(stackname, 0), pollSettings)
 }
 
-func stackRm(orchestrator, stackname string) func(t poll.LogT) poll.Result {
+func stackRm(stackname string) func(t poll.LogT) poll.Result {
 	return func(poll.LogT) poll.Result {
-		result := icmd.RunCommand("docker", "stack", "rm", stackname, "--orchestrator", orchestrator)
+		result := icmd.RunCommand("docker", "stack", "rm", stackname)
 		if result.Error != nil {
 			if strings.Contains(result.Stderr(), "not found") {
 				return poll.Success()
@@ -64,14 +49,9 @@ func stackRm(orchestrator, stackname string) func(t poll.LogT) poll.Result {
 		}
 	}
 
-func taskCount(orchestrator, stackname string, expected int) func(t poll.LogT) poll.Result {
+func taskCount(stackname string, expected int) func(t poll.LogT) poll.Result {
 	return func(poll.LogT) poll.Result {
-		args := []string{"stack", "ps", stackname, "--orchestrator", orchestrator}
-		// FIXME(chris-crone): remove when we support filtering by desired-state on kubernetes
-		if orchestrator == "swarm" {
-			args = append(args, "-f=desired-state=running")
-		}
-		result := icmd.RunCommand("docker", args...)
+		result := icmd.RunCommand("docker", "stack", "ps", stackname, "-f=desired-state=running")
 		count := lines(result.Stdout()) - 1
 		if count == expected {
 			return poll.Success()
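taskCount and stackRm follow the gotest.tools poll contract: return poll.Success() to stop, or poll.Continue(...) to keep retrying until the poll settings time out. A minimal custom check in the same style (illustrative):

```go
import (
	"os"

	"gotest.tools/v3/poll"
)

// fileExists polls until path appears on disk.
func fileExists(path string) func(poll.LogT) poll.Result {
	return func(poll.LogT) poll.Result {
		if _, err := os.Stat(path); err == nil {
			return poll.Success()
		}
		return poll.Continue("waiting for %s to appear", path)
	}
}

// Used as: poll.WaitOn(t, fileExists("/tmp/ready"), pollSettings)
```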
@@ -1,14 +0,0 @@
-
-Usage:	docker stack deploy [OPTIONS] STACK
-
-Deploy a new stack or update an existing stack
-
-Aliases:
-  deploy, up
-
-Options:
-  -c, --compose-file strings   Path to a Compose file, or "-" to read
-                               from stdin
-      --kubeconfig string      Kubernetes config file
-      --namespace string       Kubernetes namespace to use
-      --orchestrator string    Orchestrator to use (swarm|kubernetes|all)
@@ -9,7 +9,6 @@ Aliases:
 Options:
   -c, --compose-file strings   Path to a Compose file, or "-" to read
                                from stdin
-      --orchestrator string    Orchestrator to use (swarm|kubernetes|all)
       --prune                  Prune services that are no longer referenced
       --resolve-image string   Query the registry to resolve image digest
                                and supported platforms
@@ -0,0 +1,7 @@
+Creating network test-stack-deploy-with-names_network2
+Creating network named-network
+Creating secret named-secret
+Creating secret test-stack-deploy-with-names_secret2
+Creating config test-stack-deploy-with-names_config2
+Creating config named-config
+Creating service test-stack-deploy-with-names_web
@@ -1,7 +0,0 @@
-Creating network test-stack-deploy-with-names-swarm_network2
-Creating network named-network
-Creating secret named-secret
-Creating secret test-stack-deploy-with-names-swarm_secret2
-Creating config test-stack-deploy-with-names-swarm_config2
-Creating config named-config
-Creating service test-stack-deploy-with-names-swarm_web
@@ -1 +0,0 @@
-Removing stack: test-stack-remove-kubernetes
@@ -0,0 +1,3 @@
+Removing service test-stack-remove_one
+Removing service test-stack-remove_two
+Removing network test-stack-remove_default
@@ -1,3 +0,0 @@
-Removing service test-stack-remove-swarm_one
-Removing service test-stack-remove-swarm_two
-Removing network test-stack-remove-swarm_default
@@ -55,7 +55,8 @@ func NewFakeCli(client client.APIClient, opts ...func(*FakeCli)) *FakeCli {
 		in: streams.NewIn(ioutil.NopCloser(strings.NewReader(""))),
 		// Use an empty string for filename so that tests don't create configfiles
 		// Set cli.ConfigFile().Filename to a tempfile to support Save.
-		configfile: configfile.New(""),
+		configfile:     configfile.New(""),
+		currentContext: command.DefaultContextName,
 	}
 	for _, opt := range opts {
 		opt(c)
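NewFakeCli takes functional options: each opts entry is a plain func(*FakeCli) applied after the defaults, which is how tests override fields such as currentContext. A representative option, written as if in the same package (the name is hypothetical; the package may expose its own equivalents):

```go
// WithCurrentContext overrides the default context name on the fake CLI.
func WithCurrentContext(name string) func(*FakeCli) {
	return func(c *FakeCli) {
		c.currentContext = name
	}
}

// usage: cli := NewFakeCli(apiClient, WithCurrentContext("my-context"))
```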
@@ -214,24 +215,3 @@ func (c *FakeCli) ContentTrustEnabled() bool {
 func EnableContentTrust(c *FakeCli) {
 	c.contentTrust = true
 }
-
-// StackOrchestrator return the selected stack orchestrator
-func (c *FakeCli) StackOrchestrator(flagValue string) (command.Orchestrator, error) {
-	configOrchestrator := ""
-	if c.configfile != nil {
-		configOrchestrator = c.configfile.StackOrchestrator
-	}
-	ctxOrchestrator := ""
-	if c.currentContext != "" && c.contextStore != nil {
-		meta, err := c.contextStore.GetMetadata(c.currentContext)
-		if err != nil {
-			return "", err
-		}
-		context, err := command.GetDockerContext(meta)
-		if err != nil {
-			return "", err
-		}
-		ctxOrchestrator = string(context.StackOrchestrator)
-	}
-	return command.GetStackOrchestrator(flagValue, ctxOrchestrator, configOrchestrator, c.err)
-}
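The method removed here encoded a simple precedence chain, explicit flag value first, then the current context's metadata, then the config file, before delegating to command.GetStackOrchestrator. Reduced to its essence (a standalone sketch, not the CLI's actual implementation; swarm was the historical default):

```go
// resolveOrchestrator shows the precedence the deleted method implemented:
// flag > context metadata > config file > default.
func resolveOrchestrator(flagValue, contextValue, configValue string) string {
	for _, v := range []string{flagValue, contextValue, configValue} {
		if v != "" {
			return v
		}
	}
	return "swarm" // historical default orchestrator
}
```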
@@ -1,4 +0,0 @@
-# Kubernetes client libraries
-
-This package (and sub-packages) holds the client libraries for the kubernetes integration in
-the docker platform. Most of the code is currently generated.
@@ -1,60 +0,0 @@
-package kubernetes
-
-import (
-	apiv1alpha3 "github.com/docker/compose-on-kubernetes/api/compose/v1alpha3"
-	apiv1beta1 "github.com/docker/compose-on-kubernetes/api/compose/v1beta1"
-	apiv1beta2 "github.com/docker/compose-on-kubernetes/api/compose/v1beta2"
-	"github.com/pkg/errors"
-	apimachinerymetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime/schema"
-	"k8s.io/client-go/discovery"
-)
-
-// StackVersion represents the detected Compose Component on Kubernetes side.
-type StackVersion string
-
-const (
-	// StackAPIV1Beta1 is returned if it's the most recent version available.
-	StackAPIV1Beta1 = StackVersion("v1beta1")
-	// StackAPIV1Beta2 is returned if it's the most recent version available.
-	StackAPIV1Beta2 = StackVersion("v1beta2")
-	// StackAPIV1Alpha3 is returned if it's the most recent version available, and experimental flag is on.
-	StackAPIV1Alpha3 = StackVersion("v1alpha3")
-)
-
-// GetStackAPIVersion returns the most appropriate stack API version installed.
-func GetStackAPIVersion(serverGroups discovery.ServerGroupsInterface) (StackVersion, error) {
-	groups, err := serverGroups.ServerGroups()
-	if err != nil {
-		return "", err
-	}
-
-	return getAPIVersion(groups)
-}
-
-func getAPIVersion(groups *metav1.APIGroupList) (StackVersion, error) {
-	switch {
-	case findVersion(apiv1alpha3.SchemeGroupVersion, groups.Groups):
-		return StackAPIV1Alpha3, nil
-	case findVersion(apiv1beta2.SchemeGroupVersion, groups.Groups):
-		return StackAPIV1Beta2, nil
-	case findVersion(apiv1beta1.SchemeGroupVersion, groups.Groups):
-		return StackAPIV1Beta1, nil
-	default:
-		return "", errors.New("failed to find a Stack API version")
-	}
-}
-
-func findVersion(stackAPI schema.GroupVersion, groups []apimachinerymetav1.APIGroup) bool {
-	for _, group := range groups {
-		if group.Name == stackAPI.Group {
-			for _, version := range group.Versions {
-				if version.Version == stackAPI.Version {
-					return true
-				}
-			}
-		}
-	}
-	return false
-}
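GetStackAPIVersion only needs discovery.ServerGroupsInterface, which a standard client-go clientset satisfies through Discovery(). A hypothetical call site, written as if in the same package and assuming a valid *rest.Config:

```go
import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// detectStackVersion wires a discovery client into GetStackAPIVersion.
func detectStackVersion(restCfg *rest.Config) (StackVersion, error) {
	clientset, err := kubernetes.NewForConfig(restCfg)
	if err != nil {
		return "", err
	}
	return GetStackAPIVersion(clientset.Discovery())
}
```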
@@ -1,52 +0,0 @@
-package kubernetes
-
-import (
-	"testing"
-
-	"gotest.tools/v3/assert"
-	is "gotest.tools/v3/assert/cmp"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-func TestGetStackAPIVersion(t *testing.T) {
-	var tests = []struct {
-		description   string
-		groups        *metav1.APIGroupList
-		err           bool
-		expectedStack StackVersion
-	}{
-		{"no stack api", makeGroups(), true, ""},
-		{"v1beta1", makeGroups(groupVersion{"compose.docker.com", []string{"v1beta1"}}), false, StackAPIV1Beta1},
-		{"v1beta2", makeGroups(groupVersion{"compose.docker.com", []string{"v1beta2"}}), false, StackAPIV1Beta2},
-		{"most recent has precedence", makeGroups(groupVersion{"compose.docker.com", []string{"v1beta1", "v1beta2"}}), false, StackAPIV1Beta2},
-		{"most recent has precedence", makeGroups(groupVersion{"compose.docker.com", []string{"v1beta1", "v1beta2", "v1alpha3"}}), false, StackAPIV1Alpha3},
-	}
-
-	for _, test := range tests {
-		version, err := getAPIVersion(test.groups)
-		if test.err {
-			assert.ErrorContains(t, err, "")
-		} else {
-			assert.NilError(t, err)
-		}
-		assert.Check(t, is.Equal(test.expectedStack, version))
-	}
-}
-
-type groupVersion struct {
-	name     string
-	versions []string
-}
-
-func makeGroups(versions ...groupVersion) *metav1.APIGroupList {
-	groups := make([]metav1.APIGroup, len(versions))
-	for i := range versions {
-		groups[i].Name = versions[i].name
-		for _, v := range versions[i].versions {
-			groups[i].Versions = append(groups[i].Versions, metav1.GroupVersionForDiscovery{Version: v})
-		}
-	}
-	return &metav1.APIGroupList{
-		Groups: groups,
-	}
-}
@@ -1,25 +0,0 @@
-package clientset
-
-import api "github.com/docker/compose-on-kubernetes/api/client/clientset"
-
-// Interface defines the methods a compose kube client should have
-// Deprecated: Use github.com/docker/compose-on-kubernetes/api/client/clientset.Interface instead
-type Interface = api.Interface
-
-// Clientset contains the clients for groups. Each group has exactly one
-// version included in a Clientset.
-// Deprecated: Use github.com/docker/compose-on-kubernetes/api/client/clientset.Clientset instead
-type Clientset = api.Clientset
-
-// NewForConfig creates a new Clientset for the given config.
-// Deprecated: Use github.com/docker/compose-on-kubernetes/api/client/clientset.NewForConfig instead
-var NewForConfig = api.NewForConfig
-
-// NewForConfigOrDie creates a new Clientset for the given config and
-// panics if there is an error in the config.
-// Deprecated: Use github.com/docker/compose-on-kubernetes/api/client/clientset.NewForConfigOrDie instead
-var NewForConfigOrDie = api.NewForConfigOrDie
-
-// New creates a new Clientset for the given RESTClient.
-// Deprecated: Use github.com/docker/compose-on-kubernetes/api/client/clientset.New instead
-var New = api.New