mirror of https://github.com/docker/cli.git
Fix PR comments
- More strict on orchestrator flag
- Make orchestrator flag more explicit as experimental
- Add experimentalCLI annotation on kubernetes flags
- Better kubeconfig error message
- Prefix service name with stackname in ps and services stack subcommands
- Fix yaml documentation
- Fix code coverage ignoring generated code

Signed-off-by: Silvin Lubecki <silvin.lubecki@docker.com>
This commit is contained in:
parent ad409767bf
commit f1b116179f
@@ -140,7 +140,6 @@ func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions) error {
    cli.clientInfo = ClientInfo{
        DefaultVersion:  cli.client.ClientVersion(),
        HasExperimental: hasExperimental,
-       HasKubernetes:   hasExperimental && orchestrator == OrchestratorKubernetes,
        Orchestrator:    orchestrator,
    }
    cli.initializeFromClient()
@@ -206,11 +205,15 @@ type ServerInfo struct {
// ClientInfo stores details about the supported features of the client
type ClientInfo struct {
    HasExperimental bool
-   HasKubernetes   bool
    DefaultVersion  string
    Orchestrator    Orchestrator
}

+// HasKubernetes checks if kubernetes orchestrator is enabled
+func (c ClientInfo) HasKubernetes() bool {
+   return c.HasExperimental && c.Orchestrator == OrchestratorKubernetes
+}
+
// NewDockerCli returns a DockerCli instance with IO output and error streams set by in, out and err.
func NewDockerCli(in io.ReadCloser, out, err io.Writer) *DockerCli {
    return &DockerCli{in: NewInStream(in), out: NewOutStream(out), err: err}
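The hunk above replaces the precomputed HasKubernetes field with a method, so the kubernetes check is always derived from the experimental flag and the selected orchestrator. Below is a self-contained sketch of the resulting behaviour; the types are copied from the hunk, while the main function is illustrative only and not part of the commit.

package main

import "fmt"

// Orchestrator and ClientInfo mirror the definitions in the diff above.
type Orchestrator string

const OrchestratorKubernetes = Orchestrator("kubernetes")

type ClientInfo struct {
	HasExperimental bool
	DefaultVersion  string
	Orchestrator    Orchestrator
}

// HasKubernetes is the new method: kubernetes is only reported when the CLI
// is experimental and the selected orchestrator is kubernetes.
func (c ClientInfo) HasKubernetes() bool {
	return c.HasExperimental && c.Orchestrator == OrchestratorKubernetes
}

func main() {
	info := ClientInfo{HasExperimental: true, Orchestrator: OrchestratorKubernetes}
	fmt.Println(info.HasKubernetes()) // true

	info.HasExperimental = false
	fmt.Println(info.HasKubernetes()) // false: the experimental gate wins
}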
@@ -171,7 +171,7 @@ func TestExperimentalCLI(t *testing.T) {
}

func TestOrchestratorSwitch(t *testing.T) {
-   defaultVersion := "v1.55"
+   defaultVersion := "v0.00"

    var testcases = []struct {
        doc string
@@ -268,7 +268,7 @@ func TestOrchestratorSwitch(t *testing.T) {
            }
            err := cli.Initialize(options)
            assert.NoError(t, err)
-           assert.Equal(t, testcase.expectedKubernetes, cli.ClientInfo().HasKubernetes)
+           assert.Equal(t, testcase.expectedKubernetes, cli.ClientInfo().HasKubernetes())
            assert.Equal(t, testcase.expectedOrchestrator, string(cli.ClientInfo().Orchestrator))
        })
    }
@@ -3,7 +3,6 @@ package command
import (
    "fmt"
    "os"
-   "strings"
)

// Orchestrator type acts as an enum describing supported orchestrators.
@@ -17,11 +16,11 @@ const (
    orchestratorUnset = Orchestrator("unset")

    defaultOrchestrator = OrchestratorSwarm
-   dockerOrchestrator  = "DOCKER_ORCHESTRATOR"
+   envVarDockerOrchestrator = "DOCKER_ORCHESTRATOR"
)

func normalize(flag string) Orchestrator {
-   switch strings.ToLower(flag) {
+   switch flag {
    case "kubernetes", "k8s":
        return OrchestratorKubernetes
    case "swarm", "swarmkit":
@@ -43,7 +42,7 @@ func GetOrchestrator(isExperimental bool, flagValue, value string) Orchestrator
        return o
    }
    // Check environment variable
-   env := os.Getenv(dockerOrchestrator)
+   env := os.Getenv(envVarDockerOrchestrator)
    if o := normalize(env); o != orchestratorUnset {
        return o
    }
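Only part of GetOrchestrator is visible in these hunks. The sketch below reproduces the resolution order they suggest: a non-experimental CLI always gets swarm, then the --orchestrator flag, the DOCKER_ORCHESTRATOR environment variable, and the config-file value are checked in that order, falling back to swarm. The exact gating and the parameter names are assumptions, not the commit's verbatim code.

package main

import (
	"fmt"
	"os"
)

type Orchestrator string

const (
	OrchestratorKubernetes = Orchestrator("kubernetes")
	OrchestratorSwarm      = Orchestrator("swarm")
	orchestratorUnset      = Orchestrator("unset")

	defaultOrchestrator      = OrchestratorSwarm
	envVarDockerOrchestrator = "DOCKER_ORCHESTRATOR"
)

// normalize matches the post-commit behaviour: exact, case-sensitive values only.
func normalize(flag string) Orchestrator {
	switch flag {
	case "kubernetes", "k8s":
		return OrchestratorKubernetes
	case "swarm", "swarmkit":
		return OrchestratorSwarm
	default:
		return orchestratorUnset
	}
}

// getOrchestrator sketches the precedence implied by the hunks above.
func getOrchestrator(isExperimental bool, flagValue, configValue string) Orchestrator {
	if !isExperimental {
		return defaultOrchestrator
	}
	if o := normalize(flagValue); o != orchestratorUnset {
		return o
	}
	if o := normalize(os.Getenv(envVarDockerOrchestrator)); o != orchestratorUnset {
		return o
	}
	if o := normalize(configValue); o != orchestratorUnset {
		return o
	}
	return defaultOrchestrator
}

func main() {
	os.Setenv(envVarDockerOrchestrator, "kubernetes")
	fmt.Println(getOrchestrator(true, "", ""))      // kubernetes, from the env var
	fmt.Println(getOrchestrator(true, "swarm", "")) // swarm, the flag overrides the env var
	fmt.Println(getOrchestrator(false, "", ""))     // swarm, non-experimental CLI
}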
@@ -25,8 +25,10 @@ func NewStackCommand(dockerCli command.Cli) *cobra.Command {
    flags := cmd.PersistentFlags()
    flags.String("namespace", "default", "Kubernetes namespace to use")
    flags.SetAnnotation("namespace", "kubernetes", nil)
+   flags.SetAnnotation("namespace", "experimentalCLI", nil)
    flags.String("kubeconfig", "", "Kubernetes config file")
    flags.SetAnnotation("kubeconfig", "kubernetes", nil)
+   flags.SetAnnotation("kubeconfig", "experimentalCLI", nil)
    return cmd
}

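The annotations added above are plain pflag metadata: flag hiding and the yaml docs generator (later hunks) only test whether the key is present. A minimal standalone sketch of that mechanism, not docker/cli code:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	flags := pflag.NewFlagSet("stack", pflag.ContinueOnError)
	flags.String("kubeconfig", "", "Kubernetes config file")

	// Annotations are string-slice metadata keyed by name; nil values are
	// fine when only the key's presence matters, as in the hunk above.
	flags.SetAnnotation("kubeconfig", "kubernetes", nil)
	flags.SetAnnotation("kubeconfig", "experimentalCLI", nil)

	// Consumers just check for the key.
	f := flags.Lookup("kubeconfig")
	_, isKube := f.Annotations["kubernetes"]
	_, isExpCLI := f.Annotations["experimentalCLI"]
	fmt.Println(isKube, isExpCLI) // true true
}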
@@ -19,7 +19,7 @@ func newDeployCommand(dockerCli command.Cli) *cobra.Command {
        Args: cli.ExactArgs(1),
        RunE: func(cmd *cobra.Command, args []string) error {
            opts.Namespace = args[0]
-           if dockerCli.ClientInfo().HasKubernetes {
+           if dockerCli.ClientInfo().HasKubernetes() {
                kli, err := kubernetes.WrapCli(dockerCli, cmd)
                if err != nil {
                    return err
@@ -1,6 +1,7 @@
package kubernetes

import (
+   "fmt"
    "os"
    "path/filepath"

@@ -49,7 +50,7 @@ func WrapCli(dockerCli command.Cli, cmd *cobra.Command) (*KubeCli, error) {

    config, err := clientcmd.BuildConfigFromFlags("", kubeConfig)
    if err != nil {
-       return nil, err
+       return nil, fmt.Errorf("Failed to load kubernetes configuration file '%s'", kubeConfig)
    }
    cli.kubeConfig = config

@@ -126,12 +126,16 @@ func replicasToServices(replicas *appsv1beta2.ReplicaSetList, services *apiv1.Se
        if !ok {
            return nil, nil, fmt.Errorf("could not find service '%s'", r.Labels[labels.ForServiceName])
        }
+       stack, ok := service.Labels[labels.ForStackName]
+       if ok {
+           stack += "_"
+       }
        uid := string(service.UID)
        s := swarm.Service{
            ID: uid,
            Spec: swarm.ServiceSpec{
                Annotations: swarm.Annotations{
-                   Name: service.Name,
+                   Name: stack + service.Name,
                },
                TaskTemplate: swarm.TaskSpec{
                    ContainerSpec: &swarm.ContainerSpec{
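The hunk above prefixes converted service names with their stack name, matching the swarm <stack>_<service> convention in `docker stack services`. A tiny sketch of the naming rule; the helper name is illustrative and not from the commit:

package main

import "fmt"

// serviceDisplayName mirrors the logic above: when the service carries a
// stack label, the stack name plus an underscore is prepended.
func serviceDisplayName(stackLabel, serviceName string) string {
	stack := ""
	if stackLabel != "" {
		stack = stackLabel + "_"
	}
	return stack + serviceName
}

func main() {
	fmt.Println(serviceDisplayName("voting", "vote")) // voting_vote
	fmt.Println(serviceDisplayName("", "vote"))       // vote (no stack label)
}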
@@ -20,7 +20,7 @@ func RunDeploy(dockerCli *KubeCli, opts options.Deploy) error {
        return errors.Errorf("Please specify a Compose file (with --compose-file).")
    }
    // Initialize clients
-   stackInterface, err := dockerCli.stacks()
+   stacks, err := dockerCli.stacks()
    if err != nil {
        return err
    }
@@ -28,12 +28,12 @@ func RunDeploy(dockerCli *KubeCli, opts options.Deploy) error {
    if err != nil {
        return err
    }
-   configMapInterface := composeClient.ConfigMaps()
-   secretInterface := composeClient.Secrets()
-   serviceInterface := composeClient.Services()
-   podInterface := composeClient.Pods()
+   configMaps := composeClient.ConfigMaps()
+   secrets := composeClient.Secrets()
+   services := composeClient.Services()
+   pods := composeClient.Pods()
    watcher := DeployWatcher{
-       Pods: podInterface,
+       Pods: pods,
    }

    // Parse the compose file
@@ -43,28 +43,28 @@ func RunDeploy(dockerCli *KubeCli, opts options.Deploy) error {
    }

    // FIXME(vdemeester) handle warnings server-side
-   if err = IsColliding(serviceInterface, stack, cfg); err != nil {
+   if err = IsColliding(services, stack, cfg); err != nil {
        return err
    }

-   if err = createFileBasedConfigMaps(stack.Name, cfg.Configs, configMapInterface); err != nil {
+   if err = createFileBasedConfigMaps(stack.Name, cfg.Configs, configMaps); err != nil {
        return err
    }

-   if err = createFileBasedSecrets(stack.Name, cfg.Secrets, secretInterface); err != nil {
+   if err = createFileBasedSecrets(stack.Name, cfg.Secrets, secrets); err != nil {
        return err
    }

-   if in, err := stackInterface.Get(stack.Name, metav1.GetOptions{}); err == nil {
+   if in, err := stacks.Get(stack.Name, metav1.GetOptions{}); err == nil {
        in.Spec = stack.Spec

-       if _, err = stackInterface.Update(in); err != nil {
+       if _, err = stacks.Update(in); err != nil {
            return err
        }

        fmt.Printf("Stack %s was updated\n", stack.Name)
    } else {
-       if _, err = stackInterface.Create(stack); err != nil {
+       if _, err = stacks.Create(stack); err != nil {
            return err
        }

@@ -76,7 +76,7 @@ func RunDeploy(dockerCli *KubeCli, opts options.Deploy) error {
    <-watcher.Watch(stack, serviceNames(cfg))

    fmt.Fprintf(cmdOut, "Stack %s is stable and running\n\n", stack.Name)
-   // fmt.Fprintf(cmdOut, "Read the logs with:\n $ %s stack logs %s\n", filepath.Base(os.Args[0]), stack.Name)
+   // TODO: fmt.Fprintf(cmdOut, "Read the logs with:\n $ %s stack logs %s\n", filepath.Base(os.Args[0]), stack.Name)

    return nil
}
@@ -55,12 +55,12 @@ func RunPS(dockerCli *KubeCli, options options.PS) error {
    for i, pod := range pods {
        tasks[i] = podToTask(pod)
    }
-   return print(dockerCli, tasks, pods, nodeResolver, !options.NoTrunc, options.Quiet, format)
+   return print(dockerCli, namespace, tasks, pods, nodeResolver, !options.NoTrunc, options.Quiet, format)
}

type idResolver func(name string) (string, error)

-func print(dockerCli command.Cli, tasks []swarm.Task, pods []apiv1.Pod, nodeResolver idResolver, trunc, quiet bool, format string) error {
+func print(dockerCli command.Cli, namespace string, tasks []swarm.Task, pods []apiv1.Pod, nodeResolver idResolver, trunc, quiet bool, format string) error {
    sort.Stable(tasksBySlot(tasks))

    names := map[string]string{}
@@ -78,7 +78,7 @@ func print(dockerCli command.Cli, tasks []swarm.Task, pods []apiv1.Pod, nodeReso
            return err
        }

-       names[task.ID] = pods[i].Name
+       names[task.ID] = fmt.Sprintf("%s_%s", namespace, pods[i].Name)
        nodes[task.ID] = nodeValue
    }

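Same naming change for `docker stack ps`: task names are now reported as <namespace>_<pod name> rather than the bare pod name. A trivial example with made-up values:

package main

import "fmt"

func main() {
	// Mirrors the hunk above; namespace and pod name are illustrative only.
	namespace, podName := "voting", "vote-7f8b5c9d4-abcde"
	name := fmt.Sprintf("%s_%s", namespace, podName)
	fmt.Println(name) // voting_vote-7f8b5c9d4-abcde
}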
@@ -18,7 +18,7 @@ func newListCommand(dockerCli command.Cli) *cobra.Command {
        Short: "List stacks",
        Args:  cli.NoArgs,
        RunE: func(cmd *cobra.Command, args []string) error {
-           if dockerCli.ClientInfo().HasKubernetes {
+           if dockerCli.ClientInfo().HasKubernetes() {
                kli, err := kubernetes.WrapCli(dockerCli, cmd)
                if err != nil {
                    return err
@@ -19,7 +19,7 @@ func newPsCommand(dockerCli command.Cli) *cobra.Command {
        Args: cli.ExactArgs(1),
        RunE: func(cmd *cobra.Command, args []string) error {
            opts.Namespace = args[0]
-           if dockerCli.ClientInfo().HasKubernetes {
+           if dockerCli.ClientInfo().HasKubernetes() {
                kli, err := kubernetes.WrapCli(dockerCli, cmd)
                if err != nil {
                    return err
@@ -19,7 +19,7 @@ func newRemoveCommand(dockerCli command.Cli) *cobra.Command {
        Args: cli.RequiresMinArgs(1),
        RunE: func(cmd *cobra.Command, args []string) error {
            opts.Namespaces = args
-           if dockerCli.ClientInfo().HasKubernetes {
+           if dockerCli.ClientInfo().HasKubernetes() {
                kli, err := kubernetes.WrapCli(dockerCli, cmd)
                if err != nil {
                    return err
@@ -19,7 +19,7 @@ func newServicesCommand(dockerCli command.Cli) *cobra.Command {
        Args: cli.ExactArgs(1),
        RunE: func(cmd *cobra.Command, args []string) error {
            opts.Namespace = args[0]
-           if dockerCli.ClientInfo().HasKubernetes {
+           if dockerCli.ClientInfo().HasKubernetes() {
                kli, err := kubernetes.WrapCli(dockerCli, cmd)
                if err != nil {
                    return err
@@ -54,7 +54,8 @@ func (commonOpts *CommonOptions) InstallFlags(flags *pflag.FlagSet) {
    flags.StringVarP(&commonOpts.LogLevel, "log-level", "l", "info", `Set the logging level ("debug"|"info"|"warn"|"error"|"fatal")`)
    flags.BoolVar(&commonOpts.TLS, "tls", false, "Use TLS; implied by --tlsverify")
    flags.BoolVar(&commonOpts.TLSVerify, FlagTLSVerify, dockerTLSVerify, "Use TLS and verify the remote")
-   flags.StringVar(&commonOpts.Orchestrator, "orchestrator", "", "Which orchestrator to use with the docker cli (swarm|kubernetes)")
+   flags.StringVar(&commonOpts.Orchestrator, "orchestrator", "", "Which orchestrator to use with the docker cli (swarm|kubernetes) (default swarm) (experimental)")
+   flags.SetAnnotation("orchestrator", "experimentalCLI", nil)

    // TODO use flag flags.String("identity"}, "i", "", "Path to libtrust key file")

@@ -220,7 +220,7 @@ func hideUnsupportedFeatures(cmd *cobra.Command, details versionDetails) {
    osType := details.ServerInfo().OSType
    hasExperimental := details.ServerInfo().HasExperimental
    hasExperimentalCLI := details.ClientInfo().HasExperimental
-   hasKubernetes := details.ClientInfo().HasKubernetes
+   hasKubernetes := details.ClientInfo().HasKubernetes()

    cmd.Flags().VisitAll(func(f *pflag.Flag) {
        hideFeatureFlag(f, hasExperimental, "experimental")
@@ -261,7 +261,7 @@ func areFlagsSupported(cmd *cobra.Command, details versionDetails) error {
    clientVersion := details.Client().ClientVersion()
    osType := details.ServerInfo().OSType
    hasExperimental := details.ServerInfo().HasExperimental
-   hasKubernetes := details.ClientInfo().HasKubernetes
+   hasKubernetes := details.ClientInfo().HasKubernetes()
    hasExperimentalCLI := details.ClientInfo().HasExperimental

    errs := []string{}
@@ -301,7 +301,7 @@ func areSubcommandsSupported(cmd *cobra.Command, details versionDetails) error {
    clientVersion := details.Client().ClientVersion()
    hasExperimental := details.ServerInfo().HasExperimental
    hasExperimentalCLI := details.ClientInfo().HasExperimental
-   hasKubernetes := details.ClientInfo().HasKubernetes
+   hasKubernetes := details.ClientInfo().HasKubernetes()

    // Check recursively so that, e.g., `docker stack ls` returns the same output as `docker stack`
    for curr := cmd; curr != nil; curr = curr.Parent() {
@@ -17,3 +17,6 @@ ignore:
  - "**/internal/test"
  - "vendor/*"
  - "cli/compose/schema/bindata.go"
+ - "cli/command/stack/kubernetes/api/openapi"
+ - "cli/command/stack/kubernetes/api/client"
+ - ".*generated.*"
@@ -23,6 +23,9 @@ type cmdOption struct {
    Deprecated      bool
    MinAPIVersion   string `yaml:"min_api_version,omitempty"`
    Experimental    bool
+   ExperimentalCLI bool
+   Kubernetes      bool
+   Swarm           bool
}

type cmdDoc struct {
@@ -43,6 +46,9 @@ type cmdDoc struct {
    Deprecated       bool
    MinAPIVersion    string `yaml:"min_api_version,omitempty"`
    Experimental     bool
+   ExperimentalCLI  bool
+   Kubernetes       bool
+   Swarm            bool
}

// GenYamlTree creates yaml structured ref files
@@ -110,6 +116,15 @@ func GenYamlCustom(cmd *cobra.Command, w io.Writer) error {
        if _, ok := curr.Annotations["experimental"]; ok && !cliDoc.Experimental {
            cliDoc.Experimental = true
        }
+       if _, ok := curr.Annotations["experimentalCLI"]; ok && !cliDoc.ExperimentalCLI {
+           cliDoc.ExperimentalCLI = true
+       }
+       if _, ok := curr.Annotations["kubernetes"]; ok && !cliDoc.Kubernetes {
+           cliDoc.Kubernetes = true
+       }
+       if _, ok := curr.Annotations["swarm"]; ok && !cliDoc.Swarm {
+           cliDoc.Kubernetes = true
+       }
    }

    flags := cmd.NonInheritedFlags()
@@ -186,6 +201,15 @@ func genFlagResult(flags *pflag.FlagSet) []cmdOption {
        if v, ok := flag.Annotations["version"]; ok {
            opt.MinAPIVersion = v[0]
        }
+       if _, ok := flag.Annotations["experimentalCLI"]; ok {
+           opt.ExperimentalCLI = true
+       }
+       if _, ok := flag.Annotations["kubernetes"]; ok {
+           opt.Kubernetes = true
+       }
+       if _, ok := flag.Annotations["swarm"]; ok {
+           opt.Kubernetes = true
+       }

        result = append(result, opt)
    })
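With the new struct fields and annotation checks above, a flag annotated with experimentalCLI or kubernetes surfaces as a boolean in the generated YAML reference. A self-contained sketch, assuming the gopkg.in/yaml.v2 marshaller (un-tagged exported fields are emitted as lowercased keys); the struct here only mirrors the cmdOption fields touched by this commit:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// option mirrors the relevant cmdOption fields from the diff above.
type option struct {
	Option          string
	MinAPIVersion   string `yaml:"min_api_version,omitempty"`
	ExperimentalCLI bool
	Kubernetes      bool
	Swarm           bool
}

func main() {
	out, err := yaml.Marshal(option{Option: "kubeconfig", ExperimentalCLI: true, Kubernetes: true})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// option: kubeconfig
	// experimentalcli: true
	// kubernetes: true
	// swarm: false
}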
@@ -8,8 +8,6 @@ services:
    image: 'docker:${TEST_ENGINE_VERSION:-edge-dind}'
    privileged: true
    command: ['--insecure-registry=registry:5000']
    depends_on:
      - registry

  notary-server:
    image: 'notary:server-0.4.2'
@@ -2,7 +2,6 @@ package stack

import (
    "fmt"
    "os"
    "strings"
    "testing"

@@ -21,9 +20,7 @@ func TestRemove(t *testing.T) {
    deployFullStack(t, stackname)
    defer cleanupFullStack(t, stackname)

-   result := icmd.RunCmd(shell(t, "docker version"))
-   fmt.Println(result.Stdout(), os.Getenv("DOCKER_HOST"), os.Getenv("TEST_DOCKER_HOST"))
-   result = icmd.RunCmd(shell(t, "docker stack rm %s", stackname))
+   result := icmd.RunCmd(shell(t, "docker stack rm %s", stackname))

    result.Assert(t, icmd.Expected{Err: icmd.None})
    golden.Assert(t, result.Stdout(), "stack-remove-success.golden")
@@ -1,5 +1,4 @@
# Kubernetes client libraries

This package (and sub-packages) holds the client libraries for the kubernetes integration in
-the docker platform. Most of the code is currently generated but will evolved quickly in
-the next months.
+the docker platform. Most of the code is currently generated.