mirror of https://github.com/docker/cli.git
Merge pull request #421 from thaJeztah/refactor-docker-info
Refactor/cleanup of docker info
commit e77dc2232e
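In short, the printf-style helper fprintfIfNotEmpty is replaced by a println-style fprintlnNonEmpty, most fmt.Fprintf calls become fmt.Fprintln, and the Swarm block of prettyPrintInfo is extracted into a new printSwarmInfo function. Below is a minimal standalone sketch of the new helper's behaviour; it writes to os.Stdout with made-up example values rather than the CLI's real output stream, so treat it as illustration only, not the actual command code.

package main

import (
    "fmt"
    "io"
    "os"
)

// fprintlnNonEmpty mirrors the helper introduced in this diff: it prints
// "label value" and skips the line entirely when value is empty.
func fprintlnNonEmpty(w io.Writer, label, value string) {
    if value != "" {
        fmt.Fprintln(w, label, value)
    }
}

func main() {
    fprintlnNonEmpty(os.Stdout, "Server Version:", "17.06.0-ce") // printed (example value)
    fprintlnNonEmpty(os.Stdout, "Cluster Store:", "")            // skipped: empty value
}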
@@ -55,109 +55,50 @@ func runInfo(dockerCli *command.DockerCli, opts *infoOptions) error {

 // nolint: gocyclo
 func prettyPrintInfo(dockerCli command.Cli, info types.Info) error {
-    fmt.Fprintf(dockerCli.Out(), "Containers: %d\n", info.Containers)
-    fmt.Fprintf(dockerCli.Out(), " Running: %d\n", info.ContainersRunning)
-    fmt.Fprintf(dockerCli.Out(), " Paused: %d\n", info.ContainersPaused)
-    fmt.Fprintf(dockerCli.Out(), " Stopped: %d\n", info.ContainersStopped)
-    fmt.Fprintf(dockerCli.Out(), "Images: %d\n", info.Images)
-    fprintfIfNotEmpty(dockerCli.Out(), "Server Version: %s\n", info.ServerVersion)
-    fprintfIfNotEmpty(dockerCli.Out(), "Storage Driver: %s\n", info.Driver)
+    fmt.Fprintln(dockerCli.Out(), "Containers:", info.Containers)
+    fmt.Fprintln(dockerCli.Out(), " Running:", info.ContainersRunning)
+    fmt.Fprintln(dockerCli.Out(), " Paused:", info.ContainersPaused)
+    fmt.Fprintln(dockerCli.Out(), " Stopped:", info.ContainersStopped)
+    fmt.Fprintln(dockerCli.Out(), "Images:", info.Images)
+    fprintlnNonEmpty(dockerCli.Out(), "Server Version:", info.ServerVersion)
+    fprintlnNonEmpty(dockerCli.Out(), "Storage Driver:", info.Driver)
     if info.DriverStatus != nil {
         for _, pair := range info.DriverStatus {
             fmt.Fprintf(dockerCli.Out(), " %s: %s\n", pair[0], pair[1])
         }

     }
     if info.SystemStatus != nil {
         for _, pair := range info.SystemStatus {
             fmt.Fprintf(dockerCli.Out(), "%s: %s\n", pair[0], pair[1])
         }
     }
-    fprintfIfNotEmpty(dockerCli.Out(), "Logging Driver: %s\n", info.LoggingDriver)
-    fprintfIfNotEmpty(dockerCli.Out(), "Cgroup Driver: %s\n", info.CgroupDriver)
+    fprintlnNonEmpty(dockerCli.Out(), "Logging Driver:", info.LoggingDriver)
+    fprintlnNonEmpty(dockerCli.Out(), "Cgroup Driver:", info.CgroupDriver)

-    fmt.Fprintf(dockerCli.Out(), "Plugins:\n")
-    fmt.Fprintf(dockerCli.Out(), " Volume:")
-    fmt.Fprintf(dockerCli.Out(), " %s", strings.Join(info.Plugins.Volume, " "))
-    fmt.Fprintf(dockerCli.Out(), "\n")
-    fmt.Fprintf(dockerCli.Out(), " Network:")
-    fmt.Fprintf(dockerCli.Out(), " %s", strings.Join(info.Plugins.Network, " "))
-    fmt.Fprintf(dockerCli.Out(), "\n")
+    fmt.Fprintln(dockerCli.Out(), "Plugins:")
+    fmt.Fprintln(dockerCli.Out(), " Volume:", strings.Join(info.Plugins.Volume, " "))
+    fmt.Fprintln(dockerCli.Out(), " Network:", strings.Join(info.Plugins.Network, " "))

     if len(info.Plugins.Authorization) != 0 {
-        fmt.Fprintf(dockerCli.Out(), " Authorization:")
-        fmt.Fprintf(dockerCli.Out(), " %s", strings.Join(info.Plugins.Authorization, " "))
-        fmt.Fprintf(dockerCli.Out(), "\n")
+        fmt.Fprintln(dockerCli.Out(), " Authorization:", strings.Join(info.Plugins.Authorization, " "))
     }

-    fmt.Fprintf(dockerCli.Out(), " Log:")
-    fmt.Fprintf(dockerCli.Out(), " %s", strings.Join(info.Plugins.Log, " "))
-    fmt.Fprintf(dockerCli.Out(), "\n")
+    fmt.Fprintln(dockerCli.Out(), " Log:", strings.Join(info.Plugins.Log, " "))

-    fmt.Fprintf(dockerCli.Out(), "Swarm: %v\n", info.Swarm.LocalNodeState)
+    fmt.Fprintln(dockerCli.Out(), "Swarm:", info.Swarm.LocalNodeState)
-    if info.Swarm.LocalNodeState != swarm.LocalNodeStateInactive && info.Swarm.LocalNodeState != swarm.LocalNodeStateLocked {
-        fmt.Fprintf(dockerCli.Out(), " NodeID: %s\n", info.Swarm.NodeID)
-        if info.Swarm.Error != "" {
-            fmt.Fprintf(dockerCli.Out(), " Error: %v\n", info.Swarm.Error)
-        }
-        fmt.Fprintf(dockerCli.Out(), " Is Manager: %v\n", info.Swarm.ControlAvailable)
-        if info.Swarm.Cluster != nil && info.Swarm.ControlAvailable && info.Swarm.Error == "" && info.Swarm.LocalNodeState != swarm.LocalNodeStateError {
-            fmt.Fprintf(dockerCli.Out(), " ClusterID: %s\n", info.Swarm.Cluster.ID)
-            fmt.Fprintf(dockerCli.Out(), " Managers: %d\n", info.Swarm.Managers)
-            fmt.Fprintf(dockerCli.Out(), " Nodes: %d\n", info.Swarm.Nodes)
-            fmt.Fprintf(dockerCli.Out(), " Orchestration:\n")
-            taskHistoryRetentionLimit := int64(0)
-            if info.Swarm.Cluster.Spec.Orchestration.TaskHistoryRetentionLimit != nil {
-                taskHistoryRetentionLimit = *info.Swarm.Cluster.Spec.Orchestration.TaskHistoryRetentionLimit
-            }
-            fmt.Fprintf(dockerCli.Out(), " Task History Retention Limit: %d\n", taskHistoryRetentionLimit)
-            fmt.Fprintf(dockerCli.Out(), " Raft:\n")
-            fmt.Fprintf(dockerCli.Out(), " Snapshot Interval: %d\n", info.Swarm.Cluster.Spec.Raft.SnapshotInterval)
-            if info.Swarm.Cluster.Spec.Raft.KeepOldSnapshots != nil {
-                fmt.Fprintf(dockerCli.Out(), " Number of Old Snapshots to Retain: %d\n", *info.Swarm.Cluster.Spec.Raft.KeepOldSnapshots)
-            }
-            fmt.Fprintf(dockerCli.Out(), " Heartbeat Tick: %d\n", info.Swarm.Cluster.Spec.Raft.HeartbeatTick)
-            fmt.Fprintf(dockerCli.Out(), " Election Tick: %d\n", info.Swarm.Cluster.Spec.Raft.ElectionTick)
-            fmt.Fprintf(dockerCli.Out(), " Dispatcher:\n")
-            fmt.Fprintf(dockerCli.Out(), " Heartbeat Period: %s\n", units.HumanDuration(info.Swarm.Cluster.Spec.Dispatcher.HeartbeatPeriod))
-            fmt.Fprintf(dockerCli.Out(), " CA Configuration:\n")
-            fmt.Fprintf(dockerCli.Out(), " Expiry Duration: %s\n", units.HumanDuration(info.Swarm.Cluster.Spec.CAConfig.NodeCertExpiry))
-            fmt.Fprintf(dockerCli.Out(), " Force Rotate: %d\n", info.Swarm.Cluster.Spec.CAConfig.ForceRotate)
-            fprintfIfNotEmpty(dockerCli.Out(), " Signing CA Certificate: \n%s\n\n", strings.TrimSpace(info.Swarm.Cluster.Spec.CAConfig.SigningCACert))
-            if len(info.Swarm.Cluster.Spec.CAConfig.ExternalCAs) > 0 {
-                fmt.Fprintf(dockerCli.Out(), " External CAs:\n")
-                for _, entry := range info.Swarm.Cluster.Spec.CAConfig.ExternalCAs {
-                    fmt.Fprintf(dockerCli.Out(), " %s: %s\n", entry.Protocol, entry.URL)
-                }
-            }
-            fmt.Fprintf(dockerCli.Out(), " Autolock Managers: %v\n", info.Swarm.Cluster.Spec.EncryptionConfig.AutoLockManagers)
-            fmt.Fprintf(dockerCli.Out(), " Root Rotation In Progress: %v\n", info.Swarm.Cluster.RootRotationInProgress)
-        }
-        fmt.Fprintf(dockerCli.Out(), " Node Address: %s\n", info.Swarm.NodeAddr)
-        managers := []string{}
-        for _, entry := range info.Swarm.RemoteManagers {
-            managers = append(managers, entry.Addr)
-        }
-        if len(managers) > 0 {
-            sort.Strings(managers)
-            fmt.Fprintf(dockerCli.Out(), " Manager Addresses:\n")
-            for _, entry := range managers {
-                fmt.Fprintf(dockerCli.Out(), " %s\n", entry)
-            }
-        }
-    }
+    printSwarmInfo(dockerCli, info)

     if len(info.Runtimes) > 0 {
-        fmt.Fprintf(dockerCli.Out(), "Runtimes:")
+        fmt.Fprint(dockerCli.Out(), "Runtimes:")
         for name := range info.Runtimes {
             fmt.Fprintf(dockerCli.Out(), " %s", name)
         }
         fmt.Fprint(dockerCli.Out(), "\n")
-        fmt.Fprintf(dockerCli.Out(), "Default Runtime: %s\n", info.DefaultRuntime)
+        fmt.Fprintln(dockerCli.Out(), "Default Runtime:", info.DefaultRuntime)
     }

     if info.OSType == "linux" {
-        fmt.Fprintf(dockerCli.Out(), "Init Binary: %v\n", info.InitBinary)
+        fmt.Fprintln(dockerCli.Out(), "Init Binary:", info.InitBinary)

         for _, ci := range []struct {
             Name string
@@ -171,23 +112,23 @@ func prettyPrintInfo(dockerCli command.Cli, info types.Info) error {
             if ci.Commit.ID != ci.Commit.Expected {
                 fmt.Fprintf(dockerCli.Out(), " (expected: %s)", ci.Commit.Expected)
             }
-            fmt.Fprintf(dockerCli.Out(), "\n")
+            fmt.Fprint(dockerCli.Out(), "\n")
         }
         if len(info.SecurityOptions) != 0 {
             kvs, err := types.DecodeSecurityOptions(info.SecurityOptions)
             if err != nil {
                 return err
             }
-            fmt.Fprintf(dockerCli.Out(), "Security Options:\n")
+            fmt.Fprintln(dockerCli.Out(), "Security Options:")
             for _, so := range kvs {
-                fmt.Fprintf(dockerCli.Out(), " %s\n", so.Name)
+                fmt.Fprintln(dockerCli.Out(), " "+so.Name)
                 for _, o := range so.Options {
                     switch o.Key {
                     case "profile":
                         if o.Value != "default" {
-                            fmt.Fprintf(dockerCli.Err(), " WARNING: You're not using the default seccomp profile\n")
+                            fmt.Fprintln(dockerCli.Err(), " WARNING: You're not using the default seccomp profile")
                         }
-                        fmt.Fprintf(dockerCli.Out(), " Profile: %s\n", o.Value)
+                        fmt.Fprintln(dockerCli.Out(), " Profile:", o.Value)
                     }
                 }
             }
@@ -196,44 +137,44 @@ func prettyPrintInfo(dockerCli command.Cli, info types.Info) error {

     // Isolation only has meaning on a Windows daemon.
     if info.OSType == "windows" {
-        fmt.Fprintf(dockerCli.Out(), "Default Isolation: %v\n", info.Isolation)
+        fmt.Fprintln(dockerCli.Out(), "Default Isolation:", info.Isolation)
     }

-    fprintfIfNotEmpty(dockerCli.Out(), "Kernel Version: %s\n", info.KernelVersion)
-    fprintfIfNotEmpty(dockerCli.Out(), "Operating System: %s\n", info.OperatingSystem)
-    fprintfIfNotEmpty(dockerCli.Out(), "OSType: %s\n", info.OSType)
-    fprintfIfNotEmpty(dockerCli.Out(), "Architecture: %s\n", info.Architecture)
-    fmt.Fprintf(dockerCli.Out(), "CPUs: %d\n", info.NCPU)
-    fmt.Fprintf(dockerCli.Out(), "Total Memory: %s\n", units.BytesSize(float64(info.MemTotal)))
-    fprintfIfNotEmpty(dockerCli.Out(), "Name: %s\n", info.Name)
-    fprintfIfNotEmpty(dockerCli.Out(), "ID: %s\n", info.ID)
-    fmt.Fprintf(dockerCli.Out(), "Docker Root Dir: %s\n", info.DockerRootDir)
-    fmt.Fprintf(dockerCli.Out(), "Debug Mode (client): %v\n", debug.IsEnabled())
-    fmt.Fprintf(dockerCli.Out(), "Debug Mode (server): %v\n", info.Debug)
+    fprintlnNonEmpty(dockerCli.Out(), "Kernel Version:", info.KernelVersion)
+    fprintlnNonEmpty(dockerCli.Out(), "Operating System:", info.OperatingSystem)
+    fprintlnNonEmpty(dockerCli.Out(), "OSType:", info.OSType)
+    fprintlnNonEmpty(dockerCli.Out(), "Architecture:", info.Architecture)
+    fmt.Fprintln(dockerCli.Out(), "CPUs:", info.NCPU)
+    fmt.Fprintln(dockerCli.Out(), "Total Memory:", units.BytesSize(float64(info.MemTotal)))
+    fprintlnNonEmpty(dockerCli.Out(), "Name:", info.Name)
+    fprintlnNonEmpty(dockerCli.Out(), "ID:", info.ID)
+    fmt.Fprintln(dockerCli.Out(), "Docker Root Dir:", info.DockerRootDir)
+    fmt.Fprintln(dockerCli.Out(), "Debug Mode (client):", debug.IsEnabled())
+    fmt.Fprintln(dockerCli.Out(), "Debug Mode (server):", info.Debug)

     if info.Debug {
-        fmt.Fprintf(dockerCli.Out(), " File Descriptors: %d\n", info.NFd)
-        fmt.Fprintf(dockerCli.Out(), " Goroutines: %d\n", info.NGoroutines)
-        fmt.Fprintf(dockerCli.Out(), " System Time: %s\n", info.SystemTime)
-        fmt.Fprintf(dockerCli.Out(), " EventsListeners: %d\n", info.NEventsListener)
+        fmt.Fprintln(dockerCli.Out(), " File Descriptors:", info.NFd)
+        fmt.Fprintln(dockerCli.Out(), " Goroutines:", info.NGoroutines)
+        fmt.Fprintln(dockerCli.Out(), " System Time:", info.SystemTime)
+        fmt.Fprintln(dockerCli.Out(), " EventsListeners:", info.NEventsListener)
     }

-    fprintfIfNotEmpty(dockerCli.Out(), "Http Proxy: %s\n", info.HTTPProxy)
-    fprintfIfNotEmpty(dockerCli.Out(), "Https Proxy: %s\n", info.HTTPSProxy)
-    fprintfIfNotEmpty(dockerCli.Out(), "No Proxy: %s\n", info.NoProxy)
+    fprintlnNonEmpty(dockerCli.Out(), "HTTP Proxy:", info.HTTPProxy)
+    fprintlnNonEmpty(dockerCli.Out(), "HTTPS Proxy:", info.HTTPSProxy)
+    fprintlnNonEmpty(dockerCli.Out(), "No Proxy:", info.NoProxy)

     if info.IndexServerAddress != "" {
         u := dockerCli.ConfigFile().AuthConfigs[info.IndexServerAddress].Username
         if len(u) > 0 {
-            fmt.Fprintf(dockerCli.Out(), "Username: %v\n", u)
+            fmt.Fprintln(dockerCli.Out(), "Username:", u)
         }
-        fmt.Fprintf(dockerCli.Out(), "Registry: %v\n", info.IndexServerAddress)
+        fmt.Fprintln(dockerCli.Out(), "Registry:", info.IndexServerAddress)
     }

     if info.Labels != nil {
         fmt.Fprintln(dockerCli.Out(), "Labels:")
-        for _, attribute := range info.Labels {
-            fmt.Fprintf(dockerCli.Out(), " %s\n", attribute)
+        for _, lbl := range info.Labels {
+            fmt.Fprintln(dockerCli.Out(), " "+lbl)
         }
         // TODO: Engine labels with duplicate keys has been deprecated in 1.13 and will be error out
         // after 3 release cycles (17.12). For now, a WARNING will be generated. The following will
@@ -252,20 +193,15 @@ func prettyPrintInfo(dockerCli command.Cli, info types.Info) error {
         }
     }

-    fmt.Fprintf(dockerCli.Out(), "Experimental: %v\n", info.ExperimentalBuild)
-    if info.ClusterStore != "" {
-        fmt.Fprintf(dockerCli.Out(), "Cluster Store: %s\n", info.ClusterStore)
-    }
-
-    if info.ClusterAdvertise != "" {
-        fmt.Fprintf(dockerCli.Out(), "Cluster Advertise: %s\n", info.ClusterAdvertise)
-    }
+    fmt.Fprintln(dockerCli.Out(), "Experimental:", info.ExperimentalBuild)
+    fprintlnNonEmpty(dockerCli.Out(), "Cluster Store:", info.ClusterStore)
+    fprintlnNonEmpty(dockerCli.Out(), "Cluster Advertise:", info.ClusterAdvertise)

     if info.RegistryConfig != nil && (len(info.RegistryConfig.InsecureRegistryCIDRs) > 0 || len(info.RegistryConfig.IndexConfigs) > 0) {
         fmt.Fprintln(dockerCli.Out(), "Insecure Registries:")
         for _, registry := range info.RegistryConfig.IndexConfigs {
             if !registry.Secure {
-                fmt.Fprintf(dockerCli.Out(), " %s\n", registry.Name)
+                fmt.Fprintln(dockerCli.Out(), " "+registry.Name)
             }
         }

@@ -278,11 +214,12 @@ func prettyPrintInfo(dockerCli command.Cli, info types.Info) error {
     if info.RegistryConfig != nil && len(info.RegistryConfig.Mirrors) > 0 {
         fmt.Fprintln(dockerCli.Out(), "Registry Mirrors:")
         for _, mirror := range info.RegistryConfig.Mirrors {
-            fmt.Fprintf(dockerCli.Out(), " %s\n", mirror)
+            fmt.Fprintln(dockerCli.Out(), " "+mirror)
         }
     }

-    fmt.Fprintf(dockerCli.Out(), "Live Restore Enabled: %v\n\n", info.LiveRestoreEnabled)
+    fmt.Fprintln(dockerCli.Out(), "Live Restore Enabled:", info.LiveRestoreEnabled)
+    fmt.Fprint(dockerCli.Out(), "\n")

     // Only output these warnings if the server does not support these features
     if info.OSType != "windows" {
@@ -326,6 +263,63 @@ func prettyPrintInfo(dockerCli command.Cli, info types.Info) error {
     return nil
 }

+func printSwarmInfo(dockerCli command.Cli, info types.Info) {
+    if info.Swarm.LocalNodeState == swarm.LocalNodeStateInactive || info.Swarm.LocalNodeState == swarm.LocalNodeStateLocked {
+        return
+    }
+    fmt.Fprintln(dockerCli.Out(), " NodeID:", info.Swarm.NodeID)
+    if info.Swarm.Error != "" {
+        fmt.Fprintln(dockerCli.Out(), " Error:", info.Swarm.Error)
+    }
+    fmt.Fprintln(dockerCli.Out(), " Is Manager:", info.Swarm.ControlAvailable)
+    if info.Swarm.Cluster != nil && info.Swarm.ControlAvailable && info.Swarm.Error == "" && info.Swarm.LocalNodeState != swarm.LocalNodeStateError {
+        fmt.Fprintln(dockerCli.Out(), " ClusterID:", info.Swarm.Cluster.ID)
+        fmt.Fprintln(dockerCli.Out(), " Managers:", info.Swarm.Managers)
+        fmt.Fprintln(dockerCli.Out(), " Nodes:", info.Swarm.Nodes)
+        fmt.Fprintln(dockerCli.Out(), " Orchestration:")
+        taskHistoryRetentionLimit := int64(0)
+        if info.Swarm.Cluster.Spec.Orchestration.TaskHistoryRetentionLimit != nil {
+            taskHistoryRetentionLimit = *info.Swarm.Cluster.Spec.Orchestration.TaskHistoryRetentionLimit
+        }
+        fmt.Fprintln(dockerCli.Out(), " Task History Retention Limit:", taskHistoryRetentionLimit)
+        fmt.Fprintln(dockerCli.Out(), " Raft:")
+        fmt.Fprintln(dockerCli.Out(), " Snapshot Interval:", info.Swarm.Cluster.Spec.Raft.SnapshotInterval)
+        if info.Swarm.Cluster.Spec.Raft.KeepOldSnapshots != nil {
+            fmt.Fprintf(dockerCli.Out(), " Number of Old Snapshots to Retain: %d\n", *info.Swarm.Cluster.Spec.Raft.KeepOldSnapshots)
+        }
+        fmt.Fprintln(dockerCli.Out(), " Heartbeat Tick:", info.Swarm.Cluster.Spec.Raft.HeartbeatTick)
+        fmt.Fprintln(dockerCli.Out(), " Election Tick:", info.Swarm.Cluster.Spec.Raft.ElectionTick)
+        fmt.Fprintln(dockerCli.Out(), " Dispatcher:")
+        fmt.Fprintln(dockerCli.Out(), " Heartbeat Period:", units.HumanDuration(info.Swarm.Cluster.Spec.Dispatcher.HeartbeatPeriod))
+        fmt.Fprintln(dockerCli.Out(), " CA Configuration:")
+        fmt.Fprintln(dockerCli.Out(), " Expiry Duration:", units.HumanDuration(info.Swarm.Cluster.Spec.CAConfig.NodeCertExpiry))
+        fmt.Fprintln(dockerCli.Out(), " Force Rotate:", info.Swarm.Cluster.Spec.CAConfig.ForceRotate)
+        if caCert := strings.TrimSpace(info.Swarm.Cluster.Spec.CAConfig.SigningCACert); caCert != "" {
+            fmt.Fprintf(dockerCli.Out(), " Signing CA Certificate: \n%s\n\n", caCert)
+        }
+        if len(info.Swarm.Cluster.Spec.CAConfig.ExternalCAs) > 0 {
+            fmt.Fprintln(dockerCli.Out(), " External CAs:")
+            for _, entry := range info.Swarm.Cluster.Spec.CAConfig.ExternalCAs {
+                fmt.Fprintf(dockerCli.Out(), " %s: %s\n", entry.Protocol, entry.URL)
+            }
+        }
+        fmt.Fprintln(dockerCli.Out(), " Autolock Managers:", info.Swarm.Cluster.Spec.EncryptionConfig.AutoLockManagers)
+        fmt.Fprintln(dockerCli.Out(), " Root Rotation In Progress:", info.Swarm.Cluster.RootRotationInProgress)
+    }
+    fmt.Fprintln(dockerCli.Out(), " Node Address:", info.Swarm.NodeAddr)
+    if len(info.Swarm.RemoteManagers) > 0 {
+        managers := []string{}
+        for _, entry := range info.Swarm.RemoteManagers {
+            managers = append(managers, entry.Addr)
+        }
+        sort.Strings(managers)
+        fmt.Fprintln(dockerCli.Out(), " Manager Addresses:")
+        for _, entry := range managers {
+            fmt.Fprintf(dockerCli.Out(), " %s\n", entry)
+        }
+    }
+}
+
 func printStorageDriverWarnings(dockerCli command.Cli, info types.Info) {
     if info.DriverStatus == nil {
         return
@@ -374,9 +368,8 @@ func formatInfo(dockerCli *command.DockerCli, info types.Info, format string) er
     return err
 }

-func fprintfIfNotEmpty(w io.Writer, format, value string) (int, error) {
+func fprintlnNonEmpty(w io.Writer, label, value string) {
     if value != "" {
-        return fmt.Fprintf(w, format, value)
+        fmt.Fprintln(w, label, value)
     }
-    return 0, nil
 }