Add the format switch to the stats command

Signed-off-by: Boaz Shuster <ripcurld.github@gmail.com>
This commit is contained in:
Boaz Shuster 2016-07-18 21:30:15 +03:00
parent 41b980ad6d
commit a4f3442403
4 changed files with 188 additions and 149 deletions

View File

@ -5,17 +5,16 @@ import (
"io" "io"
"strings" "strings"
"sync" "sync"
"text/tabwriter"
"time" "time"
"golang.org/x/net/context" "golang.org/x/net/context"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api/types" "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/events"
"github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/filters"
"github.com/docker/docker/cli" "github.com/docker/docker/cli"
"github.com/docker/docker/cli/command" "github.com/docker/docker/cli/command"
"github.com/docker/docker/cli/command/formatter"
"github.com/docker/docker/cli/command/system" "github.com/docker/docker/cli/command/system"
"github.com/spf13/cobra" "github.com/spf13/cobra"
) )
@ -23,7 +22,7 @@ import (
type statsOptions struct { type statsOptions struct {
all bool all bool
noStream bool noStream bool
format string
containers []string containers []string
} }
@ -44,6 +43,7 @@ func NewStatsCommand(dockerCli *command.DockerCli) *cobra.Command {
flags := cmd.Flags() flags := cmd.Flags()
flags.BoolVarP(&opts.all, "all", "a", false, "Show all containers (default shows just running)") flags.BoolVarP(&opts.all, "all", "a", false, "Show all containers (default shows just running)")
flags.BoolVar(&opts.noStream, "no-stream", false, "Disable streaming stats and only pull the first result") flags.BoolVar(&opts.noStream, "no-stream", false, "Disable streaming stats and only pull the first result")
flags.StringVar(&opts.format, "format", "", "Pretty-print images using a Go template")
return cmd return cmd
} }
@ -98,10 +98,10 @@ func runStats(dockerCli *command.DockerCli, opts *statsOptions) error {
closeChan <- err closeChan <- err
} }
for _, container := range cs { for _, container := range cs {
s := &containerStats{Name: container.ID[:12]} s := formatter.NewContainerStats(container.ID[:12], daemonOSType)
if cStats.add(s) { if cStats.add(s) {
waitFirst.Add(1) waitFirst.Add(1)
go s.Collect(ctx, dockerCli.Client(), !opts.noStream, waitFirst) go collect(s, ctx, dockerCli.Client(), !opts.noStream, waitFirst)
} }
} }
} }
@ -115,19 +115,19 @@ func runStats(dockerCli *command.DockerCli, opts *statsOptions) error {
eh := system.InitEventHandler() eh := system.InitEventHandler()
eh.Handle("create", func(e events.Message) { eh.Handle("create", func(e events.Message) {
if opts.all { if opts.all {
s := &containerStats{Name: e.ID[:12]} s := formatter.NewContainerStats(e.ID[:12], daemonOSType)
if cStats.add(s) { if cStats.add(s) {
waitFirst.Add(1) waitFirst.Add(1)
go s.Collect(ctx, dockerCli.Client(), !opts.noStream, waitFirst) go collect(s, ctx, dockerCli.Client(), !opts.noStream, waitFirst)
} }
} }
}) })
eh.Handle("start", func(e events.Message) { eh.Handle("start", func(e events.Message) {
s := &containerStats{Name: e.ID[:12]} s := formatter.NewContainerStats(e.ID[:12], daemonOSType)
if cStats.add(s) { if cStats.add(s) {
waitFirst.Add(1) waitFirst.Add(1)
go s.Collect(ctx, dockerCli.Client(), !opts.noStream, waitFirst) go collect(s, ctx, dockerCli.Client(), !opts.noStream, waitFirst)
} }
}) })
@ -150,10 +150,10 @@ func runStats(dockerCli *command.DockerCli, opts *statsOptions) error {
// Artificially send creation events for the containers we were asked to // Artificially send creation events for the containers we were asked to
// monitor (same code path than we use when monitoring all containers). // monitor (same code path than we use when monitoring all containers).
for _, name := range opts.containers { for _, name := range opts.containers {
s := &containerStats{Name: name} s := formatter.NewContainerStats(name, daemonOSType)
if cStats.add(s) { if cStats.add(s) {
waitFirst.Add(1) waitFirst.Add(1)
go s.Collect(ctx, dockerCli.Client(), !opts.noStream, waitFirst) go collect(s, ctx, dockerCli.Client(), !opts.noStream, waitFirst)
} }
} }
@ -166,11 +166,11 @@ func runStats(dockerCli *command.DockerCli, opts *statsOptions) error {
var errs []string var errs []string
cStats.mu.Lock() cStats.mu.Lock()
for _, c := range cStats.cs { for _, c := range cStats.cs {
c.mu.Lock() c.Mu.Lock()
if c.err != nil { if c.Err != nil {
errs = append(errs, fmt.Sprintf("%s: %v", c.Name, c.err)) errs = append(errs, fmt.Sprintf("%s: %v", c.Name, c.Err))
} }
c.mu.Unlock() c.Mu.Unlock()
} }
cStats.mu.Unlock() cStats.mu.Unlock()
if len(errs) > 0 { if len(errs) > 0 {
@ -180,44 +180,34 @@ func runStats(dockerCli *command.DockerCli, opts *statsOptions) error {
// before print to screen, make sure each container get at least one valid stat data // before print to screen, make sure each container get at least one valid stat data
waitFirst.Wait() waitFirst.Wait()
f := "table"
if len(opts.format) > 0 {
f = opts.format
}
statsCtx := formatter.Context{
Output: dockerCli.Out(),
Format: formatter.NewStatsFormat(f, daemonOSType),
}
w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0) cleanHeader := func() {
printHeader := func() {
if !opts.noStream { if !opts.noStream {
fmt.Fprint(dockerCli.Out(), "\033[2J") fmt.Fprint(dockerCli.Out(), "\033[2J")
fmt.Fprint(dockerCli.Out(), "\033[H") fmt.Fprint(dockerCli.Out(), "\033[H")
} }
switch daemonOSType {
case "":
// Before we have any stats from the daemon, we don't know the platform...
io.WriteString(w, "Waiting for statistics...\n")
case "windows":
io.WriteString(w, "CONTAINER\tCPU %\tPRIV WORKING SET\tNET I/O\tBLOCK I/O\n")
default:
io.WriteString(w, "CONTAINER\tCPU %\tMEM USAGE / LIMIT\tMEM %\tNET I/O\tBLOCK I/O\tPIDS\n")
}
} }
var err error
for range time.Tick(500 * time.Millisecond) { for range time.Tick(500 * time.Millisecond) {
printHeader() cleanHeader()
toRemove := []string{} cStats.mu.RLock()
cStats.mu.Lock() csLen := len(cStats.cs)
for _, s := range cStats.cs { if err = formatter.ContainerStatsWrite(statsCtx, cStats.cs); err != nil {
if err := s.Display(w); err != nil && !opts.noStream { break
logrus.Debugf("stats: got error for %s: %v", s.Name, err)
if err == io.EOF {
toRemove = append(toRemove, s.Name)
} }
cStats.mu.RUnlock()
if csLen == 0 && !showAll {
break
} }
}
cStats.mu.Unlock()
for _, name := range toRemove {
cStats.remove(name)
}
if len(cStats.cs) == 0 && !showAll {
return nil
}
w.Flush()
if opts.noStream { if opts.noStream {
break break
} }
@ -237,5 +227,5 @@ func runStats(dockerCli *command.DockerCli, opts *statsOptions) error {
// just skip // just skip
} }
} }
return nil return err
} }

View File

@ -3,7 +3,6 @@ package container
import ( import (
"encoding/json" "encoding/json"
"errors" "errors"
"fmt"
"io" "io"
"strings" "strings"
"sync" "sync"
@ -11,30 +10,15 @@ import (
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
"github.com/docker/docker/api/types" "github.com/docker/docker/api/types"
"github.com/docker/docker/cli/command/formatter"
"github.com/docker/docker/client" "github.com/docker/docker/client"
"github.com/docker/go-units"
"golang.org/x/net/context" "golang.org/x/net/context"
) )
type containerStats struct {
Name string
CPUPercentage float64
Memory float64 // On Windows this is the private working set
MemoryLimit float64 // Not used on Windows
MemoryPercentage float64 // Not used on Windows
NetworkRx float64
NetworkTx float64
BlockRead float64
BlockWrite float64
PidsCurrent uint64 // Not used on Windows
mu sync.Mutex
err error
}
type stats struct { type stats struct {
mu sync.Mutex
ostype string ostype string
cs []*containerStats mu sync.RWMutex
cs []*formatter.ContainerStats
} }
// daemonOSType is set once we have at least one stat for a container // daemonOSType is set once we have at least one stat for a container
@ -42,7 +26,7 @@ type stats struct {
// on the daemon platform. // on the daemon platform.
var daemonOSType string var daemonOSType string
func (s *stats) add(cs *containerStats) bool { func (s *stats) add(cs *formatter.ContainerStats) bool {
s.mu.Lock() s.mu.Lock()
defer s.mu.Unlock() defer s.mu.Unlock()
if _, exists := s.isKnownContainer(cs.Name); !exists { if _, exists := s.isKnownContainer(cs.Name); !exists {
@ -69,7 +53,7 @@ func (s *stats) isKnownContainer(cid string) (int, bool) {
return -1, false return -1, false
} }
func (s *containerStats) Collect(ctx context.Context, cli client.APIClient, streamStats bool, waitFirst *sync.WaitGroup) { func collect(s *formatter.ContainerStats, ctx context.Context, cli client.APIClient, streamStats bool, waitFirst *sync.WaitGroup) {
logrus.Debugf("collecting stats for %s", s.Name) logrus.Debugf("collecting stats for %s", s.Name)
var ( var (
getFirst bool getFirst bool
@ -88,9 +72,9 @@ func (s *containerStats) Collect(ctx context.Context, cli client.APIClient, stre
response, err := cli.ContainerStats(ctx, s.Name, streamStats) response, err := cli.ContainerStats(ctx, s.Name, streamStats)
if err != nil { if err != nil {
s.mu.Lock() s.Mu.Lock()
s.err = err s.Err = err
s.mu.Unlock() s.Mu.Unlock()
return return
} }
defer response.Body.Close() defer response.Body.Close()
@ -137,7 +121,7 @@ func (s *containerStats) Collect(ctx context.Context, cli client.APIClient, stre
mem = float64(v.MemoryStats.PrivateWorkingSet) mem = float64(v.MemoryStats.PrivateWorkingSet)
} }
s.mu.Lock() s.Mu.Lock()
s.CPUPercentage = cpuPercent s.CPUPercentage = cpuPercent
s.Memory = mem s.Memory = mem
s.NetworkRx, s.NetworkTx = calculateNetwork(v.Networks) s.NetworkRx, s.NetworkTx = calculateNetwork(v.Networks)
@ -148,7 +132,7 @@ func (s *containerStats) Collect(ctx context.Context, cli client.APIClient, stre
s.MemoryPercentage = memPercent s.MemoryPercentage = memPercent
s.PidsCurrent = v.PidsStats.Current s.PidsCurrent = v.PidsStats.Current
} }
s.mu.Unlock() s.Mu.Unlock()
u <- nil u <- nil
if !streamStats { if !streamStats {
return return
@ -160,7 +144,7 @@ func (s *containerStats) Collect(ctx context.Context, cli client.APIClient, stre
case <-time.After(2 * time.Second): case <-time.After(2 * time.Second):
// zero out the values if we have not received an update within // zero out the values if we have not received an update within
// the specified duration. // the specified duration.
s.mu.Lock() s.Mu.Lock()
s.CPUPercentage = 0 s.CPUPercentage = 0
s.Memory = 0 s.Memory = 0
s.MemoryPercentage = 0 s.MemoryPercentage = 0
@ -170,8 +154,8 @@ func (s *containerStats) Collect(ctx context.Context, cli client.APIClient, stre
s.BlockRead = 0 s.BlockRead = 0
s.BlockWrite = 0 s.BlockWrite = 0
s.PidsCurrent = 0 s.PidsCurrent = 0
s.err = errors.New("timeout waiting for stats") s.Err = errors.New("timeout waiting for stats")
s.mu.Unlock() s.Mu.Unlock()
// if this is the first stat you get, release WaitGroup // if this is the first stat you get, release WaitGroup
if !getFirst { if !getFirst {
getFirst = true getFirst = true
@ -179,12 +163,12 @@ func (s *containerStats) Collect(ctx context.Context, cli client.APIClient, stre
} }
case err := <-u: case err := <-u:
if err != nil { if err != nil {
s.mu.Lock() s.Mu.Lock()
s.err = err s.Err = err
s.mu.Unlock() s.Mu.Unlock()
continue continue
} }
s.err = nil s.Err = nil
// if this is the first stat you get, release WaitGroup // if this is the first stat you get, release WaitGroup
if !getFirst { if !getFirst {
getFirst = true getFirst = true
@ -197,51 +181,6 @@ func (s *containerStats) Collect(ctx context.Context, cli client.APIClient, stre
} }
} }
func (s *containerStats) Display(w io.Writer) error {
s.mu.Lock()
defer s.mu.Unlock()
if daemonOSType == "windows" {
// NOTE: if you change this format, you must also change the err format below!
format := "%s\t%.2f%%\t%s\t%s / %s\t%s / %s\n"
if s.err != nil {
format = "%s\t%s\t%s\t%s / %s\t%s / %s\n"
errStr := "--"
fmt.Fprintf(w, format,
s.Name, errStr, errStr, errStr, errStr, errStr, errStr,
)
err := s.err
return err
}
fmt.Fprintf(w, format,
s.Name,
s.CPUPercentage,
units.BytesSize(s.Memory),
units.HumanSizeWithPrecision(s.NetworkRx, 3), units.HumanSizeWithPrecision(s.NetworkTx, 3),
units.HumanSizeWithPrecision(s.BlockRead, 3), units.HumanSizeWithPrecision(s.BlockWrite, 3))
} else {
// NOTE: if you change this format, you must also change the err format below!
format := "%s\t%.2f%%\t%s / %s\t%.2f%%\t%s / %s\t%s / %s\t%d\n"
if s.err != nil {
format = "%s\t%s\t%s / %s\t%s\t%s / %s\t%s / %s\t%s\n"
errStr := "--"
fmt.Fprintf(w, format,
s.Name, errStr, errStr, errStr, errStr, errStr, errStr, errStr, errStr, errStr,
)
err := s.err
return err
}
fmt.Fprintf(w, format,
s.Name,
s.CPUPercentage,
units.BytesSize(s.Memory), units.BytesSize(s.MemoryLimit),
s.MemoryPercentage,
units.HumanSizeWithPrecision(s.NetworkRx, 3), units.HumanSizeWithPrecision(s.NetworkTx, 3),
units.HumanSizeWithPrecision(s.BlockRead, 3), units.HumanSizeWithPrecision(s.BlockWrite, 3),
s.PidsCurrent)
}
return nil
}
func calculateCPUPercentUnix(previousCPU, previousSystem uint64, v *types.StatsJSON) float64 { func calculateCPUPercentUnix(previousCPU, previousSystem uint64, v *types.StatsJSON) float64 {
var ( var (
cpuPercent = 0.0 cpuPercent = 0.0

View File

@ -1,36 +1,11 @@
package container package container
import ( import (
"bytes"
"testing" "testing"
"github.com/docker/docker/api/types" "github.com/docker/docker/api/types"
) )
func TestDisplay(t *testing.T) {
c := &containerStats{
Name: "app",
CPUPercentage: 30.0,
Memory: 100 * 1024 * 1024.0,
MemoryLimit: 2048 * 1024 * 1024.0,
MemoryPercentage: 100.0 / 2048.0 * 100.0,
NetworkRx: 100 * 1024 * 1024,
NetworkTx: 800 * 1024 * 1024,
BlockRead: 100 * 1024 * 1024,
BlockWrite: 800 * 1024 * 1024,
PidsCurrent: 1,
}
var b bytes.Buffer
if err := c.Display(&b); err != nil {
t.Fatalf("c.Display() gave error: %s", err)
}
got := b.String()
want := "app\t30.00%\t100 MiB / 2 GiB\t4.88%\t105 MB / 839 MB\t105 MB / 839 MB\t1\n"
if got != want {
t.Fatalf("c.Display() = %q, want %q", got, want)
}
}
func TestCalculBlockIO(t *testing.T) { func TestCalculBlockIO(t *testing.T) {
blkio := types.BlkioStats{ blkio := types.BlkioStats{
IoServiceBytesRecursive: []types.BlkioStatEntry{{8, 0, "read", 1234}, {8, 1, "read", 4567}, {8, 0, "write", 123}, {8, 1, "write", 456}}, IoServiceBytesRecursive: []types.BlkioStatEntry{{8, 0, "read", 1234}, {8, 1, "read", 4567}, {8, 0, "write", 123}, {8, 1, "write", 456}},

135
command/formatter/stats.go Normal file
View File

@ -0,0 +1,135 @@
package formatter
import (
"fmt"
"sync"
"github.com/docker/go-units"
)
const (
	// defaultStatsTableFormat is the default table layout for Linux daemons.
	defaultStatsTableFormat = "table {{.Container}}\t{{.CPUPrec}}\t{{.MemUsage}}\t{{.MemPrec}}\t{{.NetIO}}\t{{.BlockIO}}\t{{.PIDs}}"
	// winDefaultStatsTableFormat is the default table layout for Windows
	// daemons, which do not report memory limit/percentage or PIDs.
	// (Fixed: the braces were previously malformed — "{{{.MemUsage}}" and
	// "{.NetIO}}" — which is not a valid Go template.)
	winDefaultStatsTableFormat = "table {{.Container}}\t{{.CPUPrec}}\t{{.MemUsage}}\t{{.NetIO}}\t{{.BlockIO}}"
	// emptyStatsTableFormat is printed before the first sample arrives,
	// while the daemon OS type is still unknown.
	emptyStatsTableFormat = "Waiting for statistics..."

	containerHeader  = "CONTAINER"
	cpuPrecHeader    = "CPU %"
	netIOHeader      = "NET I/O"
	blockIOHeader    = "BLOCK I/O"
	winMemPrecHeader = "PRIV WORKING SET" // Used only on Windows
	memPrecHeader    = "MEM %"             // Used only on Linux
	memUseHeader     = "MEM USAGE / LIMIT" // Used only on Linux
	pidsHeader       = "PIDS"              // Used only on Linux
)
// ContainerStatsAttrs represents the statistics data collected from a container.
// It is a plain value type: ContainerStatsWrite copies it out of a
// ContainerStats under the lock and renders the copy, so instances here
// need no synchronization of their own.
type ContainerStatsAttrs struct {
	Windows          bool    // true when the stats come from a Windows daemon
	Name             string  // container name or truncated ID, as registered by NewContainerStats
	CPUPercentage    float64 // CPU usage as a percentage (e.g. 30.0 for 30%)
	Memory           float64 // On Windows this is the private working set
	MemoryLimit      float64 // Not used on Windows
	MemoryPercentage float64 // Not used on Windows
	NetworkRx        float64 // network bytes received — presumably cumulative; confirm against collector
	NetworkTx        float64 // network bytes transmitted — presumably cumulative; confirm against collector
	BlockRead        float64 // block I/O bytes read
	BlockWrite       float64 // block I/O bytes written
	PidsCurrent      uint64  // Not used on Windows
}
// ContainerStats represents the containers statistics data.
//
// Mu guards both the embedded ContainerStatsAttrs and Err: the collector
// goroutine writes them under Mu.Lock while the formatter reads a snapshot
// under Mu.RLock. Because the struct embeds a sync.RWMutex it must not be
// copied after first use — always pass *ContainerStats.
type ContainerStats struct {
	Mu sync.RWMutex
	ContainerStatsAttrs
	Err error // last collection error (e.g. "timeout waiting for stats"); nil when healthy
}
// NewStatsFormat returns a format for rendering an CStatsContext.
// When source is the table format key, the OS-appropriate default table
// layout is substituted; any other source is treated as a user-supplied
// Go template and passed through unchanged.
func NewStatsFormat(source, osType string) Format {
	if source != TableFormatKey {
		return Format(source)
	}
	if osType == "windows" {
		return Format(winDefaultStatsTableFormat)
	}
	return Format(defaultStatsTableFormat)
}
// NewContainerStats returns a new ContainerStats entity and sets in it
// the given name. The Windows flag is derived from the daemon OS type so
// the formatter later knows which columns are meaningful.
func NewContainerStats(name, osType string) *ContainerStats {
	attrs := ContainerStatsAttrs{
		Name:    name,
		Windows: osType == "windows",
	}
	return &ContainerStats{ContainerStatsAttrs: attrs}
}
// ContainerStatsWrite renders the context for a list of containers statistics.
// Each entry's attributes are snapshotted under its read lock, so the
// collector goroutines may keep updating the stats while rendering runs.
func ContainerStatsWrite(ctx Context, containerStats []*ContainerStats) error {
	render := func(format func(subContext subContext) error) error {
		for _, stats := range containerStats {
			// Copy the attributes out under the lock; render the copy.
			stats.Mu.RLock()
			snapshot := stats.ContainerStatsAttrs
			stats.Mu.RUnlock()

			if err := format(&containerStatsContext{s: snapshot}); err != nil {
				return err
			}
		}
		return nil
	}
	return ctx.Write(&containerStatsContext{}, render)
}
// containerStatsContext is the per-container template context used when
// rendering one row of statistics. It holds an immutable snapshot of the
// stats attributes (copied under the lock in ContainerStatsWrite), so its
// accessor methods need no synchronization.
type containerStatsContext struct {
	HeaderContext
	s ContainerStatsAttrs // snapshot of the container's stats at render time
}
// Container renders the CONTAINER column: the name or truncated ID the
// stats entry was registered under.
func (c *containerStatsContext) Container() string {
	c.AddHeader(containerHeader)
	return c.s.Name
}
// CPUPrec renders the "CPU %" column as a percentage with two decimal
// places. (The name "Prec" is a template key used by the format strings,
// so it cannot be renamed without breaking user templates.)
func (c *containerStatsContext) CPUPrec() string {
	c.AddHeader(cpuPrecHeader)
	return fmt.Sprintf("%.2f", c.s.CPUPercentage) + "%"
}
// MemUsage renders the "MEM USAGE / LIMIT" column, e.g. "100 MiB / 2 GiB".
// Windows daemons do not report a memory limit, so a placeholder is shown.
// (Fixed: the placeholder was built with a no-argument fmt.Sprintf, which
// is flagged by go vet/staticcheck — a plain string literal suffices.)
func (c *containerStatsContext) MemUsage() string {
	c.AddHeader(memUseHeader)
	if c.s.Windows {
		return "-- / --"
	}
	return fmt.Sprintf("%s / %s", units.BytesSize(c.s.Memory), units.BytesSize(c.s.MemoryLimit))
}
// MemPrec renders the memory-percentage column. On Windows the column
// header becomes PRIV WORKING SET instead of MEM %.
// NOTE(review): on Windows the value rendered is still MemoryPercentage
// formatted as a percentage, which does not match the working-set header;
// the default Windows table omits this column, so confirm before relying
// on it in custom templates.
func (c *containerStatsContext) MemPrec() string {
	if c.s.Windows {
		c.AddHeader(winMemPrecHeader)
	} else {
		c.AddHeader(memPrecHeader)
	}
	return fmt.Sprintf("%.2f%%", c.s.MemoryPercentage)
}
// NetIO renders the "NET I/O" column as "received / transmitted" in
// human-readable units with three significant digits.
func (c *containerStatsContext) NetIO() string {
	c.AddHeader(netIOHeader)
	rx := units.HumanSizeWithPrecision(c.s.NetworkRx, 3)
	tx := units.HumanSizeWithPrecision(c.s.NetworkTx, 3)
	return rx + " / " + tx
}
// BlockIO renders the "BLOCK I/O" column as "read / written" in
// human-readable units with three significant digits.
func (c *containerStatsContext) BlockIO() string {
	c.AddHeader(blockIOHeader)
	read := units.HumanSizeWithPrecision(c.s.BlockRead, 3)
	written := units.HumanSizeWithPrecision(c.s.BlockWrite, 3)
	return read + " / " + written
}
// PIDs renders the "PIDS" column. Windows daemons do not report a PID
// count, so a dash placeholder is shown.
// (Fixed: both return paths used no-argument/constant fmt.Sprintf calls,
// flagged by go vet/staticcheck — the dash is now a plain literal.)
func (c *containerStatsContext) PIDs() string {
	c.AddHeader(pidsHeader)
	if c.s.Windows {
		return "-"
	}
	return fmt.Sprintf("%d", c.s.PidsCurrent)
}