@@ -5,25 +5,24 @@ import (
     "io"
     "strings"
     "sync"
-    "text/tabwriter"
     "time"

     "golang.org/x/net/context"

-    "github.com/Sirupsen/logrus"
     "github.com/docker/docker/api/types"
     "github.com/docker/docker/api/types/events"
     "github.com/docker/docker/api/types/filters"
     "github.com/docker/docker/cli"
     "github.com/docker/docker/cli/command"
+    "github.com/docker/docker/cli/command/formatter"
     "github.com/docker/docker/cli/command/system"
     "github.com/spf13/cobra"
 )

 type statsOptions struct {
-    all      bool
-    noStream bool
-
+    all        bool
+    noStream   bool
+    format     string
     containers []string
 }

@@ -44,6 +43,7 @@ func NewStatsCommand(dockerCli *command.DockerCli) *cobra.Command {
     flags := cmd.Flags()
     flags.BoolVarP(&opts.all, "all", "a", false, "Show all containers (default shows just running)")
     flags.BoolVar(&opts.noStream, "no-stream", false, "Disable streaming stats and only pull the first result")
+    flags.StringVar(&opts.format, "format", "", "Pretty-print stats using a Go template")
     return cmd
 }

@@ -98,10 +98,10 @@ func runStats(dockerCli *command.DockerCli, opts *statsOptions) error {
                 closeChan <- err
             }
             for _, container := range cs {
-                s := &containerStats{Name: container.ID[:12]}
+                s := formatter.NewContainerStats(container.ID[:12], daemonOSType)
                 if cStats.add(s) {
                     waitFirst.Add(1)
-                    go s.Collect(ctx, dockerCli.Client(), !opts.noStream, waitFirst)
+                    go collect(s, ctx, dockerCli.Client(), !opts.noStream, waitFirst)
                 }
             }
         }
@@ -115,19 +115,19 @@ func runStats(dockerCli *command.DockerCli, opts *statsOptions) error {
     eh := system.InitEventHandler()
     eh.Handle("create", func(e events.Message) {
         if opts.all {
-            s := &containerStats{Name: e.ID[:12]}
+            s := formatter.NewContainerStats(e.ID[:12], daemonOSType)
             if cStats.add(s) {
                 waitFirst.Add(1)
-                go s.Collect(ctx, dockerCli.Client(), !opts.noStream, waitFirst)
+                go collect(s, ctx, dockerCli.Client(), !opts.noStream, waitFirst)
             }
         }
     })

     eh.Handle("start", func(e events.Message) {
-        s := &containerStats{Name: e.ID[:12]}
+        s := formatter.NewContainerStats(e.ID[:12], daemonOSType)
         if cStats.add(s) {
             waitFirst.Add(1)
-            go s.Collect(ctx, dockerCli.Client(), !opts.noStream, waitFirst)
+            go collect(s, ctx, dockerCli.Client(), !opts.noStream, waitFirst)
         }
     })

@@ -150,10 +150,10 @@ func runStats(dockerCli *command.DockerCli, opts *statsOptions) error {
     // Artificially send creation events for the containers we were asked to
     // monitor (same code path as we use when monitoring all containers).
     for _, name := range opts.containers {
-        s := &containerStats{Name: name}
+        s := formatter.NewContainerStats(name, daemonOSType)
         if cStats.add(s) {
             waitFirst.Add(1)
-            go s.Collect(ctx, dockerCli.Client(), !opts.noStream, waitFirst)
+            go collect(s, ctx, dockerCli.Client(), !opts.noStream, waitFirst)
         }
     }

@@ -166,11 +166,11 @@ func runStats(dockerCli *command.DockerCli, opts *statsOptions) error {
     var errs []string
     cStats.mu.Lock()
     for _, c := range cStats.cs {
-        c.mu.Lock()
-        if c.err != nil {
-            errs = append(errs, fmt.Sprintf("%s: %v", c.Name, c.err))
+        c.Mu.Lock()
+        if c.Err != nil {
+            errs = append(errs, fmt.Sprintf("%s: %v", c.Name, c.Err))
         }
-        c.mu.Unlock()
+        c.Mu.Unlock()
     }
     cStats.mu.Unlock()
     if len(errs) > 0 {
@@ -180,44 +180,34 @@ func runStats(dockerCli *command.DockerCli, opts *statsOptions) error {
     // before printing to screen, make sure each container has at least one valid stat entry
     waitFirst.Wait()
+    f := "table"
+    if len(opts.format) > 0 {
+        f = opts.format
+    }
+    statsCtx := formatter.Context{
+        Output: dockerCli.Out(),
+        Format: formatter.NewStatsFormat(f, daemonOSType),
+    }

-    w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0)
-    printHeader := func() {
+    cleanHeader := func() {
         if !opts.noStream {
             fmt.Fprint(dockerCli.Out(), "\033[2J")
             fmt.Fprint(dockerCli.Out(), "\033[H")
         }
-        switch daemonOSType {
-        case "":
-            // Before we have any stats from the daemon, we don't know the platform...
-            io.WriteString(w, "Waiting for statistics...\n")
-        case "windows":
-            io.WriteString(w, "CONTAINER\tCPU %\tPRIV WORKING SET\tNET I/O\tBLOCK I/O\n")
-        default:
-            io.WriteString(w, "CONTAINER\tCPU %\tMEM USAGE / LIMIT\tMEM %\tNET I/O\tBLOCK I/O\tPIDS\n")
-        }
     }

+    var err error
     for range time.Tick(500 * time.Millisecond) {
-        printHeader()
-        toRemove := []string{}
-        cStats.mu.Lock()
-        for _, s := range cStats.cs {
-            if err := s.Display(w); err != nil && !opts.noStream {
-                logrus.Debugf("stats: got error for %s: %v", s.Name, err)
-                if err == io.EOF {
-                    toRemove = append(toRemove, s.Name)
-                }
-            }
-        }
-        cStats.mu.Unlock()
-        for _, name := range toRemove {
-            cStats.remove(name)
+        cleanHeader()
+        cStats.mu.RLock()
+        csLen := len(cStats.cs)
+        if err = formatter.ContainerStatsWrite(statsCtx, cStats.cs); err != nil {
+            break
         }
-        if len(cStats.cs) == 0 && !showAll {
-            return nil
+        cStats.mu.RUnlock()
+        if csLen == 0 && !showAll {
+            break
         }
-        w.Flush()
         if opts.noStream {
             break
         }

@@ -237,5 +227,5 @@ func runStats(dockerCli *command.DockerCli, opts *statsOptions) error {
             // just skip
         }
     }
-    return nil
+    return err
 }
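
Note on the mechanism (not part of the patch): the formatter calls introduced here (formatter.NewStatsFormat, formatter.Context, formatter.ContainerStatsWrite) render each container's stats through a Go text/template, replacing the hand-written tabwriter header that this diff removes. Below is a minimal, self-contained sketch of that mechanism; the statsEntry type and its field names are illustrative placeholders, not the real formatter types, which live in cli/command/formatter.

package main

import (
    "os"
    "text/tabwriter"
    "text/template"
)

// statsEntry stands in for the per-container stats the formatter
// package feeds to the template; the field names are placeholders.
type statsEntry struct {
    Container string
    CPUPerc   string
    MemUsage  string
}

func main() {
    // A user-supplied format string, e.g. the value of --format. The
    // \t escapes let tabwriter align columns, as the removed header
    // code did with its "CONTAINER\tCPU %\t..." strings.
    tmpl := template.Must(template.New("stats").
        Parse("{{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}\n"))

    // Same tabwriter parameters as the line this patch deletes.
    w := tabwriter.NewWriter(os.Stdout, 20, 1, 3, ' ', 0)
    for _, s := range []statsEntry{
        {Container: "4f1b2c3d4e5f", CPUPerc: "0.15%", MemUsage: "12.5MiB / 1.9GiB"},
        {Container: "9a8b7c6d5e4f", CPUPerc: "2.03%", MemUsage: "88.1MiB / 1.9GiB"},
    } {
        // Render one row per container through the template.
        if err := tmpl.Execute(w, s); err != nil {
            panic(err)
        }
    }
    w.Flush()
}

With the patch applied, an invocation such as docker stats --no-stream --format "{{.Container}}: {{.CPUPerc}}" would select a custom template, while an empty --format falls back to the default "table" layout (the f := "table" branch above). The exact placeholder names accepted by the real formatter are defined in cli/command/formatter, not in this diff.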