@@ -2614,7 +2614,12 @@ type containerStats struct {
 	err error
 }
 
-func (s *containerStats) Collect(stream io.ReadCloser) {
+func (s *containerStats) Collect(cli *DockerCli) {
+	stream, _, err := cli.call("GET", "/containers/"+s.Name+"/stats", nil, false)
+	if err != nil {
+		s.err = err
+		return
+	}
 	defer stream.Close()
 	var (
 		previousCpu uint64
@@ -2694,28 +2699,44 @@ func (cli *DockerCli) CmdStats(args ...string) error {
 	names := cmd.Args()
 	sort.Strings(names)
-	var cStats []*containerStats
+	var (
+		cStats []*containerStats
+		w      = tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
+	)
+	printHeader := func() {
+		fmt.Fprint(cli.out, "\033[2J")
+		fmt.Fprint(cli.out, "\033[H")
+		fmt.Fprintln(w, "CONTAINER\tCPU %\tMEM USAGE/LIMIT\tMEM %\tNET I/O")
+	}
 	for _, n := range names {
 		s := &containerStats{Name: n}
 		cStats = append(cStats, s)
-		stream, _, err := cli.call("GET", "/containers/"+n+"/stats", nil, false)
-		if err != nil {
-			return err
+		go s.Collect(cli)
+	}
+	// do a quick pause so that any failed connections for containers that do not exist are able to be
+	// evicted before we display the initial or default values.
+	time.Sleep(500 * time.Millisecond)
+	var errs []string
+	for _, c := range cStats {
+		c.mu.Lock()
+		if c.err != nil {
+			errs = append(errs, fmt.Sprintf("%s: %s", c.Name, c.err.Error()))
 		}
-		go s.Collect(stream)
+		c.mu.Unlock()
+	}
+	if len(errs) > 0 {
+		return fmt.Errorf("%s", strings.Join(errs, ", "))
 	}
-	w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
 	for _ = range time.Tick(500 * time.Millisecond) {
-		fmt.Fprint(cli.out, "\033[2J")
-		fmt.Fprint(cli.out, "\033[H")
-		fmt.Fprintln(w, "CONTAINER\tCPU %\tMEM USAGE/LIMIT\tMEM %\tNET I/O")
+		printHeader()
 		toRemove := []int{}
 		for i, s := range cStats {
 			if err := s.Display(w); err != nil {
 				toRemove = append(toRemove, i)
 			}
 		}
-		for _, i := range toRemove {
+		for j := len(toRemove) - 1; j >= 0; j-- {
+			i := toRemove[j]
 			cStats = append(cStats[:i], cStats[i+1:]...)
 		}
 		if len(cStats) == 0 {