
Implement client side display for stats

Signed-off-by: Michael Crosby <crosbymichael@gmail.com>
Michael Crosby committed 10 years ago
commit 2640a10bca
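
For context (this note and the sample values are illustrative, not part of the commit), the new command takes one or more container names and redraws a stats table once per second:

    $ docker stats redis1 redis2
    CONTAINER           CPU %               MEM                 MEM %               NET I/O
    redis1              0.07%               796 kB              0.07%               788/708
    redis2              0.00%               2.75 MB             0.26%               296/0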

+ 105 - 0
api/client/commands.go

@@ -16,6 +16,7 @@ import (
 	"path"
 	"path/filepath"
 	"runtime"
+	"sort"
 	"strconv"
 	"strings"
 	"text/tabwriter"
@@ -42,6 +43,7 @@ import (
 	"github.com/docker/docker/pkg/urlutil"
 	"github.com/docker/docker/registry"
 	"github.com/docker/docker/runconfig"
+	"github.com/docker/docker/stats"
 	"github.com/docker/docker/utils"
 	"github.com/docker/libtrust"
 )
@@ -2618,3 +2620,106 @@ func (cli *DockerCli) CmdExec(args ...string) error {
 
 	return nil
 }
+
+type containerStats struct {
+	Name             string
+	CpuPercentage    float64
+	Memory           float64
+	MemoryPercentage float64
+	NetworkRx        int
+	NetworkTx        int
+}
+
+type statSorter struct {
+	stats []containerStats
+}
+
+func (s *statSorter) Len() int {
+	return len(s.stats)
+}
+
+func (s *statSorter) Swap(i, j int) {
+	s.stats[i], s.stats[j] = s.stats[j], s.stats[i]
+}
+
+func (s *statSorter) Less(i, j int) bool {
+	return s.stats[i].Name < s.stats[j].Name
+}
+
+func (cli *DockerCli) CmdStats(args ...string) error {
+	cmd := cli.Subcmd("stats", "CONTAINER", "Stream the stats of a container", true)
+	cmd.Require(flag.Min, 1)
+	utils.ParseFlags(cmd, args, true)
+
+	cStats := map[string]containerStats{}
+	for _, name := range cmd.Args() {
+		go cli.streamStats(name, cStats)
+	}
+	w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
+	for range time.Tick(1000 * time.Millisecond) {
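+		// ANSI escape sequences: "\033[2J" clears the screen and "\033[H" moves
+		// the cursor home, so every tick redraws the table in place.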
+		fmt.Fprint(cli.out, "\033[2J")
+		fmt.Fprint(cli.out, "\033[H")
+		fmt.Fprintln(w, "CONTAINER\tCPU %\tMEM\tMEM %\tNET I/O")
+		sStats := []containerStats{}
+		for _, s := range cStats {
+			sStats = append(sStats, s)
+		}
+		sorter := &statSorter{sStats}
+		sort.Sort(sorter)
+		for _, s := range sStats {
+			fmt.Fprintf(w, "%s\t%.2f%%\t%s\t%.2f%%\t%d/%d\n",
+				s.Name,
+				s.CpuPercentage,
+				units.HumanSize(s.Memory),
+				s.MemoryPercentage,
+				s.NetworkRx, s.NetworkTx)
+		}
+		w.Flush()
+	}
+	return nil
+}
+
+func (cli *DockerCli) streamStats(name string, data map[string]containerStats) error {
+	stream, _, err := cli.call("GET", "/containers/"+name+"/stats", nil, false)
+	if err != nil {
+		return err
+	}
+
+	var (
+		previousCpu    uint64
+		previousSystem uint64
+		start          = true
+		dec            = json.NewDecoder(stream)
+	)
+	for {
+		var v *stats.Stats
+		if err := dec.Decode(&v); err != nil {
+			return err
+		}
+		memPercent := float64(v.MemoryStats.Usage) / float64(v.MemoryStats.Limit) * 100.0
+		cpuPercent := 0.0
+
+		if !start {
+			// Container CPU time is in nanoseconds; SystemUsage is the host's
+			// aggregate /proc/stat jiffies scaled by 1e9, so divide it by the
+			// clock tick rate to express both deltas in the same unit.
+			cpuDelta := float64(v.CpuStats.CpuUsage.TotalUsage) - float64(previousCpu)
+			systemDelta := (float64(v.CpuStats.SystemUsage) - float64(previousSystem)) / float64(v.ClockTicks)
+
+			if systemDelta > 0.0 {
+				// At the usual 100 Hz tick rate the ClockTicks factor doubles
+				// as the *100 percentage conversion; the result can exceed
+				// 100 on multi-core machines.
+				cpuPercent = (cpuDelta / systemDelta) * float64(v.ClockTicks*len(v.CpuStats.CpuUsage.PercpuUsage))
+			}
+		}
+		start = false
+		d := data[name]
+		d.Name = name
+		d.CpuPercentage = cpuPercent
+		d.Memory = float64(v.MemoryStats.Usage)
+		d.MemoryPercentage = memPercent
+		d.NetworkRx = int(v.Network.RxBytes)
+		d.NetworkTx = int(v.Network.TxBytes)
+		data[name] = d
+
+		previousCpu = v.CpuStats.CpuUsage.TotalUsage
+		previousSystem = v.CpuStats.SystemUsage
+	}
+}
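
The CPU arithmetic above is easy to miss inside the streaming loop. A minimal standalone sketch of the same calculation follows; the helper name and signature are illustrative, not part of the diff:

func calculateCpuPercent(previousCpu, previousSystem, totalUsage, systemUsage uint64, clockTicks, numCores int) float64 {
	// Illustrative helper, not part of this commit.
	// Container CPU time is in nanoseconds; system CPU time is aggregate
	// /proc/stat jiffies scaled by 1e9, so divide by the tick rate first.
	cpuDelta := float64(totalUsage) - float64(previousCpu)
	systemDelta := (float64(systemUsage) - float64(previousSystem)) / float64(clockTicks)
	if systemDelta <= 0.0 {
		return 0.0
	}
	// Scale by tick rate and core count, matching the streaming loop; the
	// result can exceed 100 on multi-core machines.
	return (cpuDelta / systemDelta) * float64(clockTicks*numCores)
}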

+ 4 - 2
daemon/execdriver/driver.go

@@ -106,8 +106,10 @@ type Resources struct {
 
 type ResourceStats struct {
 	*libcontainer.ContainerStats
-	Read       time.Time `json:"read"`
-	ClockTicks int       `json:"clock_ticks"`
+	Read        time.Time `json:"read"`
+	ClockTicks  int       `json:"clock_ticks"`
+	MemoryLimit int64     `json:"memory_limit"`
+	SystemUsage uint64    `json:"system_usage"`
 }
 
 type Mount struct {

+ 9 - 2
daemon/execdriver/execdrivers/execdrivers.go

@@ -2,14 +2,21 @@ package execdrivers
 
 import (
 	"fmt"
+	"path"
+
 	"github.com/docker/docker/daemon/execdriver"
 	"github.com/docker/docker/daemon/execdriver/lxc"
 	"github.com/docker/docker/daemon/execdriver/native"
 	"github.com/docker/docker/pkg/sysinfo"
-	"path"
+	"github.com/docker/docker/pkg/system"
 )
 
 func NewDriver(name, root, initPath string, sysInfo *sysinfo.SysInfo) (execdriver.Driver, error) {
+	meminfo, err := system.ReadMemInfo()
+	if err != nil {
+		return nil, err
+	}
+
 	switch name {
 	case "lxc":
 		// we want to give the lxc driver the full docker root because it needs
@@ -17,7 +24,7 @@ func NewDriver(name, root, initPath string, sysInfo *sysinfo.SysInfo) (execdrive
 		// to be backwards compatible
 		return lxc.NewDriver(root, initPath, sysInfo.AppArmor)
 	case "native":
-		return native.NewDriver(path.Join(root, "execdriver", "native"), initPath)
+		return native.NewDriver(path.Join(root, "execdriver", "native"), initPath, meminfo.MemTotal/1000)
 	}
 	return nil, fmt.Errorf("unknown exec driver %s", name)
 }

+ 9 - 3
daemon/execdriver/native/driver.go

@@ -42,23 +42,23 @@ type driver struct {
 	root             string
 	initPath         string
 	activeContainers map[string]*activeContainer
+	machineMemory    int64
 	sync.Mutex
 }
 
-func NewDriver(root, initPath string) (*driver, error) {
+func NewDriver(root, initPath string, machineMemory int64) (*driver, error) {
 	if err := os.MkdirAll(root, 0700); err != nil {
 		return nil, err
 	}
-
 	// native driver root is at docker_root/execdriver/native. Put apparmor at docker_root
 	if err := apparmor.InstallDefaultProfile(); err != nil {
 		return nil, err
 	}
-
 	return &driver{
 		root:             root,
 		initPath:         initPath,
 		activeContainers: make(map[string]*activeContainer),
+		machineMemory:    machineMemory,
 	}, nil
 }
 
@@ -281,6 +281,7 @@ func (d *driver) Clean(id string) error {
 }
 
 func (d *driver) Stats(id string) (*execdriver.ResourceStats, error) {
+	c := d.activeContainers[id]
 	state, err := libcontainer.GetState(filepath.Join(d.root, id))
 	if err != nil {
 		return nil, err
@@ -290,10 +291,15 @@ func (d *driver) Stats(id string) (*execdriver.ResourceStats, error) {
 	if err != nil {
 		return nil, err
 	}
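+	// A cgroup memory limit of 0 means unlimited; fall back to the machine's
+	// total memory so clients can compute a meaningful MEM %.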
+	memoryLimit := c.container.Cgroups.Memory
+	if memoryLimit == 0 {
+		memoryLimit = d.machineMemory
+	}
 	return &execdriver.ResourceStats{
 		ContainerStats: stats,
 		ClockTicks:     system.GetClockTicks(),
 		Read:           now,
+		MemoryLimit:    memoryLimit,
 	}, nil
 }
 

+ 13 - 3
daemon/start.go

@@ -8,6 +8,7 @@ import (
 
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/runconfig"
+	"github.com/docker/docker/stats"
 )
 
 func (daemon *Daemon) ContainerStart(job *engine.Job) engine.Status {
@@ -80,15 +81,24 @@ func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.
 }
 
 func (daemon *Daemon) ContainerStats(job *engine.Job) engine.Status {
-	stats, err := daemon.SubscribeToContainerStats(job.Args[0])
+	s, err := daemon.SubscribeToContainerStats(job.Args[0])
 	if err != nil {
 		return job.Error(err)
 	}
 	enc := json.NewEncoder(job.Stdout)
-	for update := range stats {
-		if err := enc.Encode(update); err != nil {
+	for update := range s {
+		ss := stats.ToStats(update.ContainerStats)
+		ss.MemoryStats.Limit = uint64(update.MemoryLimit)
+		ss.Read = update.Read
+		ss.ClockTicks = update.ClockTicks
+		ss.CpuStats.SystemUsage = update.SystemUsage
+		if err := enc.Encode(ss); err != nil {
 			return job.Error(err)
 		}
 	}
 	return engine.StatusOK
 }

+ 44 - 0
daemon/stats_collector.go

@@ -1,6 +1,11 @@
 package daemon
 
 import (
+	"bufio"
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
 	"sync"
 	"time"
 
@@ -55,12 +60,18 @@ func (s *statsCollector) start() {
 			log.Debugf("starting collection of container stats")
 			s.m.Lock()
 			for id, d := range s.containers {
+				systemUsage, err := getSystemCpuUsage()
+				if err != nil {
+					log.Errorf("collecting system cpu usage for %s: %v", id, err)
+					continue
+				}
 				stats, err := d.c.Stats()
 				if err != nil {
 					// TODO: @crosbymichael evict container depending on error
 					log.Errorf("collecting stats for %s: %v", id, err)
 					continue
 				}
+				stats.SystemUsage = systemUsage
 				for _, sub := range s.containers[id].subs {
 					sub <- stats
 				}
@@ -69,3 +80,36 @@ func (s *statsCollector) start() {
 		}
 	}()
 }
+
+// getSystemCpuUsage returns the host's cumulative CPU usage as the sum of
+// the aggregate /proc/stat jiffy counters scaled by 1e9; dividing by the
+// clock tick rate yields nanoseconds.
+func getSystemCpuUsage() (uint64, error) {
+	f, err := os.Open("/proc/stat")
+	if err != nil {
+		return 0, err
+	}
+	defer f.Close()
+
+	sc := bufio.NewScanner(f)
+	for sc.Scan() {
+		parts := strings.Fields(sc.Text())
+		switch parts[0] {
+		case "cpu":
+			if len(parts) < 8 {
+				return 0, fmt.Errorf("invalid number of cpu fields")
+			}
+
+			var total uint64
+			for _, i := range parts[1:8] {
+				v, err := strconv.ParseUint(i, 10, 64)
+				if err != nil {
+					return 0, fmt.Errorf("unable to convert value %s to int: %s", i, err)
+				}
+				total += v
+			}
+			return total * 1000000000, nil
+		default:
+			continue
+		}
+	}
+	return 0, fmt.Errorf("invalid stat format")
+}
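
For reference, the parser above reads the aggregate "cpu" line of /proc/stat, which sums jiffies across every CPU; parts[1:8] covers the user, nice, system, idle, iowait, irq and softirq columns. A hypothetical line (values are illustrative):

    cpu  4705 356 584 3699176 23060 0 277 0 0 0

Summing those seven value columns and multiplying by 1e9 produces the SystemUsage figure that the client later divides by ClockTicks.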

+ 156 - 0
stats/stats.go

@@ -0,0 +1,156 @@
+package stats
+
+import (
+	"time"
+
+	"github.com/docker/libcontainer"
+	"github.com/docker/libcontainer/cgroups"
+)
+
+type ThrottlingData struct {
+	// Number of periods with throttling active
+	Periods uint64 `json:"periods,omitempty"`
+	// Number of periods when the container hit its throttling limit.
+	ThrottledPeriods uint64 `json:"throttled_periods,omitempty"`
+	// Aggregate time the container was throttled for in nanoseconds.
+	ThrottledTime uint64 `json:"throttled_time,omitempty"`
+}
+
+// All CPU stats are aggregate since container inception.
+type CpuUsage struct {
+	// Total CPU time consumed.
+	// Units: nanoseconds.
+	TotalUsage uint64 `json:"total_usage,omitempty"`
+	// Total CPU time consumed per core.
+	// Units: nanoseconds.
+	PercpuUsage []uint64 `json:"percpu_usage,omitempty"`
+	// Time spent by tasks of the cgroup in kernel mode.
+	// Units: nanoseconds.
+	UsageInKernelmode uint64 `json:"usage_in_kernelmode"`
+	// Time spent by tasks of the cgroup in user mode.
+	// Units: nanoseconds.
+	UsageInUsermode uint64 `json:"usage_in_usermode"`
+}
+
+type CpuStats struct {
+	CpuUsage       CpuUsage       `json:"cpu_usage,omitempty"`
+	SystemUsage    uint64         `json:"system_cpu_usage"`
+	ThrottlingData ThrottlingData `json:"throttling_data,omitempty"`
+}
+
+type MemoryStats struct {
+	// current res_counter usage for memory
+	Usage uint64 `json:"usage,omitempty"`
+	// maximum usage ever recorded.
+	MaxUsage uint64 `json:"max_usage,omitempty"`
+	// TODO(vishh): Export these as stronger types.
+	// all the stats exported via memory.stat.
+	Stats map[string]uint64 `json:"stats,omitempty"`
+	// number of times memory usage hits limits.
+	Failcnt uint64 `json:"failcnt"`
+	Limit   uint64 `json:"limit"`
+}
+
+type BlkioStatEntry struct {
+	Major uint64 `json:"major,omitempty"`
+	Minor uint64 `json:"minor,omitempty"`
+	Op    string `json:"op,omitempty"`
+	Value uint64 `json:"value,omitempty"`
+}
+
+type BlkioStats struct {
+	// number of bytes transferred to and from the block device
+	IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive,omitempty"`
+	IoServicedRecursive     []BlkioStatEntry `json:"io_serviced_recursive,omitempty"`
+	IoQueuedRecursive       []BlkioStatEntry `json:"io_queue_recursive,omitempty"`
+	IoServiceTimeRecursive  []BlkioStatEntry `json:"io_service_time_recursive,omitempty"`
+	IoWaitTimeRecursive     []BlkioStatEntry `json:"io_wait_time_recursive,omitempty"`
+	IoMergedRecursive       []BlkioStatEntry `json:"io_merged_recursive,omitempty"`
+	IoTimeRecursive         []BlkioStatEntry `json:"io_time_recursive,omitempty"`
+	SectorsRecursive        []BlkioStatEntry `json:"sectors_recursive,omitempty"`
+}
+
+type Network struct {
+	RxBytes   uint64 `json:"rx_bytes"`
+	RxPackets uint64 `json:"rx_packets"`
+	RxErrors  uint64 `json:"rx_errors"`
+	RxDropped uint64 `json:"rx_dropped"`
+	TxBytes   uint64 `json:"tx_bytes"`
+	TxPackets uint64 `json:"tx_packets"`
+	TxErrors  uint64 `json:"tx_errors"`
+	TxDropped uint64 `json:"tx_dropped"`
+}
+
+type Stats struct {
+	Read        time.Time   `json:"read"`
+	ClockTicks  int         `json:"clock_ticks"`
+	Interval    int         `json:"interval"` // in ms
+	Network     Network     `json:"network,omitempty"`
+	CpuStats    CpuStats    `json:"cpu_stats,omitempty"`
+	MemoryStats MemoryStats `json:"memory_stats,omitempty"`
+	BlkioStats  BlkioStats  `json:"blkio_stats,omitempty"`
+}
+
+func ToStats(ls *libcontainer.ContainerStats) *Stats {
+	s := &Stats{}
+	if ls.NetworkStats != nil {
+		s.Network = Network{
+			RxBytes:   ls.NetworkStats.RxBytes,
+			RxPackets: ls.NetworkStats.RxPackets,
+			RxErrors:  ls.NetworkStats.RxErrors,
+			RxDropped: ls.NetworkStats.RxDropped,
+			TxBytes:   ls.NetworkStats.TxBytes,
+			TxPackets: ls.NetworkStats.TxPackets,
+			TxErrors:  ls.NetworkStats.TxErrors,
+			TxDropped: ls.NetworkStats.TxDropped,
+		}
+	}
+	cs := ls.CgroupStats
+	if cs != nil {
+		s.BlkioStats = BlkioStats{
+			IoServiceBytesRecursive: copyBlkioEntry(cs.BlkioStats.IoServiceBytesRecursive),
+			IoServicedRecursive:     copyBlkioEntry(cs.BlkioStats.IoServicedRecursive),
+			IoQueuedRecursive:       copyBlkioEntry(cs.BlkioStats.IoQueuedRecursive),
+			IoServiceTimeRecursive:  copyBlkioEntry(cs.BlkioStats.IoServiceTimeRecursive),
+			IoWaitTimeRecursive:     copyBlkioEntry(cs.BlkioStats.IoWaitTimeRecursive),
+			IoMergedRecursive:       copyBlkioEntry(cs.BlkioStats.IoMergedRecursive),
+			IoTimeRecursive:         copyBlkioEntry(cs.BlkioStats.IoTimeRecursive),
+			SectorsRecursive:        copyBlkioEntry(cs.BlkioStats.SectorsRecursive),
+		}
+		cpu := cs.CpuStats
+		s.CpuStats = CpuStats{
+			CpuUsage: CpuUsage{
+				TotalUsage:        cpu.CpuUsage.TotalUsage,
+				PercpuUsage:       cpu.CpuUsage.PercpuUsage,
+				UsageInKernelmode: cpu.CpuUsage.UsageInKernelmode,
+				UsageInUsermode:   cpu.CpuUsage.UsageInUsermode,
+			},
+			ThrottlingData: ThrottlingData{
+				Periods:          cpu.ThrottlingData.Periods,
+				ThrottledPeriods: cpu.ThrottlingData.ThrottledPeriods,
+				ThrottledTime:    cpu.ThrottlingData.ThrottledTime,
+			},
+		}
+		mem := cs.MemoryStats
+		s.MemoryStats = MemoryStats{
+			Usage:    mem.Usage,
+			MaxUsage: mem.MaxUsage,
+			Stats:    mem.Stats,
+			Failcnt:  mem.Failcnt,
+		}
+	}
+	return s
+}
+
+func copyBlkioEntry(entries []cgroups.BlkioStatEntry) []BlkioStatEntry {
+	out := make([]BlkioStatEntry, len(entries))
+	for i, re := range entries {
+		out[i] = BlkioStatEntry{
+			Major: re.Major,
+			Minor: re.Minor,
+			Op:    re.Op,
+			Value: re.Value,
+		}
+	}
+	return out
+}
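
To make the wire format concrete, a minimal sketch (all field values hypothetical) that encodes one Stats value the way the daemon's ContainerStats job does:

package main

import (
	"encoding/json"
	"os"
	"time"

	"github.com/docker/docker/stats"
)

func main() {
	s := &stats.Stats{
		Read:       time.Now(),
		ClockTicks: 100,
		Network:    stats.Network{RxBytes: 788, TxBytes: 708},
	}
	s.CpuStats.CpuUsage.TotalUsage = 500000000 // 0.5s of CPU time, in nanoseconds
	s.MemoryStats.Usage = 8 << 20              // 8 MiB in use
	s.MemoryStats.Limit = 1 << 30              // 1 GiB limit

	// The daemon streams one such JSON document per collection interval;
	// the client decodes them with json.NewDecoder as in streamStats above.
	json.NewEncoder(os.Stdout).Encode(s)
}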