package client

import (
	"encoding/json"
	"fmt"
	"io"
	"sort"
	"strings"
	"sync"
	"text/tabwriter"
	"time"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/events"
	"github.com/docker/docker/api/types/filters"
	Cli "github.com/docker/docker/cli"
	"github.com/docker/go-units"
)
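
// containerStats holds the latest resource usage sample for one container.
// The exported fields are what Display prints; mu guards concurrent access
// from the Collect goroutine, and err records a failed stats request.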
type containerStats struct {
	Name             string
	CPUPercentage    float64
	Memory           float64
	MemoryLimit      float64
	MemoryPercentage float64
	NetworkRx        float64
	NetworkTx        float64
	BlockRead        float64
	BlockWrite       float64
	mu               sync.RWMutex
	err              error
}
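
// stats is the set of containers currently being displayed; mu guards cs,
// which is appended to and pruned as containers are created and die.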
type stats struct {
	mu sync.Mutex
	cs []*containerStats
}
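
// Collect requests stats for the container from the daemon and keeps s up to
// date as samples arrive. With streamStats set it loops until the stream ends
// or fails; otherwise it records a single sample and returns.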
func (s *containerStats) Collect(cli *DockerCli, streamStats bool) {
	responseBody, err := cli.client.ContainerStats(s.Name, streamStats)
	if err != nil {
		s.mu.Lock()
		s.err = err
		s.mu.Unlock()
		return
	}
	defer responseBody.Close()

	var (
		previousCPU    uint64
		previousSystem uint64
		dec            = json.NewDecoder(responseBody)
		u              = make(chan error, 1)
	)
	go func() {
		for {
			var v *types.StatsJSON
			if err := dec.Decode(&v); err != nil {
				u <- err
				return
			}

			var memPercent = 0.0
			var cpuPercent = 0.0

			// MemoryStats.Limit is only 0 when the container is not running
			// and we have not received any data from the cgroup yet.
			if v.MemoryStats.Limit != 0 {
				memPercent = float64(v.MemoryStats.Usage) / float64(v.MemoryStats.Limit) * 100.0
			}

			previousCPU = v.PreCPUStats.CPUUsage.TotalUsage
			previousSystem = v.PreCPUStats.SystemUsage
			cpuPercent = calculateCPUPercent(previousCPU, previousSystem, v)
			blkRead, blkWrite := calculateBlockIO(v.BlkioStats)
			s.mu.Lock()
			s.CPUPercentage = cpuPercent
			s.Memory = float64(v.MemoryStats.Usage)
			s.MemoryLimit = float64(v.MemoryStats.Limit)
			s.MemoryPercentage = memPercent
			s.NetworkRx, s.NetworkTx = calculateNetwork(v.Networks)
			s.BlockRead = float64(blkRead)
			s.BlockWrite = float64(blkWrite)
			s.mu.Unlock()
			u <- nil
			if !streamStats {
				return
			}
		}
	}()
	for {
		select {
		case <-time.After(2 * time.Second):
			// zero out the values if we have not received an update within
			// the specified duration.
			s.mu.Lock()
			s.CPUPercentage = 0
			s.Memory = 0
			s.MemoryPercentage = 0
			s.MemoryLimit = 0
			s.NetworkRx = 0
			s.NetworkTx = 0
			s.BlockRead = 0
			s.BlockWrite = 0
			s.mu.Unlock()
		case err := <-u:
			if err != nil {
				s.mu.Lock()
				s.err = err
				s.mu.Unlock()
				return
			}
		}
		if !streamStats {
			return
		}
	}
}
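
// Display writes a single tab-separated line of the container's current
// statistics to w, or returns the error recorded by Collect, which signals
// the caller to drop this container from the display.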
func (s *containerStats) Display(w io.Writer) error {
	s.mu.RLock()
	defer s.mu.RUnlock()
	if s.err != nil {
		return s.err
	}
	fmt.Fprintf(w, "%s\t%.2f%%\t%s / %s\t%.2f%%\t%s / %s\t%s / %s\n",
		s.Name,
		s.CPUPercentage,
		units.HumanSize(s.Memory), units.HumanSize(s.MemoryLimit),
		s.MemoryPercentage,
		units.HumanSize(s.NetworkRx), units.HumanSize(s.NetworkTx),
		units.HumanSize(s.BlockRead), units.HumanSize(s.BlockWrite))
	return nil
}

// CmdStats displays a live stream of resource usage statistics for one or more containers.
//
// This shows real-time information on CPU usage, memory usage, network I/O, and block I/O.
//
// Usage: docker stats [OPTIONS] [CONTAINER...]
func (cli *DockerCli) CmdStats(args ...string) error {
	cmd := Cli.Subcmd("stats", []string{"[CONTAINER...]"}, Cli.DockerCommands["stats"].Description, true)
	all := cmd.Bool([]string{"a", "-all"}, false, "Show all containers (default shows just running)")
	noStream := cmd.Bool([]string{"-no-stream"}, false, "Disable streaming stats and only pull the first result")

	cmd.ParseFlags(args, true)

	names := cmd.Args()
	showAll := len(names) == 0

	if showAll {
		options := types.ContainerListOptions{
			All: *all,
		}
		cs, err := cli.client.ContainerList(options)
		if err != nil {
			return err
		}
		for _, c := range cs {
			names = append(names, c.ID[:12])
		}
	}
	if len(names) == 0 && !showAll {
		return fmt.Errorf("No containers found")
	}
	sort.Strings(names)

	var (
		cStats = stats{}
		w      = tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
	)
	printHeader := func() {
		if !*noStream {
			// clear the screen and move the cursor to the top-left corner
			fmt.Fprint(cli.out, "\033[2J")
			fmt.Fprint(cli.out, "\033[H")
		}
		io.WriteString(w, "CONTAINER\tCPU %\tMEM USAGE / LIMIT\tMEM %\tNET I/O\tBLOCK I/O\n")
	}
	for _, n := range names {
		s := &containerStats{Name: n}
		// no need to lock here since only the main goroutine is running at this point
		cStats.cs = append(cStats.cs, s)
		go s.Collect(cli, !*noStream)
	}
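
	// When no container names were given, watch the daemon's event stream so
	// that newly created containers show up and containers that die drop out.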
	closeChan := make(chan error)
	if showAll {
		type watch struct {
			cid   string
			event string
			err   error
		}
		getNewContainers := func(c chan<- watch) {
			f := filters.NewArgs()
			f.Add("type", "container")
			options := types.EventsOptions{
				Filters: f,
			}
			resBody, err := cli.client.Events(options)
			if err != nil {
				c <- watch{err: err}
				return
			}
			defer resBody.Close()

			decodeEvents(resBody, func(event events.Message, err error) error {
				if err != nil {
					c <- watch{err: err}
					return nil
				}

				c <- watch{event.ID[:12], event.Action, nil}
				return nil
			})
		}
		go func(stopChan chan<- error) {
			cChan := make(chan watch)
			go getNewContainers(cChan)
			for {
				c := <-cChan
				if c.err != nil {
					stopChan <- c.err
					return
				}
				switch c.event {
				case "create":
					s := &containerStats{Name: c.cid}
					cStats.mu.Lock()
					cStats.cs = append(cStats.cs, s)
					cStats.mu.Unlock()
					go s.Collect(cli, !*noStream)
				case "stop":
				case "die":
					if !*all {
						// cStats cannot be a map keyed by ID because ranging
						// over a map is unordered, which would make containers
						// jump around in the displayed list.
						remove := -1
						cStats.mu.Lock()
						for i, s := range cStats.cs {
							if s.Name == c.cid {
								remove = i
								break
							}
						}
						// only remove the entry if the container was found;
						// a zero default would otherwise evict index 0
						if remove != -1 {
							cStats.cs = append(cStats.cs[:remove], cStats.cs[remove+1:]...)
						}
						cStats.mu.Unlock()
					}
				}
			}
		}(closeChan)
	} else {
		close(closeChan)
	}
	// do a quick pause so that any failed connections for containers that do not
	// exist are able to be evicted before we display the initial or default values.
	time.Sleep(1500 * time.Millisecond)
	var errs []string
	cStats.mu.Lock()
	for _, c := range cStats.cs {
		c.mu.Lock()
		if c.err != nil {
			errs = append(errs, fmt.Sprintf("%s: %v", c.Name, c.err))
		}
		c.mu.Unlock()
	}
	cStats.mu.Unlock()
	if len(errs) > 0 {
		return fmt.Errorf("%s", strings.Join(errs, ", "))
	}
	for range time.Tick(500 * time.Millisecond) {
		printHeader()
		toRemove := []int{}
		cStats.mu.Lock()
		for i, s := range cStats.cs {
			if err := s.Display(w); err != nil && !*noStream {
				toRemove = append(toRemove, i)
			}
		}
		// remove in reverse order so the earlier indices stay valid
		for j := len(toRemove) - 1; j >= 0; j-- {
			i := toRemove[j]
			cStats.cs = append(cStats.cs[:i], cStats.cs[i+1:]...)
		}
		if len(cStats.cs) == 0 && !showAll {
			cStats.mu.Unlock()
			return nil
		}
		cStats.mu.Unlock()
		w.Flush()
		if *noStream {
			break
		}
		select {
		case err, ok := <-closeChan:
			if ok {
				if err != nil {
					// suppress "unexpected EOF" in the CLI when the daemon
					// restarts so the command shuts down cleanly
					if err == io.ErrUnexpectedEOF {
						return nil
					}
					return err
				}
			}
		default:
			// no close event pending; keep streaming
		}
	}
	return nil
}
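
// calculateCPUPercent returns the container's CPU usage as a percentage of
// total host CPU time: the container's CPU delta over the system-wide CPU
// delta between two readings, scaled by the number of CPUs and by 100.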
func calculateCPUPercent(previousCPU, previousSystem uint64, v *types.StatsJSON) float64 {
	var (
		cpuPercent = 0.0
		// calculate the change in CPU usage of the container between readings
		cpuDelta = float64(v.CPUStats.CPUUsage.TotalUsage) - float64(previousCPU)
		// calculate the change for the entire system between readings
		systemDelta = float64(v.CPUStats.SystemUsage) - float64(previousSystem)
	)

	if systemDelta > 0.0 && cpuDelta > 0.0 {
		cpuPercent = (cpuDelta / systemDelta) * float64(len(v.CPUStats.CPUUsage.PercpuUsage)) * 100.0
	}
	return cpuPercent
}
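
// calculateBlockIO sums the per-device read and write byte counters reported
// in BlkioStats.IoServiceBytesRecursive.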
func calculateBlockIO(blkio types.BlkioStats) (blkRead uint64, blkWrite uint64) {
	for _, bioEntry := range blkio.IoServiceBytesRecursive {
		switch strings.ToLower(bioEntry.Op) {
		case "read":
			blkRead += bioEntry.Value
		case "write":
			blkWrite += bioEntry.Value
		}
	}
	return
}
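
// calculateNetwork totals received and transmitted bytes across all of the
// container's network interfaces.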
func calculateNetwork(network map[string]types.NetworkStats) (float64, float64) {
	var rx, tx float64

	for _, v := range network {
		rx += float64(v.RxBytes)
		tx += float64(v.TxBytes)
	}
	return rx, tx
}