Implements stats for lxc driver

Implements stats and fixes the stats test.

Signed-off-by: Abin Shahab <ashahab@altiscale.com> (github: ashahab-altiscale)
Abin Shahab 2015-01-30 17:29:46 +00:00
parent 347a418f35
commit 1a26ed09ee
11 changed files with 212 additions and 123 deletions

View File

@@ -2,13 +2,14 @@ package execdriver
import (
"errors"
"github.com/docker/docker/daemon/execdriver/native/template"
"github.com/docker/libcontainer"
"github.com/docker/libcontainer/devices"
"io"
"os"
"os/exec"
"strings"
"time"
"github.com/docker/libcontainer"
"github.com/docker/libcontainer/devices"
)
// Context is a generic key value pair that allows
@@ -156,3 +157,71 @@ type Command struct {
LxcConfig []string `json:"lxc_config"`
AppArmorProfile string `json:"apparmor_profile"`
}
func InitContainer(c *Command) *libcontainer.Config {
container := template.New()
container.Hostname = getEnv("HOSTNAME", c.ProcessConfig.Env)
container.Tty = c.ProcessConfig.Tty
container.User = c.ProcessConfig.User
container.WorkingDir = c.WorkingDir
container.Env = c.ProcessConfig.Env
container.Cgroups.Name = c.ID
container.Cgroups.AllowedDevices = c.AllowedDevices
container.MountConfig.DeviceNodes = c.AutoCreatedDevices
container.RootFs = c.Rootfs
container.MountConfig.ReadonlyFs = c.ReadonlyRootfs
// check to see if we are running in ramdisk to disable pivot root
container.MountConfig.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != ""
container.RestrictSys = true
return container
}
func getEnv(key string, env []string) string {
for _, pair := range env {
parts := strings.Split(pair, "=")
if parts[0] == key {
return parts[1]
}
}
return ""
}
func SetupCgroups(container *libcontainer.Config, c *Command) error {
if c.Resources != nil {
container.Cgroups.CpuShares = c.Resources.CpuShares
container.Cgroups.Memory = c.Resources.Memory
container.Cgroups.MemoryReservation = c.Resources.Memory
container.Cgroups.MemorySwap = c.Resources.MemorySwap
container.Cgroups.CpusetCpus = c.Resources.Cpuset
}
return nil
}
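The helpers above are now shared by both exec drivers. As a rough, hypothetical same-package sketch (not part of this commit), resource flags surface in the generated cgroup config like this:

func exampleCgroupSetup() (*libcontainer.Config, error) {
	// Hypothetical: roughly what `docker run -m 512m --cpu-shares 512 ...`
	// produces once InitContainer and SetupCgroups have run.
	c := &Command{Resources: &Resources{Memory: 512 * 1024 * 1024, CpuShares: 512}}
	container := InitContainer(c)
	if err := SetupCgroups(container, c); err != nil {
		return nil, err
	}
	// container.Cgroups.Memory            == 536870912
	// container.Cgroups.MemoryReservation == 536870912 (mirrors the hard limit)
	// container.Cgroups.CpuShares         == 512
	return container, nil
}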
func Stats(stateFile string, containerMemoryLimit int64, machineMemory int64) (*ResourceStats, error) {
state, err := libcontainer.GetState(stateFile)
if err != nil {
if os.IsNotExist(err) {
return nil, ErrNotRunning
}
return nil, err
}
now := time.Now()
stats, err := libcontainer.GetStats(nil, state)
if err != nil {
return nil, err
}
// if the container does not have any memory limit specified, set the
// limit to the machine's memory
memoryLimit := containerMemoryLimit
if memoryLimit == 0 {
memoryLimit = machineMemory
}
return &ResourceStats{
Read: now,
ContainerStats: stats,
MemoryLimit: memoryLimit,
}, nil
}
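A minimal caller-side sketch, assuming an import of the execdriver package; the function name, state directory, and limits are illustrative:

func printStats(stateDir string, memLimit, machineMem int64) error {
	// stateDir is the directory holding libcontainer's state file for the
	// container; a memLimit of 0 falls back to machineMem inside Stats.
	rs, err := execdriver.Stats(stateDir, memLimit, machineMem)
	if err == execdriver.ErrNotRunning {
		return fmt.Errorf("container state not found in %s", stateDir)
	}
	if err != nil {
		return err
	}
	fmt.Printf("stats read at %v, effective memory limit %d bytes\n", rs.Read, rs.MemoryLimit)
	return nil
}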

View File

@@ -12,17 +12,20 @@ import (
"path/filepath"
"strconv"
"strings"
"sync"
"syscall"
"time"
"github.com/kr/pty"
log "github.com/Sirupsen/logrus"
"github.com/docker/docker/daemon/execdriver"
sysinfo "github.com/docker/docker/pkg/system"
"github.com/docker/docker/pkg/term"
"github.com/docker/docker/utils"
"github.com/docker/libcontainer"
"github.com/docker/libcontainer/cgroups"
"github.com/docker/libcontainer/mount/nodes"
"github.com/docker/libcontainer/system"
"github.com/kr/pty"
)
const DriverName = "lxc"
@@ -30,10 +33,18 @@ const DriverName = "lxc"
var ErrExec = errors.New("Unsupported: Exec is not supported by the lxc driver")
type driver struct {
root string // root path for the driver to use
initPath string
apparmor bool
sharedRoot bool
root string // root path for the driver to use
initPath string
apparmor bool
sharedRoot bool
activeContainers map[string]*activeContainer
machineMemory int64
sync.Mutex
}
type activeContainer struct {
container *libcontainer.Config
cmd *exec.Cmd
}
func NewDriver(root, initPath string, apparmor bool) (*driver, error) {
@@ -41,12 +52,17 @@ func NewDriver(root, initPath string, apparmor bool) (*driver, error) {
if err := linkLxcStart(root); err != nil {
return nil, err
}
meminfo, err := sysinfo.ReadMemInfo()
if err != nil {
return nil, err
}
return &driver{
apparmor: apparmor,
root: root,
initPath: initPath,
sharedRoot: rootIsShared(),
apparmor: apparmor,
root: root,
initPath: initPath,
sharedRoot: rootIsShared(),
activeContainers: make(map[string]*activeContainer),
machineMemory: meminfo.MemTotal,
}, nil
}
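The host's total memory is read once at driver construction and reused later by Stats as the reported limit for containers started without a memory limit; an illustrative sketch of the same call:

// Illustrative only: ReadMemInfo reports the host total in bytes, which the
// driver keeps as machineMemory.
if meminfo, err := sysinfo.ReadMemInfo(); err == nil {
	log.Debugf("machine memory: %d bytes", meminfo.MemTotal)
}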
@@ -57,8 +73,9 @@ func (d *driver) Name() string {
func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) {
var (
term execdriver.Terminal
err error
term execdriver.Terminal
err error
dataPath = d.containerDir(c.ID)
)
if c.ProcessConfig.Tty {
@@ -67,6 +84,16 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes)
}
c.ProcessConfig.Terminal = term
container, err := d.createContainer(c)
if err != nil {
return execdriver.ExitStatus{ExitCode: -1}, err
}
d.Lock()
d.activeContainers[c.ID] = &activeContainer{
container: container,
cmd: &c.ProcessConfig.Cmd,
}
d.Unlock()
c.Mounts = append(c.Mounts, execdriver.Mount{
Source: d.initPath,
@@ -108,15 +135,6 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
"--",
c.InitPath,
)
if c.Network.Interface != nil {
params = append(params,
"-g", c.Network.Interface.Gateway,
"-i", fmt.Sprintf("%s/%d", c.Network.Interface.IPAddress, c.Network.Interface.IPPrefixLen),
)
}
params = append(params,
"-mtu", strconv.Itoa(c.Network.Mtu),
)
if c.ProcessConfig.User != "" {
params = append(params, "-u", c.ProcessConfig.User)
@@ -186,25 +204,94 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
close(waitLock)
}()
// Poll lxc for RUNNING status
pid, err := d.waitForStart(c, waitLock)
if err != nil {
terminate := func(terr error) (execdriver.ExitStatus, error) {
if c.ProcessConfig.Process != nil {
c.ProcessConfig.Process.Kill()
c.ProcessConfig.Wait()
}
return execdriver.ExitStatus{ExitCode: -1}, err
return execdriver.ExitStatus{ExitCode: -1}, terr
}
// Poll lxc for RUNNING status
pid, err := d.waitForStart(c, waitLock)
if err != nil {
return terminate(err)
}
started, err := system.GetProcessStartTime(pid)
if err != nil {
return terminate(err)
}
cgroupPaths, err := cgroupPaths(c.ID)
if err != nil {
return terminate(err)
}
state := &libcontainer.State{
InitPid: pid,
InitStartTime: started,
CgroupPaths: cgroupPaths,
}
if err := libcontainer.SaveState(dataPath, state); err != nil {
return terminate(err)
}
c.ContainerPid = pid
if startCallback != nil {
log.Debugf("Invoking startCallback")
startCallback(&c.ProcessConfig, pid)
}
oomKill := false
oomKillNotification, err := libcontainer.NotifyOnOOM(state)
if err == nil {
_, oomKill = <-oomKillNotification
log.Debugf("oomKill error %s waitErr %s", oomKill, waitErr)
} else {
log.Warnf("WARNING: Your kernel does not support OOM notifications: %s", err)
}
<-waitLock
return execdriver.ExitStatus{ExitCode: getExitCode(c)}, waitErr
// check oom error
exitCode := getExitCode(c)
if oomKill {
exitCode = 137
}
return execdriver.ExitStatus{ExitCode: exitCode, OOMKilled: oomKill}, waitErr
}
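Exit code 137 follows the usual convention of 128 plus the terminating signal number; SIGKILL is signal 9, which is what the kernel delivers on an OOM kill:

// 128 + SIGKILL(9) = 137, the exit code forced above when the OOM
// notification fired before the container exited.
const oomKilledExitCode = 128 + 9 // 137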
// createContainer populates and configures the container type with the
// data provided by the execdriver.Command
func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Config, error) {
container := execdriver.InitContainer(c)
if err := execdriver.SetupCgroups(container, c); err != nil {
return nil, err
}
return container, nil
}
// Return a map of subsystem -> container cgroup path
func cgroupPaths(containerId string) (map[string]string, error) {
subsystems, err := cgroups.GetAllSubsystems()
if err != nil {
return nil, err
}
log.Debugf("subsystems: %s", subsystems)
paths := make(map[string]string)
for _, subsystem := range subsystems {
cgroupRoot, cgroupDir, err := findCgroupRootAndDir(subsystem)
log.Debugf("cgroup path %s %s", cgroupRoot, cgroupDir)
if err != nil {
// unsupported subsystem
continue
}
path := filepath.Join(cgroupRoot, cgroupDir, "lxc", containerId)
paths[subsystem] = path
}
return paths, nil
}
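On a typical host where the daemon runs in the root cgroup, the helper resolves per-subsystem paths like the following (mountpoints vary by distribution; illustrative only):

// Illustrative result of cgroupPaths("abc123"); unsupported subsystems are
// simply absent from the returned map.
map[string]string{
	"memory":  "/sys/fs/cgroup/memory/lxc/abc123",
	"cpu":     "/sys/fs/cgroup/cpu/lxc/abc123",
	"cpuacct": "/sys/fs/cgroup/cpuacct/lxc/abc123",
}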
/// Return the exit code of the process
@@ -348,17 +435,25 @@ func (d *driver) Info(id string) execdriver.Info {
}
}
func findCgroupRootAndDir(subsystem string) (string, string, error) {
cgroupRoot, err := cgroups.FindCgroupMountpoint(subsystem)
if err != nil {
return "", "", err
}
cgroupDir, err := cgroups.GetThisCgroupDir(subsystem)
if err != nil {
return "", "", err
}
return cgroupRoot, cgroupDir, nil
}
func (d *driver) GetPidsForContainer(id string) ([]int, error) {
pids := []int{}
// cpu is chosen because it is the only non-optional subsystem in cgroups
subsystem := "cpu"
cgroupRoot, err := cgroups.FindCgroupMountpoint(subsystem)
if err != nil {
return pids, err
}
cgroupDir, err := cgroups.GetThisCgroupDir(subsystem)
cgroupRoot, cgroupDir, err := findCgroupRootAndDir(subsystem)
if err != nil {
return pids, err
}
@@ -418,8 +513,12 @@ func rootIsShared() bool {
return true
}
func (d *driver) containerDir(containerId string) string {
return path.Join(d.root, "containers", containerId)
}
func (d *driver) generateLXCConfig(c *execdriver.Command) (string, error) {
root := path.Join(d.root, "containers", c.ID, "config.lxc")
root := path.Join(d.containerDir(c.ID), "config.lxc")
fo, err := os.Create(root)
if err != nil {
@@ -537,6 +636,5 @@ func (d *driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessCo
}
func (d *driver) Stats(id string) (*execdriver.ResourceStats, error) {
return nil, fmt.Errorf("container stats are not supported with LXC")
return execdriver.Stats(d.containerDir(id), d.activeContainers[id].container.Cgroups.Memory, d.machineMemory)
}
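One caveat the commit leaves open: indexing d.activeContainers for an id the driver is not tracking dereferences a nil entry. A hypothetical defensive variant (method name invented for illustration) could guard the lookup:

func (d *driver) statsChecked(id string) (*execdriver.ResourceStats, error) {
	// Guard the map access with the driver's mutex and fail cleanly when the
	// container is not tracked by this driver.
	d.Lock()
	active := d.activeContainers[id]
	d.Unlock()
	if active == nil {
		return nil, execdriver.ErrNotRunning
	}
	return execdriver.Stats(d.containerDir(id), active.container.Cgroups.Memory, d.machineMemory)
}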

View File

@@ -79,11 +79,8 @@ func getArgs() *InitArgs {
var (
// Get cmdline arguments
user = flag.String("u", "", "username or uid")
gateway = flag.String("g", "", "gateway address")
ip = flag.String("i", "", "ip address")
workDir = flag.String("w", "", "workdir")
privileged = flag.Bool("privileged", false, "privileged mode")
mtu = flag.Int("mtu", 1500, "interface mtu")
capAdd = flag.String("cap-add", "", "capabilities to add")
capDrop = flag.String("cap-drop", "", "capabilities to drop")
)
@@ -92,12 +89,9 @@ return &InitArgs{
return &InitArgs{
User: *user,
Gateway: *gateway,
Ip: *ip,
WorkDir: *workDir,
Privileged: *privileged,
Args: flag.Args(),
Mtu: *mtu,
CapAdd: *capAdd,
CapDrop: *capDrop,
}

View File

@@ -4,12 +4,10 @@ package native
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"github.com/docker/docker/daemon/execdriver"
"github.com/docker/docker/daemon/execdriver/native/template"
"github.com/docker/libcontainer"
"github.com/docker/libcontainer/apparmor"
"github.com/docker/libcontainer/devices"
@@ -20,22 +18,7 @@ import (
// createContainer populates and configures the container type with the
// data provided by the execdriver.Command
func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Config, error) {
container := template.New()
container.Hostname = getEnv("HOSTNAME", c.ProcessConfig.Env)
container.Tty = c.ProcessConfig.Tty
container.User = c.ProcessConfig.User
container.WorkingDir = c.WorkingDir
container.Env = c.ProcessConfig.Env
container.Cgroups.Name = c.ID
container.Cgroups.AllowedDevices = c.AllowedDevices
container.MountConfig.DeviceNodes = c.AutoCreatedDevices
container.RootFs = c.Rootfs
container.MountConfig.ReadonlyFs = c.ReadonlyRootfs
// check to see if we are running in ramdisk to disable pivot root
container.MountConfig.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != ""
container.RestrictSys = true
container := execdriver.InitContainer(c)
if err := d.createIpc(container, c); err != nil {
return nil, err
@@ -63,7 +46,7 @@ func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Config, e
container.AppArmorProfile = c.AppArmorProfile
}
if err := d.setupCgroups(container, c); err != nil {
if err := execdriver.SetupCgroups(container, c); err != nil {
return nil, err
}
@@ -189,18 +172,6 @@ func (d *driver) setCapabilities(container *libcontainer.Config, c *execdriver.C
return err
}
func (d *driver) setupCgroups(container *libcontainer.Config, c *execdriver.Command) error {
if c.Resources != nil {
container.Cgroups.CpuShares = c.Resources.CpuShares
container.Cgroups.Memory = c.Resources.Memory
container.Cgroups.MemoryReservation = c.Resources.Memory
container.Cgroups.MemorySwap = c.Resources.MemorySwap
container.Cgroups.CpusetCpus = c.Resources.Cpuset
}
return nil
}
func (d *driver) setupMounts(container *libcontainer.Config, c *execdriver.Command) error {
for _, m := range c.Mounts {
container.MountConfig.Mounts = append(container.MountConfig.Mounts, &mount.Mount{

View File

@@ -11,10 +11,8 @@ import (
"os"
"os/exec"
"path/filepath"
"strings"
"sync"
"syscall"
"time"
log "github.com/Sirupsen/logrus"
"github.com/docker/docker/daemon/execdriver"
@@ -291,40 +289,7 @@ func (d *driver) Clean(id string) error {
}
func (d *driver) Stats(id string) (*execdriver.ResourceStats, error) {
c := d.activeContainers[id]
state, err := libcontainer.GetState(filepath.Join(d.root, id))
if err != nil {
if os.IsNotExist(err) {
return nil, execdriver.ErrNotRunning
}
return nil, err
}
now := time.Now()
stats, err := libcontainer.GetStats(nil, state)
if err != nil {
return nil, err
}
memoryLimit := c.container.Cgroups.Memory
// if the container does not have any memory limit specified set the
// limit to the machines memory
if memoryLimit == 0 {
memoryLimit = d.machineMemory
}
return &execdriver.ResourceStats{
Read: now,
ContainerStats: stats,
MemoryLimit: memoryLimit,
}, nil
}
func getEnv(key string, env []string) string {
for _, pair := range env {
parts := strings.Split(pair, "=")
if parts[0] == key {
return parts[1]
}
}
return ""
return execdriver.Stats(filepath.Join(d.root, id), d.activeContainers[id].container.Cgroups.Memory, d.machineMemory)
}
type TtyConsole struct {

View File

@@ -13,8 +13,6 @@ CONTAINER [CONTAINER...]
Display a live stream of one or more containers' resource usage statistics
Note: this functionality currently only works when using the *libcontainer* exec-driver.
# OPTIONS
**--help**
Print usage statement

View File

@@ -86,8 +86,6 @@ root filesystem as read only.
**New!**
This endpoint returns a live stream of a container's resource usage statistics.
> **Note**: this functionality currently only works when using the *libcontainer* exec-driver.
## v1.16

View File

@@ -524,8 +524,6 @@ Status Codes:
This endpoint returns a live stream of a container's resource usage statistics.
> **Note**: this functionality currently only works when using the *libcontainer* exec-driver.
**Example request**:
GET /containers/redis1/stats HTTP/1.1

View File

@@ -2007,8 +2007,6 @@ more details on finding shared images from the command line.
--help=false Print usage
> **Note**: this functionality currently only works when using the *libcontainer* exec-driver.
Running `docker stats` on multiple containers
$ sudo docker stats redis1 redis2

View File

@@ -35,7 +35,7 @@ func TestEventsContainerFailStartDie(t *testing.T) {
out, _, _ := dockerCmd(t, "images", "-q")
image := strings.Split(out, "\n")[0]
eventsCmd := exec.Command(dockerBinary, "run", "-d", "--name", "testeventdie", image, "blerg")
eventsCmd := exec.Command(dockerBinary, "run", "--name", "testeventdie", image, "blerg")
_, _, err := runCommandWithOutput(eventsCmd)
if err == nil {
t.Fatalf("Container run with command blerg should have failed, but it did not")

View File

@@ -58,8 +58,8 @@ func TestHelpWidth(t *testing.T) {
if len(line) > 80 {
t.Fatalf("Line is too long(%d chars):\n%s", len(line), line)
}
if scanForHome && strings.Contains(line, `=`+home) {
t.Fatalf("Line should use '%q' instead of %q:\n%s", homedir.GetShortcutString(), home, line)
if home != "" && strings.Contains(line, home) {
t.Fatalf("Help for %q should use ~ instead of %q on:\n%s", command, home, line)
}
if runtime.GOOS != "windows" {
i := strings.Index(line, homedir.GetShortcutString())