package utils

import (
	"bytes"
	"crypto/sha1"
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"index/suffixarray"
	"io"
	"io/ioutil"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"regexp"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"time"
)

var (
	IAMSTATIC bool   // whether or not Docker itself was compiled statically via ./hack/make.sh binary
	INITSHA1  string // sha1sum of separate static dockerinit, if Docker itself was compiled dynamically via ./hack/make.sh dynbinary
)

// A common interface to access the Fatal method of
// both testing.B and testing.T.
type Fataler interface {
	Fatal(args ...interface{})
}

// ListOpts type
type ListOpts []string

func (opts *ListOpts) String() string {
	return fmt.Sprint(*opts)
}

func (opts *ListOpts) Set(value string) error {
	*opts = append(*opts, value)
	return nil
}

// Go is a basic promise implementation: it wraps a call to a function in a goroutine
// and returns a channel which will later receive the function's return value.
func Go(f func() error) chan error {
	ch := make(chan error)
	go func() {
		ch <- f()
	}()
	return ch
}
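
// Usage sketch (illustrative, not part of the original file): because Go()
// returns a channel, a caller can bound the wrapped call with a timeout via
// select. doWork is a hypothetical function.
//
//	select {
//	case err := <-Go(doWork):
//		// doWork finished (err may be nil)
//	case <-time.After(10 * time.Second):
//		// doWork is still running; its goroutine stays blocked on the
//		// unbuffered channel until someone receives from it
//	}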

// Download requests the given URL and returns the HTTP response
// (the caller is responsible for closing resp.Body).
func Download(url string, stderr io.Writer) (*http.Response, error) {
	var resp *http.Response
	var err error
	if resp, err = http.Get(url); err != nil {
		return nil, err
	}
	if resp.StatusCode >= 400 {
		return nil, errors.New("Got HTTP status code >= 400: " + resp.Status)
	}
	return resp, nil
}

func logf(level string, format string, a ...interface{}) {
	// Retrieve the caller's file and line for the log prefix
	_, file, line, ok := runtime.Caller(2)
	if !ok {
		file = "<unknown>"
		line = -1
	} else {
		file = file[strings.LastIndex(file, "/")+1:]
	}

	fmt.Fprintf(os.Stderr, fmt.Sprintf("[%s] %s:%d %s\n", level, file, line, format), a...)
}

// Debugf displays the message only if the DEBUG environment variable is set.
// Does nothing otherwise.
// If Docker is in daemon mode, also send the debug info on the socket.
func Debugf(format string, a ...interface{}) {
	if os.Getenv("DEBUG") != "" {
		logf("debug", format, a...)
	}
}

func Errorf(format string, a ...interface{}) {
	logf("error", format, a...)
}

// Reader with progress bar
type progressReader struct {
	reader       io.ReadCloser // Stream to read from
	output       io.Writer     // Where to send progress bar to
	readTotal    int           // Expected stream length (bytes)
	readProgress int           // How much has been read so far (bytes)
	lastUpdate   int           // How many bytes had been read at the last update
	template     string        // Template to print. Default "%v/%v (%v)"
	sf           *StreamFormatter
	newLine      bool
}

func (r *progressReader) Read(p []byte) (n int, err error) {
	read, err := io.ReadCloser(r.reader).Read(p)
	r.readProgress += read
	updateEvery := 1024 * 512 // 512kB
	if r.readTotal > 0 {
		// Update progress for every 1% read if 1% < 512kB
		if increment := int(0.01 * float64(r.readTotal)); increment < updateEvery {
			updateEvery = increment
		}
	}
	if r.readProgress-r.lastUpdate > updateEvery || err != nil {
		if r.readTotal > 0 {
			fmt.Fprintf(r.output, r.template, HumanSize(int64(r.readProgress)), HumanSize(int64(r.readTotal)), fmt.Sprintf("%.0f%%", float64(r.readProgress)/float64(r.readTotal)*100))
		} else {
			fmt.Fprintf(r.output, r.template, r.readProgress, "?", "n/a")
		}
		r.lastUpdate = r.readProgress
	}
	// Send newline when complete
	if r.newLine && err != nil {
		r.output.Write(r.sf.FormatStatus("", ""))
	}
	return read, err
}

func (r *progressReader) Close() error {
	return io.ReadCloser(r.reader).Close()
}

func ProgressReader(r io.ReadCloser, size int, output io.Writer, tpl []byte, sf *StreamFormatter, newline bool) *progressReader {
	return &progressReader{
		reader:    r,
		output:    NewWriteFlusher(output),
		readTotal: size,
		template:  string(tpl),
		sf:        sf,
		newLine:   newline,
	}
}
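
// Usage sketch (illustrative, not from the original file): wrap a response
// body so that copying it reports progress. The pattern mirrors how callers
// typically combine Download, StreamFormatter and ProgressReader; the URL is
// hypothetical.
//
//	resp, err := Download("http://example.com/layer.tar", os.Stderr)
//	if err != nil {
//		return err
//	}
//	defer resp.Body.Close()
//	sf := NewStreamFormatter(false)
//	body := ProgressReader(resp.Body, int(resp.ContentLength), os.Stdout,
//		sf.FormatProgress("", "Downloading", "%v/%v (%v)"), sf, true)
//	_, err = io.Copy(ioutil.Discard, body)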

// HumanDuration returns a human-readable approximation of a duration
// (e.g. "Less than a second", "About a minute", "4 hours").
func HumanDuration(d time.Duration) string {
	if seconds := int(d.Seconds()); seconds < 1 {
		return "Less than a second"
	} else if seconds < 60 {
		return fmt.Sprintf("%d seconds", seconds)
	} else if minutes := int(d.Minutes()); minutes == 1 {
		return "About a minute"
	} else if minutes < 60 {
		return fmt.Sprintf("%d minutes", minutes)
	} else if hours := int(d.Hours()); hours == 1 {
		return "About an hour"
	} else if hours < 48 {
		return fmt.Sprintf("%d hours", hours)
	} else if hours < 24*7*2 {
		return fmt.Sprintf("%d days", hours/24)
	} else if hours < 24*30*3 {
		return fmt.Sprintf("%d weeks", hours/24/7)
	} else if hours < 24*365*2 {
		return fmt.Sprintf("%d months", hours/24/30)
	}
	return fmt.Sprintf("%f years", d.Hours()/24/365)
}

// HumanSize returns a human-readable approximation of a size
// using SI standard (e.g. "44 kB", "17 MB").
func HumanSize(size int64) string {
	i := 0
	var sizef float64
	sizef = float64(size)
	units := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
	for sizef >= 1000.0 {
		sizef = sizef / 1000.0
		i++
	}
	return fmt.Sprintf("%.4g %s", sizef, units[i])
}
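
// Illustrative outputs (derived from the format strings above, not from the
// original file):
//
//	HumanDuration(90 * time.Second) // "About a minute"
//	HumanDuration(3 * time.Hour)    // "3 hours"
//	HumanSize(2048)                 // "2.048 kB"
//	HumanSize(1000000000)           // "1 GB"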

// Parses a human-readable string representing an amount of RAM
// in bytes, kibibytes, mebibytes or gibibytes, and returns the
// number of bytes, or -1 if the string is unparseable.
// Units are case-insensitive, and the 'b' suffix is optional.
func RAMInBytes(size string) (bytes int64, err error) {
	re, error := regexp.Compile("^(\\d+)([kKmMgG])?[bB]?$")
	if error != nil {
		return -1, error
	}

	matches := re.FindStringSubmatch(size)

	if len(matches) != 3 {
		return -1, fmt.Errorf("Invalid size: '%s'", size)
	}

	memLimit, error := strconv.ParseInt(matches[1], 10, 0)
	if error != nil {
		return -1, error
	}

	unit := strings.ToLower(matches[2])

	if unit == "k" {
		memLimit *= 1024
	} else if unit == "m" {
		memLimit *= 1024 * 1024
	} else if unit == "g" {
		memLimit *= 1024 * 1024 * 1024
	}

	return memLimit, nil
}
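
// Illustrative inputs and outputs (binary multipliers, despite the SI-style
// suffixes; not from the original file):
//
//	RAMInBytes("512")  // 512, nil
//	RAMInBytes("32m")  // 33554432, nil
//	RAMInBytes("1G")   // 1073741824, nil
//	RAMInBytes("1.5g") // -1, error: the regexp only accepts integers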

func Trunc(s string, maxlen int) string {
	if len(s) <= maxlen {
		return s
	}
	return s[:maxlen]
}

// Figure out the absolute path of our own binary
func SelfPath() string {
	path, err := exec.LookPath(os.Args[0])
	if err != nil {
		panic(err)
	}
	path, err = filepath.Abs(path)
	if err != nil {
		panic(err)
	}
	return path
}

func dockerInitSha1(target string) string {
	f, err := os.Open(target)
	if err != nil {
		return ""
	}
	defer f.Close()
	h := sha1.New()
	_, err = io.Copy(h, f)
	if err != nil {
		return ""
	}
	return hex.EncodeToString(h.Sum(nil))
}

func isValidDockerInitPath(target string, selfPath string) bool { // target and selfPath should be absolute (InitPath and SelfPath already do this)
	if IAMSTATIC {
		if target == selfPath {
			return true
		}
		targetFileInfo, err := os.Lstat(target)
		if err != nil {
			return false
		}
		selfPathFileInfo, err := os.Lstat(selfPath)
		if err != nil {
			return false
		}
		return os.SameFile(targetFileInfo, selfPathFileInfo)
	}
	return INITSHA1 != "" && dockerInitSha1(target) == INITSHA1
}

// Figure out the path of our dockerinit (which may be SelfPath())
func DockerInitPath() string {
	selfPath := SelfPath()
	if isValidDockerInitPath(selfPath, selfPath) {
		// if we're valid, don't bother checking anything else
		return selfPath
	}
	var possibleInits = []string{
		filepath.Join(filepath.Dir(selfPath), "dockerinit"),
		// "/usr/libexec includes internal binaries that are not intended to be executed directly by users or shell scripts. Applications may use a single subdirectory under /usr/libexec."
		"/usr/libexec/docker/dockerinit",
		"/usr/local/libexec/docker/dockerinit",
	}
	for _, dockerInit := range possibleInits {
		path, err := exec.LookPath(dockerInit)
		if err == nil {
			path, err = filepath.Abs(path)
			if err != nil {
				// LookPath already validated that this file exists and is executable (following symlinks), so how could Abs fail?
				panic(err)
			}
			if isValidDockerInitPath(path, selfPath) {
				return path
			}
		}
	}
	return ""
}

type NopWriter struct{}

func (*NopWriter) Write(buf []byte) (int, error) {
	return len(buf), nil
}

type nopWriteCloser struct {
	io.Writer
}

func (w *nopWriteCloser) Close() error { return nil }

func NopWriteCloser(w io.Writer) io.WriteCloser {
	return &nopWriteCloser{w}
}

type bufReader struct {
	sync.Mutex
	buf    *bytes.Buffer
	reader io.Reader
	err    error
	wait   sync.Cond
}

func NewBufReader(r io.Reader) *bufReader {
	reader := &bufReader{
		buf:    &bytes.Buffer{},
		reader: r,
	}
	reader.wait.L = &reader.Mutex
	go reader.drain()
	return reader
}

func (r *bufReader) drain() {
	buf := make([]byte, 1024)
	for {
		n, err := r.reader.Read(buf)
		r.Lock()
		if err != nil {
			r.err = err
		} else {
			r.buf.Write(buf[0:n])
		}
		r.wait.Signal()
		r.Unlock()
		if err != nil {
			break
		}
	}
}

func (r *bufReader) Read(p []byte) (n int, err error) {
	r.Lock()
	defer r.Unlock()
	for {
		n, err = r.buf.Read(p)
		if n > 0 {
			return n, err
		}
		if r.err != nil {
			return 0, r.err
		}
		r.wait.Wait()
	}
}

func (r *bufReader) Close() error {
	closer, ok := r.reader.(io.ReadCloser)
	if !ok {
		return nil
	}
	return closer.Close()
}
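
// Usage sketch (illustrative, not from the original file): bufReader
// decouples the producer from the consumer by draining the underlying reader
// into a buffer from a background goroutine; Read blocks on the condition
// variable until data or an error arrives. src is hypothetical.
//
//	br := NewBufReader(src) // src is any io.Reader, e.g. a process's stdout
//	buf := make([]byte, 256)
//	n, err := br.Read(buf)  // returns once the drain goroutine has buffered something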

type WriteBroadcaster struct {
	sync.Mutex
	buf     *bytes.Buffer
	writers map[StreamWriter]bool
}

type StreamWriter struct {
	wc     io.WriteCloser
	stream string
}

func (w *WriteBroadcaster) AddWriter(writer io.WriteCloser, stream string) {
	w.Lock()
	sw := StreamWriter{wc: writer, stream: stream}
	w.writers[sw] = true
	w.Unlock()
}

type JSONLog struct {
	Log     string    `json:"log,omitempty"`
	Stream  string    `json:"stream,omitempty"`
	Created time.Time `json:"time"`
}

func (w *WriteBroadcaster) Write(p []byte) (n int, err error) {
	w.Lock()
	defer w.Unlock()
	w.buf.Write(p)
	for sw := range w.writers {
		lp := p
		if sw.stream != "" {
			lp = nil
			for {
				line, err := w.buf.ReadString('\n')
				if err != nil {
					w.buf.Write([]byte(line))
					break
				}
				b, err := json.Marshal(&JSONLog{Log: line, Stream: sw.stream, Created: time.Now()})
				if err != nil {
					// On error, evict the writer
					delete(w.writers, sw)
					continue
				}
				lp = append(lp, b...)
				lp = append(lp, '\n')
			}
		}
		if n, err := sw.wc.Write(lp); err != nil || n != len(lp) {
			// On error, evict the writer
			delete(w.writers, sw)
		}
	}
	return len(p), nil
}

func (w *WriteBroadcaster) CloseWriters() error {
	w.Lock()
	defer w.Unlock()
	for sw := range w.writers {
		sw.wc.Close()
	}
	w.writers = make(map[StreamWriter]bool)
	return nil
}

func NewWriteBroadcaster() *WriteBroadcaster {
	return &WriteBroadcaster{writers: make(map[StreamWriter]bool), buf: bytes.NewBuffer(nil)}
}
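
// Usage sketch (illustrative, not from the original file): one producer, two
// consumers. A writer registered with an empty stream name receives the raw
// bytes; a writer registered with a stream name (e.g. "stdout") receives
// newline-delimited JSONLog records instead. logFile is hypothetical.
//
//	wb := NewWriteBroadcaster()
//	wb.AddWriter(NopWriteCloser(os.Stdout), "") // raw copy
//	wb.AddWriter(logFile, "stdout")             // JSON-encoded log lines
//	wb.Write([]byte("hello\n"))
//	wb.CloseWriters()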

func GetTotalUsedFds() int {
	if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
		Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
	} else {
		return len(fds)
	}
	return -1
}

// TruncIndex allows the retrieval of string identifiers by any of their unique prefixes.
// This is used to retrieve image and container IDs by more convenient shorthand prefixes.
type TruncIndex struct {
	index *suffixarray.Index
	ids   map[string]bool
	bytes []byte
}

func NewTruncIndex() *TruncIndex {
	return &TruncIndex{
		index: suffixarray.New([]byte{' '}),
		ids:   make(map[string]bool),
		bytes: []byte{' '},
	}
}

func (idx *TruncIndex) Add(id string) error {
	if strings.Contains(id, " ") {
		return fmt.Errorf("Illegal character: ' '")
	}
	if _, exists := idx.ids[id]; exists {
		return fmt.Errorf("Id already exists: %s", id)
	}
	idx.ids[id] = true
	idx.bytes = append(idx.bytes, []byte(id+" ")...)
	idx.index = suffixarray.New(idx.bytes)
	return nil
}

func (idx *TruncIndex) Delete(id string) error {
	if _, exists := idx.ids[id]; !exists {
		return fmt.Errorf("No such id: %s", id)
	}
	before, after, err := idx.lookup(id)
	if err != nil {
		return err
	}
	delete(idx.ids, id)
	idx.bytes = append(idx.bytes[:before], idx.bytes[after:]...)
	idx.index = suffixarray.New(idx.bytes)
	return nil
}

func (idx *TruncIndex) lookup(s string) (int, int, error) {
	offsets := idx.index.Lookup([]byte(" "+s), -1)
	//log.Printf("lookup(%s): %v (index bytes: '%s')\n", s, offsets, idx.index.Bytes())
	if offsets == nil || len(offsets) == 0 || len(offsets) > 1 {
		return -1, -1, fmt.Errorf("No such id: %s", s)
	}
	offsetBefore := offsets[0] + 1
	offsetAfter := offsetBefore + strings.Index(string(idx.bytes[offsetBefore:]), " ")
	return offsetBefore, offsetAfter, nil
}

func (idx *TruncIndex) Get(s string) (string, error) {
	before, after, err := idx.lookup(s)
	//log.Printf("Get(%s) bytes=|%s| before=|%d| after=|%d|\n", s, idx.bytes, before, after)
	if err != nil {
		return "", err
	}
	return string(idx.bytes[before:after]), err
}
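
// Usage sketch (illustrative, not from the original file): the index stores
// all IDs in a single space-separated byte string and resolves a prefix with
// a suffix-array lookup, so a prefix only resolves when it is unambiguous.
// The ID shown is made up.
//
//	idx := NewTruncIndex()
//	idx.Add("8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c")
//	id, err := idx.Get("8dbd") // full ID, nil
//	_, err = idx.Get("ffff")   // "", "No such id: ffff"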

// TruncateID returns a shorthand version of a string identifier for convenience.
// A collision with other shorthands is very unlikely, but possible.
// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller
// will need to use a longer prefix, or the full-length Id.
func TruncateID(id string) string {
	shortLen := 12
	if len(id) < shortLen {
		shortLen = len(id)
	}
	return id[:shortLen]
}

// Copied from io.Copy() and modified to handle the escape sequence
// C-p C-q, which detaches from the stream by closing it.
func CopyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error) {
	buf := make([]byte, 32*1024)
	for {
		nr, er := src.Read(buf)
		if nr > 0 {
			// ---- Docker addition
			// char 16 is C-p
			if nr == 1 && buf[0] == 16 {
				nr, er = src.Read(buf)
				// char 17 is C-q
				if nr == 1 && buf[0] == 17 {
					if err := src.Close(); err != nil {
						return 0, err
					}
					return 0, io.EOF
				}
			}
			// ---- End of docker
			nw, ew := dst.Write(buf[0:nr])
			if nw > 0 {
				written += int64(nw)
			}
			if ew != nil {
				err = ew
				break
			}
			if nr != nw {
				err = io.ErrShortWrite
				break
			}
		}
		if er == io.EOF {
			break
		}
		if er != nil {
			err = er
			break
		}
	}
	return written, err
}

func HashData(src io.Reader) (string, error) {
	h := sha256.New()
	if _, err := io.Copy(h, src); err != nil {
		return "", err
	}
	return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
}

type KernelVersionInfo struct {
	Kernel int
	Major  int
	Minor  int
	Flavor string
}

func (k *KernelVersionInfo) String() string {
	flavor := ""
	if len(k.Flavor) > 0 {
		flavor = fmt.Sprintf("-%s", k.Flavor)
	}
	return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, flavor)
}

// Compare two KernelVersionInfo structs.
// Returns -1 if a < b, 0 if a == b, 1 if a > b.
func CompareKernelVersion(a, b *KernelVersionInfo) int {
	if a.Kernel < b.Kernel {
		return -1
	} else if a.Kernel > b.Kernel {
		return 1
	}

	if a.Major < b.Major {
		return -1
	} else if a.Major > b.Major {
		return 1
	}

	if a.Minor < b.Minor {
		return -1
	} else if a.Minor > b.Minor {
		return 1
	}

	return 0
}
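
// Usage sketch (illustrative, not from the original file): checking a
// minimum kernel requirement.
//
//	current, err := GetKernelVersion()
//	if err == nil && CompareKernelVersion(current, &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}) < 0 {
//		// running on a kernel older than 3.8.0
//	}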

func FindCgroupMountpoint(cgroupType string) (string, error) {
	output, err := ioutil.ReadFile("/proc/mounts")
	if err != nil {
		return "", err
	}

	// /proc/mounts has 6 fields per line, one mount per line, e.g.
	// cgroup /sys/fs/cgroup/devices cgroup rw,relatime,devices 0 0
	for _, line := range strings.Split(string(output), "\n") {
		parts := strings.Split(line, " ")
		if len(parts) == 6 && parts[2] == "cgroup" {
			for _, opt := range strings.Split(parts[3], ",") {
				if opt == cgroupType {
					return parts[1], nil
				}
			}
		}
	}

	return "", fmt.Errorf("cgroup mountpoint not found for %s", cgroupType)
}

func GetKernelVersion() (*KernelVersionInfo, error) {
	var (
		err error
	)

	uts, err := uname()
	if err != nil {
		return nil, err
	}

	release := make([]byte, len(uts.Release))

	i := 0
	for _, c := range uts.Release {
		release[i] = byte(c)
		i++
	}

	// Remove the \x00 from the release for Atoi to parse correctly
	release = release[:bytes.IndexByte(release, 0)]

	return ParseRelease(string(release))
}

func ParseRelease(release string) (*KernelVersionInfo, error) {
	var (
		flavor               string
		kernel, major, minor int
		err                  error
	)

	tmp := strings.SplitN(release, "-", 2)
	tmp2 := strings.Split(tmp[0], ".")

	if len(tmp2) > 0 {
		kernel, err = strconv.Atoi(tmp2[0])
		if err != nil {
			return nil, err
		}
	}

	if len(tmp2) > 1 {
		major, err = strconv.Atoi(tmp2[1])
		if err != nil {
			return nil, err
		}
	}

	if len(tmp2) > 2 {
		// Removes "+" because git kernels might set it
		minorUnparsed := strings.Trim(tmp2[2], "+")
		minor, err = strconv.Atoi(minorUnparsed)
		if err != nil {
			return nil, err
		}
	}

	if len(tmp) == 2 {
		flavor = tmp[1]
	} else {
		flavor = ""
	}

	return &KernelVersionInfo{
		Kernel: kernel,
		Major:  major,
		Minor:  minor,
		Flavor: flavor,
	}, nil
}
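
// Illustrative example (not from the original file): a typical uname release
// string splits into version fields plus a flavor.
//
//	ParseRelease("3.8.0-19-generic")
//	// -> &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "19-generic"}, nil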

// FIXME: this is deprecated by CopyWithTar in archive.go
func CopyDirectory(source, dest string) error {
	if output, err := exec.Command("cp", "-ra", source, dest).CombinedOutput(); err != nil {
		return fmt.Errorf("Error copy: %s (%s)", err, output)
	}
	return nil
}

type NopFlusher struct{}

func (f *NopFlusher) Flush() {}

type WriteFlusher struct {
	sync.Mutex
	w       io.Writer
	flusher http.Flusher
}

func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
	wf.Lock()
	defer wf.Unlock()
	n, err = wf.w.Write(b)
	wf.flusher.Flush()
	return n, err
}

// Flush the stream immediately.
func (wf *WriteFlusher) Flush() {
	wf.Lock()
	defer wf.Unlock()
	wf.flusher.Flush()
}

func NewWriteFlusher(w io.Writer) *WriteFlusher {
	var flusher http.Flusher
	if f, ok := w.(http.Flusher); ok {
		flusher = f
	} else {
		flusher = &NopFlusher{}
	}
	return &WriteFlusher{w: w, flusher: flusher}
}

type JSONError struct {
	Code    int    `json:"code,omitempty"`
	Message string `json:"message,omitempty"`
}

type JSONMessage struct {
	Status       string     `json:"status,omitempty"`
	Progress     string     `json:"progress,omitempty"`
	ErrorMessage string     `json:"error,omitempty"` //deprecated
	ID           string     `json:"id,omitempty"`
	From         string     `json:"from,omitempty"`
	Time         int64      `json:"time,omitempty"`
	Error        *JSONError `json:"errorDetail,omitempty"`
}

func (e *JSONError) Error() string {
	return e.Message
}

func NewHTTPRequestError(msg string, res *http.Response) error {
	return &JSONError{
		Message: msg,
		Code:    res.StatusCode,
	}
}

func (jm *JSONMessage) Display(out io.Writer) error {
	if jm.Error != nil {
		if jm.Error.Code == 401 {
			return fmt.Errorf("Authentication is required.")
		}
		return jm.Error
	}
	// ESC[2K erases the current line before redrawing it
	fmt.Fprintf(out, "%c[2K\r", 27)
	if jm.Time != 0 {
		fmt.Fprintf(out, "[%s] ", time.Unix(jm.Time, 0))
	}
	if jm.ID != "" {
		fmt.Fprintf(out, "%s: ", jm.ID)
	}
	if jm.From != "" {
		fmt.Fprintf(out, "(from %s) ", jm.From)
	}
	if jm.Progress != "" {
		fmt.Fprintf(out, "%s %s\r", jm.Status, jm.Progress)
	} else {
		fmt.Fprintf(out, "%s\r\n", jm.Status)
	}
	return nil
}

func DisplayJSONMessagesStream(in io.Reader, out io.Writer) error {
	dec := json.NewDecoder(in)
	ids := make(map[string]int)
	diff := 0
	for {
		jm := JSONMessage{}
		if err := dec.Decode(&jm); err == io.EOF {
			break
		} else if err != nil {
			return err
		}
		if jm.Progress != "" && jm.ID != "" {
			line, ok := ids[jm.ID]
			if !ok {
				line = len(ids)
				ids[jm.ID] = line
				fmt.Fprintf(out, "\n")
				diff = 0
			} else {
				diff = len(ids) - line
			}
			// ESC[<n>A moves the cursor up n lines, back to this ID's line
			fmt.Fprintf(out, "%c[%dA", 27, diff)
		}
		err := jm.Display(out)
		if jm.ID != "" {
			// ESC[<n>B moves the cursor back down n lines
			fmt.Fprintf(out, "%c[%dB", 27, diff)
		}
		if err != nil {
			return err
		}
	}
	return nil
}
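
// Usage sketch (illustrative, not from the original file): the client side of
// a pull/build-style endpoint decodes the daemon's JSON stream and renders
// it, updating each ID's progress line in place. resp is hypothetical.
//
//	if err := DisplayJSONMessagesStream(resp.Body, os.Stdout); err != nil {
//		return err
//	}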

type StreamFormatter struct {
	json bool
	used bool
}

func NewStreamFormatter(json bool) *StreamFormatter {
	return &StreamFormatter{json, false}
}

func (sf *StreamFormatter) FormatStatus(id, format string, a ...interface{}) []byte {
	sf.used = true
	str := fmt.Sprintf(format, a...)
	if sf.json {
		b, err := json.Marshal(&JSONMessage{ID: id, Status: str})
		if err != nil {
			return sf.FormatError(err)
		}
		return b
	}
	return []byte(str + "\r\n")
}

func (sf *StreamFormatter) FormatError(err error) []byte {
	sf.used = true
	if sf.json {
		jsonError, ok := err.(*JSONError)
		if !ok {
			jsonError = &JSONError{Message: err.Error()}
		}
		if b, err := json.Marshal(&JSONMessage{Error: jsonError, ErrorMessage: err.Error()}); err == nil {
			return b
		}
		return []byte("{\"error\":\"format error\"}")
	}
	return []byte("Error: " + err.Error() + "\r\n")
}

func (sf *StreamFormatter) FormatProgress(id, action, progress string) []byte {
	sf.used = true
	if sf.json {
		b, err := json.Marshal(&JSONMessage{Status: action, Progress: progress, ID: id})
		if err != nil {
			return nil
		}
		return b
	}
	return []byte(action + " " + progress + "\r")
}

func (sf *StreamFormatter) Used() bool {
	return sf.used
}

func IsURL(str string) bool {
	return strings.HasPrefix(str, "http://") || strings.HasPrefix(str, "https://")
}

func IsGIT(str string) bool {
	return strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "github.com/")
}

// GetResolvConf opens and reads the content of /etc/resolv.conf.
// It returns it as a byte slice.
func GetResolvConf() ([]byte, error) {
	resolv, err := ioutil.ReadFile("/etc/resolv.conf")
	if err != nil {
		Errorf("Error opening resolv.conf: %s", err)
		return nil, err
	}
	return resolv, nil
}

// CheckLocalDns looks into /etc/resolv.conf and returns true if there is a
// local nameserver (127.0.0.1 or 127.0.1.1) or if there is no nameserver at all.
func CheckLocalDns(resolvConf []byte) bool {
	var parsedResolvConf = StripComments(resolvConf, []byte("#"))
	if !bytes.Contains(parsedResolvConf, []byte("nameserver")) {
		return true
	}
	for _, ip := range [][]byte{
		[]byte("127.0.0.1"),
		[]byte("127.0.1.1"),
	} {
		if bytes.Contains(parsedResolvConf, ip) {
			return true
		}
	}
	return false
}

// StripComments parses input into lines and strips away comments.
func StripComments(input []byte, commentMarker []byte) []byte {
	lines := bytes.Split(input, []byte("\n"))
	var output []byte
	for _, currentLine := range lines {
		var commentIndex = bytes.Index(currentLine, commentMarker)
		if commentIndex == -1 {
			output = append(output, currentLine...)
		} else {
			output = append(output, currentLine[:commentIndex]...)
		}
		output = append(output, []byte("\n")...)
	}
	return output
}

// GetNameserversAsCIDR returns nameservers (if any) listed in
// /etc/resolv.conf as CIDR blocks (e.g., "1.2.3.4/32")
// This function's output is intended for net.ParseCIDR
func GetNameserversAsCIDR(resolvConf []byte) []string {
	var parsedResolvConf = StripComments(resolvConf, []byte("#"))
	nameservers := []string{}
	re := regexp.MustCompile(`^\s*nameserver\s*(([0-9]+\.){3}([0-9]+))\s*$`)
	for _, line := range bytes.Split(parsedResolvConf, []byte("\n")) {
		var ns = re.FindSubmatch(line)
		if len(ns) > 0 {
			nameservers = append(nameservers, string(ns[1])+"/32")
		}
	}

	return nameservers
}
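
// Illustrative example (not from the original file):
//
//	GetNameserversAsCIDR([]byte("# comment\nnameserver 8.8.8.8\nnameserver 10.0.0.2\n"))
//	// -> []string{"8.8.8.8/32", "10.0.0.2/32"}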

func ParseHost(host string, port int, addr string) (string, error) {
	var proto string
	switch {
	case strings.HasPrefix(addr, "unix://"):
		return addr, nil
	case strings.HasPrefix(addr, "tcp://"):
		proto = "tcp"
		addr = strings.TrimPrefix(addr, "tcp://")
	default:
		if strings.Contains(addr, "://") {
			return "", fmt.Errorf("Invalid bind address protocol: %s", addr)
		}
		proto = "tcp"
	}

	if strings.Contains(addr, ":") {
		hostParts := strings.Split(addr, ":")
		if len(hostParts) != 2 {
			return "", fmt.Errorf("Invalid bind address format: %s", addr)
		}
		if hostParts[0] != "" {
			host = hostParts[0]
		}
		if p, err := strconv.Atoi(hostParts[1]); err == nil {
			port = p
		}
	} else {
		host = addr
	}
	return fmt.Sprintf("%s://%s:%d", proto, host, port), nil
}
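
// Illustrative inputs and outputs (not from the original file); host and port
// act as defaults when the address omits them:
//
//	ParseHost("127.0.0.1", 4243, "unix:///var/run/docker.sock") // "unix:///var/run/docker.sock"
//	ParseHost("127.0.0.1", 4243, "tcp://0.0.0.0:2375")          // "tcp://0.0.0.0:2375"
//	ParseHost("127.0.0.1", 4243, "tcp://:5555")                 // "tcp://127.0.0.1:5555"
//	ParseHost("127.0.0.1", 4243, "0.0.0.0")                     // "tcp://0.0.0.0:4243"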

func GetReleaseVersion() string {
	resp, err := http.Get("http://get.docker.io/latest")
	if err != nil {
		return ""
	}
	defer resp.Body.Close()
	if resp.ContentLength > 24 || resp.StatusCode != 200 {
		return ""
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return ""
	}
	return strings.TrimSpace(string(body))
}

// ParseRepositoryTag takes a repository name and returns the repository name and tag.
// The tag can be confusing because of a port in a repository name.
// Ex: localhost.localdomain:5000/samalba/hipache:latest
func ParseRepositoryTag(repos string) (string, string) {
	n := strings.LastIndex(repos, ":")
	if n < 0 {
		return repos, ""
	}
	if tag := repos[n+1:]; !strings.Contains(tag, "/") {
		return repos[:n], tag
	}
	return repos, ""
}
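
// Illustrative inputs and outputs (not from the original file): anything after
// the last ":" is only treated as a tag when it does not contain a "/", so a
// registry port is not mistaken for a tag.
//
//	ParseRepositoryTag("ubuntu")                                     // "ubuntu", ""
//	ParseRepositoryTag("samalba/hipache:latest")                     // "samalba/hipache", "latest"
//	ParseRepositoryTag("localhost.localdomain:5000/samalba/hipache") // "localhost.localdomain:5000/samalba/hipache", ""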

type User struct {
	Uid      string // user id
	Gid      string // primary group id
	Username string
	Name     string
	HomeDir  string
}

// UserLookup checks if the given username or uid is present in /etc/passwd
// and returns the user struct.
// If the username is not found, an error is returned.
func UserLookup(uid string) (*User, error) {
	file, err := ioutil.ReadFile("/etc/passwd")
	if err != nil {
		return nil, err
	}
	for _, line := range strings.Split(string(file), "\n") {
		data := strings.Split(line, ":")
		if len(data) > 5 && (data[0] == uid || data[2] == uid) {
			return &User{
				Uid:      data[2],
				Gid:      data[3],
				Username: data[0],
				Name:     data[4],
				HomeDir:  data[5],
			}, nil
		}
	}
	return nil, fmt.Errorf("User not found in /etc/passwd")
}

type DependencyGraph struct {
	nodes map[string]*DependencyNode
}

type DependencyNode struct {
	id   string
	deps map[*DependencyNode]bool
}

func NewDependencyGraph() DependencyGraph {
	return DependencyGraph{
		nodes: map[string]*DependencyNode{},
	}
}

func (graph *DependencyGraph) addNode(node *DependencyNode) string {
	if graph.nodes[node.id] == nil {
		graph.nodes[node.id] = node
	}
	return node.id
}

func (graph *DependencyGraph) NewNode(id string) string {
	if graph.nodes[id] != nil {
		return id
	}
	nd := &DependencyNode{
		id:   id,
		deps: map[*DependencyNode]bool{},
	}
	graph.addNode(nd)
	return id
}

func (graph *DependencyGraph) AddDependency(node, to string) error {
	if graph.nodes[node] == nil {
		return fmt.Errorf("Node %s does not belong to this graph", node)
	}

	if graph.nodes[to] == nil {
		return fmt.Errorf("Node %s does not belong to this graph", to)
	}

	if node == to {
		return fmt.Errorf("Dependency loops are forbidden!")
	}

	graph.nodes[node].addDependency(graph.nodes[to])
	return nil
}

func (node *DependencyNode) addDependency(to *DependencyNode) bool {
	node.deps[to] = true
	return node.deps[to]
}

func (node *DependencyNode) Degree() int {
	return len(node.deps)
}

// GenerateTraversalMap resolves the graph into successive rounds of node IDs:
// every node in a round only depends on nodes from earlier rounds.
func (graph *DependencyGraph) GenerateTraversalMap() ([][]string, error) {
	Debugf("Generating traversal map. Nodes: %d", len(graph.nodes))
	result := [][]string{}
	processed := map[*DependencyNode]bool{}
	// As long as we haven't processed all nodes...
	for len(processed) < len(graph.nodes) {
		// Use a temporary buffer for processed nodes, otherwise
		// nodes that depend on each other could end up in the same round.
		tmp_processed := []*DependencyNode{}
		for _, node := range graph.nodes {
			// If the node has more dependencies than what we have cleared,
			// it won't be valid for this round.
			if node.Degree() > len(processed) {
				continue
			}
			// If it's already processed, get to the next one
			if processed[node] {
				continue
			}
			// It's not been processed yet and has 0 deps. Add it!
			// (this is a shortcut for what we're doing below)
			if node.Degree() == 0 {
				tmp_processed = append(tmp_processed, node)
				continue
			}
			// If at least one dep hasn't been processed yet, we can't
			// add it.
			ok := true
			for dep := range node.deps {
				if !processed[dep] {
					ok = false
					break
				}
			}
			// All deps have already been processed. Add it!
			if ok {
				tmp_processed = append(tmp_processed, node)
			}
		}
		Debugf("Round %d: found %d available nodes", len(result), len(tmp_processed))
		// If no progress has been made this round,
		// that means we have circular dependencies.
		if len(tmp_processed) == 0 {
			return nil, fmt.Errorf("Could not find a solution to this dependency graph")
		}
		round := []string{}
		for _, nd := range tmp_processed {
			round = append(round, nd.id)
			processed[nd] = true
		}
		result = append(result, round)
	}
	return result, nil
}
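
// Usage sketch (illustrative, not from the original file): node IDs are
// arbitrary strings, and the result groups them into rounds that could be
// processed in parallel.
//
//	g := NewDependencyGraph()
//	g.NewNode("base")
//	g.NewNode("app")
//	g.NewNode("db")
//	g.AddDependency("app", "base") // app depends on base
//	g.AddDependency("db", "base")
//	rounds, err := g.GenerateTraversalMap()
//	// rounds == [][]string{{"base"}, {"app", "db"}}
//	// (order within a round is not deterministic: it follows map iteration)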

// A StatusError reports an unsuccessful exit by a command.
type StatusError struct {
	Status int
}

func (e *StatusError) Error() string {
	return fmt.Sprintf("Status: %d", e.Status)
}

func quote(word string, buf *bytes.Buffer) {
	// Bail out early for "simple" strings
	if word != "" && !strings.ContainsAny(word, "\\'\"`${[|&;<>()~*?! \t\n") {
		buf.WriteString(word)
		return
	}

	buf.WriteString("'")

	for i := 0; i < len(word); i++ {
		b := word[i]
		if b == '\'' {
			// Replace literal ' with a close ', a \', and an open '
			buf.WriteString("'\\''")
		} else {
			buf.WriteByte(b)
		}
	}

	buf.WriteString("'")
}

// Take a list of strings and escape them so they will be handled right
// when passed as arguments to a program via a shell
func ShellQuoteArguments(args []string) string {
	var buf bytes.Buffer
	for i, arg := range args {
		if i != 0 {
			buf.WriteByte(' ')
		}
		quote(arg, &buf)
	}
	return buf.String()
}
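
// Illustrative example (not from the original file): simple words pass
// through unchanged, anything with shell metacharacters is single-quoted.
//
//	ShellQuoteArguments([]string{"echo", "it's a test"})
//	// -> `echo 'it'\''s a test'`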

func IsClosedError(err error) bool {
	/* This comparison is ugly, but unfortunately, net.go doesn't export errClosing.
	 * See:
	 * http://golang.org/src/pkg/net/net.go
	 * https://code.google.com/p/go/issues/detail?id=4337
	 * https://groups.google.com/forum/#!msg/golang-nuts/0_aaCvBmOcM/SptmDyX1XJMJ
	 */
	return strings.HasSuffix(err.Error(), "use of closed network connection")
}

func PartParser(template, data string) (map[string]string, error) {
	// ip:public:private
	templateParts := strings.Split(template, ":")
	parts := strings.Split(data, ":")
	if len(parts) != len(templateParts) {
		return nil, fmt.Errorf("Invalid format to parse. %s should match template %s", data, template)
	}
	out := make(map[string]string, len(templateParts))

	for i, t := range templateParts {
		value := ""
		if len(parts) > i {
			value = parts[i]
		}
		out[t] = value
	}
	return out, nil
}
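
// Illustrative example (not from the original file): the template names the
// colon-separated fields of the data string.
//
//	PartParser("ip:public:private", "192.168.1.1:80:8080")
//	// -> map[string]string{"ip": "192.168.1.1", "public": "80", "private": "8080"}, nil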

var globalTestID string

// TestDirectory creates a new temporary directory and returns its path.
// The contents of the directory at path `templateDir` are copied into the
// new directory.
func TestDirectory(templateDir string) (dir string, err error) {
	if globalTestID == "" {
		globalTestID = RandomString()[:4]
	}
	prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, GetCallerName(2))
	if prefix == "" {
		prefix = "docker-test-"
	}
	dir, err = ioutil.TempDir("", prefix)
	if err = os.Remove(dir); err != nil {
		return
	}
	if templateDir != "" {
		if err = CopyDirectory(templateDir, dir); err != nil {
			return
		}
	}
	return
}

// GetCallerName introspects the call stack and returns the name of the
// function `depth` levels down in the stack.
func GetCallerName(depth int) string {
	// Use the caller function name as a prefix.
	// This helps trace temp directories back to their test.
	pc, _, _, _ := runtime.Caller(depth + 1)
	callerLongName := runtime.FuncForPC(pc).Name()
	parts := strings.Split(callerLongName, ".")
	callerShortName := parts[len(parts)-1]
	return callerShortName
}