diff --git a/README.md b/README.md index d15e125e88..079713ced9 100644 --- a/README.md +++ b/README.md @@ -235,8 +235,8 @@ Docker platform to broaden its application and utility. If you know of another project underway that should be listed here, please help us keep this list up-to-date by submitting a PR. -* [Docker Registry](https://github.com/docker/docker-registry): Registry -server for Docker (hosting/delivering of repositories and images) +* [Docker Registry](https://github.com/docker/distribution): Registry +server for Docker (hosting/delivery of repositories and images) * [Docker Machine](https://github.com/docker/machine): Machine management for a container-centric world * [Docker Swarm](https://github.com/docker/swarm): A Docker-native clustering diff --git a/api/client/cli.go b/api/client/cli.go index fcf6c033fb..e0ab4191bf 100644 --- a/api/client/cli.go +++ b/api/client/cli.go @@ -137,19 +137,12 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, keyFile string, proto, a if tlsConfig != nil { scheme = "https" } - if in != nil { - if file, ok := in.(*os.File); ok { - inFd = file.Fd() - isTerminalIn = term.IsTerminal(inFd) - } + inFd, isTerminalIn = term.GetFdInfo(in) } if out != nil { - if file, ok := out.(*os.File); ok { - outFd = file.Fd() - isTerminalOut = term.IsTerminal(outFd) - } + outFd, isTerminalOut = term.GetFdInfo(out) } if err == nil { diff --git a/api/client/commands.go b/api/client/commands.go index 839676d276..d5dfb2a37c 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -37,11 +37,11 @@ import ( "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/homedir" flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/pkg/networkfs/resolvconf" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/parsers/filters" "github.com/docker/docker/pkg/progressreader" "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/resolvconf" 
"github.com/docker/docker/pkg/signal" "github.com/docker/docker/pkg/symlink" "github.com/docker/docker/pkg/term" @@ -232,6 +232,13 @@ func (cli *DockerCli) CmdBuild(args ...string) error { return err } } + + // windows: show error message about modified file permissions + // FIXME: this is not a valid warning when the daemon is running windows. should be removed once docker engine for windows can build. + if runtime.GOOS == "windows" { + log.Warn(`SECURITY WARNING: You are building a Docker image from Windows against a Linux Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`) + } + var body io.Reader // Setup an upload progress bar // FIXME: ProgressReader shouldn't be this annoying to use @@ -443,17 +450,18 @@ func (cli *DockerCli) CmdLogin(args ...string) error { if err != nil { return err } - var out2 engine.Env - err = out2.Decode(stream) - if err != nil { + + var response types.AuthResponse + if err := json.NewDecoder(stream).Decode(response); err != nil { cli.configFile, _ = registry.LoadConfig(homedir.Get()) return err } + registry.SaveConfig(cli.configFile) fmt.Fprintf(cli.out, "WARNING: login credentials saved in %s.\n", path.Join(homedir.Get(), registry.CONFIGFILE)) - if out2.Get("Status") != "" { - fmt.Fprintf(cli.out, "%s\n", out2.Get("Status")) + if response.Status != "" { + fmt.Fprintf(cli.out, "%s\n", response.Status) } return nil } @@ -762,18 +770,6 @@ func (cli *DockerCli) CmdStart(args ...string) error { cmd.Require(flag.Min, 1) utils.ParseFlags(cmd, args, true) - hijacked := make(chan io.Closer) - // Block the return until the chan gets closed - defer func() { - log.Debugf("CmdStart() returned, defer waiting for hijack to finish.") - if _, ok := <-hijacked; ok { - log.Errorf("Hijack did not finish (chan still open)") - } - if *openStdin || *attach { - cli.in.Close() - } - }() - if *attach || *openStdin { if 
cmd.NArg() > 1 { return fmt.Errorf("You cannot start and attach multiple containers at once.") @@ -809,26 +805,34 @@ func (cli *DockerCli) CmdStart(args ...string) error { v.Set("stdout", "1") v.Set("stderr", "1") + hijacked := make(chan io.Closer) + // Block the return until the chan gets closed + defer func() { + log.Debugf("CmdStart() returned, defer waiting for hijack to finish.") + if _, ok := <-hijacked; ok { + log.Errorf("Hijack did not finish (chan still open)") + } + cli.in.Close() + }() cErr = promise.Go(func() error { return cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), tty, in, cli.out, cli.err, hijacked, nil) }) - } else { - close(hijacked) + + // Acknowledge the hijack before starting + select { + case closer := <-hijacked: + // Make sure that the hijack gets closed when returning (results + // in closing the hijack chan and freeing server's goroutines) + if closer != nil { + defer closer.Close() + } + case err := <-cErr: + if err != nil { + return err + } + } } - // Acknowledge the hijack before starting - select { - case closer := <-hijacked: - // Make sure that the hijack gets closed when returning (results - // in closing the hijack chan and freeing server's goroutines) - if closer != nil { - defer closer.Close() - } - case err := <-cErr: - if err != nil { - return err - } - } var encounteredError error for _, name := range cmd.Args() { _, _, err := readBody(cli.call("POST", "/containers/"+name+"/start", nil, false)) @@ -2492,6 +2496,14 @@ func (cli *DockerCli) CmdRun(args ...string) error { } } + defer func() { + if *flAutoRemove { + if _, _, err = readBody(cli.call("DELETE", "/containers/"+createResponse.ID+"?v=1", nil, false)); err != nil { + log.Errorf("Error deleting container: %s", err) + } + } + }() + //start the container if _, _, err = readBody(cli.call("POST", "/containers/"+createResponse.ID+"/start", nil, false)); err != nil { return err @@ -2529,9 +2541,6 @@ func (cli *DockerCli) CmdRun(args ...string) error { if 
_, status, err = getExitCode(cli, createResponse.ID); err != nil { return err } - if _, _, err := readBody(cli.call("DELETE", "/containers/"+createResponse.ID+"?v=1", nil, false)); err != nil { - return err - } } else { // No Autoremove: Simply retrieve the exit code if !config.Tty { @@ -2669,12 +2678,15 @@ func (cli *DockerCli) CmdExec(args ...string) error { return err } - var execResult engine.Env - if err := execResult.Decode(stream); err != nil { + var response types.ContainerExecCreateResponse + if err := json.NewDecoder(stream).Decode(&response); err != nil { return err } + for _, warning := range response.Warnings { + fmt.Fprintf(cli.err, "WARNING: %s\n", warning) + } - execID := execResult.Get("Id") + execID := response.ID if execID == "" { fmt.Fprintf(cli.out, "exec ID empty") diff --git a/api/server/server.go b/api/server/server.go index d244d2a0ce..a11a4fd9ff 100644 --- a/api/server/server.go +++ b/api/server/server.go @@ -192,7 +192,9 @@ func postAuth(eng *engine.Engine, version version.Version, w http.ResponseWriter if status := engine.Tail(stdoutBuffer, 1); status != "" { var env engine.Env env.Set("Status", status) - return writeJSONEnv(w, http.StatusOK, env) + return writeJSON(w, http.StatusOK, &types.AuthResponse{ + Status: status, + }) } w.WriteHeader(http.StatusNoContent) return nil @@ -1087,6 +1089,20 @@ func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWrite job.Setenv("cpusetcpus", r.FormValue("cpusetcpus")) job.Setenv("cpushares", r.FormValue("cpushares")) + // Job cancellation. Note: not all job types support this. 
+ if closeNotifier, ok := w.(http.CloseNotifier); ok { + finished := make(chan struct{}) + defer close(finished) + go func() { + select { + case <-finished: + case <-closeNotifier.CloseNotify(): + log.Infof("Client disconnected, cancelling job: %v", job) + job.Cancel() + } + }() + } + if err := job.Run(); err != nil { if !job.Stdout.Used() { return err @@ -1141,10 +1157,11 @@ func postContainerExecCreate(eng *engine.Engine, version version.Version, w http return nil } var ( - out engine.Env name = vars["name"] job = eng.Job("execCreate", name) stdoutBuffer = bytes.NewBuffer(nil) + outWarnings []string + warnings = bytes.NewBuffer(nil) ) if err := job.DecodeEnv(r.Body); err != nil { @@ -1152,15 +1169,23 @@ func postContainerExecCreate(eng *engine.Engine, version version.Version, w http } job.Stdout.Add(stdoutBuffer) + // Read warnings from stderr + job.Stderr.Add(warnings) // Register an instance of Exec in container. if err := job.Run(); err != nil { fmt.Fprintf(os.Stderr, "Error setting up exec command in container %s: %s\n", name, err) return err } - // Return the ID - out.Set("Id", engine.Tail(stdoutBuffer, 1)) + // Parse warnings from stderr + scanner := bufio.NewScanner(warnings) + for scanner.Scan() { + outWarnings = append(outWarnings, scanner.Text()) + } - return writeJSONEnv(w, http.StatusCreated, out) + return writeJSON(w, http.StatusCreated, &types.ContainerExecCreateResponse{ + ID: engine.Tail(stdoutBuffer, 1), + Warnings: outWarnings, + }) } // TODO(vishh): Refactor the code to avoid having to specify stream config as part of both create and start. 
@@ -1578,7 +1603,15 @@ func ServeApi(job *engine.Job) engine.Status { chErrors <- err return } - chErrors <- srv.Serve() + job.Eng.OnShutdown(func() { + if err := srv.Close(); err != nil { + log.Error(err) + } + }) + if err = srv.Serve(); err != nil && strings.Contains(err.Error(), "use of closed network connection") { + err = nil + } + chErrors <- err }() } diff --git a/api/types/types.go b/api/types/types.go index f1b1d041ea..21dba7729e 100644 --- a/api/types/types.go +++ b/api/types/types.go @@ -9,3 +9,18 @@ type ContainerCreateResponse struct { // Warnings are any warnings encountered during the creation of the container. Warnings []string `json:"Warnings"` } + +// POST /containers/{name:.*}/exec +type ContainerExecCreateResponse struct { + // ID is the exec ID. + ID string `json:"Id"` + + // Warnings are any warnings encountered during the execution of the command. + Warnings []string `json:"Warnings"` +} + +// POST /auth +type AuthResponse struct { + // Status is the authentication status + Status string `json:"Status"` +} diff --git a/builder/dispatchers.go b/builder/dispatchers.go index 3cb3a9fb3a..4d21a75eb5 100644 --- a/builder/dispatchers.go +++ b/builder/dispatchers.go @@ -427,6 +427,10 @@ func volume(b *Builder, args []string, attributes map[string]bool, original stri b.Config.Volumes = map[string]struct{}{} } for _, v := range args { + v = strings.TrimSpace(v) + if v == "" { + return fmt.Errorf("Volume specified can not be an empty string") + } b.Config.Volumes[v] = struct{}{} } if err := b.commit("", b.Config.Cmd, fmt.Sprintf("VOLUME %v", args)); err != nil { diff --git a/builder/evaluator.go b/builder/evaluator.go index 985656f16a..5e353c02b3 100644 --- a/builder/evaluator.go +++ b/builder/evaluator.go @@ -1,4 +1,4 @@ -// builder is the evaluation step in the Dockerfile parse/evaluate pipeline. +// Package builder is the evaluation step in the Dockerfile parse/evaluate pipeline. 
// // It incorporates a dispatch table based on the parser.Node values (see the // parser package for more information) that are yielded from the parser itself. @@ -20,7 +20,6 @@ package builder import ( - "errors" "fmt" "io" "os" @@ -42,10 +41,6 @@ import ( "github.com/docker/docker/utils" ) -var ( - ErrDockerfileEmpty = errors.New("Dockerfile cannot be empty") -) - // Environment variable interpolation will happen on these statements only. var replaceEnvAllowed = map[string]struct{}{ command.Env: {}, @@ -131,6 +126,8 @@ type Builder struct { cpuShares int64 memory int64 memorySwap int64 + + cancelled <-chan struct{} // When closed, job was cancelled. } // Run the builder with the context. This is the lynchpin of this package. This @@ -166,6 +163,14 @@ func (b *Builder) Run(context io.Reader) (string, error) { b.TmpContainers = map[string]struct{}{} for i, n := range b.dockerfile.Children { + select { + case <-b.cancelled: + log.Debug("Builder: build cancelled!") + fmt.Fprintf(b.OutStream, "Build cancelled") + return "", fmt.Errorf("Build cancelled") + default: + // Not cancelled yet, keep going... 
+ } if err := b.dispatch(i, n); err != nil { if b.ForceRemove { b.clearTmp() @@ -215,7 +220,7 @@ func (b *Builder) readDockerfile() error { return fmt.Errorf("Cannot locate specified Dockerfile: %s", origFile) } if fi.Size() == 0 { - return ErrDockerfileEmpty + return fmt.Errorf("The Dockerfile (%s) cannot be empty", origFile) } f, err := os.Open(filename) @@ -302,7 +307,11 @@ func (b *Builder) dispatch(stepN int, ast *parser.Node) error { var str string str = ast.Value if _, ok := replaceEnvAllowed[cmd]; ok { - str = b.replaceEnv(ast.Value) + var err error + str, err = ProcessWord(ast.Value, b.Config.Env) + if err != nil { + return err + } } strList[i+l] = str msgList[i] = ast.Value diff --git a/builder/internals.go b/builder/internals.go index 67650f75bc..7c22b47b2b 100644 --- a/builder/internals.go +++ b/builder/internals.go @@ -581,6 +581,17 @@ func (b *Builder) run(c *daemon.Container) error { return err } + finished := make(chan struct{}) + defer close(finished) + go func() { + select { + case <-b.cancelled: + log.Debugln("Build cancelled, killing container:", c.ID) + c.Kill() + case <-finished: + } + }() + if b.Verbose { // Block on reading output from container, stop on err or chan closed if err := <-errCh; err != nil { diff --git a/builder/job.go b/builder/job.go index 27591129cd..59df87e8c6 100644 --- a/builder/job.go +++ b/builder/job.go @@ -153,6 +153,7 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status { cpuSetCpus: cpuSetCpus, memory: memory, memorySwap: memorySwap, + cancelled: job.WaitCancelled(), } id, err := builder.Run(context) diff --git a/builder/parser/line_parsers.go b/builder/parser/line_parsers.go index 45c929ee69..6e284d6fc3 100644 --- a/builder/parser/line_parsers.go +++ b/builder/parser/line_parsers.go @@ -90,7 +90,7 @@ func parseNameVal(rest string, key string) (*Node, map[string]bool, error) { if blankOK || len(word) > 0 { words = append(words, word) - // Look for = and if no there assume + // Look for = and if not there 
assume // we're doing the old stuff and // just read the rest of the line if !strings.Contains(word, "=") { @@ -107,12 +107,15 @@ func parseNameVal(rest string, key string) (*Node, map[string]bool, error) { quote = ch blankOK = true phase = inQuote - continue } if ch == '\\' { if pos+1 == len(rest) { continue // just skip \ at end } + // If we're not quoted and we see a \, then always just + // add \ plus the char to the word, even if the char + // is a quote. + word += string(ch) pos++ ch = rune(rest[pos]) } @@ -122,15 +125,17 @@ func parseNameVal(rest string, key string) (*Node, map[string]bool, error) { if phase == inQuote { if ch == quote { phase = inWord - continue } - if ch == '\\' { + // \ is special except for ' quotes - can't escape anything for ' + if ch == '\\' && quote != '\'' { if pos+1 == len(rest) { phase = inWord continue // just skip \ at end } pos++ - ch = rune(rest[pos]) + nextCh := rune(rest[pos]) + word += string(ch) + ch = nextCh } word += string(ch) } @@ -234,17 +239,18 @@ func parseJSON(rest string) (*Node, map[string]bool, error) { var top, prev *Node for _, str := range myJson { - if s, ok := str.(string); !ok { + s, ok := str.(string) + if !ok { return nil, nil, errDockerfileNotStringArray - } else { - node := &Node{Value: s} - if prev == nil { - top = node - } else { - prev.Next = node - } - prev = node } + + node := &Node{Value: s} + if prev == nil { + top = node + } else { + prev.Next = node + } + prev = node } return top, map[string]bool{"json": true}, nil diff --git a/builder/parser/testfiles/env/Dockerfile b/builder/parser/testfiles/env/Dockerfile index bb78503cce..08fa18acec 100644 --- a/builder/parser/testfiles/env/Dockerfile +++ b/builder/parser/testfiles/env/Dockerfile @@ -7,6 +7,14 @@ ENV name=value\ value2 ENV name="value'quote space'value2" ENV name='value"double quote"value2' ENV name=value\ value2 name2=value2\ value3 +ENV name="a\"b" +ENV name="a\'b" +ENV name='a\'b' +ENV name='a\'b'' +ENV name='a\"b' +ENV name="''" +# 
don't put anything after the next line - it must be the last line of the +# Dockerfile and it must end with \ ENV name=value \ name1=value1 \ name2="value2a \ diff --git a/builder/parser/testfiles/env/result b/builder/parser/testfiles/env/result index a473d0fa39..ba0a6dd7cb 100644 --- a/builder/parser/testfiles/env/result +++ b/builder/parser/testfiles/env/result @@ -2,9 +2,15 @@ (env "name" "value") (env "name" "value") (env "name" "value" "name2" "value2") -(env "name" "value value1") -(env "name" "value value2") -(env "name" "value'quote space'value2") -(env "name" "value\"double quote\"value2") -(env "name" "value value2" "name2" "value2 value3") -(env "name" "value" "name1" "value1" "name2" "value2a value2b" "name3" "value3an\"value3b\"" "name4" "value4a\\nvalue4b") +(env "name" "\"value value1\"") +(env "name" "value\\ value2") +(env "name" "\"value'quote space'value2\"") +(env "name" "'value\"double quote\"value2'") +(env "name" "value\\ value2" "name2" "value2\\ value3") +(env "name" "\"a\\\"b\"") +(env "name" "\"a\\'b\"") +(env "name" "'a\\'b'") +(env "name" "'a\\'b''") +(env "name" "'a\\\"b'") +(env "name" "\"''\"") +(env "name" "value" "name1" "value1" "name2" "\"value2a value2b\"" "name3" "\"value3a\\n\\\"value3b\\\"\"" "name4" "\"value4a\\\\nvalue4b\"") diff --git a/builder/shell_parser.go b/builder/shell_parser.go new file mode 100644 index 0000000000..d086645eb0 --- /dev/null +++ b/builder/shell_parser.go @@ -0,0 +1,208 @@ +package builder + +// This will take a single word and an array of env variables and +// process all quotes (" and ') as well as $xxx and ${xxx} env variable +// tokens. Tries to mimic bash shell process. 
+// It doesn't support all flavors of ${xx:...} formats but new ones can +// be added by adding code to the "special ${} format processing" section + +import ( + "fmt" + "strings" + "unicode" +) + +type shellWord struct { + word string + envs []string + pos int +} + +func ProcessWord(word string, env []string) (string, error) { + sw := &shellWord{ + word: word, + envs: env, + pos: 0, + } + return sw.process() +} + +func (sw *shellWord) process() (string, error) { + return sw.processStopOn('\000') +} + +// Process the word, starting at 'pos', and stop when we get to the +// end of the word or the 'stopChar' character +func (sw *shellWord) processStopOn(stopChar rune) (string, error) { + var result string + var charFuncMapping = map[rune]func() (string, error){ + '\'': sw.processSingleQuote, + '"': sw.processDoubleQuote, + '$': sw.processDollar, + } + + for sw.pos < len(sw.word) { + ch := sw.peek() + if stopChar != '\000' && ch == stopChar { + sw.next() + break + } + if fn, ok := charFuncMapping[ch]; ok { + // Call special processing func for certain chars + tmp, err := fn() + if err != nil { + return "", err + } + result += tmp + } else { + // Not special, just add it to the result + ch = sw.next() + if ch == '\\' { + // '\' escapes, except end of line + ch = sw.next() + if ch == '\000' { + continue + } + } + result += string(ch) + } + } + + return result, nil +} + +func (sw *shellWord) peek() rune { + if sw.pos == len(sw.word) { + return '\000' + } + return rune(sw.word[sw.pos]) +} + +func (sw *shellWord) next() rune { + if sw.pos == len(sw.word) { + return '\000' + } + ch := rune(sw.word[sw.pos]) + sw.pos++ + return ch +} + +func (sw *shellWord) processSingleQuote() (string, error) { + // All chars between single quotes are taken as-is + // Note, you can't escape ' + var result string + + sw.next() + + for { + ch := sw.next() + if ch == '\000' || ch == '\'' { + break + } + result += string(ch) + } + return result, nil +} + +func (sw *shellWord) 
processDoubleQuote() (string, error) { + // All chars up to the next " are taken as-is, even ', except any $ chars + // But you can escape " with a \ + var result string + + sw.next() + + for sw.pos < len(sw.word) { + ch := sw.peek() + if ch == '"' { + sw.next() + break + } + if ch == '$' { + tmp, err := sw.processDollar() + if err != nil { + return "", err + } + result += tmp + } else { + ch = sw.next() + if ch == '\\' { + chNext := sw.peek() + + if chNext == '\000' { + // Ignore \ at end of word + continue + } + + if chNext == '"' || chNext == '$' { + // \" and \$ can be escaped, all other \'s are left as-is + ch = sw.next() + } + } + result += string(ch) + } + } + + return result, nil +} + +func (sw *shellWord) processDollar() (string, error) { + sw.next() + ch := sw.peek() + if ch == '{' { + sw.next() + name := sw.processName() + ch = sw.peek() + if ch == '}' { + // Normal ${xx} case + sw.next() + return sw.getEnv(name), nil + } + return "", fmt.Errorf("Unsupported ${} substitution: %s", sw.word) + } + // $xxx case + name := sw.processName() + if name == "" { + return "$", nil + } + return sw.getEnv(name), nil +} + +func (sw *shellWord) processName() string { + // Read in a name (alphanumeric or _) + // If it starts with a numeric then just return $# + var name string + + for sw.pos < len(sw.word) { + ch := sw.peek() + if len(name) == 0 && unicode.IsDigit(ch) { + ch = sw.next() + return string(ch) + } + if !unicode.IsLetter(ch) && !unicode.IsDigit(ch) && ch != '_' { + break + } + ch = sw.next() + name += string(ch) + } + + return name +} + +func (sw *shellWord) getEnv(name string) string { + for _, env := range sw.envs { + i := strings.Index(env, "=") + if i < 0 { + if name == env { + // Should probably never get here, but just in case treat + // it like "var" and "var=" are the same + return "" + } + continue + } + if name != env[:i] { + continue + } + return env[i+1:] + } + return "" +} diff --git a/builder/shell_parser_test.go b/builder/shell_parser_test.go 
new file mode 100644 index 0000000000..79260492f3 --- /dev/null +++ b/builder/shell_parser_test.go @@ -0,0 +1,51 @@ +package builder + +import ( + "bufio" + "os" + "strings" + "testing" +) + +func TestShellParser(t *testing.T) { + file, err := os.Open("words") + if err != nil { + t.Fatalf("Can't open 'words': %s", err) + } + defer file.Close() + + scanner := bufio.NewScanner(file) + envs := []string{"PWD=/home", "SHELL=bash"} + for scanner.Scan() { + line := scanner.Text() + + // Trim comments and blank lines + i := strings.Index(line, "#") + if i >= 0 { + line = line[:i] + } + line = strings.TrimSpace(line) + + if line == "" { + continue + } + + words := strings.Split(line, "|") + if len(words) != 2 { + t.Fatalf("Error in 'words' - should be 2 words:%q", words) + } + + words[0] = strings.TrimSpace(words[0]) + words[1] = strings.TrimSpace(words[1]) + + newWord, err := ProcessWord(words[0], envs) + + if err != nil { + newWord = "error" + } + + if newWord != words[1] { + t.Fatalf("Error. Src: %s Calc: %s Expected: %s", words[0], newWord, words[1]) + } + } +} diff --git a/builder/support.go b/builder/support.go index 6833457f3a..787ff10ccb 100644 --- a/builder/support.go +++ b/builder/support.go @@ -1,50 +1,9 @@ package builder import ( - "regexp" "strings" ) -var ( - // `\\\\+|[^\\]|\b|\A` - match any number of "\\" (ie, properly-escaped backslashes), or a single non-backslash character, or a word boundary, or beginning-of-line - // `\$` - match literal $ - // `[[:alnum:]_]+` - match things like `$SOME_VAR` - // `{[[:alnum:]_]+}` - match things like `${SOME_VAR}` - tokenEnvInterpolation = regexp.MustCompile(`(\\|\\\\+|[^\\]|\b|\A)\$([[:alnum:]_]+|{[[:alnum:]_]+})`) - // this intentionally punts on more exotic interpolations like ${SOME_VAR%suffix} and lets the shell handle those directly -) - -// handle environment replacement. Used in dispatcher. 
-func (b *Builder) replaceEnv(str string) string { - for _, match := range tokenEnvInterpolation.FindAllString(str, -1) { - idx := strings.Index(match, "\\$") - if idx != -1 { - if idx+2 >= len(match) { - str = strings.Replace(str, match, "\\$", -1) - continue - } - - prefix := match[:idx] - stripped := match[idx+2:] - str = strings.Replace(str, match, prefix+"$"+stripped, -1) - continue - } - - match = match[strings.Index(match, "$"):] - matchKey := strings.Trim(match, "${}") - - for _, keyval := range b.Config.Env { - tmp := strings.SplitN(keyval, "=", 2) - if tmp[0] == matchKey { - str = strings.Replace(str, match, tmp[1], -1) - break - } - } - } - - return str -} - func handleJsonArgs(args []string, attributes map[string]bool) []string { if len(args) == 0 { return []string{} diff --git a/builder/words b/builder/words new file mode 100644 index 0000000000..2148f72537 --- /dev/null +++ b/builder/words @@ -0,0 +1,43 @@ +hello | hello +he'll'o | hello +he'llo | hello +he\'llo | he'llo +he\\'llo | he\llo +abc\tdef | abctdef +"abc\tdef" | abc\tdef +'abc\tdef' | abc\tdef +hello\ | hello +hello\\ | hello\ +"hello | hello +"hello\" | hello" +"hel'lo" | hel'lo +'hello | hello +'hello\' | hello\ +"''" | '' +$. | $. +$1 | +he$1x | hex +he$.x | he$.x +he$pwd. | he. 
+he$PWD | he/home +he\$PWD | he$PWD +he\\$PWD | he\/home +he\${} | he${} +he\${}xx | he${}xx +he${} | he +he${}xx | hexx +he${hi} | he +he${hi}xx | hexx +he${PWD} | he/home +he${.} | error +'he${XX}' | he${XX} +"he${PWD}" | he/home +"he'$PWD'" | he'/home' +"$PWD" | /home +'$PWD' | $PWD +'\$PWD' | \$PWD +'"hello"' | "hello" +he\$PWD | he$PWD +"he\$PWD" | he$PWD +'he\$PWD' | he\$PWD +he${PWD | error diff --git a/contrib/completion/bash/docker b/contrib/completion/bash/docker index 115cc15b39..ca874bc10c 100755 --- a/contrib/completion/bash/docker +++ b/contrib/completion/bash/docker @@ -325,7 +325,7 @@ _docker_cp() { (( counter++ )) if [ $cword -eq $counter ]; then - _filedir + _filedir -d return fi ;; diff --git a/contrib/completion/fish/docker.fish b/contrib/completion/fish/docker.fish index efee52f806..d3237588ef 100644 --- a/contrib/completion/fish/docker.fish +++ b/contrib/completion/fish/docker.fish @@ -16,7 +16,7 @@ function __fish_docker_no_subcommand --description 'Test if docker has yet to be given the subcommand' for i in (commandline -opc) - if contains -- $i attach build commit cp create diff events exec export history images import info insert inspect kill load login logout logs pause port ps pull push restart rm rmi run save search start stop tag top unpause version wait + if contains -- $i attach build commit cp create diff events exec export history images import info inspect kill load login logout logs pause port ps pull push rename restart rm rmi run save search start stop tag top unpause version wait return 1 end end diff --git a/contrib/completion/zsh/_docker b/contrib/completion/zsh/_docker index 3215814313..28398f7524 100644 --- a/contrib/completion/zsh/_docker +++ b/contrib/completion/zsh/_docker @@ -270,11 +270,6 @@ __docker_subcommand () { {-q,--quiet}'[Only show numeric IDs]' \ ':repository:__docker_repositories' ;; - (inspect) - _arguments \ - {-f,--format=-}'[Format the output using the given go template]:template: ' \ - 
'*:containers:__docker_containers' - ;; (import) _arguments \ ':URL:(- http:// file://)' \ @@ -282,15 +277,10 @@ __docker_subcommand () { ;; (info) ;; - (import) + (inspect) _arguments \ - ':URL:(- http:// file://)' \ - ':repository:__docker_repositories_with_tags' - ;; - (insert) - _arguments '1:containers:__docker_containers' \ - '2:URL:(http:// file://)' \ - '3:file:_files' + {-f,--format=-}'[Format the output using the given go template]:template: ' \ + '*:containers:__docker_containers' ;; (kill) _arguments \ diff --git a/daemon/config.go b/daemon/config.go index 4adc025eef..9b38fde4ed 100644 --- a/daemon/config.go +++ b/daemon/config.go @@ -83,7 +83,7 @@ func (config *Config) InstallFlags() { opts.LabelListVar(&config.Labels, []string{"-label"}, "Set key=value labels to the daemon") config.Ulimits = make(map[string]*ulimit.Ulimit) opts.UlimitMapVar(config.Ulimits, []string{"-default-ulimit"}, "Set default ulimits for containers") - flag.StringVar(&config.LogConfig.Type, []string{"-log-driver"}, "json-file", "Containers logging driver(json-file/none)") + flag.StringVar(&config.LogConfig.Type, []string{"-log-driver"}, "json-file", "Containers logging driver") } func getDefaultNetworkMtu() int { diff --git a/daemon/container.go b/daemon/container.go index e9b360083c..db622334aa 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -23,6 +23,7 @@ import ( "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/jsonfilelog" + "github.com/docker/docker/daemon/logger/syslog" "github.com/docker/docker/engine" "github.com/docker/docker/image" "github.com/docker/docker/links" @@ -31,10 +32,10 @@ import ( "github.com/docker/docker/pkg/broadcastwriter" "github.com/docker/docker/pkg/common" "github.com/docker/docker/pkg/directory" + "github.com/docker/docker/pkg/etchosts" "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/networkfs/etchosts" - 
"github.com/docker/docker/pkg/networkfs/resolvconf" "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/resolvconf" "github.com/docker/docker/pkg/symlink" "github.com/docker/docker/pkg/ulimit" "github.com/docker/docker/runconfig" @@ -1223,6 +1224,7 @@ func (container *Container) initializeNetworking() error { if err != nil { return err } + container.HostnamePath = nc.HostnamePath container.HostsPath = nc.HostsPath container.ResolvConfPath = nc.ResolvConfPath container.Config.Hostname = nc.Config.Hostname @@ -1379,6 +1381,12 @@ func (container *Container) startLogging() error { return err } l = dl + case "syslog": + dl, err := syslog.New(container.ID[:12]) + if err != nil { + return err + } + l = dl case "none": return nil default: diff --git a/daemon/create.go b/daemon/create.go index e17b63636b..49bc6a7de9 100644 --- a/daemon/create.go +++ b/daemon/create.go @@ -33,7 +33,7 @@ func (daemon *Daemon) ContainerCreate(job *engine.Job) engine.Status { job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n") hostConfig.Memory = 0 } - if hostConfig.Memory > 0 && !daemon.SystemConfig().SwapLimit { + if hostConfig.Memory > 0 && hostConfig.MemorySwap != -1 && !daemon.SystemConfig().SwapLimit { job.Errorf("Your kernel does not support swap limit capabilities. 
Limitation discarded.\n") hostConfig.MemorySwap = -1 } diff --git a/daemon/daemon.go b/daemon/daemon.go index ebb43e2484..6a27a085a0 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -35,9 +35,9 @@ import ( "github.com/docker/docker/pkg/graphdb" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/namesgenerator" - "github.com/docker/docker/pkg/networkfs/resolvconf" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/resolvconf" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/truncindex" "github.com/docker/docker/runconfig" diff --git a/daemon/graphdriver/btrfs/version_test.go b/daemon/graphdriver/btrfs/version_test.go index 2d5104d5e3..02fb131553 100644 --- a/daemon/graphdriver/btrfs/version_test.go +++ b/daemon/graphdriver/btrfs/version_test.go @@ -1,4 +1,4 @@ -// +build linux +// +build linux,!btrfs_noversion package btrfs diff --git a/daemon/logger/syslog/syslog.go b/daemon/logger/syslog/syslog.go new file mode 100644 index 0000000000..afd3dacbb4 --- /dev/null +++ b/daemon/logger/syslog/syslog.go @@ -0,0 +1,42 @@ +package syslog + +import ( + "fmt" + "log/syslog" + "os" + "path" + + "github.com/docker/docker/daemon/logger" +) + +type Syslog struct { + writer *syslog.Writer + tag string +} + +func New(tag string) (logger.Logger, error) { + log, err := syslog.New(syslog.LOG_USER, path.Base(os.Args[0])) + if err != nil { + return nil, err + } + return &Syslog{ + writer: log, + tag: tag, + }, nil +} + +func (s *Syslog) Log(msg *logger.Message) error { + logMessage := fmt.Sprintf("%s: %s", s.tag, msg.Line) + if msg.Source == "stderr" { + return s.writer.Err(logMessage) + } + return s.writer.Info(logMessage) +} + +func (s *Syslog) Close() error { + return s.writer.Close() +} + +func (s *Syslog) Name() string { + return "Syslog" +} diff --git a/daemon/networkdriver/bridge/driver.go b/daemon/networkdriver/bridge/driver.go index aa139b9a39..b8dfdf9488 100644 --- 
a/daemon/networkdriver/bridge/driver.go +++ b/daemon/networkdriver/bridge/driver.go @@ -17,8 +17,8 @@ import ( "github.com/docker/docker/engine" "github.com/docker/docker/nat" "github.com/docker/docker/pkg/iptables" - "github.com/docker/docker/pkg/networkfs/resolvconf" "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/resolvconf" "github.com/docker/libcontainer/netlink" ) diff --git a/daemon/networkdriver/portallocator/portallocator.go b/daemon/networkdriver/portallocator/portallocator.go index da9f987397..a7c183d9da 100644 --- a/daemon/networkdriver/portallocator/portallocator.go +++ b/daemon/networkdriver/portallocator/portallocator.go @@ -50,12 +50,21 @@ var ( ) var ( - mutex sync.Mutex - - defaultIP = net.ParseIP("0.0.0.0") - globalMap = ipMapping{} + defaultIP = net.ParseIP("0.0.0.0") + defaultPortAllocator = New() ) +type PortAllocator struct { + mutex sync.Mutex + ipMap ipMapping +} + +func New() *PortAllocator { + return &PortAllocator{ + ipMap: ipMapping{}, + } +} + type ErrPortAlreadyAllocated struct { ip string port int @@ -70,10 +79,11 @@ func NewErrPortAlreadyAllocated(ip string, port int) ErrPortAlreadyAllocated { func init() { const portRangeKernelParam = "/proc/sys/net/ipv4/ip_local_port_range" + portRangeFallback := fmt.Sprintf("using fallback port range %d-%d", beginPortRange, endPortRange) file, err := os.Open(portRangeKernelParam) if err != nil { - log.Warnf("Failed to read %s kernel parameter: %v", portRangeKernelParam, err) + log.Warnf("port allocator - %s due to error: %v", portRangeFallback, err) return } var start, end int @@ -82,7 +92,7 @@ func init() { if err == nil { err = fmt.Errorf("unexpected count of parsed numbers (%d)", n) } - log.Errorf("Failed to parse port range from %s: %v", portRangeKernelParam, err) + log.Errorf("port allocator - failed to parse system ephemeral port range from %s - %s: %v", portRangeKernelParam, portRangeFallback, err) return } beginPortRange = start @@ -109,12 +119,9 @@ func (e 
ErrPortAlreadyAllocated) Error() string { return fmt.Sprintf("Bind for %s:%d failed: port is already allocated", e.ip, e.port) } -// RequestPort requests new port from global ports pool for specified ip and proto. -// If port is 0 it returns first free port. Otherwise it cheks port availability -// in pool and return that port or error if port is already busy. -func RequestPort(ip net.IP, proto string, port int) (int, error) { - mutex.Lock() - defer mutex.Unlock() +func (p *PortAllocator) RequestPort(ip net.IP, proto string, port int) (int, error) { + p.mutex.Lock() + defer p.mutex.Unlock() if proto != "tcp" && proto != "udp" { return 0, ErrUnknownProtocol @@ -124,10 +131,10 @@ func RequestPort(ip net.IP, proto string, port int) (int, error) { ip = defaultIP } ipstr := ip.String() - protomap, ok := globalMap[ipstr] + protomap, ok := p.ipMap[ipstr] if !ok { protomap = newProtoMap() - globalMap[ipstr] = protomap + p.ipMap[ipstr] = protomap } mapping := protomap[proto] if port > 0 { @@ -145,15 +152,22 @@ func RequestPort(ip net.IP, proto string, port int) (int, error) { return port, nil } +// RequestPort requests new port from global ports pool for specified ip and proto. +// If port is 0 it returns first free port. Otherwise it cheks port availability +// in pool and return that port or error if port is already busy. +func RequestPort(ip net.IP, proto string, port int) (int, error) { + return defaultPortAllocator.RequestPort(ip, proto, port) +} + // ReleasePort releases port from global ports pool for specified ip and proto. 
-func ReleasePort(ip net.IP, proto string, port int) error { - mutex.Lock() - defer mutex.Unlock() +func (p *PortAllocator) ReleasePort(ip net.IP, proto string, port int) error { + p.mutex.Lock() + defer p.mutex.Unlock() if ip == nil { ip = defaultIP } - protomap, ok := globalMap[ip.String()] + protomap, ok := p.ipMap[ip.String()] if !ok { return nil } @@ -161,14 +175,22 @@ func ReleasePort(ip net.IP, proto string, port int) error { return nil } +func ReleasePort(ip net.IP, proto string, port int) error { + return defaultPortAllocator.ReleasePort(ip, proto, port) +} + // ReleaseAll releases all ports for all ips. -func ReleaseAll() error { - mutex.Lock() - globalMap = ipMapping{} - mutex.Unlock() +func (p *PortAllocator) ReleaseAll() error { + p.mutex.Lock() + p.ipMap = ipMapping{} + p.mutex.Unlock() return nil } +func ReleaseAll() error { + return defaultPortAllocator.ReleaseAll() +} + func (pm *portMap) findPort() (int, error) { port := pm.last for i := 0; i <= endPortRange-beginPortRange; i++ { diff --git a/daemon/networkdriver/portallocator/portallocator_test.go b/daemon/networkdriver/portallocator/portallocator_test.go index bac558fa41..f6f122bbde 100644 --- a/daemon/networkdriver/portallocator/portallocator_test.go +++ b/daemon/networkdriver/portallocator/portallocator_test.go @@ -10,14 +10,10 @@ func init() { endPortRange = DefaultPortRangeEnd } -func reset() { - ReleaseAll() -} - func TestRequestNewPort(t *testing.T) { - defer reset() + p := New() - port, err := RequestPort(defaultIP, "tcp", 0) + port, err := p.RequestPort(defaultIP, "tcp", 0) if err != nil { t.Fatal(err) } @@ -28,9 +24,9 @@ func TestRequestNewPort(t *testing.T) { } func TestRequestSpecificPort(t *testing.T) { - defer reset() + p := New() - port, err := RequestPort(defaultIP, "tcp", 5000) + port, err := p.RequestPort(defaultIP, "tcp", 5000) if err != nil { t.Fatal(err) } @@ -40,9 +36,9 @@ func TestRequestSpecificPort(t *testing.T) { } func TestReleasePort(t *testing.T) { - defer reset() + p 
:= New() - port, err := RequestPort(defaultIP, "tcp", 5000) + port, err := p.RequestPort(defaultIP, "tcp", 5000) if err != nil { t.Fatal(err) } @@ -50,15 +46,15 @@ func TestReleasePort(t *testing.T) { t.Fatalf("Expected port 5000 got %d", port) } - if err := ReleasePort(defaultIP, "tcp", 5000); err != nil { + if err := p.ReleasePort(defaultIP, "tcp", 5000); err != nil { t.Fatal(err) } } func TestReuseReleasedPort(t *testing.T) { - defer reset() + p := New() - port, err := RequestPort(defaultIP, "tcp", 5000) + port, err := p.RequestPort(defaultIP, "tcp", 5000) if err != nil { t.Fatal(err) } @@ -66,20 +62,20 @@ func TestReuseReleasedPort(t *testing.T) { t.Fatalf("Expected port 5000 got %d", port) } - if err := ReleasePort(defaultIP, "tcp", 5000); err != nil { + if err := p.ReleasePort(defaultIP, "tcp", 5000); err != nil { t.Fatal(err) } - port, err = RequestPort(defaultIP, "tcp", 5000) + port, err = p.RequestPort(defaultIP, "tcp", 5000) if err != nil { t.Fatal(err) } } func TestReleaseUnreadledPort(t *testing.T) { - defer reset() + p := New() - port, err := RequestPort(defaultIP, "tcp", 5000) + port, err := p.RequestPort(defaultIP, "tcp", 5000) if err != nil { t.Fatal(err) } @@ -87,7 +83,7 @@ func TestReleaseUnreadledPort(t *testing.T) { t.Fatalf("Expected port 5000 got %d", port) } - port, err = RequestPort(defaultIP, "tcp", 5000) + port, err = p.RequestPort(defaultIP, "tcp", 5000) switch err.(type) { case ErrPortAlreadyAllocated: @@ -97,18 +93,16 @@ func TestReleaseUnreadledPort(t *testing.T) { } func TestUnknowProtocol(t *testing.T) { - defer reset() - - if _, err := RequestPort(defaultIP, "tcpp", 0); err != ErrUnknownProtocol { + if _, err := New().RequestPort(defaultIP, "tcpp", 0); err != ErrUnknownProtocol { t.Fatalf("Expected error %s got %s", ErrUnknownProtocol, err) } } func TestAllocateAllPorts(t *testing.T) { - defer reset() + p := New() for i := 0; i <= endPortRange-beginPortRange; i++ { - port, err := RequestPort(defaultIP, "tcp", 0) + port, err := 
p.RequestPort(defaultIP, "tcp", 0) if err != nil { t.Fatal(err) } @@ -118,21 +112,21 @@ func TestAllocateAllPorts(t *testing.T) { } } - if _, err := RequestPort(defaultIP, "tcp", 0); err != ErrAllPortsAllocated { + if _, err := p.RequestPort(defaultIP, "tcp", 0); err != ErrAllPortsAllocated { t.Fatalf("Expected error %s got %s", ErrAllPortsAllocated, err) } - _, err := RequestPort(defaultIP, "udp", 0) + _, err := p.RequestPort(defaultIP, "udp", 0) if err != nil { t.Fatal(err) } // release a port in the middle and ensure we get another tcp port port := beginPortRange + 5 - if err := ReleasePort(defaultIP, "tcp", port); err != nil { + if err := p.ReleasePort(defaultIP, "tcp", port); err != nil { t.Fatal(err) } - newPort, err := RequestPort(defaultIP, "tcp", 0) + newPort, err := p.RequestPort(defaultIP, "tcp", 0) if err != nil { t.Fatal(err) } @@ -142,10 +136,10 @@ func TestAllocateAllPorts(t *testing.T) { // now pm.last == newPort, release it so that it's the only free port of // the range, and ensure we get it back - if err := ReleasePort(defaultIP, "tcp", newPort); err != nil { + if err := p.ReleasePort(defaultIP, "tcp", newPort); err != nil { t.Fatal(err) } - port, err = RequestPort(defaultIP, "tcp", 0) + port, err = p.RequestPort(defaultIP, "tcp", 0) if err != nil { t.Fatal(err) } @@ -155,11 +149,11 @@ func TestAllocateAllPorts(t *testing.T) { } func BenchmarkAllocatePorts(b *testing.B) { - defer reset() + p := New() for i := 0; i < b.N; i++ { for i := 0; i <= endPortRange-beginPortRange; i++ { - port, err := RequestPort(defaultIP, "tcp", 0) + port, err := p.RequestPort(defaultIP, "tcp", 0) if err != nil { b.Fatal(err) } @@ -168,21 +162,21 @@ func BenchmarkAllocatePorts(b *testing.B) { b.Fatalf("Expected port %d got %d", expected, port) } } - reset() + p.ReleaseAll() } } func TestPortAllocation(t *testing.T) { - defer reset() + p := New() ip := net.ParseIP("192.168.0.1") ip2 := net.ParseIP("192.168.0.2") - if port, err := RequestPort(ip, "tcp", 80); err != nil { 
+ if port, err := p.RequestPort(ip, "tcp", 80); err != nil { t.Fatal(err) } else if port != 80 { t.Fatalf("Acquire(80) should return 80, not %d", port) } - port, err := RequestPort(ip, "tcp", 0) + port, err := p.RequestPort(ip, "tcp", 0) if err != nil { t.Fatal(err) } @@ -190,41 +184,41 @@ func TestPortAllocation(t *testing.T) { t.Fatalf("Acquire(0) should return a non-zero port") } - if _, err := RequestPort(ip, "tcp", port); err == nil { + if _, err := p.RequestPort(ip, "tcp", port); err == nil { t.Fatalf("Acquiring a port already in use should return an error") } - if newPort, err := RequestPort(ip, "tcp", 0); err != nil { + if newPort, err := p.RequestPort(ip, "tcp", 0); err != nil { t.Fatal(err) } else if newPort == port { t.Fatalf("Acquire(0) allocated the same port twice: %d", port) } - if _, err := RequestPort(ip, "tcp", 80); err == nil { + if _, err := p.RequestPort(ip, "tcp", 80); err == nil { t.Fatalf("Acquiring a port already in use should return an error") } - if _, err := RequestPort(ip2, "tcp", 80); err != nil { + if _, err := p.RequestPort(ip2, "tcp", 80); err != nil { t.Fatalf("It should be possible to allocate the same port on a different interface") } - if _, err := RequestPort(ip2, "tcp", 80); err == nil { + if _, err := p.RequestPort(ip2, "tcp", 80); err == nil { t.Fatalf("Acquiring a port already in use should return an error") } - if err := ReleasePort(ip, "tcp", 80); err != nil { + if err := p.ReleasePort(ip, "tcp", 80); err != nil { t.Fatal(err) } - if _, err := RequestPort(ip, "tcp", 80); err != nil { + if _, err := p.RequestPort(ip, "tcp", 80); err != nil { t.Fatal(err) } - port, err = RequestPort(ip, "tcp", 0) + port, err = p.RequestPort(ip, "tcp", 0) if err != nil { t.Fatal(err) } - port2, err := RequestPort(ip, "tcp", port+1) + port2, err := p.RequestPort(ip, "tcp", port+1) if err != nil { t.Fatal(err) } - port3, err := RequestPort(ip, "tcp", 0) + port3, err := p.RequestPort(ip, "tcp", 0) if err != nil { t.Fatal(err) } @@ -234,15 
+228,15 @@ func TestPortAllocation(t *testing.T) { } func TestNoDuplicateBPR(t *testing.T) { - defer reset() + p := New() - if port, err := RequestPort(defaultIP, "tcp", beginPortRange); err != nil { + if port, err := p.RequestPort(defaultIP, "tcp", beginPortRange); err != nil { t.Fatal(err) } else if port != beginPortRange { t.Fatalf("Expected port %d got %d", beginPortRange, port) } - if port, err := RequestPort(defaultIP, "tcp", 0); err != nil { + if port, err := p.RequestPort(defaultIP, "tcp", 0); err != nil { t.Fatal(err) } else if port == beginPortRange { t.Fatalf("Acquire(0) allocated the same port twice: %d", port) diff --git a/daemon/volumes.go b/daemon/volumes.go index b389ad0b86..126d74a383 100644 --- a/daemon/volumes.go +++ b/daemon/volumes.go @@ -24,6 +24,7 @@ type Mount struct { Writable bool copyData bool from *Container + isBind bool } func (mnt *Mount) Export(resource string) (io.ReadCloser, error) { @@ -79,7 +80,7 @@ func (m *Mount) initialize() error { if hostPath, exists := m.container.Volumes[m.MountToPath]; exists { // If this is a bind-mount/volumes-from, maybe it was passed in at start instead of create // We need to make sure bind-mounts/volumes-from passed on start can override existing ones. 
- if !m.volume.IsBindMount && m.from == nil { + if (!m.volume.IsBindMount && !m.isBind) && m.from == nil { return nil } if m.volume.Path == hostPath { @@ -172,6 +173,7 @@ func (container *Container) parseVolumeMountConfig() (map[string]*Mount, error) volume: vol, MountToPath: mountToPath, Writable: writable, + isBind: true, // in case the volume itself is a normal volume, but is being mounted in as a bindmount here } } diff --git a/docker/daemon.go b/docker/daemon.go index e3bd06d901..b2a985b221 100644 --- a/docker/daemon.go +++ b/docker/daemon.go @@ -186,8 +186,9 @@ func mainDaemon() { errAPI := <-serveAPIWait // If we have an error here it is unique to API (as daemonErr would have // exited the daemon process above) - if errAPI != nil { - log.Errorf("Shutting down due to ServeAPI error: %v", errAPI) - } eng.Shutdown() + if errAPI != nil { + log.Fatalf("Shutting down due to ServeAPI error: %v", errAPI) + } + } diff --git a/docker/docker.go b/docker/docker.go index 0641830098..347424432b 100644 --- a/docker/docker.go +++ b/docker/docker.go @@ -14,6 +14,7 @@ import ( "github.com/docker/docker/autogen/dockerversion" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/reexec" + "github.com/docker/docker/pkg/term" "github.com/docker/docker/utils" ) @@ -29,6 +30,11 @@ func main() { return } + // Set terminal emulation based on platform as required. 
+ stdin, stdout, stderr := term.StdStreams() + + initLogging(stderr) + flag.Parse() // FIXME: validate daemon flags here @@ -42,16 +48,16 @@ func main() { if err != nil { log.Fatalf("Unable to parse logging level: %s", *flLogLevel) } - initLogging(lvl) + setLogLevel(lvl) } else { - initLogging(log.InfoLevel) + setLogLevel(log.InfoLevel) } // -D, --debug, -l/--log-level=debug processing // When/if -D is removed this block can be deleted if *flDebug { os.Setenv("DEBUG", "1") - initLogging(log.DebugLevel) + setLogLevel(log.DebugLevel) } if len(flHosts) == 0 { @@ -124,9 +130,9 @@ func main() { } if *flTls || *flTlsVerify { - cli = client.NewDockerCli(os.Stdin, os.Stdout, os.Stderr, *flTrustKey, protoAddrParts[0], protoAddrParts[1], &tlsConfig) + cli = client.NewDockerCli(stdin, stdout, stderr, *flTrustKey, protoAddrParts[0], protoAddrParts[1], &tlsConfig) } else { - cli = client.NewDockerCli(os.Stdin, os.Stdout, os.Stderr, *flTrustKey, protoAddrParts[0], protoAddrParts[1], nil) + cli = client.NewDockerCli(stdin, stdout, stderr, *flTrustKey, protoAddrParts[0], protoAddrParts[1], nil) } if err := cli.Cmd(flag.Args()...); err != nil { diff --git a/docker/log.go b/docker/log.go index cdbbd4408f..0dd9a70eeb 100644 --- a/docker/log.go +++ b/docker/log.go @@ -1,12 +1,14 @@ package main import ( - "os" - log "github.com/Sirupsen/logrus" + "io" ) -func initLogging(lvl log.Level) { - log.SetOutput(os.Stderr) +func setLogLevel(lvl log.Level) { log.SetLevel(lvl) } + +func initLogging(stderr io.Writer) { + log.SetOutput(stderr) +} diff --git a/docs/README.md b/docs/README.md index 72172112ce..5feb496a73 100755 --- a/docs/README.md +++ b/docs/README.md @@ -1,156 +1,255 @@ # Docker Documentation -The source for Docker documentation is here under `sources/` and uses extended -Markdown, as implemented by [MkDocs](http://mkdocs.org). +The source for Docker documentation is in this directory under `sources/`. 
Our +documentation uses extended Markdown, as implemented by +[MkDocs](http://mkdocs.org). The current release of the Docker documentation +resides on [http://docs.docker.com](http://docs.docker.com). -The HTML files are built and hosted on -[http://docs.docker.com](http://docs.docker.com), and update automatically -after each change to the `docs` branch of [Docker on -GitHub](https://github.com/docker/docker) thanks to post-commit hooks. +## Understanding the documentation branches and processes -## Contributing - -Be sure to follow the [contribution guidelines](../CONTRIBUTING.md). -In particular, [remember to sign your work!](../CONTRIBUTING.md#sign-your-work) - -## Getting Started - -Docker documentation builds are done in a Docker container, which installs all -the required tools, adds the local `docs/` directory and builds the HTML docs. -It then starts a HTTP server on port 8000 so that you can connect and see your -changes. - -In the root of the `docker` source directory: - - $ make docs - .... (lots of output) .... - docker run --rm -it -e AWS_S3_BUCKET -p 8000:8000 "docker-docs:master" mkdocs serve - Running at: http://0.0.0.0:8000/ - Live reload enabled. - Hold ctrl+c to quit. - -If you have any issues you need to debug, you can use `make docs-shell` and then -run `mkdocs serve` - -## Testing the links - -You can use `make docs-test` to generate a report of missing links that are referenced in -the documentation - there should be none. - -## Adding a new document - -New document (`.md`) files are added to the documentation builds by adding them -to the menu definition in the `docs/mkdocs.yml` file. - -## Style guide - -If you have questions about how to write for Docker's documentation (e.g., -questions about grammar, syntax, formatting, styling, language, or tone) please -see the [style guide](sources/contributing/docs_style-guide.md). If something -isn't clear in the guide, please submit a PR to help us improve it. 
- -## Working using GitHub's file editor - -Alternatively, for small changes and typos you might want to use GitHub's built- -in file editor. It allows you to preview your changes right on-line (though -there can be some differences between GitHub Markdown and [MkDocs -Markdown](http://www.mkdocs.org/user-guide/writing-your-docs/)). Just be -careful not to create many commits. And you must still [sign your -work!](../CONTRIBUTING.md#sign-your-work) - -## Branches +Docker has two primary branches for documentation: | Branch | Description | URL (published via commit-hook) | |----------|--------------------------------|------------------------------------------------------------------------------| | `docs` | Official release documentation | [http://docs.docker.com](http://docs.docker.com) | -| `master` | Unreleased development work | [http://docs.master.dockerproject.com](http://docs.master.dockerproject.com) | +| `master` | Merged but unreleased development work | [http://docs.master.dockerproject.com](http://docs.master.dockerproject.com) | -**There are two branches related to editing docs**: `master` and `docs`. You -should always edit the documentation on a local branch of the `master` branch, -and send a PR against `master`. That way your fixes will automatically get -included in later releases, and docs maintainers can easily cherry-pick your -changes into the `docs` release branch. In the rare case where your change is -not forward-compatible, you may need to base your changes on the `docs` branch. +Additions and updates to upcoming releases are made in a feature branch off of +the `master` branch. The Docker maintainers also support a `docs` branch that +contains the last release of documentation. -Also, since there is a separate `docs` branch, we can keep -[http://docs.docker.com](http://docs.docker.com) up to date with any bugs found -between Docker code releases. +After a release, documentation updates are continually merged into `master` as +they occur. 
This work includes new documentation for forthcoming features, bug +fixes, and other updates. Docker's CI system automatically builds and updates +the `master` documentation after each merge and posts it to +[http://docs.master.dockerproject.com](http://docs.master.dockerproject.com). -## Publishing Documentation +Periodically, the Docker maintainers update `docs.docker.com` between official +releases of Docker. They do this by cherry-picking commits from `master`, +merging them into `docs`, and then publishing the result. -To publish a copy of the documentation you need to have Docker up and running on -your machine. You'll also need a `docs/awsconfig` file containing the settings -you need to access the AWS bucket you'll be deploying to. +In the rare case where a change is not forward-compatible, changes may be made +on other branches by special arrangement with the Docker maintainers. -The release script will create an s3 if needed, and will then push the files to it. +### Quickstart for documentation contributors - [profile dowideit-docs] - aws_access_key_id = IHOIUAHSIDH234rwf.... - aws_secret_access_key = OIUYSADJHLKUHQWIUHE...... - region = ap-southeast-2 +If you are a new or beginner contributor, we encourage you to read through the +[our detailed contributors +guide](https://docs.docker.com/project/who-written-for/). The guide explains in +detail, with examples, how to contribute. If you are an experienced contributor +this quickstart should be enough to get you started. -The `profile` name must be the same as the name of the bucket you are deploying -to - which you call from the `docker` directory: +The following is the essential workflow for contributing to the documentation: - make AWS_S3_BUCKET=dowideit-docs docs-release +1. Fork the `docker/docker` repository. -This will publish _only_ to the `http://bucket-url/v1.2/` version of the -documentation. +2. Clone the repository to your local machine. 
-If you're publishing the current release's documentation, you need to -also update the root docs pages by running +3. Select an issue from `docker/docker` to work on or submit a proposal of your +own. - make AWS_S3_BUCKET=dowideit-docs BUILD_ROOT=yes docs-release +4. Create a feature branch from `master` in which to work. -> **Note:** -> if you are using Boot2Docker on OSX and the above command returns an error, -> `Post http:///var/run/docker.sock/build?rm=1&t=docker-docs%3Apost-1.2.0-docs_update-2: -> dial unix /var/run/docker.sock: no such file or directory', you need to set the Docker -> host. Run `eval "$(boot2docker shellinit)"` to see the correct variable to set. The command -> will return the full `export` command, so you can just cut and paste. + By basing from `master` your work is automatically included in the next + release. It also allows docs maintainers to easily cherry-pick your changes + into the `docs` release branch. + +4. Modify existing or add new `.md` files to the `docs/sources` directory. + + If you add a new document (`.md`) file, you must also add it to the + appropriate section of the `docs/mkdocs.yml` file in this repository. + + +5. As you work, build the documentation site locally to see your changes. + + The `docker/docker` repository contains a `Dockerfile` and a `Makefile`. + Together, these create a development environment in which you can build and + run a container running the Docker documentation website. To build the + documentation site, enter `make docs` at the root of your `docker/docker` + fork: + + $ make docs + .... (lots of output) .... + docker run --rm -it -e AWS_S3_BUCKET -p 8000:8000 "docker-docs:master" mkdocs serve + Running at: http://0.0.0.0:8000/ + Live reload enabled. + Hold ctrl+c to quit. + + + The build creates an image containing all the required tools, adds the local + `docs/` directory and generates the HTML files. Then, it runs a Docker + container with this image. 
+ + The container exposes port 8000 on the localhost so that you can connect and + see your changes. If you are running Boot2Docker, use the `boot2docker ip` + to get the address of your server. + +6. Check your writing for style and mechanical errors. + + Use our [documentation style + guide](https://docs.docker.com/project/doc-style/) to check style. There are + several [good grammar and spelling online + checkers](http://www.hemingwayapp.com/) that can check your writing + mechanics. + +7. Squash your commits on your branch. + +8. Make a pull request from your fork back to Docker's `master` branch. + +9. Work with the reviewers until your change is approved and merged. + +### Debugging and testing + +If you have any issues you need to debug, you can use `make docs-shell` and then +run `mkdocs serve`. You can use `make docs-test` to generate a report of missing +links that are referenced in the documentation—there should be none. + +## Style guide + +If you have questions about how to write for Docker's documentation, please see +the [style guide](sources/project/doc-style.md). The style guide provides +guidance about grammar, syntax, formatting, styling, language, or tone. If +something isn't clear in the guide, please submit an issue to let us know or +submit a pull request to help us improve it. + + +## Publishing documentation (for Docker maintainers) + +To publish Docker's documentation you need to have Docker up and running on your +machine. You'll also need a `docs/awsconfig` file containing the settings you +need to access the AWS bucket you'll be deploying to. + +The process for publishing is to build first to an AWS bucket, verify the build, +and then publish the final release. + +1. Have Docker installed and running on your machine. + +2. Ask the core maintainers for the `awsconfig` file. + +3. Copy the `awsconfig` file to the `docs/` directory. + + The `awsconfig` file contains the profiles of the S3 buckets for our + documentation sites. 
(If needed, the release script creates an S3 bucket and + pushes the files to it.) Each profile has this format: + + [profile dowideit-docs] + aws_access_key_id = IHOIUAHSIDH234rwf.... + aws_secret_access_key = OIUYSADJHLKUHQWIUHE...... + region = ap-southeast-2 + + The `profile` name must be the same as the name of the bucket you are + deploying to. + +4. Call the `make` from the `docker` directory. + + $ make AWS_S3_BUCKET=dowideit-docs docs-release + + This publishes _only_ to the `http://bucket-url/v1.2/` version of the + documentation. + +5. If you're publishing the current release's documentation, you need to also +update the root docs pages by running + + $ make AWS_S3_BUCKET=dowideit-docs BUILD_ROOT=yes docs-release + +### Errors publishing using Boot2Docker + +Sometimes, in a Boot2Docker environment, the publishing procedure returns this +error: + + Post http:///var/run/docker.sock/build?rm=1&t=docker-docs%3Apost-1.2.0-docs_update-2: + dial unix /var/run/docker.sock: no such file or directory. + +If this happens, set the Docker host. Run the following command to set the +variables in your shell: + + $ eval "$(boot2docker shellinit)" ## Cherry-picking documentation changes to update an existing release. -Whenever the core team makes a release, they publish the documentation based -on the `release` branch (which is copied into the `docs` branch). The -documentation team can make updates in the meantime, by cherry-picking changes -from `master` into any of the docs branches. +Whenever the core team makes a release, they publish the documentation based on +the `release` branch. At that time, the `release` branch is copied into the +`docs` branch. The documentation team makes updates between Docker releases by +cherry-picking changes from `master` into any of the documentation branches. +Typically, we cherry-pick into the `docs` branch. 
-For example, to update the current release's docs: +For example, to update the current release's docs, do the following: - git fetch upstream - git checkout -b post-1.2.0-docs-update-1 upstream/docs - # Then go through the Merge commit linked to PR's (making sure they apply - to that release) - # see https://github.com/docker/docker/commits/master - git cherry-pick -x fe845c4 - # Repeat until you have cherry picked everything you will propose to be merged - git push upstream post-1.2.0-docs-update-1 +1. Go to your `docker/docker` fork and get the latest from master. -Then make a pull request to merge into the `docs` branch, __NOT__ into master. + $ git fetch upstream + +2. Checkout a new branch based on `upstream/docs`. -Once the PR has the needed `LGTM`s, merge it, then publish to our beta server -to test: + You should give your new branch a descriptive name. - git fetch upstream - git checkout docs - git reset --hard upstream/docs - make AWS_S3_BUCKET=beta-docs.docker.io BUILD_ROOT=yes docs-release + $ git checkout -b post-1.2.0-docs-update-1 upstream/docs + +3. In a browser window, open [https://github.com/docker/docker/commits/master]. -Then go to http://beta-docs.docker.io.s3-website-us-west-2.amazonaws.com/ -to view your results and make sure what you published is what you wanted. +4. Locate the merges you want to publish. -When you're happy with it, publish the docs to our live site: + You should only cherry-pick individual commits; do not cherry-pick merge + commits. To minimize merge conflicts, start with the oldest commit and work + your way forward in time. - make AWS_S3_BUCKET=docs.docker.com BUILD_ROOT=yes DISTRIBUTION_ID=C2K6......FL2F docs-release +5. Copy the commit SHA from GitHub. -Test the uncached version of the live docs at http://docs.docker.com.s3-website-us-east-1.amazonaws.com/ +6. Cherry-pick the commit. + + $ git cherry-pick -x fe845c4 + +7. Repeat until you have cherry-picked everything you want to merge. + +8. 
Push your changes to your fork. + + $ git push origin post-1.2.0-docs-update-1 + +9. Make a pull request to merge into the `docs` branch. + + Do __NOT__ merge into `master`. + +10. Have maintainers review your pull request. + +11. Once the PR has the needed "LGTMs", merge it on GitHub. + +12. Return to your local fork and make sure you are still on the `docs` branch. + + $ git checkout docs + +13. Fetch your merged pull request from `docs`. + + $ git fetch upstream/docs + +14. Ensure your branch is clean and set to the latest. + + $ git reset --hard upstream/docs -Note that the new docs will not appear live on the site until the cache (a complex, -distributed CDN system) is flushed. The `make docs-release` command will do this -_if_ the `DISTRIBUTION_ID` is set to the Cloudfront distribution ID (ask the meta -team) - this will take at least 15 minutes to run and you can check its progress -with the CDN Cloudfront Chrome addin. +15. Copy the `awsconfig` file into the `docs` directory. + +16. Make the beta documentation + + $ make AWS_S3_BUCKET=beta-docs.docker.io BUILD_ROOT=yes docs-release + +17. Open [the beta +website](http://beta-docs.docker.io.s3-website-us-west-2.amazonaws.com/) site +and make sure what you published is correct. + +19. When you're happy with your content, publish the docs to our live site: + + $ make AWS_S3_BUCKET=docs.docker.com BUILD_ROOT=yes +DISTRIBUTION_ID=C2K6......FL2F docs-release + +20. Test the uncached version of the live docs at [http://docs.docker.com.s3-website-us-east-1.amazonaws.com/] + + +### Caching and the docs + +New docs do not appear live on the site until the cache (a complex, distributed +CDN system) is flushed. The `make docs-release` command flushes the cache _if_ +the `DISTRIBUTION_ID` is set to the Cloudfront distribution ID. The cache flush +can take at least 15 minutes to run and you can check its progress with the CDN +Cloudfront Purge Tool Chrome app. 
## Removing files from the docs.docker.com site diff --git a/docs/man/Dockerfile.5.md b/docs/man/Dockerfile.5.md index 7f884888e2..0ec54a8c9c 100644 --- a/docs/man/Dockerfile.5.md +++ b/docs/man/Dockerfile.5.md @@ -273,8 +273,15 @@ A Dockerfile is similar to a Makefile. **USER** -- `USER daemon` - The **USER** instruction sets the username or UID that is used when running the - image. + Sets the username or UID used for running subsequent commands. + + The **USER** instruction can optionally be used to set the group or GID. The + following examples are all valid: + USER [user | user:group | uid | uid:gid | user:gid | uid:group ] + + Until the **USER** instruction is set, instructions will be run as root. The USER + instruction can be used any number of times in a Dockerfile, and will only affect + subsequent commands. **WRKDIR** -- `WORKDIR /path/to/workdir` diff --git a/docs/man/docker-create.1.md b/docs/man/docker-create.1.md index 62a4c60bb1..1a0da1b8f4 100644 --- a/docs/man/docker-create.1.md +++ b/docs/man/docker-create.1.md @@ -121,7 +121,7 @@ IMAGE [COMMAND] [ARG...] **--lxc-conf**=[] (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" -**--log-driver**="|*json-file*|*none*" +**--log-driver**="|*json-file*|*syslog*|*none*" Logging driver for container. Default is defined by daemon `--log-driver` flag. **Warning**: `docker logs` command works only for `json-file` logging driver. diff --git a/docs/man/docker-login.1.md b/docs/man/docker-login.1.md index e3614cce4a..5ff9403a8d 100644 --- a/docs/man/docker-login.1.md +++ b/docs/man/docker-login.1.md @@ -17,6 +17,9 @@ Register or Login to a docker registry server, if no server is specified "https://index.docker.io/v1/" is the default. If you want to login to a private registry you can specify this by adding the server name. +This stores encoded credentials in `$HOME/.dockercfg` on Linux or `%USERPROFILE%/.dockercfg` +on Windows. 
+ # OPTIONS **-e**, **--email**="" Email diff --git a/docs/man/docker-run.1.md b/docs/man/docker-run.1.md index ef2c9061ba..9ac5717383 100644 --- a/docs/man/docker-run.1.md +++ b/docs/man/docker-run.1.md @@ -222,7 +222,7 @@ which interface and port to use. **--lxc-conf**=[] (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" -**--log-driver**="|*json-file*|*none*" +**--log-driver**="|*json-file*|*syslog*|*none*" Logging driver for container. Default is defined by daemon `--log-driver` flag. **Warning**: `docker logs` command works only for `json-file` logging driver. @@ -341,7 +341,12 @@ The **-t** option is incompatible with a redirection of the docker client standard input. **-u**, **--user**="" - Username or UID + Sets the username or UID used and optionally the groupname or GID for the specified command. + + The following examples are all valid: + --user [user | user:group | uid | uid:gid | user:gid | uid:group ] + + Without this argument the command will be run as root in the container. **-v**, **--volume**=[] Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container) diff --git a/docs/man/docker.1.md b/docs/man/docker.1.md index 530fa95019..c9fe3eae9a 100644 --- a/docs/man/docker.1.md +++ b/docs/man/docker.1.md @@ -89,7 +89,7 @@ unix://[/path/to/socket] to use. **--label**="[]" Set key=value labels to the daemon (displayed in `docker info`) -**--log-driver**="*json-file*|*none*" +**--log-driver**="*json-file*|*syslog*|*none*" Container's logging driver. Default is `default`. **Warning**: `docker logs` command works only for `json-file` logging driver. 
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 49449f7843..6e7be67d20 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -45,6 +45,8 @@ pages: - ['installation/google.md', 'Installation', 'Google Cloud Platform'] - ['installation/gentoolinux.md', 'Installation', 'Gentoo'] - ['installation/softlayer.md', 'Installation', 'IBM Softlayer'] +- ['installation/joyent.md', 'Installation', 'Joyent Compute Service'] +- ['installation/azure.md', 'Installation', 'Microsoft Azure'] - ['installation/rackspace.md', 'Installation', 'Rackspace Cloud'] - ['installation/rhel.md', 'Installation', 'Red Hat Enterprise Linux'] - ['installation/oracle.md', 'Installation', 'Oracle Linux'] diff --git a/docs/sources/docker-hub/builds.md b/docs/sources/docker-hub/builds.md index 164018e827..1613ad1d4b 100644 --- a/docs/sources/docker-hub/builds.md +++ b/docs/sources/docker-hub/builds.md @@ -63,13 +63,15 @@ public or private GitHub repositories with a `Dockerfile`. ### GitHub Submodules -If your GitHub repository contains links to private submodules, you'll -need to add a deploy key from your Docker Hub repository. +If your GitHub repository contains links to private submodules, you'll get an +error message in your build. -Your Docker Hub deploy key is located under the "Build Details" -menu on the Automated Build's main page in the Hub. Add this key -to your GitHub submodule by visiting the Settings page for the -repository on GitHub and selecting "Deploy keys". +Normally, the Docker Hub sets up a deploy key in your GitHub repository. +Unfortunately, GitHub only allows a repository deploy key to access a single repository. + +To work around this, you need to create a dedicated user account in GitHub and attach +the automated build's deploy key that account. This dedicated build account +can be limited to read-only access to just the repositories required to build. @@ -82,15 +84,33 @@ repository on GitHub and selecting "Deploy keys". - - + + - - + + + + + + + + + + + + + + + + +
1.Your automated build's deploy key is in the "Build Details" menu -under "Deploy keys".First, create the new account in GitHub. It should be given read-only + access to the main repository and all submodules that are needed.
2.In your GitHub submodule's repository Settings page, add the -deploy key from your Docker Hub Automated Build.This can be accomplished by adding the account to a read-only team in + the organization(s) where the main GitHub repository and all submodule + repositories are kept.
3.Next, remove the deploy key from the main GitHub repository. This can be done in the GitHub repository's "Deploy keys" Settings section.
4.Your automated build's deploy key is in the "Build Details" menu + under "Deploy keys".
5.In your dedicated GitHub User account, add the deploy key from your + Docker Hub Automated Build.
diff --git a/docs/sources/docker-hub/hub-images/gh_add_ssh_user_key.png b/docs/sources/docker-hub/hub-images/gh_add_ssh_user_key.png new file mode 100644 index 0000000000..7d0092170f Binary files /dev/null and b/docs/sources/docker-hub/hub-images/gh_add_ssh_user_key.png differ diff --git a/docs/sources/docker-hub/hub-images/gh_org_members.png b/docs/sources/docker-hub/hub-images/gh_org_members.png new file mode 100644 index 0000000000..465f5da565 Binary files /dev/null and b/docs/sources/docker-hub/hub-images/gh_org_members.png differ diff --git a/docs/sources/docker-hub/hub-images/gh_repo_deploy_key.png b/docs/sources/docker-hub/hub-images/gh_repo_deploy_key.png new file mode 100644 index 0000000000..983b5eec77 Binary files /dev/null and b/docs/sources/docker-hub/hub-images/gh_repo_deploy_key.png differ diff --git a/docs/sources/docker-hub/hub-images/gh_team_members.png b/docs/sources/docker-hub/hub-images/gh_team_members.png new file mode 100644 index 0000000000..3bdf4abd95 Binary files /dev/null and b/docs/sources/docker-hub/hub-images/gh_team_members.png differ diff --git a/docs/sources/docker-hub/hub-images/github_deploy_key.png b/docs/sources/docker-hub/hub-images/github_deploy_key.png deleted file mode 100644 index a0ec6a918f..0000000000 Binary files a/docs/sources/docker-hub/hub-images/github_deploy_key.png and /dev/null differ diff --git a/docs/sources/installation.md b/docs/sources/installation.md index 7eaabeeefe..5fa52fa978 100644 --- a/docs/sources/installation.md +++ b/docs/sources/installation.md @@ -23,4 +23,5 @@ techniques for installing Docker all the time. 
- [Amazon EC2](amazon/) - [Rackspace Cloud](rackspace/) - [Google Cloud Platform](google/) + - [Joyent Compute Service](joyent/) - [Binaries](binaries/) diff --git a/docs/sources/installation/azure.md b/docs/sources/installation/azure.md new file mode 100644 index 0000000000..a8e700fead --- /dev/null +++ b/docs/sources/installation/azure.md @@ -0,0 +1,27 @@ +page_title: Installation on Microsoft Azure Platform +page_description: Instructions for creating a Docker-ready virtual machine on Microsoft Azure cloud platform. +page_keywords: Docker, Docker documentation, installation, azure, microsoft + +# Microsoft Azure + + +## Creating a Docker host machine on Azure +​ +Please check out the following detailed tutorials on the [Microsoft Azure][0] +website to find out different ways to create Docker-ready Linux virtual +machines on Azure: + +* [Docker Virtual Machine Extensions on Azure][1] + * [How to use the Docker VM Extension from Azure Cross-Platform Interface][2] + * [How to use the Docker VM Extension with the Azure Portal][3] +* [Using Docker Machine with Azure][4] + +## What next? + +Continue with the [User Guide](/userguide/). + +[0]: http://azure.microsoft.com/ +[1]: http://azure.microsoft.com/en-us/documentation/articles/virtual-machines-docker-vm-extension/ +[2]: http://azure.microsoft.com/documentation/articles/virtual-machines-docker-with-xplat-cli/ +[3]: http://azure.microsoft.com/documentation/articles/virtual-machines-docker-with-portal/ +[4]: http://azure.microsoft.com/en-us/documentation/articles/virtual-machines-docker-machine/ \ No newline at end of file diff --git a/docs/sources/installation/joyent.md b/docs/sources/installation/joyent.md new file mode 100644 index 0000000000..daa33b7996 --- /dev/null +++ b/docs/sources/installation/joyent.md @@ -0,0 +1,23 @@ +page_title: Install on Joyent Public Cloud +page_description: Installation instructions for Docker on the Joyent Compute Service. 
+page_keywords: Docker, Docker documentation, installation, joyent, Joyent Public Cloud, Joyent Compute Service, Joyent Container Service + +## Install on Joyent Public Cloud + +1. Sign in to the [Joyent customer portal](https://my.joyent.com/) + +2. [Create a Docker host](https://docs.joyent.com/jpc/managing-docker-containers/creating-a-docker-host). + +## Start and manage containers + +1. [Start containers in the web UI](https://docs.joyent.com/jpc/managing-docker-containers/starting-a-container) + +2. [Configure the Docker CLI on your laptop](https://docs.joyent.com/jpc/managing-docker-containers/access-your-jpc-docker-hosts-from-the-docker-cli) to connect to the remote host to launch and manage containers. + +3. SSH into the Docker host. + +4. Launch containers using the Docker CLI. + +## Where to go next + +Continue with the [Docker user guide](/userguide/), read Joyent's [getting started blog post](https://www.joyent.com/blog/first-steps-with-joyents-container-service), and [full documentation](https://docs.joyent.com/jpc/managing-docker-containers). \ No newline at end of file diff --git a/docs/sources/installation/mac.md b/docs/sources/installation/mac.md index a06233e0ea..9bf7632680 100644 --- a/docs/sources/installation/mac.md +++ b/docs/sources/installation/mac.md @@ -4,6 +4,20 @@ page_keywords: Docker, Docker documentation, requirements, boot2docker, VirtualB # Install Docker on Mac OS X +You can install Docker using Boot2Docker to run `docker` commands at your command-line. +Choose this installation if you are familiar with the command-line or plan to +contribute to the Docker project on GitHub. + +Alternatively, you may want to try Kitematic, an application that lets you set up Docker and +run containers using a graphical user interface (GUI). + +Download Kitematic + + +## Command-line Docker with Boot2Docker + Because the Docker daemon uses Linux-specific kernel features, you can't run Docker natively in OS X. 
Instead, you must install the Boot2Docker application. The application includes a VirtualBox Virtual Machine (VM), Docker itself, and the @@ -17,16 +31,7 @@ completely from RAM, is a small ~24MB download, and boots in approximately 5s. Your Mac must be running OS X 10.6 "Snow Leopard" or newer to run Boot2Docker. -## How do you want to work with Docker? - -You can set up Docker using the command line with Boot2Docker and the guide -below. Alternatively, you may want to try Kitematic, -an application that lets you set up Docker and run containers using a graphical -user interface (GUI). - -Download Kitematic - -## Learn the key concepts before installing +### Learn the key concepts before installing In a Docker installation on Linux, your machine is both the localhost and the Docker host. In networking, localhost means your computer. The Docker host is @@ -50,7 +55,7 @@ When you start the `boot2docker` process, the VM is assigned an IP address. Unde practice, work through the exercises on this page. -## Install Boot2Docker +### Install Boot2Docker 1. Go to the [boot2docker/osx-installer ]( https://github.com/boot2docker/osx-installer/releases/latest) release page. @@ -319,4 +324,4 @@ at [Boot2Docker repository](https://github.com/boot2docker/boot2docker). Thanks to Chris Jones whose [blog](http://goo.gl/Be6cCk) inspired me to redo this page. -Continue with the [Docker User Guide](/userguide/). +Continue with the [Docker User Guide](/userguide/). 
\ No newline at end of file diff --git a/docs/sources/project/create-pr.md b/docs/sources/project/create-pr.md index 84de397090..197aee849d 100644 --- a/docs/sources/project/create-pr.md +++ b/docs/sources/project/create-pr.md @@ -1,6 +1,6 @@ page_title: Create a pull request (PR) page_description: Basic workflow for Docker contributions -page_keywords: contribute, pull request, review, workflow, white-belt, black-belt, squash, commit +page_keywords: contribute, pull request, review, workflow, beginner, squash, commit # Create a pull request (PR) diff --git a/docs/sources/project/find-an-issue.md b/docs/sources/project/find-an-issue.md index 39572d17a4..2b3396e6e7 100644 --- a/docs/sources/project/find-an-issue.md +++ b/docs/sources/project/find-an-issue.md @@ -1,6 +1,6 @@ page_title: Make a project contribution page_description: Basic workflow for Docker contributions -page_keywords: contribute, pull request, review, workflow, white-belt, black-belt, squash, commit +page_keywords: contribute, pull request, review, workflow, beginner, expert, squash, commit @@ -37,20 +51,44 @@ An existing issue is something reported by a Docker user. As issues come in, our maintainers triage them. Triage is its own topic. For now, it is important for you to know that triage includes ranking issues according to difficulty. -Triaged issues have either a white-belt -or black-belt label. -A white-belt issue is considered -an easier issue. Issues can have more than one label, for example, -bug, -improvement, -project/doc, and so forth. -These other labels are there for filtering purposes but you might also find -them helpful. +Triaged issues have one of these labels: + + + + + + + + + + + + + + + + + + + + + + + + + +
LevelExperience level guideline
exp/beginnerYou have made less than 10 contributions in your lifetime to any open source project.
exp/noviceYou have made more than 10 contributions to an open source project or at least 5 contributions to Docker.
exp/proficientYou have made more than 5 contributions to Docker which amount to at least 200 code lines or 1000 documentation lines.
exp/expertYou have made less than 20 commits to Docker which amount to 500-1000 code lines or 1000-3000 documentation lines.
exp/masterYou have made more than 20 commits to Docker and greater than 1000 code lines or 3000 documentation lines.
-## Claim a white-belt issue +As the table states, these labels are meant as guidelines. You might have +written a whole plugin for Docker in a personal project and never contributed to +Docker. With that kind of experience, you could take on an exp/expert or exp/master level task. -In this section, you find and claim an open white-belt issue. +## Claim a beginner or novice issue + +In this section, you find and claim an open documentation lines issue. 1. Go to the `docker/docker` white-belt items on the list. +3. Look for the exp/beginner items on the list. -4. Click on the "labels" dropdown and select white-belt. +4. Click on the "labels" dropdown and select exp/beginner. - The system filters to show only open white-belt issues. + The system filters to show only open exp/beginner issues. 5. Open an issue that interests you. @@ -75,21 +113,18 @@ In this section, you find and claim an open white-belt issue. 6. Make sure that no other user has chosen to work on the issue. - We don't allow external contributors to assign issues to themselves, so you - need to read the comments to find if a user claimed an issue by saying: - - - "I'd love to give this a try~" - - "I'll work on this!" - - "I'll take this." - - The community is very good about claiming issues explicitly. + We don't allow external contributors to assign issues to themselves. So, you + need to read the comments to find if a user claimed the issue by leaving a + `#dibs` comment on the issue. -7. When you find an open issue that both interests you and is unclaimed, claim it yourself by adding a comment. +7. When you find an open issue that both interests you and is unclaimed, add a +`#dibs` comment. ![Easy issue](/project/images/easy_issue.png) This example uses issue 11038. Your issue # will be different depending on - what you claimed. + what you claimed. After a moment, Gordon the Docker bot, changes the issue + status to claimed. 8. Make a note of the issue number; you'll need it later. 
@@ -131,7 +166,7 @@ To sync your repository: 5. Fetch all the changes from the `upstream/master` branch. - $ git fetch upstream/master + $ git fetch upstream remote: Counting objects: 141, done. remote: Compressing objects: 100% (29/29), done. remote: Total 141 (delta 52), reused 46 (delta 46), pack-reused 66 diff --git a/docs/sources/project/images/easy_issue.png b/docs/sources/project/images/easy_issue.png index ac2ea6879c..de44b7826d 100644 Binary files a/docs/sources/project/images/easy_issue.png and b/docs/sources/project/images/easy_issue.png differ diff --git a/docs/sources/project/make-a-contribution.md b/docs/sources/project/make-a-contribution.md index b6fc4f34fa..e0b4e89720 100644 --- a/docs/sources/project/make-a-contribution.md +++ b/docs/sources/project/make-a-contribution.md @@ -16,7 +16,7 @@ process simple so you'll want to contribute frequently. ## The basic contribution workflow In this guide, you work through Docker's basic contribution workflow by fixing a -single *white-belt* issue in the `docker/docker` repository. The workflow +single *beginner* issue in the `docker/docker` repository. The workflow for fixing simple issues looks like this: ![Simple process](/project/images/existing_issue.png) diff --git a/docs/sources/project/review-pr.md b/docs/sources/project/review-pr.md index 44ad84f2a0..e8cb6c7c04 100644 --- a/docs/sources/project/review-pr.md +++ b/docs/sources/project/review-pr.md @@ -1,6 +1,6 @@ page_title: Participate in the PR Review page_description: Basic workflow for Docker contributions -page_keywords: contribute, pull request, review, workflow, white-belt, black-belt, squash, commit +page_keywords: contribute, pull request, review, workflow, beginner, squash, commit # Participate in the PR Review @@ -117,8 +117,7 @@ see the GitHub help on deleting branches. ## Where to go next At this point, you have completed all the basic tasks in our contributors guide. 
-If you enjoyed contributing, let us know by completing another -white-belt +If you enjoyed contributing, let us know by completing another beginner issue or two. We really appreciate the help. If you are very experienced and want to make a major change, go on to diff --git a/docs/sources/project/set-up-dev-env.md b/docs/sources/project/set-up-dev-env.md index 637eef6f58..9b767ad649 100644 --- a/docs/sources/project/set-up-dev-env.md +++ b/docs/sources/project/set-up-dev-env.md @@ -15,7 +15,7 @@ You use the `docker` repository and its `Dockerfile` to create a Docker image, run a Docker container, and develop code in the container. Docker itself builds, tests, and releases new Docker versions using this container. -If you followed the procedures that +If you followed the procedures that set up the prerequisites, you should have a fork of the `docker/docker` repository. You also created a branch called `dry-run-test`. In this section, you continue working with your fork on this branch. diff --git a/docs/sources/project/set-up-git.md b/docs/sources/project/set-up-git.md index ba42c81006..2292d93b3c 100644 --- a/docs/sources/project/set-up-git.md +++ b/docs/sources/project/set-up-git.md @@ -138,7 +138,7 @@ As you change code in your fork, you make your changes on a repository branch. The branch name should reflect what you are working on. In this section, you create a branch, make a change, and push it up to your fork. -This branch is just for testing your config for this guide. The changes arepart +This branch is just for testing your config for this guide. The changes are part of a dry run so the branch name is going to be dry-run-test. 
To create an push the branch to your fork on GitHub: diff --git a/docs/sources/project/test-and-docs.md b/docs/sources/project/test-and-docs.md index d586ea2c3c..cef3cae8eb 100644 --- a/docs/sources/project/test-and-docs.md +++ b/docs/sources/project/test-and-docs.md @@ -169,7 +169,7 @@ To run the same test inside your Docker development container, you do this: root@5f8630b873fe:/go/src/github.com/docker/docker# TESTFLAGS='-run ^TestBuild$' hack/make.sh -## If test under Boot2Docker fail do to space errors +## If tests under Boot2Docker fail due to disk space errors Running the tests requires about 2GB of memory. If you are running your container on bare metal, that is you are not running with Boot2Docker, your diff --git a/docs/sources/project/work-issue.md b/docs/sources/project/work-issue.md index 68d2ed750f..190cec0557 100644 --- a/docs/sources/project/work-issue.md +++ b/docs/sources/project/work-issue.md @@ -1,6 +1,6 @@ page_title: Work on your issue page_description: Basic workflow for Docker contributions -page_keywords: contribute, pull request, review, workflow, white-belt, black-belt, squash, commit +page_keywords: contribute, pull request, review, workflow, beginner, squash, commit # Work on your issue diff --git a/docs/sources/reference/api/docker_remote_api.md b/docs/sources/reference/api/docker_remote_api.md index 122546cf75..3da4cc82d5 100644 --- a/docs/sources/reference/api/docker_remote_api.md +++ b/docs/sources/reference/api/docker_remote_api.md @@ -76,6 +76,11 @@ Builds can now set resource constraints for all containers created for the build (`CgroupParent`) can be passed in the host config to setup container cgroups under a specific cgroup. +`POST /build` + +**New!** +Closing the HTTP request will now cause the build to be canceled. 
+ ## v1.17 ### Full Documentation diff --git a/docs/sources/reference/api/docker_remote_api_v1.18.md b/docs/sources/reference/api/docker_remote_api_v1.18.md index 3ebddb7d13..2197066d16 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.18.md +++ b/docs/sources/reference/api/docker_remote_api_v1.18.md @@ -259,7 +259,7 @@ Json Parameters: `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard", 2048 }}` - **LogConfig** - Logging configuration to container, format `{ "Type": "", "Config": {"key1": "val1"}} - Available types: `json-file`, `none`. + Available types: `json-file`, `syslog`, `none`. `json-file` logging driver. - **CgroupParent** - Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist. @@ -1144,6 +1144,9 @@ The archive may include any number of other files, which will be accessible in the build context (See the [*ADD build command*](/reference/builder/#dockerbuilder)). +The build will also be canceled if the client drops the connection by quitting +or being killed. + Query Parameters: - **dockerfile** - path within the build context to the Dockerfile. This is diff --git a/docs/sources/reference/builder.md b/docs/sources/reference/builder.md index 6955d31e0e..9e56abf639 100644 --- a/docs/sources/reference/builder.md +++ b/docs/sources/reference/builder.md @@ -146,6 +146,17 @@ The instructions that handle environment variables in the `Dockerfile` are: `ONBUILD` instructions are **NOT** supported for environment replacement, even the instructions above. +Environment variable subtitution will use the same value for each variable +throughout the entire command. In other words, in this example: + + ENV abc=hello + ENV abc=bye def=$abc + ENV ghi=$abc + +will result in `def` having a value of `hello`, not `bye`. 
However, +`ghi` will have a value of `bye` because it is not part of the same command +that set `abc` to `bye`. + ## The `.dockerignore` file If a file named `.dockerignore` exists in the source repository, then it diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index 322f5f401e..30a8a6b42c 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -599,6 +599,12 @@ in cases where the same set of files are used for multiple builds. The path must be to a file within the build context. If a relative path is specified then it must to be relative to the current directory. +If the Docker client loses connection to the daemon, the build is canceled. +This happens if you interrupt the Docker client with `ctrl-c` or if the Docker +client is killed for any reason. + +> **Note:** Currently only the "run" phase of the build can be canceled until +> pull cancelation is implemented). See also: @@ -1514,14 +1520,6 @@ just a specific mapping: $ sudo docker port test 7890 0.0.0.0:4321 -## rename - - Usage: docker rename OLD_NAME NEW_NAME - - rename a existing container to a NEW_NAME - -The `docker rename` command allows the container to be renamed to a different name. - ## ps Usage: docker ps [OPTIONS] @@ -1617,6 +1615,14 @@ use `docker pull`: Use `docker push` to share your images to the [Docker Hub](https://hub.docker.com) registry or to a self-hosted one. +## rename + + Usage: docker rename OLD_NAME NEW_NAME + + rename a existing container to a NEW_NAME + +The `docker rename` command allows the container to be renamed to a different name. + ## restart Usage: docker restart [OPTIONS] CONTAINER [CONTAINER...] @@ -1655,6 +1661,8 @@ containers removing all network communication. The main process inside the container referenced under the link `/redis` will receive `SIGKILL`, then the container will be removed. 
+ $ docker rm $(docker ps -a -q) + This command will delete all stopped containers. The command `docker ps -a -q` will return all existing container IDs and pass them to the `rm` command which will delete them. Any running containers will not be diff --git a/docs/sources/reference/run.md b/docs/sources/reference/run.md index 052a35823e..5a466461e6 100644 --- a/docs/sources/reference/run.md +++ b/docs/sources/reference/run.md @@ -102,9 +102,10 @@ specify to which of the three standard streams (`STDIN`, `STDOUT`, $ sudo docker run -a stdin -a stdout -i -t ubuntu /bin/bash For interactive processes (like a shell), you must use `-i -t` together in -order to allocate a tty for the container process. Specifying `-t` is however -forbidden when the client standard output is redirected or pipe, such as in: -`echo test | docker run -i busybox cat`. +order to allocate a tty for the container process. `-i -t` is often written `-it` +as you'll see in later examples. Specifying `-t` is forbidden when the client +standard output is redirected or piped, such as in: +`echo test | sudo docker run -i busybox cat`. ## Container identification @@ -289,7 +290,7 @@ running the `redis-cli` command and connecting to the Redis server over the $ sudo docker run -d --name redis example/redis --bind 127.0.0.1 $ # use the redis container's network stack to access localhost - $ sudo docker run --rm -ti --net container:redis example/redis-cli -h 127.0.0.1 + $ sudo docker run --rm -it --net container:redis example/redis-cli -h 127.0.0.1 ### Managing /etc/hosts @@ -297,7 +298,7 @@ Your container will have lines in `/etc/hosts` which define the hostname of the container itself as well as `localhost` and a few other common things. The `--add-host` flag can be used to add additional lines to `/etc/hosts`. 
- $ /docker run -ti --add-host db-static:86.75.30.9 ubuntu cat /etc/hosts + $ sudo docker run -it --add-host db-static:86.75.30.9 ubuntu cat /etc/hosts 172.17.0.22 09d03f76bf2c fe00::0 ip6-localnet ff00::0 ip6-mcastprefix @@ -656,6 +657,11 @@ this driver. Default logging driver for Docker. Writes JSON messages to file. `docker logs` command is available only for this logging driver +## Logging driver: syslog + +Syslog logging driver for Docker. Writes log messages to syslog. `docker logs` +command is not available for this logging driver + ## Overriding Dockerfile image defaults When a developer builds an image from a [*Dockerfile*](/reference/builder) diff --git a/docs/sources/userguide/dockervolumes.md b/docs/sources/userguide/dockervolumes.md index d533224656..af4a7297fb 100644 --- a/docs/sources/userguide/dockervolumes.md +++ b/docs/sources/userguide/dockervolumes.md @@ -52,6 +52,27 @@ This will create a new volume inside a container at `/webapp`. > You can also use the `VOLUME` instruction in a `Dockerfile` to add one or > more new volumes to any container created from that image. +### Locating a volume + +You can locate the volume on the host by utilizing the 'docker inspect' command. + + $ docker inspect web + +The output will provide details on the container configurations including the +volumes. The output should look something similar to the following: + + ... + "Volumes": { + "/webapp": "/var/lib/docker/volumes/fac362...80535" + }, + "VolumesRW": { + "/webapp": true + } + ... + +You will notice in the above 'Volumes' is specifying the location on the host and +'VolumesRW' is specifying that the volume is read/write. 
+ ### Mount a Host Directory as a Data Volume In addition to creating a volume using the `-v` flag you can also mount a diff --git a/docs/sources/userguide/usingdocker.md b/docs/sources/userguide/usingdocker.md index 8d57def4ed..fd5f52a375 100644 --- a/docs/sources/userguide/usingdocker.md +++ b/docs/sources/userguide/usingdocker.md @@ -298,7 +298,7 @@ and won't need it again. So let's remove it using the `docker rm` command. Error: Impossible to remove a running container, please stop it first or use -f 2014/05/24 08:12:56 Error: failed to remove one or more containers -What's happened? We can't actually remove a running container. This protects +What happened? We can't actually remove a running container. This protects you from accidentally removing a running container you might need. Let's try this again by stopping the container first. diff --git a/engine/engine.go b/engine/engine.go index e8286d89f7..60532349ab 100644 --- a/engine/engine.go +++ b/engine/engine.go @@ -46,18 +46,19 @@ func unregister(name string) { // It acts as a store for *containers*, and allows manipulation of these // containers by executing *jobs*. 
type Engine struct { - handlers map[string]Handler - catchall Handler - hack Hack // data for temporary hackery (see hack.go) - id string - Stdout io.Writer - Stderr io.Writer - Stdin io.Reader - Logging bool - tasks sync.WaitGroup - l sync.RWMutex // lock for shutdown - shutdown bool - onShutdown []func() // shutdown handlers + handlers map[string]Handler + catchall Handler + hack Hack // data for temporary hackery (see hack.go) + id string + Stdout io.Writer + Stderr io.Writer + Stdin io.Reader + Logging bool + tasks sync.WaitGroup + l sync.RWMutex // lock for shutdown + shutdownWait sync.WaitGroup + shutdown bool + onShutdown []func() // shutdown handlers } func (eng *Engine) Register(name string, handler Handler) error { @@ -123,6 +124,8 @@ func (eng *Engine) Job(name string, args ...string) *Job { Stderr: NewOutput(), env: &Env{}, closeIO: true, + + cancelled: make(chan struct{}), } if eng.Logging { job.Stderr.Add(ioutils.NopWriteCloser(eng.Stderr)) @@ -143,6 +146,7 @@ func (eng *Engine) Job(name string, args ...string) *Job { func (eng *Engine) OnShutdown(h func()) { eng.l.Lock() eng.onShutdown = append(eng.onShutdown, h) + eng.shutdownWait.Add(1) eng.l.Unlock() } @@ -156,6 +160,7 @@ func (eng *Engine) Shutdown() { eng.l.Lock() if eng.shutdown { eng.l.Unlock() + eng.shutdownWait.Wait() return } eng.shutdown = true @@ -180,17 +185,15 @@ func (eng *Engine) Shutdown() { // Call shutdown handlers, if any. // Timeout after 10 seconds. 
- var wg sync.WaitGroup for _, h := range eng.onShutdown { - wg.Add(1) go func(h func()) { - defer wg.Done() h() + eng.shutdownWait.Done() }(h) } done := make(chan struct{}) go func() { - wg.Wait() + eng.shutdownWait.Wait() close(done) }() select { diff --git a/engine/env_test.go b/engine/env_test.go index 2ed99d0fea..5182783bb3 100644 --- a/engine/env_test.go +++ b/engine/env_test.go @@ -3,12 +3,24 @@ package engine import ( "bytes" "encoding/json" + "math/rand" "testing" "time" - - "github.com/docker/docker/pkg/testutils" ) +const chars = "abcdefghijklmnopqrstuvwxyz" + + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + + "~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` " + +// RandomString returns random string of specified length +func RandomString(length int) string { + res := make([]byte, length) + for i := 0; i < length; i++ { + res[i] = chars[rand.Intn(len(chars))] + } + return string(res) +} + func TestEnvLenZero(t *testing.T) { env := &Env{} if env.Len() != 0 { @@ -185,7 +197,7 @@ func TestMultiMap(t *testing.T) { func testMap(l int) [][2]string { res := make([][2]string, l) for i := 0; i < l; i++ { - t := [2]string{testutils.RandomString(5), testutils.RandomString(20)} + t := [2]string{RandomString(5), RandomString(20)} res[i] = t } return res diff --git a/engine/job.go b/engine/job.go index 4b2befb425..ecb68c3eb7 100644 --- a/engine/job.go +++ b/engine/job.go @@ -5,6 +5,7 @@ import ( "fmt" "io" "strings" + "sync" "time" log "github.com/Sirupsen/logrus" @@ -34,6 +35,12 @@ type Job struct { status Status end time.Time closeIO bool + + // When closed, the job has been cancelled. + // Note: not all jobs implement cancellation. + // See Job.Cancel() and Job.WaitCancelled() + cancelled chan struct{} + cancelOnce sync.Once } type Status int @@ -248,3 +255,15 @@ func (job *Job) StatusCode() int { func (job *Job) SetCloseIO(val bool) { job.closeIO = val } + +// When called, causes the Job.WaitCancelled channel to unblock. 
+func (job *Job) Cancel() { + job.cancelOnce.Do(func() { + close(job.cancelled) + }) +} + +// Returns a channel which is closed ("never blocks") when the job is cancelled. +func (job *Job) WaitCancelled() <-chan struct{} { + return job.cancelled +} diff --git a/graph/manifest.go b/graph/manifest.go index 75c2d9060b..3b1d825576 100644 --- a/graph/manifest.go +++ b/graph/manifest.go @@ -6,8 +6,10 @@ import ( "fmt" log "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" "github.com/docker/docker/engine" "github.com/docker/docker/registry" + "github.com/docker/docker/utils" "github.com/docker/libtrust" ) @@ -16,7 +18,7 @@ import ( // contains no signatures by a trusted key for the name in the manifest, the // image is not considered verified. The parsed manifest object and a boolean // for whether the manifest is verified is returned. -func (s *TagStore) loadManifest(eng *engine.Engine, manifestBytes []byte) (*registry.ManifestData, bool, error) { +func (s *TagStore) loadManifest(eng *engine.Engine, manifestBytes []byte, dgst, ref string) (*registry.ManifestData, bool, error) { sig, err := libtrust.ParsePrettySignature(manifestBytes, "signatures") if err != nil { return nil, false, fmt.Errorf("error parsing payload: %s", err) @@ -32,6 +34,31 @@ func (s *TagStore) loadManifest(eng *engine.Engine, manifestBytes []byte) (*regi return nil, false, fmt.Errorf("error retrieving payload: %s", err) } + var manifestDigest digest.Digest + + if dgst != "" { + manifestDigest, err = digest.ParseDigest(dgst) + if err != nil { + return nil, false, fmt.Errorf("invalid manifest digest from registry: %s", err) + } + + dgstVerifier, err := digest.NewDigestVerifier(manifestDigest) + if err != nil { + return nil, false, fmt.Errorf("unable to verify manifest digest from registry: %s", err) + } + + dgstVerifier.Write(payload) + + if !dgstVerifier.Verified() { + computedDigest, _ := digest.FromBytes(payload) + return nil, false, fmt.Errorf("unable to verify manifest digest: 
registry has %q, computed %q", manifestDigest, computedDigest) + } + } + + if utils.DigestReference(ref) && ref != manifestDigest.String() { + return nil, false, fmt.Errorf("mismatching image manifest digest: got %q, expected %q", manifestDigest, ref) + } + var manifest registry.ManifestData if err := json.Unmarshal(payload, &manifest); err != nil { return nil, false, fmt.Errorf("error unmarshalling manifest: %s", err) diff --git a/graph/pull.go b/graph/pull.go index db46e519b1..adad6f3239 100644 --- a/graph/pull.go +++ b/graph/pull.go @@ -74,7 +74,7 @@ func (s *TagStore) CmdPull(job *engine.Job) engine.Status { logName = utils.ImageReference(logName, tag) } - if len(repoInfo.Index.Mirrors) == 0 && ((repoInfo.Official && repoInfo.Index.Official) || endpoint.Version == registry.APIVersion2) { + if len(repoInfo.Index.Mirrors) == 0 && (repoInfo.Index.Official || endpoint.Version == registry.APIVersion2) { if repoInfo.Official { j := job.Eng.Job("trust_update_base") if err = j.Run(); err != nil { @@ -430,12 +430,15 @@ func (s *TagStore) pullV2Repository(eng *engine.Engine, r *registry.Session, out func (s *TagStore) pullV2Tag(eng *engine.Engine, r *registry.Session, out io.Writer, endpoint *registry.Endpoint, repoInfo *registry.RepositoryInfo, tag string, sf *utils.StreamFormatter, parallel bool, auth *registry.RequestAuthorization) (bool, error) { log.Debugf("Pulling tag from V2 registry: %q", tag) + manifestBytes, manifestDigest, err := r.GetV2ImageManifest(endpoint, repoInfo.RemoteName, tag, auth) if err != nil { return false, err } - manifest, verified, err := s.loadManifest(eng, manifestBytes) + // loadManifest ensures that the manifest payload has the expected digest + // if the tag is a digest reference. 
+ manifest, verified, err := s.loadManifest(eng, manifestBytes, manifestDigest, tag) if err != nil { return false, fmt.Errorf("error verifying manifest: %s", err) } @@ -605,7 +608,7 @@ func (s *TagStore) pullV2Tag(eng *engine.Engine, r *registry.Session, out io.Wri out.Write(sf.FormatStatus(utils.ImageReference(repoInfo.CanonicalName, tag), "The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security.")) } - if len(manifestDigest) > 0 { + if manifestDigest != "" { out.Write(sf.FormatStatus("", "Digest: %s", manifestDigest)) } diff --git a/graph/push.go b/graph/push.go index 7bc79dc99e..f86df6d0b3 100644 --- a/graph/push.go +++ b/graph/push.go @@ -1,7 +1,6 @@ package graph import ( - "bytes" "crypto/sha256" "encoding/json" "errors" @@ -432,14 +431,12 @@ func (s *TagStore) pushV2Repository(r *registry.Session, localRepo Repository, o log.Infof("Signed manifest for %s:%s using daemon's key: %s", repoInfo.LocalName, tag, s.trustKey.KeyID()) // push the manifest - digest, err := r.PutV2ImageManifest(endpoint, repoInfo.RemoteName, tag, bytes.NewReader(signedBody), auth) + digest, err := r.PutV2ImageManifest(endpoint, repoInfo.RemoteName, tag, signedBody, mBytes, auth) if err != nil { return err } - if len(digest) > 0 { - out.Write(sf.FormatStatus("", "Digest: %s", digest)) - } + out.Write(sf.FormatStatus("", "Digest: %s", digest)) } return nil } @@ -542,7 +539,7 @@ func (s *TagStore) CmdPush(job *engine.Job) engine.Status { return job.Errorf("Repository does not exist: %s", repoInfo.LocalName) } - if endpoint.Version == registry.APIVersion2 { + if repoInfo.Index.Official || endpoint.Version == registry.APIVersion2 { err := s.pushV2Repository(r, localRepo, job.Stdout, repoInfo, tag, sf) if err == nil { return engine.StatusOK diff --git a/hack/vendor.sh b/hack/vendor.sh index b3ba928a05..f6422ccac5 100755 --- a/hack/vendor.sh +++ b/hack/vendor.sh @@ -53,7 +53,7 @@ clone hg 
code.google.com/p/gosqlite 74691fb6f837 clone git github.com/docker/libtrust 230dfd18c232 -clone git github.com/Sirupsen/logrus v0.6.6 +clone git github.com/Sirupsen/logrus v0.7.1 clone git github.com/go-fsnotify/fsnotify v1.0.4 @@ -75,7 +75,7 @@ rm -rf src/github.com/docker/distribution mkdir -p src/github.com/docker/distribution mv tmp-digest src/github.com/docker/distribution/digest -clone git github.com/docker/libcontainer 4a72e540feb67091156b907c4700e580a99f5a9d +clone git github.com/docker/libcontainer fd0087d3acdc4c5865de1829d4accee5e3ebb658 # see src/github.com/docker/libcontainer/update-vendor.sh which is the "source of truth" for libcontainer deps (just like this file) rm -rf src/github.com/docker/libcontainer/vendor eval "$(grep '^clone ' src/github.com/docker/libcontainer/update-vendor.sh | grep -v 'github.com/codegangsta/cli' | grep -v 'github.com/Sirupsen/logrus')" diff --git a/integration-cli/docker_api_containers_test.go b/integration-cli/docker_api_containers_test.go index e717eca574..ea2f2450a9 100644 --- a/integration-cli/docker_api_containers_test.go +++ b/integration-cli/docker_api_containers_test.go @@ -516,3 +516,40 @@ func TestBuildApiDockerfileSymlink(t *testing.T) { logDone("container REST API - check build w/bad Dockerfile symlink path") } + +// #9981 - Allow a docker created volume (ie, one in /var/lib/docker/volumes) to be used to overwrite (via passing in Binds on api start) an existing volume +func TestPostContainerBindNormalVolume(t *testing.T) { + defer deleteAllContainers() + + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "create", "-v", "/foo", "--name=one", "busybox")) + if err != nil { + t.Fatal(err, out) + } + + fooDir, err := inspectFieldMap("one", "Volumes", "/foo") + if err != nil { + t.Fatal(err) + } + + out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "create", "-v", "/foo", "--name=two", "busybox")) + if err != nil { + t.Fatal(err, out) + } + + bindSpec := map[string][]string{"Binds": 
{fooDir + ":/foo"}} + _, err = sockRequest("POST", "/containers/two/start", bindSpec) + if err != nil && !strings.Contains(err.Error(), "204 No Content") { + t.Fatal(err) + } + + fooDir2, err := inspectFieldMap("two", "Volumes", "/foo") + if err != nil { + t.Fatal(err) + } + + if fooDir2 != fooDir { + t.Fatal("expected volume path to be %s, got: %s", fooDir, fooDir2) + } + + logDone("container REST API - can use path from normal volume as bind-mount to overwrite another volume") +} diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index 52132f175e..9a8ee69e54 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -2,6 +2,7 @@ package main import ( "archive/tar" + "bufio" "bytes" "encoding/json" "fmt" @@ -14,6 +15,7 @@ import ( "runtime" "strconv" "strings" + "sync" "testing" "text/template" "time" @@ -239,9 +241,18 @@ func TestBuildEnvironmentReplacementEnv(t *testing.T) { _, err := buildImage(name, ` - FROM scratch - ENV foo foo + FROM busybox + ENV foo zzz ENV bar ${foo} + ENV abc1='$foo' + ENV env1=$foo env2=${foo} env3="$foo" env4="${foo}" + RUN [ "$abc1" = '$foo' ] && (echo "$abc1" | grep -q foo) + ENV abc2="\$foo" + RUN [ "$abc2" = '$foo' ] && (echo "$abc2" | grep -q foo) + ENV abc3 '$foo' + RUN [ "$abc3" = '$foo' ] && (echo "$abc3" | grep -q foo) + ENV abc4 "\$foo" + RUN [ "$abc4" = '$foo' ] && (echo "$abc4" | grep -q foo) `, true) if err != nil { @@ -260,13 +271,19 @@ func TestBuildEnvironmentReplacementEnv(t *testing.T) { } found := false + envCount := 0 for _, env := range envResult { parts := strings.SplitN(env, "=", 2) if parts[0] == "bar" { found = true - if parts[1] != "foo" { - t.Fatalf("Could not find replaced var for env `bar`: got %q instead of `foo`", parts[1]) + if parts[1] != "zzz" { + t.Fatalf("Could not find replaced var for env `bar`: got %q instead of `zzz`", parts[1]) + } + } else if strings.HasPrefix(parts[0], "env") { + envCount++ + if 
parts[1] != "zzz" { + t.Fatalf("%s should be 'foo' but instead its %q", parts[0], parts[1]) } } } @@ -275,6 +292,10 @@ func TestBuildEnvironmentReplacementEnv(t *testing.T) { t.Fatal("Never found the `bar` env variable") } + if envCount != 4 { + t.Fatalf("Didn't find all env vars - only saw %d\n%s", envCount, envResult) + } + logDone("build - env environment replacement") } @@ -361,8 +382,8 @@ func TestBuildHandleEscapes(t *testing.T) { t.Fatal(err) } - if _, ok := result[`\\\\\\${FOO}`]; !ok { - t.Fatal(`Could not find volume \\\\\\${FOO} set from env foo in volumes table`) + if _, ok := result[`\\\${FOO}`]; !ok { + t.Fatal(`Could not find volume \\\${FOO} set from env foo in volumes table`, result) } logDone("build - handle escapes") @@ -1924,6 +1945,132 @@ func TestBuildForceRm(t *testing.T) { logDone("build - ensure --force-rm doesn't leave containers behind") } +// Test that an infinite sleep during a build is killed if the client disconnects. +// This test is fairly hairy because there are lots of ways to race. +// Strategy: +// * Monitor the output of docker events starting from before +// * Run a 1-year-long sleep from a docker build. +// * When docker events sees container start, close the "docker build" command +// * Wait for docker events to emit a dying event. +func TestBuildCancelationKillsSleep(t *testing.T) { + // TODO(jfrazelle): Make this work on Windows. 
+ testRequires(t, SameHostDaemon) + + name := "testbuildcancelation" + defer deleteImages(name) + + // (Note: one year, will never finish) + ctx, err := fakeContext("FROM busybox\nRUN sleep 31536000", nil) + if err != nil { + t.Fatal(err) + } + defer ctx.Close() + + var wg sync.WaitGroup + defer wg.Wait() + + finish := make(chan struct{}) + defer close(finish) + + eventStart := make(chan struct{}) + eventDie := make(chan struct{}) + + // Start one second ago, to avoid rounding problems + startEpoch := time.Now().Add(-1 * time.Second) + + // Goroutine responsible for watching start/die events from `docker events` + wg.Add(1) + go func() { + defer wg.Done() + + // Watch for events since epoch. + eventsCmd := exec.Command(dockerBinary, "events", + "-since", fmt.Sprint(startEpoch.Unix())) + stdout, err := eventsCmd.StdoutPipe() + err = eventsCmd.Start() + if err != nil { + t.Fatalf("failed to start 'docker events': %s", err) + } + + go func() { + <-finish + eventsCmd.Process.Kill() + }() + + var started, died bool + matchStart := regexp.MustCompile(" \\(from busybox\\:latest\\) start$") + matchDie := regexp.MustCompile(" \\(from busybox\\:latest\\) die$") + + // + // Read lines of `docker events` looking for container start and stop. 
+ // + scanner := bufio.NewScanner(stdout) + for scanner.Scan() { + if ok := matchStart.MatchString(scanner.Text()); ok { + if started { + t.Fatal("assertion fail: more than one container started") + } + close(eventStart) + started = true + } + if ok := matchDie.MatchString(scanner.Text()); ok { + if died { + t.Fatal("assertion fail: more than one container died") + } + close(eventDie) + died = true + } + } + + err = eventsCmd.Wait() + if err != nil && !IsKilled(err) { + t.Fatalf("docker events had bad exit status: %s", err) + } + }() + + buildCmd := exec.Command(dockerBinary, "build", "-t", name, ".") + buildCmd.Dir = ctx.Dir + buildCmd.Stdout = os.Stdout + + err = buildCmd.Start() + if err != nil { + t.Fatalf("failed to run build: %s", err) + } + + select { + case <-time.After(30 * time.Second): + t.Fatal("failed to observe build container start in timely fashion") + case <-eventStart: + // Proceeds from here when we see the container fly past in the + // output of "docker events". + // Now we know the container is running. + } + + // Send a kill to the `docker build` command. + // Causes the underlying build to be cancelled due to socket close. + err = buildCmd.Process.Kill() + if err != nil { + t.Fatalf("error killing build command: %s", err) + } + + // Get the exit status of `docker build`, check it exited because killed. + err = buildCmd.Wait() + if err != nil && !IsKilled(err) { + t.Fatalf("wait failed during build run: %T %s", err, err) + } + + select { + case <-time.After(30 * time.Second): + // If we don't get here in a timely fashion, it wasn't killed. + t.Fatal("container cancel did not succeed") + case <-eventDie: + // We saw the container shut down in the `docker events` stream, + // as expected. 
+ } + + logDone("build - ensure canceled job finishes immediately") +} + func TestBuildRm(t *testing.T) { name := "testbuildrm" defer deleteImages(name) @@ -2128,7 +2275,7 @@ func TestBuildRelativeWorkdir(t *testing.T) { func TestBuildWorkdirWithEnvVariables(t *testing.T) { name := "testbuildworkdirwithenvvariables" - expected := "/test1/test2/$MISSING_VAR" + expected := "/test1/test2" defer deleteImages(name) _, err := buildImage(name, `FROM busybox @@ -3357,7 +3504,7 @@ func TestBuildFailsDockerfileEmpty(t *testing.T) { defer deleteImages(name) _, err := buildImage(name, ``, true) if err != nil { - if !strings.Contains(err.Error(), "Dockerfile cannot be empty") { + if !strings.Contains(err.Error(), "The Dockerfile (Dockerfile) cannot be empty") { t.Fatalf("Wrong error %v, must be about empty Dockerfile", err) } } else { @@ -3897,9 +4044,9 @@ ENV abc=zzz TO=/docker/world/hello ADD $FROM $TO RUN [ "$(cat $TO)" = "hello" ] ENV abc "zzz" -RUN [ $abc = \"zzz\" ] +RUN [ $abc = "zzz" ] ENV abc 'yyy' -RUN [ $abc = \'yyy\' ] +RUN [ $abc = 'yyy' ] ENV abc= RUN [ "$abc" = "" ] @@ -3915,13 +4062,34 @@ RUN [ "$abc" = "'foo'" ] ENV abc=\"foo\" RUN [ "$abc" = "\"foo\"" ] ENV abc "foo" -RUN [ "$abc" = "\"foo\"" ] +RUN [ "$abc" = "foo" ] ENV abc 'foo' -RUN [ "$abc" = "'foo'" ] +RUN [ "$abc" = 'foo' ] ENV abc \'foo\' -RUN [ "$abc" = "\\'foo\\'" ] +RUN [ "$abc" = "'foo'" ] ENV abc \"foo\" -RUN [ "$abc" = "\\\"foo\\\"" ] +RUN [ "$abc" = '"foo"' ] + +ENV e1=bar +ENV e2=$e1 +ENV e3=$e11 +ENV e4=\$e1 +ENV e5=\$e11 +RUN [ "$e0,$e1,$e2,$e3,$e4,$e5" = ',bar,bar,,$e1,$e11' ] + +ENV ee1 bar +ENV ee2 $ee1 +ENV ee3 $ee11 +ENV ee4 \$ee1 +ENV ee5 \$ee11 +RUN [ "$ee1,$ee2,$ee3,$ee4,$ee5" = 'bar,bar,,$ee1,$ee11' ] + +ENV eee1="foo" +ENV eee2='foo' +ENV eee3 "foo" +ENV eee4 'foo' +RUN [ "$eee1,$eee2,$eee3,$eee4" = 'foo,foo,foo,foo' ] + ` ctx, err := fakeContext(dockerfile, map[string]string{ "hello/docker/world": "hello", @@ -4585,14 +4753,29 @@ func TestBuildLabelsCache(t *testing.T) { `FROM 
busybox LABEL Vendor=Acme1`, true) if err != nil || id1 == id2 { - t.Fatalf("Build 2 should have worked & NOT used cache(%s,%s): %v", id1, id2, err) + t.Fatalf("Build 3 should have worked & NOT used cache(%s,%s): %v", id1, id2, err) } id2, err = buildImage(name, `FROM busybox LABEL Vendor Acme`, true) // Note: " " and "=" should be same if err != nil || id1 != id2 { - t.Fatalf("Build 3 should have worked & used cache(%s,%s): %v", id1, id2, err) + t.Fatalf("Build 4 should have worked & used cache(%s,%s): %v", id1, id2, err) + } + + // Now make sure the cache isn't used by mistake + id1, err = buildImage(name, + `FROM busybox + LABEL f1=b1 f2=b2`, false) + if err != nil { + t.Fatalf("Build 5 should have worked: %q", err) + } + + id2, err = buildImage(name, + `FROM busybox + LABEL f1="b1 f2=b2"`, true) + if err != nil || id1 == id2 { + t.Fatalf("Build 6 should have worked & NOT used the cache(%s,%s): %q", id1, id2, err) } logDone("build - label cache") @@ -4608,8 +4791,19 @@ func TestBuildStderr(t *testing.T) { if err != nil { t.Fatal(err) } - if stderr != "" { - t.Fatalf("Stderr should have been empty, instead its: %q", stderr) + + if runtime.GOOS == "windows" { + // stderr might contain a security warning on windows + lines := strings.Split(stderr, "\n") + for _, v := range lines { + if v != "" && !strings.Contains(v, "SECURITY WARNING:") { + t.Fatalf("Stderr contains unexpected output line: %q", v) + } + } + } else { + if stderr != "" { + t.Fatalf("Stderr should have been empty, instead its: %q", stderr) + } } logDone("build - testing stderr") } @@ -5098,9 +5292,13 @@ func TestBuildSpaces(t *testing.T) { t.Fatal("Build 2 was supposed to fail, but didn't") } + removeLogTimestamps := func(s string) string { + return regexp.MustCompile(`time="(.*?)"`).ReplaceAllString(s, `time=[TIMESTAMP]`) + } + // Skip over the times - e1 := err1.Error()[strings.Index(err1.Error(), `level=`):] - e2 := err2.Error()[strings.Index(err1.Error(), `level=`):] + e1 := 
removeLogTimestamps(err1.Error()) + e2 := removeLogTimestamps(err2.Error()) // Ignore whitespace since that's what were verifying doesn't change stuff if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) { @@ -5113,8 +5311,8 @@ func TestBuildSpaces(t *testing.T) { } // Skip over the times - e1 = err1.Error()[strings.Index(err1.Error(), `level=`):] - e2 = err2.Error()[strings.Index(err1.Error(), `level=`):] + e1 = removeLogTimestamps(err1.Error()) + e2 = removeLogTimestamps(err2.Error()) // Ignore whitespace since that's what were verifying doesn't change stuff if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) { @@ -5127,8 +5325,8 @@ func TestBuildSpaces(t *testing.T) { } // Skip over the times - e1 = err1.Error()[strings.Index(err1.Error(), `level=`):] - e2 = err2.Error()[strings.Index(err1.Error(), `level=`):] + e1 = removeLogTimestamps(err1.Error()) + e2 = removeLogTimestamps(err2.Error()) // Ignore whitespace since that's what were verifying doesn't change stuff if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) { @@ -5289,7 +5487,7 @@ func TestBuildRUNoneJSON(t *testing.T) { name := "testbuildrunonejson" defer deleteAllContainers() - defer deleteImages(name) + defer deleteImages(name, "hello-world") ctx, err := fakeContext(`FROM hello-world:frozen RUN [ "/hello" ]`, map[string]string{}) @@ -5315,7 +5513,7 @@ RUN [ "/hello" ]`, map[string]string{}) func TestBuildResourceConstraintsAreUsed(t *testing.T) { name := "testbuildresourceconstraints" defer deleteAllContainers() - defer deleteImages(name) + defer deleteImages(name, "hello-world") ctx, err := fakeContext(` FROM hello-world:frozen @@ -5382,3 +5580,19 @@ func TestBuildResourceConstraintsAreUsed(t *testing.T) { logDone("build - resource constraints applied") } + +func TestBuildEmptyStringVolume(t *testing.T) { + name := "testbuildemptystringvolume" + defer deleteImages(name) + + _, err := buildImage(name, ` + FROM busybox + ENV foo="" + VOLUME 
$foo + `, false) + if err == nil { + t.Fatal("Should have failed to build") + } + + logDone("build - empty string volume") +} diff --git a/integration-cli/docker_cli_daemon_test.go b/integration-cli/docker_cli_daemon_test.go index 49b43c2f28..c515a63787 100644 --- a/integration-cli/docker_cli_daemon_test.go +++ b/integration-cli/docker_cli_daemon_test.go @@ -800,3 +800,31 @@ func TestDaemonDots(t *testing.T) { logDone("daemon - test dots on INFO") } + +func TestDaemonUnixSockCleanedUp(t *testing.T) { + d := NewDaemon(t) + dir, err := ioutil.TempDir("", "socket-cleanup-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + sockPath := filepath.Join(dir, "docker.sock") + if err := d.Start("--host", "unix://"+sockPath); err != nil { + t.Fatal(err) + } + + if _, err := os.Stat(sockPath); err != nil { + t.Fatal("socket does not exist") + } + + if err := d.Stop(); err != nil { + t.Fatal(err) + } + + if _, err := os.Stat(sockPath); err == nil || !os.IsNotExist(err) { + t.Fatal("unix socket is not cleaned up") + } + + logDone("daemon - unix socket is cleaned up") +} diff --git a/integration-cli/docker_cli_pull_test.go b/integration-cli/docker_cli_pull_test.go index 926e763434..39b0eae3ff 100644 --- a/integration-cli/docker_cli_pull_test.go +++ b/integration-cli/docker_cli_pull_test.go @@ -106,6 +106,8 @@ func TestPullNonExistingImage(t *testing.T) { // pulling an image from the central registry using official names should work // ensure all pulls result in the same image func TestPullImageOfficialNames(t *testing.T) { + testRequires(t, Network) + names := []string{ "docker.io/hello-world", "index.docker.io/hello-world", diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index 3cab284331..083e651bfd 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -20,7 +20,7 @@ import ( "time" "github.com/docker/docker/nat" - "github.com/docker/docker/pkg/networkfs/resolvconf" + 
"github.com/docker/docker/pkg/resolvconf" ) // "test123" should be printed by docker run @@ -412,6 +412,31 @@ func TestRunLinkToContainerNetMode(t *testing.T) { logDone("run - link to a container which net mode is container success") } +func TestRunModeNetContainerHostname(t *testing.T) { + defer deleteAllContainers() + cmd := exec.Command(dockerBinary, "run", "-i", "-d", "--name", "parent", "busybox", "top") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatalf("failed to run container: %v, output: %q", err, out) + } + cmd = exec.Command(dockerBinary, "exec", "parent", "cat", "/etc/hostname") + out, _, err = runCommandWithOutput(cmd) + if err != nil { + t.Fatalf("failed to exec command: %v, output: %q", err, out) + } + + cmd = exec.Command(dockerBinary, "run", "--net=container:parent", "busybox", "cat", "/etc/hostname") + out1, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatalf("failed to run container: %v, output: %q", err, out1) + } + if out1 != out { + t.Fatal("containers with shared net namespace should have same hostname") + } + + logDone("run - containers with shared net namespace have same hostname") +} + // Regression test for #4741 func TestRunWithVolumesAsFiles(t *testing.T) { defer deleteAllContainers() @@ -3323,3 +3348,46 @@ func TestRunVolumesFromRestartAfterRemoved(t *testing.T) { logDone("run - can restart a volumes-from container after producer is removed") } + +// run container with --rm should remove container if exit code != 0 +func TestRunContainerWithRmFlagExitCodeNotEqualToZero(t *testing.T) { + defer deleteAllContainers() + + runCmd := exec.Command(dockerBinary, "run", "--rm", "busybox", "ls", "/notexists") + out, _, err := runCommandWithOutput(runCmd) + if err == nil { + t.Fatal("Expected docker run to fail", out, err) + } + + out, err = getAllContainers() + if err != nil { + t.Fatal(out, err) + } + + if out != "" { + t.Fatal("Expected not to have containers", out) + } + + logDone("run - container is removed 
if run with --rm and exit code != 0") +} + +func TestRunContainerWithRmFlagCannotStartContainer(t *testing.T) { + defer deleteAllContainers() + + runCmd := exec.Command(dockerBinary, "run", "--rm", "busybox", "commandNotFound") + out, _, err := runCommandWithOutput(runCmd) + if err == nil { + t.Fatal("Expected docker run to fail", out, err) + } + + out, err = getAllContainers() + if err != nil { + t.Fatal(out, err) + } + + if out != "" { + t.Fatal("Expected not to have containers", out) + } + + logDone("run - container is removed if run with --rm and cannot start") +} diff --git a/integration-cli/docker_cli_run_unix_test.go b/integration-cli/docker_cli_run_unix_test.go index e328b74848..9327ac240a 100644 --- a/integration-cli/docker_cli_run_unix_test.go +++ b/integration-cli/docker_cli_run_unix_test.go @@ -109,19 +109,6 @@ func TestRunWithUlimits(t *testing.T) { logDone("run - ulimits are set") } -func getCgroupPaths(test string) map[string]string { - cgroupPaths := map[string]string{} - for _, line := range strings.Split(test, "\n") { - parts := strings.Split(line, ":") - if len(parts) != 3 { - fmt.Printf("unexpected file format for /proc/self/cgroup - %q\n", line) - continue - } - cgroupPaths[parts[1]] = parts[2] - } - return cgroupPaths -} - func TestRunContainerWithCgroupParent(t *testing.T) { testRequires(t, NativeExecDriver) defer deleteAllContainers() @@ -131,7 +118,7 @@ func TestRunContainerWithCgroupParent(t *testing.T) { if err != nil { t.Fatalf("failed to read '/proc/self/cgroup - %v", err) } - selfCgroupPaths := getCgroupPaths(string(data)) + selfCgroupPaths := parseCgroupPaths(string(data)) selfCpuCgroup, found := selfCgroupPaths["memory"] if !found { t.Fatalf("unable to find self cpu cgroup path. 
CgroupsPath: %v", selfCgroupPaths) @@ -141,7 +128,7 @@ func TestRunContainerWithCgroupParent(t *testing.T) { if err != nil { t.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) } - cgroupPaths := getCgroupPaths(string(out)) + cgroupPaths := parseCgroupPaths(string(out)) if len(cgroupPaths) == 0 { t.Fatalf("unexpected output - %q", string(out)) } @@ -169,7 +156,7 @@ func TestRunContainerWithCgroupParentAbsPath(t *testing.T) { if err != nil { t.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) } - cgroupPaths := getCgroupPaths(string(out)) + cgroupPaths := parseCgroupPaths(string(out)) if len(cgroupPaths) == 0 { t.Fatalf("unexpected output - %q", string(out)) } diff --git a/integration-cli/docker_cli_start_test.go b/integration-cli/docker_cli_start_test.go index 01f0ef95a1..3ec04c9169 100644 --- a/integration-cli/docker_cli_start_test.go +++ b/integration-cli/docker_cli_start_test.go @@ -240,3 +240,49 @@ func TestStartMultipleContainers(t *testing.T) { logDone("start - start multiple containers continue on one failed") } + +func TestStartAttachMultipleContainers(t *testing.T) { + + var cmd *exec.Cmd + + defer deleteAllContainers() + // run multiple containers to test + for _, container := range []string{"test1", "test2", "test3"} { + cmd = exec.Command(dockerBinary, "run", "-d", "--name", container, "busybox", "top") + if out, _, err := runCommandWithOutput(cmd); err != nil { + t.Fatal(out, err) + } + } + + // stop all the containers + for _, container := range []string{"test1", "test2", "test3"} { + cmd = exec.Command(dockerBinary, "stop", container) + if out, _, err := runCommandWithOutput(cmd); err != nil { + t.Fatal(out, err) + } + } + + // test start and attach multiple containers at once, expected error + for _, option := range []string{"-a", "-i", "-ai"} { + cmd = exec.Command(dockerBinary, "start", option, "test1", "test2", "test3") + out, _, err 
:= runCommandWithOutput(cmd) + if !strings.Contains(out, "You cannot start and attach multiple containers at once.") || err == nil { + t.Fatal("Expected error but got none") + } + } + + // confirm the state of all the containers be stopped + for container, expected := range map[string]string{"test1": "false", "test2": "false", "test3": "false"} { + cmd = exec.Command(dockerBinary, "inspect", "-f", "{{.State.Running}}", container) + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(out, err) + } + out = strings.Trim(out, "\r\n") + if out != expected { + t.Fatal("Container running state wrong") + } + } + + logDone("start - error on start and attach multiple containers at once") +} diff --git a/integration-cli/docker_utils.go b/integration-cli/docker_utils.go index 10cc6c9218..e0b9bacc4a 100644 --- a/integration-cli/docker_utils.go +++ b/integration-cli/docker_utils.go @@ -395,9 +395,8 @@ func getSliceOfPausedContainers() ([]string, error) { if err == nil { slice := strings.Split(strings.TrimSpace(out), "\n") return slice, err - } else { - return []string{out}, err } + return []string{out}, err } func unpauseContainer(container string) error { diff --git a/integration-cli/requirements.go b/integration-cli/requirements.go index 346d0cdf66..cdd9991873 100644 --- a/integration-cli/requirements.go +++ b/integration-cli/requirements.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "log" + "net/http" "os/exec" "strings" "testing" @@ -32,6 +33,16 @@ var ( func() bool { return supportsExec }, "Test requires 'docker exec' capabilities on the tested daemon.", } + Network = TestRequirement{ + func() bool { + resp, err := http.Get("http://hub.docker.com") + if resp != nil { + resp.Body.Close() + } + return err == nil + }, + "Test requires network availability, environment variable set to none to run in a non-network enabled mode.", + } RegistryHosting = TestRequirement{ func() bool { // for now registry binary is built only if we're running inside diff --git 
a/integration-cli/test_vars_windows.go b/integration-cli/test_vars_windows.go index 3cad4bceef..f81ac53cc3 100644 --- a/integration-cli/test_vars_windows.go +++ b/integration-cli/test_vars_windows.go @@ -6,6 +6,6 @@ const ( // identifies if test suite is running on a unix platform isUnixCli = false - // this is the expected file permission set on windows: gh#11047 - expectedFileChmod = "-rwx------" + // this is the expected file permission set on windows: gh#11395 + expectedFileChmod = "-rwxr-xr-x" ) diff --git a/integration-cli/utils.go b/integration-cli/utils.go index 691402f35e..85e6f1ccd3 100644 --- a/integration-cli/utils.go +++ b/integration-cli/utils.go @@ -42,6 +42,18 @@ func processExitCode(err error) (exitCode int) { return } +func IsKilled(err error) bool { + if exitErr, ok := err.(*exec.ExitError); ok { + sys := exitErr.ProcessState.Sys() + status, ok := sys.(syscall.WaitStatus) + if !ok { + return false + } + return status.Signaled() && status.Signal() == os.Kill + } + return false +} + func runCommandWithOutput(cmd *exec.Cmd) (output string, exitCode int, err error) { exitCode = 0 out, err := cmd.CombinedOutput() @@ -328,3 +340,17 @@ func consumeWithSpeed(reader io.Reader, chunkSize int, interval time.Duration, s } } } + +// Parses 'procCgroupData', which is output of '/proc//cgroup', and returns +// a map which cgroup name as key and path as value. 
+func parseCgroupPaths(procCgroupData string) map[string]string { + cgroupPaths := map[string]string{} + for _, line := range strings.Split(procCgroupData, "\n") { + parts := strings.Split(line, ":") + if len(parts) != 3 { + continue + } + cgroupPaths[parts[1]] = parts[2] + } + return cgroupPaths +} diff --git a/opts/opts.go b/opts/opts.go index e867c0a21d..df9decf61f 100644 --- a/opts/opts.go +++ b/opts/opts.go @@ -192,9 +192,8 @@ func ValidateMACAddress(val string) (string, error) { _, err := net.ParseMAC(strings.TrimSpace(val)) if err != nil { return "", err - } else { - return val, nil } + return val, nil } // Validates domain for resolvconf search configuration. diff --git a/pkg/archive/archive_windows.go b/pkg/archive/archive_windows.go index 96a93ee7af..6caef3b735 100644 --- a/pkg/archive/archive_windows.go +++ b/pkg/archive/archive_windows.go @@ -28,10 +28,9 @@ func CanonicalTarNameForPath(p string) (string, error) { // chmodTarEntry is used to adjust the file permissions used in tar header based // on the platform the archival is done. func chmodTarEntry(perm os.FileMode) os.FileMode { - // Clear r/w on grp/others: no precise equivalen of group/others on NTFS. 
- perm &= 0711 + perm &= 0755 // Add the x bit: make everything +x from windows - perm |= 0100 + perm |= 0111 return perm } diff --git a/pkg/archive/archive_windows_test.go b/pkg/archive/archive_windows_test.go index 0c97a1040d..b33e0fb005 100644 --- a/pkg/archive/archive_windows_test.go +++ b/pkg/archive/archive_windows_test.go @@ -51,11 +51,11 @@ func TestChmodTarEntry(t *testing.T) { cases := []struct { in, expected os.FileMode }{ - {0000, 0100}, - {0777, 0711}, - {0644, 0700}, - {0755, 0711}, - {0444, 0500}, + {0000, 0111}, + {0777, 0755}, + {0644, 0755}, + {0755, 0755}, + {0444, 0555}, } for _, v := range cases { if out := chmodTarEntry(v.in); out != v.expected { diff --git a/pkg/archive/changes.go b/pkg/archive/changes.go index f2ac2a3561..c3cb4ebe0e 100644 --- a/pkg/archive/changes.go +++ b/pkg/archive/changes.go @@ -220,8 +220,8 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { oldStat.Gid() != newStat.Gid() || oldStat.Rdev() != newStat.Rdev() || // Don't look at size for dirs, its not a good measure of change - (oldStat.Size() != newStat.Size() && oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR) || - !sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || + (oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR && + (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) || bytes.Compare(oldChild.capability, newChild.capability) != 0 { change := Change{ Path: newChild.path(), diff --git a/pkg/archive/changes_test.go b/pkg/archive/changes_test.go index 8f32d7b30e..53ec575b67 100644 --- a/pkg/archive/changes_test.go +++ b/pkg/archive/changes_test.go @@ -218,7 +218,6 @@ func TestChangesDirsMutated(t *testing.T) { expectedChanges := []Change{ {"/dir1", ChangeDelete}, {"/dir2", ChangeModify}, - {"/dir3", ChangeModify}, {"/dirnew", ChangeAdd}, {"/file1", ChangeDelete}, {"/file2", ChangeModify}, diff --git a/pkg/networkfs/etchosts/etchosts.go b/pkg/etchosts/etchosts.go similarity index 100% rename from 
pkg/networkfs/etchosts/etchosts.go rename to pkg/etchosts/etchosts.go diff --git a/pkg/networkfs/etchosts/etchosts_test.go b/pkg/etchosts/etchosts_test.go similarity index 100% rename from pkg/networkfs/etchosts/etchosts_test.go rename to pkg/etchosts/etchosts_test.go diff --git a/pkg/ioutils/readers.go b/pkg/ioutils/readers.go index 22f46fbd92..58ff1af639 100644 --- a/pkg/ioutils/readers.go +++ b/pkg/ioutils/readers.go @@ -2,8 +2,11 @@ package ioutils import ( "bytes" + "crypto/rand" "io" + "math/big" "sync" + "time" ) type readCloserWrapper struct { @@ -42,20 +45,40 @@ func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { } } +// bufReader allows the underlying reader to continue to produce +// output by pre-emptively reading from the wrapped reader. +// This is achieved by buffering this data in bufReader's +// expanding buffer. type bufReader struct { sync.Mutex - buf *bytes.Buffer - reader io.Reader - err error - wait sync.Cond - drainBuf []byte + buf *bytes.Buffer + reader io.Reader + err error + wait sync.Cond + drainBuf []byte + reuseBuf []byte + maxReuse int64 + resetTimeout time.Duration + bufLenResetThreshold int64 + maxReadDataReset int64 } func NewBufReader(r io.Reader) *bufReader { + var timeout int + if randVal, err := rand.Int(rand.Reader, big.NewInt(120)); err == nil { + timeout = int(randVal.Int64()) + 180 + } else { + timeout = 300 + } reader := &bufReader{ - buf: &bytes.Buffer{}, - drainBuf: make([]byte, 1024), - reader: r, + buf: &bytes.Buffer{}, + drainBuf: make([]byte, 1024), + reuseBuf: make([]byte, 4096), + maxReuse: 1000, + resetTimeout: time.Second * time.Duration(timeout), + bufLenResetThreshold: 100 * 1024, + maxReadDataReset: 10 * 1024 * 1024, + reader: r, } reader.wait.L = &reader.Mutex go reader.drain() @@ -74,14 +97,94 @@ func NewBufReaderWithDrainbufAndBuffer(r io.Reader, drainBuffer []byte, buffer * } func (r *bufReader) drain() { + var ( + duration time.Duration + lastReset time.Time + now time.Time + reset bool + 
bufLen int64 + dataSinceReset int64 + maxBufLen int64 + reuseBufLen int64 + reuseCount int64 + ) + reuseBufLen = int64(len(r.reuseBuf)) + lastReset = time.Now() for { n, err := r.reader.Read(r.drainBuf) + dataSinceReset += int64(n) r.Lock() + bufLen = int64(r.buf.Len()) + if bufLen > maxBufLen { + maxBufLen = bufLen + } + + // Avoid unbounded growth of the buffer over time. + // This has been discovered to be the only non-intrusive + // solution to the unbounded growth of the buffer. + // Alternative solutions such as compression, multiple + // buffers, channels and other similar pieces of code + // were reducing throughput, overall Docker performance + // or simply crashed Docker. + // This solution releases the buffer when specific + // conditions are met to avoid the continuous resizing + // of the buffer for long lived containers. + // + // Move data to the front of the buffer if it's + // smaller than what reuseBuf can store + if bufLen > 0 && reuseBufLen >= bufLen { + n, _ := r.buf.Read(r.reuseBuf) + r.buf.Write(r.reuseBuf[0:n]) + // Take action if the buffer has been reused too many + // times and if there's data in the buffer. + // The timeout is also used as means to avoid doing + // these operations more often or less often than + // required. + // The various conditions try to detect heavy activity + // in the buffer which might be indicators of heavy + // growth of the buffer. + } else if reuseCount >= r.maxReuse && bufLen > 0 { + now = time.Now() + duration = now.Sub(lastReset) + timeoutReached := duration >= r.resetTimeout + + // The timeout has been reached and the + // buffered data couldn't be moved to the front + // of the buffer, so the buffer gets reset. + if timeoutReached && bufLen > reuseBufLen { + reset = true + } + // The amount of buffered data is too high now, + // reset the buffer. 
+ if timeoutReached && maxBufLen >= r.bufLenResetThreshold { + reset = true + } + // Reset the buffer if a certain amount of + // data has gone through the buffer since the + // last reset. + if timeoutReached && dataSinceReset >= r.maxReadDataReset { + reset = true + } + // The buffered data is moved to a fresh buffer, + // swap the old buffer with the new one and + // reset all counters. + if reset { + newbuf := &bytes.Buffer{} + newbuf.ReadFrom(r.buf) + r.buf = newbuf + lastReset = now + reset = false + dataSinceReset = 0 + maxBufLen = 0 + reuseCount = 0 + } + } if err != nil { r.err = err } else { r.buf.Write(r.drainBuf[0:n]) } + reuseCount++ r.wait.Signal() r.Unlock() if err != nil { diff --git a/pkg/ioutils/readers_test.go b/pkg/ioutils/readers_test.go index a7a2dad176..0af978e068 100644 --- a/pkg/ioutils/readers_test.go +++ b/pkg/ioutils/readers_test.go @@ -32,3 +32,61 @@ func TestBufReader(t *testing.T) { t.Error(string(output)) } } + +type repeatedReader struct { + readCount int + maxReads int + data []byte +} + +func newRepeatedReader(max int, data []byte) *repeatedReader { + return &repeatedReader{0, max, data} +} + +func (r *repeatedReader) Read(p []byte) (int, error) { + if r.readCount >= r.maxReads { + return 0, io.EOF + } + r.readCount++ + n := copy(p, r.data) + return n, nil +} + +func testWithData(data []byte, reads int) { + reader := newRepeatedReader(reads, data) + bufReader := NewBufReader(reader) + io.Copy(ioutil.Discard, bufReader) +} + +func Benchmark1M10BytesReads(b *testing.B) { + reads := 1000000 + readSize := int64(10) + data := make([]byte, readSize) + b.SetBytes(readSize * int64(reads)) + b.ResetTimer() + for i := 0; i < b.N; i++ { + testWithData(data, reads) + } +} + +func Benchmark1M1024BytesReads(b *testing.B) { + reads := 1000000 + readSize := int64(1024) + data := make([]byte, readSize) + b.SetBytes(readSize * int64(reads)) + b.ResetTimer() + for i := 0; i < b.N; i++ { + testWithData(data, reads) + } +} + +func 
Benchmark10k32KBytesReads(b *testing.B) { + reads := 10000 + readSize := int64(32 * 1024) + data := make([]byte, readSize) + b.SetBytes(readSize * int64(reads)) + b.ResetTimer() + for i := 0; i < b.N; i++ { + testWithData(data, reads) + } +} diff --git a/pkg/networkfs/resolvconf/resolvconf.go b/pkg/resolvconf/resolvconf.go similarity index 100% rename from pkg/networkfs/resolvconf/resolvconf.go rename to pkg/resolvconf/resolvconf.go diff --git a/pkg/networkfs/resolvconf/resolvconf_test.go b/pkg/resolvconf/resolvconf_test.go similarity index 100% rename from pkg/networkfs/resolvconf/resolvconf_test.go rename to pkg/resolvconf/resolvconf_test.go diff --git a/pkg/term/console_windows.go b/pkg/term/console_windows.go deleted file mode 100644 index 6335b2b837..0000000000 --- a/pkg/term/console_windows.go +++ /dev/null @@ -1,87 +0,0 @@ -// +build windows - -package term - -import ( - "syscall" - "unsafe" -) - -const ( - // Consts for Get/SetConsoleMode function - // see http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx - ENABLE_ECHO_INPUT = 0x0004 - ENABLE_INSERT_MODE = 0x0020 - ENABLE_LINE_INPUT = 0x0002 - ENABLE_MOUSE_INPUT = 0x0010 - ENABLE_PROCESSED_INPUT = 0x0001 - ENABLE_QUICK_EDIT_MODE = 0x0040 - ENABLE_WINDOW_INPUT = 0x0008 - // If parameter is a screen buffer handle, additional values - ENABLE_PROCESSED_OUTPUT = 0x0001 - ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002 -) - -var kernel32DLL = syscall.NewLazyDLL("kernel32.dll") - -var ( - setConsoleModeProc = kernel32DLL.NewProc("SetConsoleMode") - getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo") -) - -func GetConsoleMode(fileDesc uintptr) (uint32, error) { - var mode uint32 - err := syscall.GetConsoleMode(syscall.Handle(fileDesc), &mode) - return mode, err -} - -func SetConsoleMode(fileDesc uintptr, mode uint32) error { - r, _, err := setConsoleModeProc.Call(fileDesc, uintptr(mode), 0) - if r == 0 { - if err != nil { - return err - } - return syscall.EINVAL - } - 
return nil -} - -// types for calling GetConsoleScreenBufferInfo -// see http://msdn.microsoft.com/en-us/library/windows/desktop/ms682093(v=vs.85).aspx -type ( - SHORT int16 - - SMALL_RECT struct { - Left SHORT - Top SHORT - Right SHORT - Bottom SHORT - } - - COORD struct { - X SHORT - Y SHORT - } - - WORD uint16 - - CONSOLE_SCREEN_BUFFER_INFO struct { - dwSize COORD - dwCursorPosition COORD - wAttributes WORD - srWindow SMALL_RECT - dwMaximumWindowSize COORD - } -) - -func GetConsoleScreenBufferInfo(fileDesc uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) { - var info CONSOLE_SCREEN_BUFFER_INFO - r, _, err := getConsoleScreenBufferInfoProc.Call(uintptr(fileDesc), uintptr(unsafe.Pointer(&info)), 0) - if r == 0 { - if err != nil { - return nil, err - } - return nil, syscall.EINVAL - } - return &info, nil -} diff --git a/pkg/term/term.go b/pkg/term/term.go index 8d807d8d44..b945a3dcea 100644 --- a/pkg/term/term.go +++ b/pkg/term/term.go @@ -4,6 +4,7 @@ package term import ( "errors" + "io" "os" "os/signal" "syscall" @@ -25,6 +26,20 @@ type Winsize struct { y uint16 } +func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { + return os.Stdin, os.Stdout, os.Stderr +} + +func GetFdInfo(in interface{}) (uintptr, bool) { + var inFd uintptr + var isTerminalIn bool + if file, ok := in.(*os.File); ok { + inFd = file.Fd() + isTerminalIn = IsTerminal(inFd) + } + return inFd, isTerminalIn +} + func GetWinsize(fd uintptr) (*Winsize, error) { ws := &Winsize{} _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(ws))) diff --git a/pkg/term/term_windows.go b/pkg/term/term_windows.go index d372e86a88..abda841cb1 100644 --- a/pkg/term/term_windows.go +++ b/pkg/term/term_windows.go @@ -1,11 +1,19 @@ // +build windows - package term +import ( + "io" + "os" + + "github.com/docker/docker/pkg/term/winconsole" +) + +// State holds the console mode for the terminal. 
type State struct { mode uint32 } +// Winsize is used for window size. type Winsize struct { Height uint16 Width uint16 @@ -13,15 +21,17 @@ type Winsize struct { y uint16 } +// GetWinsize gets the window size of the given terminal func GetWinsize(fd uintptr) (*Winsize, error) { ws := &Winsize{} - var info *CONSOLE_SCREEN_BUFFER_INFO - info, err := GetConsoleScreenBufferInfo(fd) + var info *winconsole.CONSOLE_SCREEN_BUFFER_INFO + info, err := winconsole.GetConsoleScreenBufferInfo(fd) if err != nil { return nil, err } - ws.Height = uint16(info.srWindow.Right - info.srWindow.Left + 1) - ws.Width = uint16(info.srWindow.Bottom - info.srWindow.Top + 1) + + ws.Width = uint16(info.Window.Right - info.Window.Left + 1) + ws.Height = uint16(info.Window.Bottom - info.Window.Top + 1) ws.x = 0 // todo azlinux -- this is the pixel size of the Window, and not currently used by any caller ws.y = 0 @@ -29,37 +39,44 @@ func GetWinsize(fd uintptr) (*Winsize, error) { return ws, nil } +// SetWinsize sets the terminal connected to the given file descriptor to a +// given size. func SetWinsize(fd uintptr, ws *Winsize) error { return nil } // IsTerminal returns true if the given file descriptor is a terminal. func IsTerminal(fd uintptr) bool { - _, e := GetConsoleMode(fd) + _, e := winconsole.GetConsoleMode(fd) return e == nil } -// Restore restores the terminal connected to the given file descriptor to a +// RestoreTerminal restores the terminal connected to the given file descriptor to a // previous state. 
func RestoreTerminal(fd uintptr, state *State) error { - return SetConsoleMode(fd, state.mode) + return winconsole.SetConsoleMode(fd, state.mode) } +// SaveState saves the state of the given console func SaveState(fd uintptr) (*State, error) { - mode, e := GetConsoleMode(fd) + mode, e := winconsole.GetConsoleMode(fd) if e != nil { return nil, e } return &State{mode}, nil } +// DisableEcho disables the echo for given file descriptor and returns previous state // see http://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx for these flag settings func DisableEcho(fd uintptr, state *State) error { - state.mode &^= (ENABLE_ECHO_INPUT) - state.mode |= (ENABLE_PROCESSED_INPUT | ENABLE_LINE_INPUT) - return SetConsoleMode(fd, state.mode) + state.mode &^= (winconsole.ENABLE_ECHO_INPUT) + state.mode |= (winconsole.ENABLE_PROCESSED_INPUT | winconsole.ENABLE_LINE_INPUT) + return winconsole.SetConsoleMode(fd, state.mode) } +// SetRawTerminal puts the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. func SetRawTerminal(fd uintptr) (*State, error) { oldState, err := MakeRaw(fd) if err != nil { @@ -79,11 +96,42 @@ func MakeRaw(fd uintptr) (*State, error) { return nil, err } - // see http://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx for these flag settings - state.mode &^= (ENABLE_ECHO_INPUT | ENABLE_PROCESSED_INPUT | ENABLE_LINE_INPUT) - err = SetConsoleMode(fd, state.mode) + // https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx + // All three input modes, along with processed output mode, are designed to work together. + // It is best to either enable or disable all of these modes as a group. + // When all are enabled, the application is said to be in "cooked" mode, which means that most of the processing is handled for the application. 
+ // When all are disabled, the application is in "raw" mode, which means that input is unfiltered and any processing is left to the application. + state.mode = 0 + err = winconsole.SetConsoleMode(fd, state.mode) if err != nil { return nil, err } return state, nil } + +// GetFdInfo returns file descriptor and bool indicating whether the file is a terminal +func GetFdInfo(in interface{}) (uintptr, bool) { + return winconsole.GetHandleInfo(in) +} + +func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { + var shouldEmulateANSI bool + switch { + case os.Getenv("ConEmuANSI") == "ON": + // ConEmu shell, ansi emulated by default and ConEmu does an extensively + // good emulation. + shouldEmulateANSI = false + case os.Getenv("MSYSTEM") != "": + // MSYS (mingw) cannot fully emulate well and still shows escape characters + // mostly because it's still running on cmd.exe window. + shouldEmulateANSI = true + default: + shouldEmulateANSI = true + } + + if shouldEmulateANSI { + return winconsole.StdStreams() + } + + return os.Stdin, os.Stdout, os.Stderr +} diff --git a/pkg/term/winconsole/console_windows.go b/pkg/term/winconsole/console_windows.go new file mode 100644 index 0000000000..19977b1010 --- /dev/null +++ b/pkg/term/winconsole/console_windows.go @@ -0,0 +1,1042 @@ +// +build windows + +package winconsole + +import ( + "bytes" + "fmt" + "io" + "os" + "strconv" + "strings" + "sync" + "syscall" + "unsafe" +) + +const ( + // Consts for Get/SetConsoleMode function + // see http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx + ENABLE_ECHO_INPUT = 0x0004 + ENABLE_INSERT_MODE = 0x0020 + ENABLE_LINE_INPUT = 0x0002 + ENABLE_MOUSE_INPUT = 0x0010 + ENABLE_PROCESSED_INPUT = 0x0001 + ENABLE_QUICK_EDIT_MODE = 0x0040 + ENABLE_WINDOW_INPUT = 0x0008 + // If parameter is a screen buffer handle, additional values + ENABLE_PROCESSED_OUTPUT = 0x0001 + ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002 + + 
//http://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes + FOREGROUND_BLUE = 1 + FOREGROUND_GREEN = 2 + FOREGROUND_RED = 4 + FOREGROUND_INTENSITY = 8 + FOREGROUND_MASK_SET = 0x000F + FOREGROUND_MASK_UNSET = 0xFFF0 + + BACKGROUND_BLUE = 16 + BACKGROUND_GREEN = 32 + BACKGROUND_RED = 64 + BACKGROUND_INTENSITY = 128 + BACKGROUND_MASK_SET = 0x00F0 + BACKGROUND_MASK_UNSET = 0xFF0F + + COMMON_LVB_REVERSE_VIDEO = 0x4000 + COMMON_LVB_UNDERSCORE = 0x8000 + + // http://man7.org/linux/man-pages/man4/console_codes.4.html + // ECMA-48 Set Graphics Rendition + ANSI_ATTR_RESET = 0 + ANSI_ATTR_BOLD = 1 + ANSI_ATTR_DIM = 2 + ANSI_ATTR_UNDERLINE = 4 + ANSI_ATTR_BLINK = 5 + ANSI_ATTR_REVERSE = 7 + ANSI_ATTR_INVISIBLE = 8 + + ANSI_ATTR_UNDERLINE_OFF = 24 + ANSI_ATTR_BLINK_OFF = 25 + ANSI_ATTR_REVERSE_OFF = 27 + ANSI_ATTR_INVISIBLE_OFF = 8 + + ANSI_FOREGROUND_BLACK = 30 + ANSI_FOREGROUND_RED = 31 + ANSI_FOREGROUND_GREEN = 32 + ANSI_FOREGROUND_YELLOW = 33 + ANSI_FOREGROUND_BLUE = 34 + ANSI_FOREGROUND_MAGENTA = 35 + ANSI_FOREGROUND_CYAN = 36 + ANSI_FOREGROUND_WHITE = 37 + ANSI_FOREGROUND_DEFAULT = 39 + + ANSI_BACKGROUND_BLACK = 40 + ANSI_BACKGROUND_RED = 41 + ANSI_BACKGROUND_GREEN = 42 + ANSI_BACKGROUND_YELLOW = 43 + ANSI_BACKGROUND_BLUE = 44 + ANSI_BACKGROUND_MAGENTA = 45 + ANSI_BACKGROUND_CYAN = 46 + ANSI_BACKGROUND_WHITE = 47 + ANSI_BACKGROUND_DEFAULT = 49 + + ANSI_MAX_CMD_LENGTH = 256 + + MAX_INPUT_EVENTS = 128 + MAX_INPUT_BUFFER = 1024 + DEFAULT_WIDTH = 80 + DEFAULT_HEIGHT = 24 +) + +// http://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx +const ( + VK_PRIOR = 0x21 // PAGE UP key + VK_NEXT = 0x22 // PAGE DOWN key + VK_END = 0x23 // END key + VK_HOME = 0x24 // HOME key + VK_LEFT = 0x25 // LEFT ARROW key + VK_UP = 0x26 // UP ARROW key + VK_RIGHT = 0x27 //RIGHT ARROW key + VK_DOWN = 0x28 //DOWN ARROW key + VK_SELECT = 0x29 //SELECT key + VK_PRINT = 0x2A //PRINT key + VK_EXECUTE = 0x2B //EXECUTE key + 
VK_SNAPSHOT = 0x2C //PRINT SCREEN key + VK_INSERT = 0x2D //INS key + VK_DELETE = 0x2E //DEL key + VK_HELP = 0x2F //HELP key + VK_F1 = 0x70 //F1 key + VK_F2 = 0x71 //F2 key + VK_F3 = 0x72 //F3 key + VK_F4 = 0x73 //F4 key + VK_F5 = 0x74 //F5 key + VK_F6 = 0x75 //F6 key + VK_F7 = 0x76 //F7 key + VK_F8 = 0x77 //F8 key + VK_F9 = 0x78 //F9 key + VK_F10 = 0x79 //F10 key + VK_F11 = 0x7A //F11 key + VK_F12 = 0x7B //F12 key +) + +var kernel32DLL = syscall.NewLazyDLL("kernel32.dll") + +var ( + setConsoleModeProc = kernel32DLL.NewProc("SetConsoleMode") + getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo") + setConsoleCursorPositionProc = kernel32DLL.NewProc("SetConsoleCursorPosition") + setConsoleTextAttributeProc = kernel32DLL.NewProc("SetConsoleTextAttribute") + fillConsoleOutputCharacterProc = kernel32DLL.NewProc("FillConsoleOutputCharacterW") + writeConsoleOutputProc = kernel32DLL.NewProc("WriteConsoleOutputW") + readConsoleInputProc = kernel32DLL.NewProc("ReadConsoleInputW") + getNumberOfConsoleInputEventsProc = kernel32DLL.NewProc("GetNumberOfConsoleInputEvents") + getConsoleCursorInfoProc = kernel32DLL.NewProc("GetConsoleCursorInfo") + setConsoleCursorInfoProc = kernel32DLL.NewProc("SetConsoleCursorInfo") + setConsoleWindowInfoProc = kernel32DLL.NewProc("SetConsoleWindowInfo") + setConsoleScreenBufferSizeProc = kernel32DLL.NewProc("SetConsoleScreenBufferSize") +) + +// types for calling various windows API +// see http://msdn.microsoft.com/en-us/library/windows/desktop/ms682093(v=vs.85).aspx +type ( + SHORT int16 + SMALL_RECT struct { + Left SHORT + Top SHORT + Right SHORT + Bottom SHORT + } + + COORD struct { + X SHORT + Y SHORT + } + + BOOL int32 + WORD uint16 + WCHAR uint16 + DWORD uint32 + + CONSOLE_SCREEN_BUFFER_INFO struct { + Size COORD + CursorPosition COORD + Attributes WORD + Window SMALL_RECT + MaximumWindowSize COORD + } + + CONSOLE_CURSOR_INFO struct { + Size DWORD + Visible BOOL + } + + // 
http://msdn.microsoft.com/en-us/library/windows/desktop/ms684166(v=vs.85).aspx + KEY_EVENT_RECORD struct { + KeyDown BOOL + RepeatCount WORD + VirtualKeyCode WORD + VirtualScanCode WORD + UnicodeChar WCHAR + ControlKeyState DWORD + } + + INPUT_RECORD struct { + EventType WORD + KeyEvent KEY_EVENT_RECORD + } + + CHAR_INFO struct { + UnicodeChar WCHAR + Attributes WORD + } +) + +// Implements the TerminalEmulator interface +type WindowsTerminal struct { + outMutex sync.Mutex + inMutex sync.Mutex + inputBuffer []byte + inputSize int + inputEvents []INPUT_RECORD + screenBufferInfo *CONSOLE_SCREEN_BUFFER_INFO + inputEscapeSequence []byte +} + +func getStdHandle(stdhandle int) uintptr { + handle, err := syscall.GetStdHandle(stdhandle) + if err != nil { + panic(fmt.Errorf("could not get standard io handle %d", stdhandle)) + } + return uintptr(handle) +} + +func StdStreams() (stdIn io.ReadCloser, stdOut io.Writer, stdErr io.Writer) { + handler := &WindowsTerminal{ + inputBuffer: make([]byte, MAX_INPUT_BUFFER), + inputEscapeSequence: []byte(KEY_ESC_CSI), + inputEvents: make([]INPUT_RECORD, MAX_INPUT_EVENTS), + } + + if IsTerminal(os.Stdin.Fd()) { + stdIn = &terminalReader{ + wrappedReader: os.Stdin, + emulator: handler, + command: make([]byte, 0, ANSI_MAX_CMD_LENGTH), + fd: getStdHandle(syscall.STD_INPUT_HANDLE), + } + } else { + stdIn = os.Stdin + } + + if IsTerminal(os.Stdout.Fd()) { + stdoutHandle := getStdHandle(syscall.STD_OUTPUT_HANDLE) + + // Save current screen buffer info + screenBufferInfo, err := GetConsoleScreenBufferInfo(stdoutHandle) + if err != nil { + // If GetConsoleScreenBufferInfo returns a nil error, it usually means that stdout is not a TTY. + // However, this is in the branch where stdout is a TTY, hence the panic. 
+ panic("could not get console screen buffer info") + } + handler.screenBufferInfo = screenBufferInfo + + // Set the window size + SetWindowSize(stdoutHandle, DEFAULT_WIDTH, DEFAULT_HEIGHT, DEFAULT_HEIGHT) + buffer = make([]CHAR_INFO, screenBufferInfo.MaximumWindowSize.X*screenBufferInfo.MaximumWindowSize.Y) + + stdOut = &terminalWriter{ + wrappedWriter: os.Stdout, + emulator: handler, + command: make([]byte, 0, ANSI_MAX_CMD_LENGTH), + fd: stdoutHandle, + } + } else { + stdOut = os.Stdout + } + + if IsTerminal(os.Stderr.Fd()) { + stdErr = &terminalWriter{ + wrappedWriter: os.Stderr, + emulator: handler, + command: make([]byte, 0, ANSI_MAX_CMD_LENGTH), + fd: getStdHandle(syscall.STD_ERROR_HANDLE), + } + } else { + stdErr = os.Stderr + } + + return stdIn, stdOut, stdErr +} + +// GetHandleInfo returns file descriptor and bool indicating whether the file is a terminal +func GetHandleInfo(in interface{}) (uintptr, bool) { + var inFd uintptr + var isTerminalIn bool + if file, ok := in.(*os.File); ok { + inFd = file.Fd() + isTerminalIn = IsTerminal(inFd) + } + if tr, ok := in.(*terminalReader); ok { + if file, ok := tr.wrappedReader.(*os.File); ok { + inFd = file.Fd() + isTerminalIn = IsTerminal(inFd) + } + } + return inFd, isTerminalIn +} + +func getError(r1, r2 uintptr, lastErr error) error { + // If the function fails, the return value is zero. 
+ if r1 == 0 { + if lastErr != nil { + return lastErr + } + return syscall.EINVAL + } + return nil +} + +// GetConsoleMode gets the console mode for given file descriptor +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx +func GetConsoleMode(handle uintptr) (uint32, error) { + var mode uint32 + err := syscall.GetConsoleMode(syscall.Handle(handle), &mode) + return mode, err +} + +// SetConsoleMode sets the console mode for given file descriptor +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx +func SetConsoleMode(handle uintptr, mode uint32) error { + return getError(setConsoleModeProc.Call(handle, uintptr(mode), 0)) +} + +// SetCursorVisible sets the cursor visibility +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms686019(v=vs.85).aspx +func SetCursorVisible(handle uintptr, isVisible BOOL) (bool, error) { + var cursorInfo CONSOLE_CURSOR_INFO + if err := getError(getConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(&cursorInfo)), 0)); err != nil { + return false, err + } + cursorInfo.Visible = isVisible + if err := getError(setConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(&cursorInfo)), 0)); err != nil { + return false, err + } + return true, nil +} + +// SetWindowSize sets the size of the console window. +func SetWindowSize(handle uintptr, width, height, max SHORT) (bool, error) { + window := SMALL_RECT{Left: 0, Top: 0, Right: width - 1, Bottom: height - 1} + coord := COORD{X: width - 1, Y: max} + if err := getError(setConsoleWindowInfoProc.Call(handle, uintptr(1), uintptr(unsafe.Pointer(&window)))); err != nil { + return false, err + } + if err := getError(setConsoleScreenBufferSizeProc.Call(handle, marshal(coord))); err != nil { + return false, err + } + return true, nil +} + +// GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer. 
+// http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx +func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) { + var info CONSOLE_SCREEN_BUFFER_INFO + if err := getError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0)); err != nil { + return nil, err + } + return &info, nil +} + +// setConsoleTextAttribute sets the attributes of characters written to the +// console screen buffer by the WriteFile or WriteConsole function, +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx +func setConsoleTextAttribute(handle uintptr, attribute WORD) error { + return getError(setConsoleTextAttributeProc.Call(handle, uintptr(attribute), 0)) +} + +func writeConsoleOutput(handle uintptr, buffer []CHAR_INFO, bufferSize COORD, bufferCoord COORD, writeRegion *SMALL_RECT) (bool, error) { + if err := getError(writeConsoleOutputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), marshal(bufferSize), marshal(bufferCoord), uintptr(unsafe.Pointer(writeRegion)))); err != nil { + return false, err + } + return true, nil +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms682663(v=vs.85).aspx +func fillConsoleOutputCharacter(handle uintptr, fillChar byte, length uint32, writeCord COORD) (bool, error) { + out := int64(0) + if err := getError(fillConsoleOutputCharacterProc.Call(handle, uintptr(fillChar), uintptr(length), marshal(writeCord), uintptr(unsafe.Pointer(&out)))); err != nil { + return false, err + } + return true, nil +} + +// Gets the number of space characters to write for "clearing" the section of terminal +func getNumberOfChars(fromCoord COORD, toCoord COORD, screenSize COORD) uint32 { + // must be valid cursor position + if fromCoord.X < 0 || fromCoord.Y < 0 || toCoord.X < 0 || toCoord.Y < 0 { + return 0 + } + if fromCoord.X >= screenSize.X || fromCoord.Y >= screenSize.Y || toCoord.X >= screenSize.X || toCoord.Y >= screenSize.Y { + return 0 + } + 
// can't be backwards + if fromCoord.Y > toCoord.Y { + return 0 + } + // same line + if fromCoord.Y == toCoord.Y { + return uint32(toCoord.X-fromCoord.X) + 1 + } + // spans more than one line + if fromCoord.Y < toCoord.Y { + // from start till end of line for first line + from start of line till end + retValue := uint32(screenSize.X-fromCoord.X) + uint32(toCoord.X) + 1 + // don't count first and last line + linesBetween := toCoord.Y - fromCoord.Y - 1 + if linesBetween > 0 { + retValue = retValue + uint32(linesBetween*screenSize.X) + } + return retValue + } + return 0 +} + +var buffer []CHAR_INFO + +func clearDisplayRect(handle uintptr, fillChar rune, attributes WORD, fromCoord COORD, toCoord COORD, windowSize COORD) (uint32, error) { + var writeRegion SMALL_RECT + writeRegion.Top = fromCoord.Y + writeRegion.Left = fromCoord.X + writeRegion.Right = toCoord.X + writeRegion.Bottom = toCoord.Y + + // allocate and initialize buffer + width := toCoord.X - fromCoord.X + 1 + height := toCoord.Y - fromCoord.Y + 1 + size := width * height + if size > 0 { + for i := 0; i < int(size); i++ { + buffer[i].UnicodeChar = WCHAR(fillChar) + buffer[i].Attributes = attributes + } + + // Write to buffer + r, err := writeConsoleOutput(handle, buffer[:size], windowSize, COORD{X: 0, Y: 0}, &writeRegion) + if !r { + if err != nil { + return 0, err + } + return 0, syscall.EINVAL + } + } + return uint32(size), nil +} + +func clearDisplayRange(handle uintptr, fillChar rune, attributes WORD, fromCoord COORD, toCoord COORD, windowSize COORD) (uint32, error) { + nw := uint32(0) + // start and end on same line + if fromCoord.Y == toCoord.Y { + return clearDisplayRect(handle, fillChar, attributes, fromCoord, toCoord, windowSize) + } + // TODO(azlinux): if full screen, optimize + + // spans more than one line + if fromCoord.Y < toCoord.Y { + // from start position till end of line for first line + n, err := clearDisplayRect(handle, fillChar, attributes, fromCoord, COORD{X: windowSize.X - 1, Y: 
fromCoord.Y}, windowSize) + if err != nil { + return nw, err + } + nw += n + // lines between + linesBetween := toCoord.Y - fromCoord.Y - 1 + if linesBetween > 0 { + n, err = clearDisplayRect(handle, fillChar, attributes, COORD{X: 0, Y: fromCoord.Y + 1}, COORD{X: windowSize.X - 1, Y: toCoord.Y - 1}, windowSize) + if err != nil { + return nw, err + } + nw += n + } + // lines at end + n, err = clearDisplayRect(handle, fillChar, attributes, COORD{X: 0, Y: toCoord.Y}, toCoord, windowSize) + if err != nil { + return nw, err + } + nw += n + } + return nw, nil +} + +// setConsoleCursorPosition sets the console cursor position +// Note The X and Y are zero based +// If relative is true then the new position is relative to current one +func setConsoleCursorPosition(handle uintptr, isRelative bool, column int16, line int16) error { + screenBufferInfo, err := GetConsoleScreenBufferInfo(handle) + if err != nil { + return err + } + var position COORD + if isRelative { + position.X = screenBufferInfo.CursorPosition.X + SHORT(column) + position.Y = screenBufferInfo.CursorPosition.Y + SHORT(line) + } else { + position.X = SHORT(column) + position.Y = SHORT(line) + } + return getError(setConsoleCursorPositionProc.Call(handle, marshal(position), 0)) +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms683207(v=vs.85).aspx +func getNumberOfConsoleInputEvents(handle uintptr) (uint16, error) { + var n WORD + if err := getError(getNumberOfConsoleInputEventsProc.Call(handle, uintptr(unsafe.Pointer(&n)))); err != nil { + return 0, err + } + return uint16(n), nil +} + +//http://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx +func readConsoleInputKey(handle uintptr, inputBuffer []INPUT_RECORD) (int, error) { + var nr WORD + if err := getError(readConsoleInputProc.Call(handle, uintptr(unsafe.Pointer(&inputBuffer[0])), uintptr(len(inputBuffer)), uintptr(unsafe.Pointer(&nr)))); err != nil { + return 0, err + } + return int(nr), nil +} + +func 
getWindowsTextAttributeForAnsiValue(originalFlag WORD, defaultValue WORD, ansiValue int16) (WORD, error) {
+	flag := WORD(originalFlag)
+	if flag == 0 {
+		flag = defaultValue
+	}
+	switch ansiValue {
+	case ANSI_ATTR_RESET:
+		flag &^= COMMON_LVB_UNDERSCORE
+		flag &^= BACKGROUND_INTENSITY
+		flag = flag | FOREGROUND_INTENSITY
+	case ANSI_ATTR_INVISIBLE:
+		// TODO: how do you reset reverse?
+	case ANSI_ATTR_UNDERLINE:
+		flag = flag | COMMON_LVB_UNDERSCORE
+	case ANSI_ATTR_BLINK:
+		// seems like background intensity is blink
+		flag = flag | BACKGROUND_INTENSITY
+	case ANSI_ATTR_UNDERLINE_OFF:
+		flag &^= COMMON_LVB_UNDERSCORE
+	case ANSI_ATTR_BLINK_OFF:
+		// seems like background intensity is blink
+		flag &^= BACKGROUND_INTENSITY
+	case ANSI_ATTR_BOLD:
+		flag = flag | FOREGROUND_INTENSITY
+	case ANSI_ATTR_DIM:
+		flag &^= FOREGROUND_INTENSITY
+	case ANSI_ATTR_REVERSE, ANSI_ATTR_REVERSE_OFF:
+		// swap foreground and background bits
+		foreground := flag & FOREGROUND_MASK_SET
+		background := flag & BACKGROUND_MASK_SET
+		flag = (flag & BACKGROUND_MASK_UNSET & FOREGROUND_MASK_UNSET) | (foreground << 4) | (background >> 4)
+
+		// FOREGROUND
+	case ANSI_FOREGROUND_DEFAULT:
+		flag = (flag & FOREGROUND_MASK_UNSET) | (defaultValue & FOREGROUND_MASK_SET)
+	case ANSI_FOREGROUND_BLACK:
+		flag = flag ^ (FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE)
+	case ANSI_FOREGROUND_RED:
+		flag = (flag & FOREGROUND_MASK_UNSET) | FOREGROUND_RED
+	case ANSI_FOREGROUND_GREEN:
+		flag = (flag & FOREGROUND_MASK_UNSET) | FOREGROUND_GREEN
+	case ANSI_FOREGROUND_YELLOW:
+		flag = (flag & FOREGROUND_MASK_UNSET) | FOREGROUND_RED | FOREGROUND_GREEN
+	case ANSI_FOREGROUND_BLUE:
+		flag = (flag & FOREGROUND_MASK_UNSET) | FOREGROUND_BLUE
+	case ANSI_FOREGROUND_MAGENTA:
+		flag = (flag & FOREGROUND_MASK_UNSET) | FOREGROUND_RED | FOREGROUND_BLUE
+	case ANSI_FOREGROUND_CYAN:
+		flag = (flag & FOREGROUND_MASK_UNSET) | FOREGROUND_GREEN | FOREGROUND_BLUE
+	case ANSI_FOREGROUND_WHITE:
+		flag = (flag & 
FOREGROUND_MASK_UNSET) | FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE + + // Background + case ANSI_BACKGROUND_DEFAULT: + // Black with no intensity + flag = (flag & BACKGROUND_MASK_UNSET) | (defaultValue & BACKGROUND_MASK_SET) + case ANSI_BACKGROUND_BLACK: + flag = (flag & BACKGROUND_MASK_UNSET) + case ANSI_BACKGROUND_RED: + flag = (flag & BACKGROUND_MASK_UNSET) | BACKGROUND_RED + case ANSI_BACKGROUND_GREEN: + flag = (flag & BACKGROUND_MASK_UNSET) | BACKGROUND_GREEN + case ANSI_BACKGROUND_YELLOW: + flag = (flag & BACKGROUND_MASK_UNSET) | BACKGROUND_RED | BACKGROUND_GREEN + case ANSI_BACKGROUND_BLUE: + flag = (flag & BACKGROUND_MASK_UNSET) | BACKGROUND_BLUE + case ANSI_BACKGROUND_MAGENTA: + flag = (flag & BACKGROUND_MASK_UNSET) | BACKGROUND_RED | BACKGROUND_BLUE + case ANSI_BACKGROUND_CYAN: + flag = (flag & BACKGROUND_MASK_UNSET) | BACKGROUND_GREEN | BACKGROUND_BLUE + case ANSI_BACKGROUND_WHITE: + flag = (flag & BACKGROUND_MASK_UNSET) | BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE + } + return flag, nil +} + +// HandleOutputCommand interpretes the Ansi commands and then makes appropriate Win32 calls +func (term *WindowsTerminal) HandleOutputCommand(handle uintptr, command []byte) (n int, err error) { + // always consider all the bytes in command, processed + n = len(command) + + parsedCommand := parseAnsiCommand(command) + + // console settings changes need to happen in atomic way + term.outMutex.Lock() + defer term.outMutex.Unlock() + + switch parsedCommand.Command { + case "m": + // [Value;...;Valuem + // Set Graphics Mode: + // Calls the graphics functions specified by the following values. + // These specified functions remain active until the next occurrence of this escape sequence. + // Graphics mode changes the colors and attributes of text (such as bold and underline) displayed on the screen. 
+ screenBufferInfo, err := GetConsoleScreenBufferInfo(handle) + if err != nil { + return n, err + } + flag := screenBufferInfo.Attributes + for _, e := range parsedCommand.Parameters { + value, _ := strconv.ParseInt(e, 10, 16) // base 10, 16 bit + if value == ANSI_ATTR_RESET { + flag = term.screenBufferInfo.Attributes // reset + } else { + flag, err = getWindowsTextAttributeForAnsiValue(flag, term.screenBufferInfo.Attributes, int16(value)) + if err != nil { + return n, err + } + } + } + if err := setConsoleTextAttribute(handle, flag); err != nil { + return n, err + } + case "H", "f": + // [line;columnH + // [line;columnf + // Moves the cursor to the specified position (coordinates). + // If you do not specify a position, the cursor moves to the home position at the upper-left corner of the screen (line 0, column 0). + screenBufferInfo, err := GetConsoleScreenBufferInfo(handle) + if err != nil { + return n, err + } + line, err := parseInt16OrDefault(parsedCommand.getParam(0), 1) + if err != nil { + return n, err + } + if line > int16(screenBufferInfo.Window.Bottom) { + line = int16(screenBufferInfo.Window.Bottom) + } + column, err := parseInt16OrDefault(parsedCommand.getParam(1), 1) + if err != nil { + return n, err + } + if column > int16(screenBufferInfo.Window.Right) { + column = int16(screenBufferInfo.Window.Right) + } + // The numbers are not 0 based, but 1 based + if err := setConsoleCursorPosition(handle, false, column-1, line-1); err != nil { + return n, err + } + + case "A": + // [valueA + // Moves the cursor up by the specified number of lines without changing columns. + // If the cursor is already on the top line, ignores this sequence. + value, err := parseInt16OrDefault(parsedCommand.getParam(0), 1) + if err != nil { + return len(command), err + } + if err := setConsoleCursorPosition(handle, true, 0, -value); err != nil { + return n, err + } + case "B": + // [valueB + // Moves the cursor down by the specified number of lines without changing columns. 
+ // If the cursor is already on the bottom line, ignores this sequence. + value, err := parseInt16OrDefault(parsedCommand.getParam(0), 1) + if err != nil { + return n, err + } + if err := setConsoleCursorPosition(handle, true, 0, value); err != nil { + return n, err + } + case "C": + // [valueC + // Moves the cursor forward by the specified number of columns without changing lines. + // If the cursor is already in the rightmost column, ignores this sequence. + value, err := parseInt16OrDefault(parsedCommand.getParam(0), 1) + if err != nil { + return n, err + } + if err := setConsoleCursorPosition(handle, true, value, 0); err != nil { + return n, err + } + case "D": + // [valueD + // Moves the cursor back by the specified number of columns without changing lines. + // If the cursor is already in the leftmost column, ignores this sequence. + value, err := parseInt16OrDefault(parsedCommand.getParam(0), 1) + if err != nil { + return n, err + } + if err := setConsoleCursorPosition(handle, true, -value, 0); err != nil { + return n, err + } + case "J": + // [J Erases from the cursor to the end of the screen, including the cursor position. + // [1J Erases from the beginning of the screen to the cursor, including the cursor position. + // [2J Erases the complete display. The cursor does not move. + // Clears the screen and moves the cursor to the home position (line 0, column 0). 
+ value, err := parseInt16OrDefault(parsedCommand.getParam(0), 0) + if err != nil { + return n, err + } + var start COORD + var cursor COORD + var end COORD + screenBufferInfo, err := GetConsoleScreenBufferInfo(handle) + if err != nil { + return n, err + } + switch value { + case 0: + start = screenBufferInfo.CursorPosition + // end of the screen + end.X = screenBufferInfo.MaximumWindowSize.X - 1 + end.Y = screenBufferInfo.MaximumWindowSize.Y - 1 + // cursor + cursor = screenBufferInfo.CursorPosition + case 1: + + // start of the screen + start.X = 0 + start.Y = 0 + // end of the screen + end = screenBufferInfo.CursorPosition + // cursor + cursor = screenBufferInfo.CursorPosition + case 2: + // start of the screen + start.X = 0 + start.Y = 0 + // end of the screen + end.X = screenBufferInfo.MaximumWindowSize.X - 1 + end.Y = screenBufferInfo.MaximumWindowSize.Y - 1 + // cursor + cursor.X = 0 + cursor.Y = 0 + } + if _, err := clearDisplayRange(uintptr(handle), ' ', term.screenBufferInfo.Attributes, start, end, screenBufferInfo.MaximumWindowSize); err != nil { + return n, err + } + // remember the the cursor position is 1 based + if err := setConsoleCursorPosition(handle, false, int16(cursor.X), int16(cursor.Y)); err != nil { + return n, err + } + case "K": + // [K + // Clears all characters from the cursor position to the end of the line (including the character at the cursor position). + // [K Erases from the cursor to the end of the line, including the cursor position. + // [1K Erases from the beginning of the line to the cursor, including the cursor position. + // [2K Erases the complete line. 
+ value, err := parseInt16OrDefault(parsedCommand.getParam(0), 0) + var start COORD + var cursor COORD + var end COORD + screenBufferInfo, err := GetConsoleScreenBufferInfo(uintptr(handle)) + if err != nil { + return n, err + } + switch value { + case 0: + // start is where cursor is + start = screenBufferInfo.CursorPosition + // end of line + end.X = screenBufferInfo.MaximumWindowSize.X - 1 + end.Y = screenBufferInfo.CursorPosition.Y + // cursor remains the same + cursor = screenBufferInfo.CursorPosition + + case 1: + // beginning of line + start.X = 0 + start.Y = screenBufferInfo.CursorPosition.Y + // until cursor + end = screenBufferInfo.CursorPosition + // cursor remains the same + cursor = screenBufferInfo.CursorPosition + case 2: + // start of the line + start.X = 0 + start.Y = screenBufferInfo.MaximumWindowSize.Y - 1 + // end of the line + end.X = screenBufferInfo.MaximumWindowSize.X - 1 + end.Y = screenBufferInfo.MaximumWindowSize.Y - 1 + // cursor + cursor.X = 0 + cursor.Y = screenBufferInfo.MaximumWindowSize.Y - 1 + } + if _, err := clearDisplayRange(uintptr(handle), ' ', term.screenBufferInfo.Attributes, start, end, screenBufferInfo.MaximumWindowSize); err != nil { + return n, err + } + // remember the the cursor position is 1 based + if err := setConsoleCursorPosition(uintptr(handle), false, int16(cursor.X), int16(cursor.Y)); err != nil { + return n, err + } + + case "l": + for _, value := range parsedCommand.Parameters { + switch value { + case "?25", "25": + SetCursorVisible(uintptr(handle), BOOL(0)) + case "?1049", "1049": + // TODO (azlinux): Restore terminal + case "?1", "1": + // If the DECCKM function is reset, then the arrow keys send ANSI cursor sequences to the host. 
+ term.inputEscapeSequence = []byte(KEY_ESC_CSI) + } + } + case "h": + for _, value := range parsedCommand.Parameters { + switch value { + case "?25", "25": + SetCursorVisible(uintptr(handle), BOOL(1)) + case "?1049", "1049": + // TODO (azlinux): Save terminal + case "?1", "1": + // If the DECCKM function is set, then the arrow keys send application sequences to the host. + // DECCKM (default off): When set, the cursor keys send an ESC O prefix, rather than ESC [. + term.inputEscapeSequence = []byte(KEY_ESC_O) + } + } + + case "]": + /* + TODO (azlinux): + Linux Console Private CSI Sequences + + The following sequences are neither ECMA-48 nor native VT102. They are + native to the Linux console driver. Colors are in SGR parameters: 0 = + black, 1 = red, 2 = green, 3 = brown, 4 = blue, 5 = magenta, 6 = cyan, + 7 = white. + + ESC [ 1 ; n ] Set color n as the underline color + ESC [ 2 ; n ] Set color n as the dim color + ESC [ 8 ] Make the current color pair the default attributes. + ESC [ 9 ; n ] Set screen blank timeout to n minutes. + ESC [ 10 ; n ] Set bell frequency in Hz. + ESC [ 11 ; n ] Set bell duration in msec. + ESC [ 12 ; n ] Bring specified console to the front. + ESC [ 13 ] Unblank the screen. + ESC [ 14 ; n ] Set the VESA powerdown interval in minutes. + + */ + } + return n, nil +} + +// WriteChars writes the bytes to given writer. +func (term *WindowsTerminal) WriteChars(fd uintptr, w io.Writer, p []byte) (n int, err error) { + if len(p) == 0 { + return 0, nil + } + return w.Write(p) +} + +const ( + CAPSLOCK_ON = 0x0080 //The CAPS LOCK light is on. + ENHANCED_KEY = 0x0100 //The key is enhanced. + LEFT_ALT_PRESSED = 0x0002 //The left ALT key is pressed. + LEFT_CTRL_PRESSED = 0x0008 //The left CTRL key is pressed. + NUMLOCK_ON = 0x0020 //The NUM LOCK light is on. + RIGHT_ALT_PRESSED = 0x0001 //The right ALT key is pressed. + RIGHT_CTRL_PRESSED = 0x0004 //The right CTRL key is pressed. + SCROLLLOCK_ON = 0x0040 //The SCROLL LOCK light is on. 
+ SHIFT_PRESSED = 0x0010 // The SHIFT key is pressed. +) + +const ( + KEY_CONTROL_PARAM_2 = ";2" + KEY_CONTROL_PARAM_3 = ";3" + KEY_CONTROL_PARAM_4 = ";4" + KEY_CONTROL_PARAM_5 = ";5" + KEY_CONTROL_PARAM_6 = ";6" + KEY_CONTROL_PARAM_7 = ";7" + KEY_CONTROL_PARAM_8 = ";8" + KEY_ESC_CSI = "\x1B[" + KEY_ESC_N = "\x1BN" + KEY_ESC_O = "\x1BO" +) + +var keyMapPrefix = map[WORD]string{ + VK_UP: "\x1B[%sA", + VK_DOWN: "\x1B[%sB", + VK_RIGHT: "\x1B[%sC", + VK_LEFT: "\x1B[%sD", + VK_HOME: "\x1B[1%s~", // showkey shows ^[[1 + VK_END: "\x1B[4%s~", // showkey shows ^[[4 + VK_INSERT: "\x1B[2%s~", + VK_DELETE: "\x1B[3%s~", + VK_PRIOR: "\x1B[5%s~", + VK_NEXT: "\x1B[6%s~", + VK_F1: "", + VK_F2: "", + VK_F3: "\x1B[13%s~", + VK_F4: "\x1B[14%s~", + VK_F5: "\x1B[15%s~", + VK_F6: "\x1B[17%s~", + VK_F7: "\x1B[18%s~", + VK_F8: "\x1B[19%s~", + VK_F9: "\x1B[20%s~", + VK_F10: "\x1B[21%s~", + VK_F11: "\x1B[23%s~", + VK_F12: "\x1B[24%s~", +} + +var arrowKeyMapPrefix = map[WORD]string{ + VK_UP: "%s%sA", + VK_DOWN: "%s%sB", + VK_RIGHT: "%s%sC", + VK_LEFT: "%s%sD", +} + +func getControlStateParameter(shift, alt, control, meta bool) string { + if shift && alt && control { + return KEY_CONTROL_PARAM_8 + } + if alt && control { + return KEY_CONTROL_PARAM_7 + } + if shift && control { + return KEY_CONTROL_PARAM_6 + } + if control { + return KEY_CONTROL_PARAM_5 + } + if shift && alt { + return KEY_CONTROL_PARAM_4 + } + if alt { + return KEY_CONTROL_PARAM_3 + } + if shift { + return KEY_CONTROL_PARAM_2 + } + return "" +} + +func getControlKeys(controlState DWORD) (shift, alt, control bool) { + shift = 0 != (controlState & SHIFT_PRESSED) + alt = 0 != (controlState & (LEFT_ALT_PRESSED | RIGHT_ALT_PRESSED)) + control = 0 != (controlState & (LEFT_CTRL_PRESSED | RIGHT_CTRL_PRESSED)) + return shift, alt, control +} + +func charSequenceForKeys(key WORD, controlState DWORD, escapeSequence []byte) string { + i, ok := arrowKeyMapPrefix[key] + if ok { + shift, alt, control := getControlKeys(controlState) + 
modifier := getControlStateParameter(shift, alt, control, false)
+		return fmt.Sprintf(i, escapeSequence, modifier)
+	}
+
+	i, ok = keyMapPrefix[key]
+	if ok {
+		shift, alt, control := getControlKeys(controlState)
+		modifier := getControlStateParameter(shift, alt, control, false)
+		return fmt.Sprintf(i, modifier)
+	}
+
+	return ""
+}
+
+// mapKeystokeToTerminalString maps the given input event record to string
+func mapKeystokeToTerminalString(keyEvent *KEY_EVENT_RECORD, escapeSequence []byte) string {
+	_, alt, control := getControlKeys(keyEvent.ControlKeyState)
+	if keyEvent.UnicodeChar == 0 {
+		return charSequenceForKeys(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence)
+	}
+	if control {
+		// TODO(azlinux): Implement following control sequences
+		// -D  Signals the end of input from the keyboard; also exits current shell.
+		// -H  Deletes the first character to the left of the cursor. Also called the ERASE key.
+		// -Q  Restarts printing after it has been stopped with -s.
+		// -S  Suspends printing on the screen (does not stop the program).
+		// -U  Deletes all characters on the current line. Also called the KILL key.
+		// -E  Quits current command and creates a core
+
+	}
+	// +Key generates ESC N Key
+	if !control && alt {
+		return KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar))
+	}
+	return string(keyEvent.UnicodeChar)
+}
+
+// getAvailableInputEvents polls the console for available events
+// The function does not return until at least one input record has been read.
+func getAvailableInputEvents(handle uintptr, inputEvents []INPUT_RECORD) (n int, err error) {
+	// TODO(azlinux): Why is there a for loop? Seems to me, that `n` cannot be negative. 
- tibor + for { + // Read number of console events available + n, err = readConsoleInputKey(handle, inputEvents) + if err != nil || n >= 0 { + return n, err + } + } +} + +// getTranslatedKeyCodes converts the input events into the string of characters +// The ansi escape sequence are used to map key strokes to the strings +func getTranslatedKeyCodes(inputEvents []INPUT_RECORD, escapeSequence []byte) string { + var buf bytes.Buffer + for i := 0; i < len(inputEvents); i++ { + input := inputEvents[i] + if input.EventType == KEY_EVENT && input.KeyEvent.KeyDown != 0 { + keyString := mapKeystokeToTerminalString(&input.KeyEvent, escapeSequence) + buf.WriteString(keyString) + } + } + return buf.String() +} + +// ReadChars reads the characters from the given reader +func (term *WindowsTerminal) ReadChars(fd uintptr, r io.Reader, p []byte) (n int, err error) { + for term.inputSize == 0 { + nr, err := getAvailableInputEvents(fd, term.inputEvents) + if nr == 0 && nil != err { + return n, err + } + if nr > 0 { + keyCodes := getTranslatedKeyCodes(term.inputEvents[:nr], term.inputEscapeSequence) + term.inputSize = copy(term.inputBuffer, keyCodes) + } + } + n = copy(p, term.inputBuffer[:term.inputSize]) + term.inputSize -= n + return n, nil +} + +// HandleInputSequence interprets the input sequence command +func (term *WindowsTerminal) HandleInputSequence(fd uintptr, command []byte) (n int, err error) { + return 0, nil +} + +func marshal(c COORD) uintptr { + // works only on intel-endian machines + return uintptr(uint32(uint32(uint16(c.Y))<<16 | uint32(uint16(c.X)))) +} + +// IsTerminal returns true if the given file descriptor is a terminal. 
+func IsTerminal(fd uintptr) bool { + _, e := GetConsoleMode(fd) + return e == nil +} diff --git a/pkg/term/winconsole/console_windows_test.go b/pkg/term/winconsole/console_windows_test.go new file mode 100644 index 0000000000..ee9d96834b --- /dev/null +++ b/pkg/term/winconsole/console_windows_test.go @@ -0,0 +1,232 @@ +// +build windows + +package winconsole + +import ( + "fmt" + "testing" +) + +func helpsTestParseInt16OrDefault(t *testing.T, expectedValue int16, shouldFail bool, input string, defaultValue int16, format string, args ...string) { + value, err := parseInt16OrDefault(input, defaultValue) + if nil != err && !shouldFail { + t.Errorf("Unexpected error returned %v", err) + t.Errorf(format, args) + } + if nil == err && shouldFail { + t.Errorf("Should have failed as expected\n\tReturned value = %d", value) + t.Errorf(format, args) + } + if expectedValue != value { + t.Errorf("The value returned does not macth expected\n\tExpected:%v\n\t:Actual%v", expectedValue, value) + t.Errorf(format, args) + } +} + +func TestParseInt16OrDefault(t *testing.T) { + // empty string + helpsTestParseInt16OrDefault(t, 0, false, "", 0, "Empty string returns default") + helpsTestParseInt16OrDefault(t, 2, false, "", 2, "Empty string returns default") + + // normal case + helpsTestParseInt16OrDefault(t, 0, false, "0", 0, "0 handled correctly") + helpsTestParseInt16OrDefault(t, 111, false, "111", 2, "Normal") + helpsTestParseInt16OrDefault(t, 111, false, "+111", 2, "+N") + helpsTestParseInt16OrDefault(t, -111, false, "-111", 2, "-N") + helpsTestParseInt16OrDefault(t, 0, false, "+0", 11, "+0") + helpsTestParseInt16OrDefault(t, 0, false, "-0", 12, "-0") + + // ill formed strings + helpsTestParseInt16OrDefault(t, 0, true, "abc", 0, "Invalid string") + helpsTestParseInt16OrDefault(t, 42, true, "+= 23", 42, "Invalid string") + helpsTestParseInt16OrDefault(t, 42, true, "123.45", 42, "float like") + +} + +func helpsTestGetNumberOfChars(t *testing.T, expected uint32, fromCoord COORD, 
toCoord COORD, screenSize COORD, format string, args ...interface{}) { + actual := getNumberOfChars(fromCoord, toCoord, screenSize) + mesg := fmt.Sprintf(format, args) + assertTrue(t, expected == actual, fmt.Sprintf("%s Expected=%d, Actual=%d, Parameters = { fromCoord=%+v, toCoord=%+v, screenSize=%+v", mesg, expected, actual, fromCoord, toCoord, screenSize)) +} + +func TestGetNumberOfChars(t *testing.T) { + // Note: The columns and lines are 0 based + // Also that interval is "inclusive" means will have both start and end chars + // This test only tests the number opf characters being written + + // all four corners + maxWindow := COORD{X: 80, Y: 50} + leftTop := COORD{X: 0, Y: 0} + rightTop := COORD{X: 79, Y: 0} + leftBottom := COORD{X: 0, Y: 49} + rightBottom := COORD{X: 79, Y: 49} + + // same position + helpsTestGetNumberOfChars(t, 1, COORD{X: 1, Y: 14}, COORD{X: 1, Y: 14}, COORD{X: 80, Y: 50}, "Same position random line") + + // four corners + helpsTestGetNumberOfChars(t, 1, leftTop, leftTop, maxWindow, "Same position- leftTop") + helpsTestGetNumberOfChars(t, 1, rightTop, rightTop, maxWindow, "Same position- rightTop") + helpsTestGetNumberOfChars(t, 1, leftBottom, leftBottom, maxWindow, "Same position- leftBottom") + helpsTestGetNumberOfChars(t, 1, rightBottom, rightBottom, maxWindow, "Same position- rightBottom") + + // from this char to next char on same line + helpsTestGetNumberOfChars(t, 2, COORD{X: 0, Y: 0}, COORD{X: 1, Y: 0}, maxWindow, "Next position on same line") + helpsTestGetNumberOfChars(t, 2, COORD{X: 1, Y: 14}, COORD{X: 2, Y: 14}, maxWindow, "Next position on same line") + + // from this char to next 10 chars on same line + helpsTestGetNumberOfChars(t, 11, COORD{X: 0, Y: 0}, COORD{X: 10, Y: 0}, maxWindow, "Next position on same line") + helpsTestGetNumberOfChars(t, 11, COORD{X: 1, Y: 14}, COORD{X: 11, Y: 14}, maxWindow, "Next position on same line") + + helpsTestGetNumberOfChars(t, 5, COORD{X: 3, Y: 11}, COORD{X: 7, Y: 11}, maxWindow, "To and from 
on same line") + + helpsTestGetNumberOfChars(t, 8, COORD{X: 0, Y: 34}, COORD{X: 7, Y: 34}, maxWindow, "Start of line to middle") + helpsTestGetNumberOfChars(t, 4, COORD{X: 76, Y: 34}, COORD{X: 79, Y: 34}, maxWindow, "Middle to end of line") + + // multiple lines - 1 + helpsTestGetNumberOfChars(t, 81, COORD{X: 0, Y: 0}, COORD{X: 0, Y: 1}, maxWindow, "one line below same X") + helpsTestGetNumberOfChars(t, 81, COORD{X: 10, Y: 10}, COORD{X: 10, Y: 11}, maxWindow, "one line below same X") + + // multiple lines - 2 + helpsTestGetNumberOfChars(t, 161, COORD{X: 0, Y: 0}, COORD{X: 0, Y: 2}, maxWindow, "one line below same X") + helpsTestGetNumberOfChars(t, 161, COORD{X: 10, Y: 10}, COORD{X: 10, Y: 12}, maxWindow, "one line below same X") + + // multiple lines - 3 + helpsTestGetNumberOfChars(t, 241, COORD{X: 0, Y: 0}, COORD{X: 0, Y: 3}, maxWindow, "one line below same X") + helpsTestGetNumberOfChars(t, 241, COORD{X: 10, Y: 10}, COORD{X: 10, Y: 13}, maxWindow, "one line below same X") + + // full line + helpsTestGetNumberOfChars(t, 80, COORD{X: 0, Y: 0}, COORD{X: 79, Y: 0}, maxWindow, "Full line - first") + helpsTestGetNumberOfChars(t, 80, COORD{X: 0, Y: 23}, COORD{X: 79, Y: 23}, maxWindow, "Full line - random") + helpsTestGetNumberOfChars(t, 80, COORD{X: 0, Y: 49}, COORD{X: 79, Y: 49}, maxWindow, "Full line - last") + + // full screen + helpsTestGetNumberOfChars(t, 80*50, leftTop, rightBottom, maxWindow, "full screen") + + helpsTestGetNumberOfChars(t, 80*50-1, COORD{X: 1, Y: 0}, rightBottom, maxWindow, "dropping first char to, end of screen") + helpsTestGetNumberOfChars(t, 80*50-2, COORD{X: 2, Y: 0}, rightBottom, maxWindow, "dropping first two char to, end of screen") + + helpsTestGetNumberOfChars(t, 80*50-1, leftTop, COORD{X: 78, Y: 49}, maxWindow, "from start of screen, till last char-1") + helpsTestGetNumberOfChars(t, 80*50-2, leftTop, COORD{X: 77, Y: 49}, maxWindow, "from start of screen, till last char-2") + + helpsTestGetNumberOfChars(t, 80*50-5, COORD{X: 4, Y: 0}, 
COORD{X: 78, Y: 49}, COORD{X: 80, Y: 50}, "from start of screen+4, till last char-1") + helpsTestGetNumberOfChars(t, 80*50-6, COORD{X: 4, Y: 0}, COORD{X: 77, Y: 49}, COORD{X: 80, Y: 50}, "from start of screen+4, till last char-2") +} + +var allForeground = []int16{ + ANSI_FOREGROUND_BLACK, + ANSI_FOREGROUND_RED, + ANSI_FOREGROUND_GREEN, + ANSI_FOREGROUND_YELLOW, + ANSI_FOREGROUND_BLUE, + ANSI_FOREGROUND_MAGENTA, + ANSI_FOREGROUND_CYAN, + ANSI_FOREGROUND_WHITE, + ANSI_FOREGROUND_DEFAULT, +} +var allBackground = []int16{ + ANSI_BACKGROUND_BLACK, + ANSI_BACKGROUND_RED, + ANSI_BACKGROUND_GREEN, + ANSI_BACKGROUND_YELLOW, + ANSI_BACKGROUND_BLUE, + ANSI_BACKGROUND_MAGENTA, + ANSI_BACKGROUND_CYAN, + ANSI_BACKGROUND_WHITE, + ANSI_BACKGROUND_DEFAULT, +} + +func maskForeground(flag WORD) WORD { + return flag & FOREGROUND_MASK_UNSET +} + +func onlyForeground(flag WORD) WORD { + return flag & FOREGROUND_MASK_SET +} + +func maskBackground(flag WORD) WORD { + return flag & BACKGROUND_MASK_UNSET +} + +func onlyBackground(flag WORD) WORD { + return flag & BACKGROUND_MASK_SET +} + +func helpsTestGetWindowsTextAttributeForAnsiValue(t *testing.T, oldValue WORD /*, expected WORD*/, ansi int16, onlyMask WORD, restMask WORD) WORD { + actual, err := getWindowsTextAttributeForAnsiValue(oldValue, FOREGROUND_MASK_SET, ansi) + assertTrue(t, nil == err, "Should be no error") + // assert that other bits are not affected + if 0 != oldValue { + assertTrue(t, (actual&restMask) == (oldValue&restMask), "The operation should not have affected other bits actual=%X oldValue=%X ansi=%d", actual, oldValue, ansi) + } + return actual +} + +func TestBackgroundForAnsiValue(t *testing.T) { + // Check that nothing else changes + // background changes + for _, state1 := range allBackground { + for _, state2 := range allBackground { + flag := WORD(0) + flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state1, BACKGROUND_MASK_SET, BACKGROUND_MASK_UNSET) + flag = 
helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state2, BACKGROUND_MASK_SET, BACKGROUND_MASK_UNSET)
+		}
+	}
+	// cumulative background changes
+	for _, state1 := range allBackground {
+		flag := WORD(0)
+		for _, state2 := range allBackground {
+			flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state1, BACKGROUND_MASK_SET, BACKGROUND_MASK_UNSET)
+			flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state2, BACKGROUND_MASK_SET, BACKGROUND_MASK_UNSET)
+		}
+	}
+	// change background after foreground
+	for _, state1 := range allForeground {
+		for _, state2 := range allBackground {
+			flag := WORD(0)
+			flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state1, FOREGROUND_MASK_SET, FOREGROUND_MASK_UNSET)
+			flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state2, BACKGROUND_MASK_SET, BACKGROUND_MASK_UNSET)
+		}
+	}
+	// change background after change cumulative
+	for _, state1 := range allForeground {
+		flag := WORD(0)
+		for _, state2 := range allBackground {
+			flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state1, FOREGROUND_MASK_SET, FOREGROUND_MASK_UNSET)
+			flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state2, BACKGROUND_MASK_SET, BACKGROUND_MASK_UNSET)
+		}
+	}
+}
+
+func TestForegroundForAnsiValue(t *testing.T) {
+	// Check that nothing else changes
+	for _, state1 := range allForeground {
+		for _, state2 := range allForeground {
+			flag := WORD(0)
+			flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state1, FOREGROUND_MASK_SET, FOREGROUND_MASK_UNSET)
+			flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state2, FOREGROUND_MASK_SET, FOREGROUND_MASK_UNSET)
+		}
+	}
+
+	for _, state1 := range allForeground {
+		flag := WORD(0)
+		for _, state2 := range allForeground {
+			flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state1, FOREGROUND_MASK_SET, FOREGROUND_MASK_UNSET)
+			flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state2, FOREGROUND_MASK_SET, 
FOREGROUND_MASK_UNSET) + } + } + for _, state1 := range allBackground { + for _, state2 := range allForeground { + flag := WORD(0) + flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state1, BACKGROUND_MASK_SET, BACKGROUND_MASK_UNSET) + flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state2, FOREGROUND_MASK_SET, FOREGROUND_MASK_UNSET) + } + } + for _, state1 := range allBackground { + flag := WORD(0) + for _, state2 := range allForeground { + flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state1, BACKGROUND_MASK_SET, BACKGROUND_MASK_UNSET) + flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state2, FOREGROUND_MASK_SET, FOREGROUND_MASK_UNSET) + } + } +} diff --git a/pkg/term/winconsole/term_emulator.go b/pkg/term/winconsole/term_emulator.go new file mode 100644 index 0000000000..8c9f34284d --- /dev/null +++ b/pkg/term/winconsole/term_emulator.go @@ -0,0 +1,218 @@ +package winconsole + +import ( + "io" + "strconv" + "strings" +) + +// http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html +const ( + ANSI_ESCAPE_PRIMARY = 0x1B + ANSI_ESCAPE_SECONDARY = 0x5B + ANSI_COMMAND_FIRST = 0x40 + ANSI_COMMAND_LAST = 0x7E + ANSI_PARAMETER_SEP = ";" + ANSI_CMD_G0 = '(' + ANSI_CMD_G1 = ')' + ANSI_CMD_G2 = '*' + ANSI_CMD_G3 = '+' + ANSI_CMD_DECPNM = '>' + ANSI_CMD_DECPAM = '=' + ANSI_CMD_OSC = ']' + ANSI_CMD_STR_TERM = '\\' + ANSI_BEL = 0x07 + KEY_EVENT = 1 +) + +// Interface that implements terminal handling +type terminalEmulator interface { + HandleOutputCommand(fd uintptr, command []byte) (n int, err error) + HandleInputSequence(fd uintptr, command []byte) (n int, err error) + WriteChars(fd uintptr, w io.Writer, p []byte) (n int, err error) + ReadChars(fd uintptr, w io.Reader, p []byte) (n int, err error) +} + +type terminalWriter struct { + wrappedWriter io.Writer + emulator terminalEmulator + command []byte + inSequence bool + fd uintptr +} + +type terminalReader struct { + wrappedReader io.ReadCloser + emulator 
// Write writes len(p) bytes from p to the underlying data stream,
// scanning for ANSI escape sequences along the way.
//
// Plain-text runs are forwarded to the emulator via WriteChars; each complete
// escape sequence (ESC ... <final command byte>) is dispatched via
// HandleOutputCommand instead of being written through. A sequence may be
// split across multiple Write calls: the partial command is buffered in
// tw.command and tw.inSequence stays true until the final command character
// arrives in a later call, at which point the buffered command is executed.
// http://golang.org/pkg/io/#Writer
func (tw *terminalWriter) Write(p []byte) (n int, err error) {
	if len(p) == 0 {
		return 0, nil
	}
	// No emulator installed: pass the bytes straight through.
	if tw.emulator == nil {
		return tw.wrappedWriter.Write(p)
	}
	// Emulate terminal by extracting commands and executing them
	totalWritten := 0
	start := 0 // indicates start of the next chunk
	end := len(p)
	for current := 0; current < end; current++ {
		if tw.inSequence {
			// inside escape sequence
			tw.command = append(tw.command, p[current])
			if isAnsiCommandChar(p[current]) {
				// An xterm OSC sequence is only terminated by BEL, so a
				// command char inside it does not end the sequence.
				if !isXtermOscSequence(tw.command, p[current]) {
					// found the last command character.
					// Now we have a complete command.
					nchar, err := tw.emulator.HandleOutputCommand(tw.fd, tw.command)
					totalWritten += nchar
					if err != nil {
						return totalWritten, err
					}

					// clear the command
					// don't include current character again
					tw.command = tw.command[:0]
					start = current + 1
					tw.inSequence = false
				}
			}
		} else {
			if p[current] == ANSI_ESCAPE_PRIMARY {
				// entering escape sequence
				tw.inSequence = true
				// indicates end of "normal sequence", write whatever you have so far
				if len(p[start:current]) > 0 {
					nw, err := tw.emulator.WriteChars(tw.fd, tw.wrappedWriter, p[start:current])
					totalWritten += nw
					if err != nil {
						return totalWritten, err
					}
				}
				// include the current character as part of the next sequence
				tw.command = append(tw.command, p[current])
			}
		}
	}
	// note that so far, start of the escape sequence triggers writing out of bytes to console.
	// For the part _after_ the end of last escape sequence, it is not written out yet. So write it out
	if !tw.inSequence {
		// assumption is that we can't be inside sequence and therefore command should be empty
		if len(p[start:]) > 0 {
			nw, err := tw.emulator.WriteChars(tw.fd, tw.wrappedWriter, p[start:])
			totalWritten += nw
			if err != nil {
				return totalWritten, err
			}
		}
	}
	// NOTE(review): totalWritten counts the values returned by the emulator,
	// which may differ from len(p) when commands are consumed — callers that
	// rely on the io.Writer contract (n == len(p) iff err == nil) should
	// confirm this is acceptable.
	return totalWritten, nil

}
+ if len(p) == 0 { + return 0, nil + } + if nil == tr.emulator { + return tr.readFromWrappedReader(p) + } + return tr.emulator.ReadChars(tr.fd, tr.wrappedReader, p) +} + +// Close the underlying stream +func (tr *terminalReader) Close() (err error) { + return tr.wrappedReader.Close() +} + +func (tr *terminalReader) readFromWrappedReader(p []byte) (n int, err error) { + return tr.wrappedReader.Read(p) +} + +type ansiCommand struct { + CommandBytes []byte + Command string + Parameters []string + IsSpecial bool +} + +func parseAnsiCommand(command []byte) *ansiCommand { + if isCharacterSelectionCmdChar(command[1]) { + // Is Character Set Selection commands + return &ansiCommand{ + CommandBytes: command, + Command: string(command), + IsSpecial: true, + } + } + // last char is command character + lastCharIndex := len(command) - 1 + + retValue := &ansiCommand{ + CommandBytes: command, + Command: string(command[lastCharIndex]), + IsSpecial: false, + } + // more than a single escape + if lastCharIndex != 0 { + start := 1 + // skip if double char escape sequence + if command[0] == ANSI_ESCAPE_PRIMARY && command[1] == ANSI_ESCAPE_SECONDARY { + start++ + } + // convert this to GetNextParam method + retValue.Parameters = strings.Split(string(command[start:lastCharIndex]), ANSI_PARAMETER_SEP) + } + return retValue +} + +func (c *ansiCommand) getParam(index int) string { + if len(c.Parameters) > index { + return c.Parameters[index] + } + return "" +} + +func parseInt16OrDefault(s string, defaultValue int16) (n int16, err error) { + if s == "" { + return defaultValue, nil + } + parsedValue, err := strconv.ParseInt(s, 10, 16) + if err != nil { + return defaultValue, err + } + return int16(parsedValue), nil +} diff --git a/pkg/term/winconsole/term_emulator_test.go b/pkg/term/winconsole/term_emulator_test.go new file mode 100644 index 0000000000..65de5a7933 --- /dev/null +++ b/pkg/term/winconsole/term_emulator_test.go @@ -0,0 +1,388 @@ +package winconsole + +import ( + "bytes" + 
"fmt" + "io" + "io/ioutil" + "testing" +) + +const ( + WRITE_OPERATION = iota + COMMAND_OPERATION = iota +) + +var languages = []string{ + "Български", + "Català", + "Čeština", + "Ελληνικά", + "Español", + "Esperanto", + "Euskara", + "Français", + "Galego", + "한국어", + "ქართული", + "Latviešu", + "Lietuvių", + "Magyar", + "Nederlands", + "日本語", + "Norsk bokmål", + "Norsk nynorsk", + "Polski", + "Português", + "Română", + "Русский", + "Slovenčina", + "Slovenščina", + "Српски", + "српскохрватски", + "Suomi", + "Svenska", + "ไทย", + "Tiếng Việt", + "Türkçe", + "Українська", + "中文", +} + +// Mock terminal handler object +type mockTerminal struct { + OutputCommandSequence []terminalOperation +} + +// Used for recording the callback data +type terminalOperation struct { + Operation int + Data []byte + Str string +} + +func (mt *mockTerminal) record(operation int, data []byte) { + op := terminalOperation{ + Operation: operation, + Data: make([]byte, len(data)), + } + copy(op.Data, data) + op.Str = string(op.Data) + mt.OutputCommandSequence = append(mt.OutputCommandSequence, op) +} + +func (mt *mockTerminal) HandleOutputCommand(fd uintptr, command []byte) (n int, err error) { + mt.record(COMMAND_OPERATION, command) + return len(command), nil +} + +func (mt *mockTerminal) HandleInputSequence(fd uintptr, command []byte) (n int, err error) { + return 0, nil +} + +func (mt *mockTerminal) WriteChars(fd uintptr, w io.Writer, p []byte) (n int, err error) { + mt.record(WRITE_OPERATION, p) + return len(p), nil +} + +func (mt *mockTerminal) ReadChars(fd uintptr, w io.Reader, p []byte) (n int, err error) { + return len(p), nil +} + +func assertTrue(t *testing.T, cond bool, format string, args ...interface{}) { + if !cond { + t.Errorf(format, args...) + } +} + +// reflect.DeepEqual does not provide detailed information as to what excatly failed. 
// assertBytesEqual fails the test when expected and actual differ, reporting
// the first mismatching index plus both byte and string forms of the inputs.
// reflect.DeepEqual does not provide this level of detail, hence the helper.
func assertBytesEqual(t *testing.T, expected, actual []byte, format string, args ...interface{}) {
	match := true
	mismatchIndex := 0
	if len(expected) == len(actual) {
		for i := 0; i < len(expected); i++ {
			if expected[i] != actual[i] {
				match = false
				mismatchIndex = i
				break
			}
		}
	} else {
		match = false
		t.Errorf("Lengths don't match Expected=%d Actual=%d", len(expected), len(actual))
	}
	if !match {
		t.Errorf("Mismatch at index %d ", mismatchIndex)
		t.Errorf("\tActual String = %s", string(actual))
		t.Errorf("\tExpected String = %s", string(expected))
		t.Errorf("\tActual = %v", actual)
		t.Errorf("\tExpected = %v", expected)
		// Fix: expand args as variadic arguments. The original passed the
		// whole []interface{} slice as a single value, so the caller's
		// format verbs never lined up (flagged by `go vet`).
		t.Errorf(format, args...)
	}
}
// StringToBytes returns a fresh copy of the UTF-8 bytes of str.
//
// Fix (idiom): a plain []byte(str) conversion performs exactly the
// allocate-and-copy that the original hand-rolled make+copy did, and drops
// the local variable named `bytes`, which shadowed the imported bytes
// package within this file.
func StringToBytes(str string) []byte {
	return []byte(str)
}
// newBufferedMockTerm wires a single mockTerminal emulator into fresh
// terminalWriter instances for stdout/stderr and a terminalReader for stdin,
// each backed by an in-memory bytes.Buffer. All three streams record their
// write/command operations into the one shared mock, so tests can assert on
// the exact sequence of operations.
func newBufferedMockTerm() (stdOut io.Writer, stdErr io.Writer, stdIn io.ReadCloser, mock *mockTerminal) {
	var input bytes.Buffer
	var output bytes.Buffer
	var err bytes.Buffer

	mock = &mockTerminal{
		OutputCommandSequence: make([]terminalOperation, 0, 256),
	}

	stdOut = &terminalWriter{
		wrappedWriter: &output,
		emulator:      mock,
		command:       make([]byte, 0, 256),
	}
	stdErr = &terminalWriter{
		wrappedWriter: &err,
		emulator:      mock,
		command:       make([]byte, 0, 256),
	}
	// NopCloser gives the reader the io.ReadCloser shape terminalReader wraps.
	stdIn = &terminalReader{
		wrappedReader: ioutil.NopCloser(&input),
		emulator:      mock,
		command:       make([]byte, 0, 256),
	}

	// Named results are returned as assigned above.
	return
}
mock.OutputCommandSequence[2].Data, "Write data should match") +} + +func TestOutputSplitCommand(t *testing.T) { + stdOut, _, _, mock := newBufferedMockTerm() + + stdOut.Write(StringToBytes("Hello world\x1B[1;2;3")) + stdOut.Write(StringToBytes("mHello again")) + + assertTrue(t, mock.OutputCommandSequence[0].Operation == WRITE_OPERATION, "Operation should be Write : %#v", mock) + assertBytesEqual(t, StringToBytes("Hello world"), mock.OutputCommandSequence[0].Data, "Write data should match") + + assertTrue(t, mock.OutputCommandSequence[1].Operation == COMMAND_OPERATION, "Operation should be command : %+v", mock) + assertBytesEqual(t, StringToBytes("\x1B[1;2;3m"), mock.OutputCommandSequence[1].Data, "Command data should match") + + assertTrue(t, mock.OutputCommandSequence[2].Operation == WRITE_OPERATION, "Operation should be Write : %#v", mock) + assertBytesEqual(t, StringToBytes("Hello again"), mock.OutputCommandSequence[2].Data, "Write data should match") +} + +func TestOutputMultipleCommands(t *testing.T) { + stdOut, _, _, mock := newBufferedMockTerm() + + stdOut.Write(StringToBytes("Hello world")) + stdOut.Write(StringToBytes("\x1B[1;2;3m")) + stdOut.Write(StringToBytes("\x1B[J")) + stdOut.Write(StringToBytes("Hello again")) + + assertTrue(t, mock.OutputCommandSequence[0].Operation == WRITE_OPERATION, "Operation should be Write : %#v", mock) + assertBytesEqual(t, StringToBytes("Hello world"), mock.OutputCommandSequence[0].Data, "Write data should match") + + assertTrue(t, mock.OutputCommandSequence[1].Operation == COMMAND_OPERATION, "Operation should be command : %+v", mock) + assertBytesEqual(t, StringToBytes("\x1B[1;2;3m"), mock.OutputCommandSequence[1].Data, "Command data should match") + + assertTrue(t, mock.OutputCommandSequence[2].Operation == COMMAND_OPERATION, "Operation should be command : %+v", mock) + assertBytesEqual(t, StringToBytes("\x1B[J"), mock.OutputCommandSequence[2].Data, "Command data should match") + + assertTrue(t, 
mock.OutputCommandSequence[3].Operation == WRITE_OPERATION, "Operation should be Write : %#v", mock) + assertBytesEqual(t, StringToBytes("Hello again"), mock.OutputCommandSequence[3].Data, "Write data should match") +} + +// Splits the given data in two chunks , makes two writes and checks the split data is parsed correctly +// checks output write/command is passed to handler correctly +func helpsTestOutputSplitChunksAtIndex(t *testing.T, i int, data []byte) { + t.Logf("\ni=%d", i) + stdOut, _, _, mock := newBufferedMockTerm() + + t.Logf("\nWriting chunk[0] == %s", string(data[:i])) + t.Logf("\nWriting chunk[1] == %s", string(data[i:])) + stdOut.Write(data[:i]) + stdOut.Write(data[i:]) + + assertTrue(t, mock.OutputCommandSequence[0].Operation == WRITE_OPERATION, "Operation should be Write : %#v", mock) + assertBytesEqual(t, data[:i], mock.OutputCommandSequence[0].Data, "Write data should match") + + assertTrue(t, mock.OutputCommandSequence[1].Operation == WRITE_OPERATION, "Operation should be Write : %#v", mock) + assertBytesEqual(t, data[i:], mock.OutputCommandSequence[1].Data, "Write data should match") +} + +// Splits the given data in three chunks , makes three writes and checks the split data is parsed correctly +// checks output write/command is passed to handler correctly +func helpsTestOutputSplitThreeChunksAtIndex(t *testing.T, data []byte, i int, j int) { + stdOut, _, _, mock := newBufferedMockTerm() + + t.Logf("\nWriting chunk[0] == %s", string(data[:i])) + t.Logf("\nWriting chunk[1] == %s", string(data[i:j])) + t.Logf("\nWriting chunk[2] == %s", string(data[j:])) + stdOut.Write(data[:i]) + stdOut.Write(data[i:j]) + stdOut.Write(data[j:]) + + assertTrue(t, mock.OutputCommandSequence[0].Operation == WRITE_OPERATION, "Operation should be Write : %#v", mock) + assertBytesEqual(t, data[:i], mock.OutputCommandSequence[0].Data, "Write data should match") + + assertTrue(t, mock.OutputCommandSequence[1].Operation == WRITE_OPERATION, "Operation should be Write : 
// injectCommandAt returns data with command inserted at byte offset i.
//
// Fix: the original allocated len(data)+len(command)+4 zero bytes as the
// slice *length* (not capacity), then appended data[:i] and data[i:] after
// that NUL prefix — and never appended command at all. The result was a
// NUL-padded copy of data with no command injected.
func injectCommandAt(data string, i int, command string) string {
	retValue := make([]byte, 0, len(data)+len(command))
	retValue = append(retValue, data[:i]...)
	retValue = append(retValue, command...)
	retValue = append(retValue, data[i:]...)
	return string(retValue)
}
-func Timeout(t *testing.T, f func()) { - onTimeout := time.After(100 * time.Millisecond) - onDone := make(chan bool) - go func() { - f() - close(onDone) - }() - select { - case <-onTimeout: - t.Fatalf("timeout") - case <-onDone: - } -} - -// RandomString returns random string of specified length -func RandomString(length int) string { - res := make([]byte, length) - for i := 0; i < length; i++ { - res[i] = chars[rand.Intn(len(chars))] - } - return string(res) -} diff --git a/pkg/version/version.go b/pkg/version/version.go index cc802a654c..bd5ec7a835 100644 --- a/pkg/version/version.go +++ b/pkg/version/version.go @@ -37,7 +37,7 @@ func (v Version) compareTo(other Version) int { return 0 } -// LessThan checks if a version is less than another version +// LessThan checks if a version is less than another func (v Version) LessThan(other Version) bool { return v.compareTo(other) == -1 } @@ -47,12 +47,12 @@ func (v Version) LessThanOrEqualTo(other Version) bool { return v.compareTo(other) <= 0 } -// GreaterThan checks if a version is greater than another one +// GreaterThan checks if a version is greater than another func (v Version) GreaterThan(other Version) bool { return v.compareTo(other) == 1 } -// GreaterThanOrEqualTo checks ia version is greater than or equal to another +// GreaterThanOrEqualTo checks if a version is greater than or equal to another func (v Version) GreaterThanOrEqualTo(other Version) bool { return v.compareTo(other) >= 0 } diff --git a/project/ISSUE-TRIAGE.md b/project/ISSUE-TRIAGE.md index bee7a89827..528dea9c60 100644 --- a/project/ISSUE-TRIAGE.md +++ b/project/ISSUE-TRIAGE.md @@ -54,30 +54,51 @@ that the user can easily script and know the reason why the command failed. ### Step 3: Classify the Issue Classifications help both to inform readers about an issue's priority and how to resolve it. -This is also helpful for identifying new, critical issues. Classifications types are -applied to the issue or pull request using labels. 
+This is also helpful for identifying new, critical issues. "Kinds of" are +applied to the issue or pull request using labels. You can apply one or more labels. -Types of classification: +Kinds of classifications: -| Type | Description | -|-------------|---------------------------------------------------------------------------------------------------------------------------------| -| improvement | improvements are not bugs or new features but can drastically improve usability. | -| regression | regressions are usually easy fixes as hopefully the action worked previously and git history can be used to propose a solution. | -| bug | bugs are bugs. The cause may or may not be known at triage time so debugging should be taken account into the time estimate. | -| feature | features are new and shinny. They are things that the project does not currently support. | +| Kind | Description | +|------------------|---------------------------------------------------------------------------------------------------------------------------------| +| kind/enhancement | Enhancement are not bugs or new features but can drastically improve usability or performance of a project component. | +| kind/cleanup | Refactoring code or otherwise clarifying documentation. | +| kind/content | Content that is not documentation such as help or error messages. | +| kind/graphics | Work involving graphics skill | +| kind/regression | Regressions are usually easy fixes as hopefully the action worked previously and git history can be used to propose a solution. | +| kind/bug | Bugs are bugs. The cause may or may not be known at triage time so debugging should be taken account into the time estimate. | +| kind/feature | Functionality or other elements that the project does not currently support. Features are new and shinny. | +| kind/question | Contains a user or contributor question requiring a response. 
| +| kind/usecase | A description of a user or contributor situation requiring a response perhaps in code or documentation. | +| kind/writing | Writing documentation, man pages, articles, blogs, or other significant word-driven task. | +| kind/test | Tests or test infrastructure needs adding or updating. | -### Step 4: Estimate the Difficulty -Difficulty is a way for a contributor to find an issue based on their skill set. Difficulty types are -applied to the issue or pull request using labels. +Contributors can add labels by using a `+kind/bug` in an issue or pull request comment. -Difficulty +### Step 4: Estimate the experience level required + +Experience level is a way for a contributor to find an issue based on their +skill set. Experience types are applied to the issue or pull request using +labels. + +| Level | Experience level guideline | +|------------------|--------------------------------------------------------------------------------------------------------------------------| +| exp/beginner | You have made less than 10 contributions in your life time to any open source project. | +| exp/novice | You have made more than 10 contributions to an open source project or at least 5 contributions to Docker. | +| exp/proficient | You have made more than 5 contributions to Docker which amount to at least 200 code lines or 1000 documentation lines. | +| exp/expert | You have made less than 20 commits to Docker which amount to 500-1000 code lines or 1000-3000 documentation lines. | +| exp/master | You have made more than 20 commits to Docker and greater than 1000 code lines or 3000 documentation lines. | + +As the table states, these labels are meant as guidelines. You might have +written a whole plugin for Docker in a personal project and never contributed to +Docker. With that kind of experience, you could take on an exp/expert or exp/master level task. + +Contributors can add labels by using a `+exp/expert` format in issue comment. 
-| Type | Description | -|--------------|-----------------------------------------------------------------------------| -| white-belt | Simple, non-time consuming issue, easy first task to accomplish | -| black-belt | Expert at the subject matter or someone who likes pain | And that's it. That should be all the information required for a new or existing contributor to come in an resolve an issue. diff --git a/project/RELEASE-CHECKLIST.md b/project/RELEASE-CHECKLIST.md index d9382b901c..10af71c81d 100644 --- a/project/RELEASE-CHECKLIST.md +++ b/project/RELEASE-CHECKLIST.md @@ -364,7 +364,17 @@ echo "https://github.com/$GITHUBUSER/docker/compare/docker:master...$GITHUBUSER: Again, get two maintainers to validate, then merge, then push that pretty blue button to delete your branch. -### 13. Rejoice and Evangelize! +### 13. Update the API docs and VERSION files + +Now that version X.Y.Z is out, time to start working on the next! Update the +content of the `VERSION` file to be the next minor (incrementing Y) and add the +`-dev` suffix. For example, after 1.5.0 release, the `VERSION` file gets +updated to `1.6.0-dev` (as in "1.6.0 in the making"). + +Also create a new entry in `docs/sources/reference/api/` by copying the latest +and bumping the version number (in both the file's name and content). + +### 14. Rejoice and Evangelize! Congratulations! You're done. diff --git a/registry/session.go b/registry/session.go index 470aeab4cb..82338252eb 100644 --- a/registry/session.go +++ b/registry/session.go @@ -280,7 +280,9 @@ func (r *Session) GetRepositoryData(remote string) (*RepositoryData, error) { } // TODO: Right now we're ignoring checksums in the response body. // In the future, we need to use them to check image validity. 
- if res.StatusCode != 200 { + if res.StatusCode == 404 { + return nil, utils.NewHTTPRequestError(fmt.Sprintf("HTTP code: %d", res.StatusCode), res) + } else if res.StatusCode != 200 { errBody, err := ioutil.ReadAll(res.Body) if err != nil { log.Debugf("Error reading response body: %s", err) diff --git a/registry/session_v2.go b/registry/session_v2.go index c5bee11bc6..22f39317be 100644 --- a/registry/session_v2.go +++ b/registry/session_v2.go @@ -1,13 +1,16 @@ package registry import ( + "bytes" "encoding/json" "fmt" "io" "io/ioutil" + "net/http" "strconv" log "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" "github.com/docker/docker/registry/v2" "github.com/docker/docker/utils" ) @@ -95,11 +98,12 @@ func (r *Session) GetV2ImageManifest(ep *Endpoint, imageName, tagName string, au return nil, "", utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch for %s:%s", res.StatusCode, imageName, tagName), res) } - buf, err := ioutil.ReadAll(res.Body) + manifestBytes, err := ioutil.ReadAll(res.Body) if err != nil { return nil, "", fmt.Errorf("Error while reading the http response: %s", err) } - return buf, res.Header.Get(DockerDigestHeader), nil + + return manifestBytes, res.Header.Get(DockerDigestHeader), nil } // - Succeeded to head image blob (already exists) @@ -209,29 +213,14 @@ func (r *Session) GetV2ImageBlobReader(ep *Endpoint, imageName, sumType, sum str // 'layer' is an uncompressed reader of the blob to be pushed. // The server will generate it's own checksum calculation. 
func (r *Session) PutV2ImageBlob(ep *Endpoint, imageName, sumType, sumStr string, blobRdr io.Reader, auth *RequestAuthorization) error { - routeURL, err := getV2Builder(ep).BuildBlobUploadURL(imageName) + location, err := r.initiateBlobUpload(ep, imageName, auth) if err != nil { return err } - log.Debugf("[registry] Calling %q %s", "POST", routeURL) - req, err := r.reqFactory.NewRequest("POST", routeURL, nil) - if err != nil { - return err - } - - if err := auth.Authorize(req); err != nil { - return err - } - res, _, err := r.doRequest(req) - if err != nil { - return err - } - location := res.Header.Get("Location") - method := "PUT" log.Debugf("[registry] Calling %q %s", method, location) - req, err = r.reqFactory.NewRequest(method, location, ioutil.NopCloser(blobRdr)) + req, err := r.reqFactory.NewRequest(method, location, ioutil.NopCloser(blobRdr)) if err != nil { return err } @@ -241,7 +230,7 @@ func (r *Session) PutV2ImageBlob(ep *Endpoint, imageName, sumType, sumStr string if err := auth.Authorize(req); err != nil { return err } - res, _, err = r.doRequest(req) + res, _, err := r.doRequest(req) if err != nil { return err } @@ -262,8 +251,53 @@ func (r *Session) PutV2ImageBlob(ep *Endpoint, imageName, sumType, sumStr string return nil } +// initiateBlobUpload gets the blob upload location for the given image name. 
+func (r *Session) initiateBlobUpload(ep *Endpoint, imageName string, auth *RequestAuthorization) (location string, err error) { + routeURL, err := getV2Builder(ep).BuildBlobUploadURL(imageName) + if err != nil { + return "", err + } + + log.Debugf("[registry] Calling %q %s", "POST", routeURL) + req, err := r.reqFactory.NewRequest("POST", routeURL, nil) + if err != nil { + return "", err + } + + if err := auth.Authorize(req); err != nil { + return "", err + } + res, _, err := r.doRequest(req) + if err != nil { + return "", err + } + + if res.StatusCode != http.StatusAccepted { + if res.StatusCode == http.StatusUnauthorized { + return "", errLoginRequired + } + if res.StatusCode == http.StatusNotFound { + return "", ErrDoesNotExist + } + + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", err + } + + log.Debugf("Unexpected response from server: %q %#v", errBody, res.Header) + return "", utils.NewHTTPRequestError(fmt.Sprintf("Server error: unexpected %d response status trying to initiate upload of %s", res.StatusCode, imageName), res) + } + + if location = res.Header.Get("Location"); location == "" { + return "", fmt.Errorf("registry did not return a Location header for resumable blob upload for image %s", imageName) + } + + return +} + // Finally Push the (signed) manifest of the blobs we've just pushed -func (r *Session) PutV2ImageManifest(ep *Endpoint, imageName, tagName string, manifestRdr io.Reader, auth *RequestAuthorization) (string, error) { +func (r *Session) PutV2ImageManifest(ep *Endpoint, imageName, tagName string, signedManifest, rawManifest []byte, auth *RequestAuthorization) (digest.Digest, error) { routeURL, err := getV2Builder(ep).BuildManifestURL(imageName, tagName) if err != nil { return "", err @@ -271,7 +305,7 @@ func (r *Session) PutV2ImageManifest(ep *Endpoint, imageName, tagName string, ma method := "PUT" log.Debugf("[registry] Calling %q %s", method, routeURL) - req, err := r.reqFactory.NewRequest(method, routeURL, 
manifestRdr) + req, err := r.reqFactory.NewRequest(method, routeURL, bytes.NewReader(signedManifest)) if err != nil { return "", err } @@ -297,12 +331,29 @@ func (r *Session) PutV2ImageManifest(ep *Endpoint, imageName, tagName string, ma return "", utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to push %s:%s manifest", res.StatusCode, imageName, tagName), res) } - return res.Header.Get(DockerDigestHeader), nil + hdrDigest, err := digest.ParseDigest(res.Header.Get(DockerDigestHeader)) + if err != nil { + return "", fmt.Errorf("invalid manifest digest from registry: %s", err) + } + + dgstVerifier, err := digest.NewDigestVerifier(hdrDigest) + if err != nil { + return "", fmt.Errorf("invalid manifest digest from registry: %s", err) + } + + dgstVerifier.Write(rawManifest) + + if !dgstVerifier.Verified() { + computedDigest, _ := digest.FromBytes(rawManifest) + return "", fmt.Errorf("unable to verify manifest digest: registry has %q, computed %q", hdrDigest, computedDigest) + } + + return hdrDigest, nil } type remoteTags struct { - name string - tags []string + Name string `json:"name"` + Tags []string `json:"tags"` } // Given a repository name, returns a json array of string tags @@ -342,5 +393,5 @@ func (r *Session) GetV2RemoteTags(ep *Endpoint, imageName string, auth *RequestA if err != nil { return nil, fmt.Errorf("Error while decoding the http response: %s", err) } - return remote.tags, nil + return remote.Tags, nil } diff --git a/runconfig/compare.go b/runconfig/compare.go index 5c1bf46575..60a21a79c0 100644 --- a/runconfig/compare.go +++ b/runconfig/compare.go @@ -19,6 +19,7 @@ func Compare(a, b *Config) bool { } if len(a.Cmd) != len(b.Cmd) || len(a.Env) != len(b.Env) || + len(a.Labels) != len(b.Labels) || len(a.PortSpecs) != len(b.PortSpecs) || len(a.ExposedPorts) != len(b.ExposedPorts) || len(a.Entrypoint) != len(b.Entrypoint) || @@ -36,6 +37,11 @@ func Compare(a, b *Config) bool { return false } } + for k, v := range a.Labels { + if v != 
b.Labels[k] { + return false + } + } for i := 0; i < len(a.PortSpecs); i++ { if a.PortSpecs[i] != b.PortSpecs[i] { return false diff --git a/vendor/src/github.com/Sirupsen/logrus/README.md b/vendor/src/github.com/Sirupsen/logrus/README.md index e755e7c180..512f26e5ea 100644 --- a/vendor/src/github.com/Sirupsen/logrus/README.md +++ b/vendor/src/github.com/Sirupsen/logrus/README.md @@ -82,7 +82,7 @@ func init() { // Use the Airbrake hook to report errors that have Error severity or above to // an exception tracker. You can create custom hooks, see the Hooks section. - log.AddHook(&logrus_airbrake.AirbrakeHook{}) + log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development")) // Output to stderr instead of stdout, could also be a file. log.SetOutput(os.Stderr) @@ -164,43 +164,8 @@ You can add hooks for logging levels. For example to send errors to an exception tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to multiple places simultaneously, e.g. syslog. -```go -// Not the real implementation of the Airbrake hook. Just a simple sample. -import ( - log "github.com/Sirupsen/logrus" -) - -func init() { - log.AddHook(new(AirbrakeHook)) -} - -type AirbrakeHook struct{} - -// `Fire()` takes the entry that the hook is fired for. `entry.Data[]` contains -// the fields for the entry. See the Fields section of the README. -func (hook *AirbrakeHook) Fire(entry *logrus.Entry) error { - err := airbrake.Notify(entry.Data["error"].(error)) - if err != nil { - log.WithFields(log.Fields{ - "source": "airbrake", - "endpoint": airbrake.Endpoint, - }).Info("Failed to send error to Airbrake") - } - - return nil -} - -// `Levels()` returns a slice of `Levels` the hook is fired for. -func (hook *AirbrakeHook) Levels() []log.Level { - return []log.Level{ - log.ErrorLevel, - log.FatalLevel, - log.PanicLevel, - } -} -``` - -Logrus comes with built-in hooks. Add those, or your custom hook, in `init`: +Logrus comes with [built-in hooks](hooks/). 
Add those, or your custom hook, in +`init`: ```go import ( @@ -211,7 +176,7 @@ import ( ) func init() { - log.AddHook(new(logrus_airbrake.AirbrakeHook)) + log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development")) hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") if err != nil { @@ -233,6 +198,9 @@ func init() { Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. +* [`github.com/Sirupsen/logrus/hooks/bugsnag`](https://github.com/Sirupsen/logrus/blob/master/hooks/bugsnag/bugsnag.go) + Send errors to the Bugsnag exception tracking service. + * [`github.com/nubo/hiprus`](https://github.com/nubo/hiprus) Send errors to a channel in hipchat. @@ -321,6 +289,11 @@ The built-in logging formatters are: field to `true`. To force no colored output even if there is a TTY set the `DisableColors` field to `true` * `logrus.JSONFormatter`. Logs fields as JSON. +* `logrus_logstash.LogstashFormatter`. Logs fields as Logstash Events (http://logstash.net). 
+ + ```go + logrus.SetFormatter(&logrus_logstash.LogstashFormatter{Type: “application_name"}) + ``` Third party logging formatters: diff --git a/vendor/src/github.com/Sirupsen/logrus/examples/hook/hook.go b/vendor/src/github.com/Sirupsen/logrus/examples/hook/hook.go index 42e7a4c982..cb5759a35c 100644 --- a/vendor/src/github.com/Sirupsen/logrus/examples/hook/hook.go +++ b/vendor/src/github.com/Sirupsen/logrus/examples/hook/hook.go @@ -3,21 +3,16 @@ package main import ( "github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus/hooks/airbrake" - "github.com/tobi/airbrake-go" ) var log = logrus.New() func init() { log.Formatter = new(logrus.TextFormatter) // default - log.Hooks.Add(new(logrus_airbrake.AirbrakeHook)) + log.Hooks.Add(airbrake.NewHook("https://example.com", "xyz", "development")) } func main() { - airbrake.Endpoint = "https://exceptions.whatever.com/notifier_api/v2/notices.xml" - airbrake.ApiKey = "whatever" - airbrake.Environment = "production" - log.WithFields(logrus.Fields{ "animal": "walrus", "size": 10, diff --git a/vendor/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go b/vendor/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go new file mode 100644 index 0000000000..34b1ccbca6 --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go @@ -0,0 +1,48 @@ +package logstash + +import ( + "encoding/json" + "fmt" + "github.com/Sirupsen/logrus" + "time" +) + +// Formatter generates json in logstash format. +// Logstash site: http://logstash.net/ +type LogstashFormatter struct { + Type string // if not empty use for logstash type field. 
+} + +func (f *LogstashFormatter) Format(entry *logrus.Entry) ([]byte, error) { + entry.Data["@version"] = 1 + entry.Data["@timestamp"] = entry.Time.Format(time.RFC3339) + + // set message field + v, ok := entry.Data["message"] + if ok { + entry.Data["fields.message"] = v + } + entry.Data["message"] = entry.Message + + // set level field + v, ok = entry.Data["level"] + if ok { + entry.Data["fields.level"] = v + } + entry.Data["level"] = entry.Level.String() + + // set type field + if f.Type != "" { + v, ok = entry.Data["type"] + if ok { + entry.Data["fields.type"] = v + } + entry.Data["type"] = f.Type + } + + serialized, err := json.Marshal(entry.Data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} diff --git a/vendor/src/github.com/Sirupsen/logrus/formatters/logstash/logstash_test.go b/vendor/src/github.com/Sirupsen/logrus/formatters/logstash/logstash_test.go new file mode 100644 index 0000000000..d8814a0eae --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/formatters/logstash/logstash_test.go @@ -0,0 +1,52 @@ +package logstash + +import ( + "bytes" + "encoding/json" + "github.com/Sirupsen/logrus" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestLogstashFormatter(t *testing.T) { + assert := assert.New(t) + + lf := LogstashFormatter{Type: "abc"} + + fields := logrus.Fields{ + "message": "def", + "level": "ijk", + "type": "lmn", + "one": 1, + "pi": 3.14, + "bool": true, + } + + entry := logrus.WithFields(fields) + entry.Message = "msg" + entry.Level = logrus.InfoLevel + + b, _ := lf.Format(entry) + + var data map[string]interface{} + dec := json.NewDecoder(bytes.NewReader(b)) + dec.UseNumber() + dec.Decode(&data) + + // base fields + assert.Equal(json.Number("1"), data["@version"]) + assert.NotEmpty(data["@timestamp"]) + assert.Equal("abc", data["type"]) + assert.Equal("msg", data["message"]) + assert.Equal("info", data["level"]) + + // substituted fields 
+ assert.Equal("def", data["fields.message"]) + assert.Equal("ijk", data["fields.level"]) + assert.Equal("lmn", data["fields.type"]) + + // formats + assert.Equal(json.Number("1"), data["one"]) + assert.Equal(json.Number("3.14"), data["pi"]) + assert.Equal(true, data["bool"]) +} diff --git a/vendor/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go b/vendor/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go index 75f4db1513..b0502c335a 100644 --- a/vendor/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go +++ b/vendor/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go @@ -1,51 +1,51 @@ -package logrus_airbrake +package airbrake import ( + "errors" + "fmt" + "github.com/Sirupsen/logrus" "github.com/tobi/airbrake-go" ) // AirbrakeHook to send exceptions to an exception-tracking service compatible -// with the Airbrake API. You must set: -// * airbrake.Endpoint -// * airbrake.ApiKey -// * airbrake.Environment -// -// Before using this hook, to send an error. Entries that trigger an Error, -// Fatal or Panic should now include an "error" field to send to Airbrake. -type AirbrakeHook struct{} +// with the Airbrake API. 
+type airbrakeHook struct { + APIKey string + Endpoint string + Environment string +} -func (hook *AirbrakeHook) Fire(entry *logrus.Entry) error { - if entry.Data["error"] == nil { - entry.Logger.WithFields(logrus.Fields{ - "source": "airbrake", - "endpoint": airbrake.Endpoint, - }).Warn("Exceptions sent to Airbrake must have an 'error' key with the error") - return nil +func NewHook(endpoint, apiKey, env string) *airbrakeHook { + return &airbrakeHook{ + APIKey: apiKey, + Endpoint: endpoint, + Environment: env, } +} +func (hook *airbrakeHook) Fire(entry *logrus.Entry) error { + airbrake.ApiKey = hook.APIKey + airbrake.Endpoint = hook.Endpoint + airbrake.Environment = hook.Environment + + var notifyErr error err, ok := entry.Data["error"].(error) - if !ok { - entry.Logger.WithFields(logrus.Fields{ - "source": "airbrake", - "endpoint": airbrake.Endpoint, - }).Warn("Exceptions sent to Airbrake must have an `error` key of type `error`") - return nil + if ok { + notifyErr = err + } else { + notifyErr = errors.New(entry.Message) } - airErr := airbrake.Notify(err) + airErr := airbrake.Notify(notifyErr) if airErr != nil { - entry.Logger.WithFields(logrus.Fields{ - "source": "airbrake", - "endpoint": airbrake.Endpoint, - "error": airErr, - }).Warn("Failed to send error to Airbrake") + return fmt.Errorf("Failed to send error to Airbrake: %s", airErr) } return nil } -func (hook *AirbrakeHook) Levels() []logrus.Level { +func (hook *airbrakeHook) Levels() []logrus.Level { return []logrus.Level{ logrus.ErrorLevel, logrus.FatalLevel, diff --git a/vendor/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake_test.go b/vendor/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake_test.go new file mode 100644 index 0000000000..058a91e343 --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake_test.go @@ -0,0 +1,133 @@ +package airbrake + +import ( + "encoding/xml" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/Sirupsen/logrus" +) + 
+type notice struct { + Error NoticeError `xml:"error"` +} +type NoticeError struct { + Class string `xml:"class"` + Message string `xml:"message"` +} + +type customErr struct { + msg string +} + +func (e *customErr) Error() string { + return e.msg +} + +const ( + testAPIKey = "abcxyz" + testEnv = "development" + expectedClass = "*airbrake.customErr" + expectedMsg = "foo" + unintendedMsg = "Airbrake will not see this string" +) + +var ( + noticeError = make(chan NoticeError, 1) +) + +// TestLogEntryMessageReceived checks if invoking Logrus' log.Error +// method causes an XML payload containing the log entry message is received +// by a HTTP server emulating an Airbrake-compatible endpoint. +func TestLogEntryMessageReceived(t *testing.T) { + log := logrus.New() + ts := startAirbrakeServer(t) + defer ts.Close() + + hook := NewHook(ts.URL, testAPIKey, "production") + log.Hooks.Add(hook) + + log.Error(expectedMsg) + + select { + case received := <-noticeError: + if received.Message != expectedMsg { + t.Errorf("Unexpected message received: %s", received.Message) + } + case <-time.After(time.Second): + t.Error("Timed out; no notice received by Airbrake API") + } +} + +// TestLogEntryMessageReceived confirms that, when passing an error type using +// logrus.Fields, a HTTP server emulating an Airbrake endpoint receives the +// error message returned by the Error() method on the error interface +// rather than the logrus.Entry.Message string. 
+func TestLogEntryWithErrorReceived(t *testing.T) { + log := logrus.New() + ts := startAirbrakeServer(t) + defer ts.Close() + + hook := NewHook(ts.URL, testAPIKey, "production") + log.Hooks.Add(hook) + + log.WithFields(logrus.Fields{ + "error": &customErr{expectedMsg}, + }).Error(unintendedMsg) + + select { + case received := <-noticeError: + if received.Message != expectedMsg { + t.Errorf("Unexpected message received: %s", received.Message) + } + if received.Class != expectedClass { + t.Errorf("Unexpected error class: %s", received.Class) + } + case <-time.After(time.Second): + t.Error("Timed out; no notice received by Airbrake API") + } +} + +// TestLogEntryWithNonErrorTypeNotReceived confirms that, when passing a +// non-error type using logrus.Fields, a HTTP server emulating an Airbrake +// endpoint receives the logrus.Entry.Message string. +// +// Only error types are supported when setting the 'error' field using +// logrus.WithFields(). +func TestLogEntryWithNonErrorTypeNotReceived(t *testing.T) { + log := logrus.New() + ts := startAirbrakeServer(t) + defer ts.Close() + + hook := NewHook(ts.URL, testAPIKey, "production") + log.Hooks.Add(hook) + + log.WithFields(logrus.Fields{ + "error": expectedMsg, + }).Error(unintendedMsg) + + select { + case received := <-noticeError: + if received.Message != unintendedMsg { + t.Errorf("Unexpected message received: %s", received.Message) + } + case <-time.After(time.Second): + t.Error("Timed out; no notice received by Airbrake API") + } +} + +func startAirbrakeServer(t *testing.T) *httptest.Server { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var notice notice + if err := xml.NewDecoder(r.Body).Decode(¬ice); err != nil { + t.Error(err) + } + r.Body.Close() + + noticeError <- notice.Error + })) + + return ts +} diff --git a/vendor/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag.go b/vendor/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag.go new file mode 100644 
index 0000000000..d20a0f54ab --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag.go @@ -0,0 +1,68 @@ +package logrus_bugsnag + +import ( + "errors" + + "github.com/Sirupsen/logrus" + "github.com/bugsnag/bugsnag-go" +) + +type bugsnagHook struct{} + +// ErrBugsnagUnconfigured is returned if NewBugsnagHook is called before +// bugsnag.Configure. Bugsnag must be configured before the hook. +var ErrBugsnagUnconfigured = errors.New("bugsnag must be configured before installing this logrus hook") + +// ErrBugsnagSendFailed indicates that the hook failed to submit an error to +// bugsnag. The error was successfully generated, but `bugsnag.Notify()` +// failed. +type ErrBugsnagSendFailed struct { + err error +} + +func (e ErrBugsnagSendFailed) Error() string { + return "failed to send error to Bugsnag: " + e.err.Error() +} + +// NewBugsnagHook initializes a logrus hook which sends exceptions to an +// exception-tracking service compatible with the Bugsnag API. Before using +// this hook, you must call bugsnag.Configure(). The returned object should be +// registered with a log via `AddHook()` +// +// Entries that trigger an Error, Fatal or Panic should now include an "error" +// field to send to Bugsnag. +func NewBugsnagHook() (*bugsnagHook, error) { + if bugsnag.Config.APIKey == "" { + return nil, ErrBugsnagUnconfigured + } + return &bugsnagHook{}, nil +} + +// Fire forwards an error to Bugsnag. Given a logrus.Entry, it extracts the +// "error" field (or the Message if the error isn't present) and sends it off. 
+func (hook *bugsnagHook) Fire(entry *logrus.Entry) error { + var notifyErr error + err, ok := entry.Data["error"].(error) + if ok { + notifyErr = err + } else { + notifyErr = errors.New(entry.Message) + } + + bugsnagErr := bugsnag.Notify(notifyErr) + if bugsnagErr != nil { + return ErrBugsnagSendFailed{bugsnagErr} + } + + return nil +} + +// Levels enumerates the log levels on which the error should be forwarded to +// bugsnag: everything at or above the "Error" level. +func (hook *bugsnagHook) Levels() []logrus.Level { + return []logrus.Level{ + logrus.ErrorLevel, + logrus.FatalLevel, + logrus.PanicLevel, + } +} diff --git a/vendor/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag_test.go b/vendor/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag_test.go new file mode 100644 index 0000000000..e9ea298d89 --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag_test.go @@ -0,0 +1,64 @@ +package logrus_bugsnag + +import ( + "encoding/json" + "errors" + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/Sirupsen/logrus" + "github.com/bugsnag/bugsnag-go" +) + +type notice struct { + Events []struct { + Exceptions []struct { + Message string `json:"message"` + } `json:"exceptions"` + } `json:"events"` +} + +func TestNoticeReceived(t *testing.T) { + msg := make(chan string, 1) + expectedMsg := "foo" + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var notice notice + data, _ := ioutil.ReadAll(r.Body) + if err := json.Unmarshal(data, ¬ice); err != nil { + t.Error(err) + } + _ = r.Body.Close() + + msg <- notice.Events[0].Exceptions[0].Message + })) + defer ts.Close() + + hook := &bugsnagHook{} + + bugsnag.Configure(bugsnag.Configuration{ + Endpoint: ts.URL, + ReleaseStage: "production", + APIKey: "12345678901234567890123456789012", + Synchronous: true, + }) + + log := logrus.New() + log.Hooks.Add(hook) + + log.WithFields(logrus.Fields{ + "error": 
errors.New(expectedMsg), + }).Error("Bugsnag will not see this string") + + select { + case received := <-msg: + if received != expectedMsg { + t.Errorf("Unexpected message received: %s", received) + } + case <-time.After(time.Second): + t.Error("Timed out; no notice received by Bugsnag API") + } +} diff --git a/vendor/src/github.com/Sirupsen/logrus/json_formatter.go b/vendor/src/github.com/Sirupsen/logrus/json_formatter.go index 0e38a61919..5c4c44bbe5 100644 --- a/vendor/src/github.com/Sirupsen/logrus/json_formatter.go +++ b/vendor/src/github.com/Sirupsen/logrus/json_formatter.go @@ -11,11 +11,12 @@ type JSONFormatter struct{} func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { data := make(Fields, len(entry.Data)+3) for k, v := range entry.Data { - // Otherwise errors are ignored by `encoding/json` - // https://github.com/Sirupsen/logrus/issues/137 - if err, ok := v.(error); ok { - data[k] = err.Error() - } else { + switch v := v.(type) { + case error: + // Otherwise errors are ignored by `encoding/json` + // https://github.com/Sirupsen/logrus/issues/137 + data[k] = v.Error() + default: data[k] = v } } diff --git a/vendor/src/github.com/Sirupsen/logrus/logger.go b/vendor/src/github.com/Sirupsen/logrus/logger.go index b392e547a7..da928a3750 100644 --- a/vendor/src/github.com/Sirupsen/logrus/logger.go +++ b/vendor/src/github.com/Sirupsen/logrus/logger.go @@ -65,11 +65,15 @@ func (logger *Logger) WithFields(fields Fields) *Entry { } func (logger *Logger) Debugf(format string, args ...interface{}) { - NewEntry(logger).Debugf(format, args...) + if logger.Level >= DebugLevel { + NewEntry(logger).Debugf(format, args...) + } } func (logger *Logger) Infof(format string, args ...interface{}) { - NewEntry(logger).Infof(format, args...) + if logger.Level >= InfoLevel { + NewEntry(logger).Infof(format, args...) 
+ } } func (logger *Logger) Printf(format string, args ...interface{}) { @@ -77,31 +81,45 @@ func (logger *Logger) Printf(format string, args ...interface{}) { } func (logger *Logger) Warnf(format string, args ...interface{}) { - NewEntry(logger).Warnf(format, args...) + if logger.Level >= WarnLevel { + NewEntry(logger).Warnf(format, args...) + } } func (logger *Logger) Warningf(format string, args ...interface{}) { - NewEntry(logger).Warnf(format, args...) + if logger.Level >= WarnLevel { + NewEntry(logger).Warnf(format, args...) + } } func (logger *Logger) Errorf(format string, args ...interface{}) { - NewEntry(logger).Errorf(format, args...) + if logger.Level >= ErrorLevel { + NewEntry(logger).Errorf(format, args...) + } } func (logger *Logger) Fatalf(format string, args ...interface{}) { - NewEntry(logger).Fatalf(format, args...) + if logger.Level >= FatalLevel { + NewEntry(logger).Fatalf(format, args...) + } } func (logger *Logger) Panicf(format string, args ...interface{}) { - NewEntry(logger).Panicf(format, args...) + if logger.Level >= PanicLevel { + NewEntry(logger).Panicf(format, args...) + } } func (logger *Logger) Debug(args ...interface{}) { - NewEntry(logger).Debug(args...) + if logger.Level >= DebugLevel { + NewEntry(logger).Debug(args...) + } } func (logger *Logger) Info(args ...interface{}) { - NewEntry(logger).Info(args...) + if logger.Level >= InfoLevel { + NewEntry(logger).Info(args...) + } } func (logger *Logger) Print(args ...interface{}) { @@ -109,31 +127,45 @@ func (logger *Logger) Print(args ...interface{}) { } func (logger *Logger) Warn(args ...interface{}) { - NewEntry(logger).Warn(args...) + if logger.Level >= WarnLevel { + NewEntry(logger).Warn(args...) + } } func (logger *Logger) Warning(args ...interface{}) { - NewEntry(logger).Warn(args...) + if logger.Level >= WarnLevel { + NewEntry(logger).Warn(args...) + } } func (logger *Logger) Error(args ...interface{}) { - NewEntry(logger).Error(args...) 
+ if logger.Level >= ErrorLevel { + NewEntry(logger).Error(args...) + } } func (logger *Logger) Fatal(args ...interface{}) { - NewEntry(logger).Fatal(args...) + if logger.Level >= FatalLevel { + NewEntry(logger).Fatal(args...) + } } func (logger *Logger) Panic(args ...interface{}) { - NewEntry(logger).Panic(args...) + if logger.Level >= PanicLevel { + NewEntry(logger).Panic(args...) + } } func (logger *Logger) Debugln(args ...interface{}) { - NewEntry(logger).Debugln(args...) + if logger.Level >= DebugLevel { + NewEntry(logger).Debugln(args...) + } } func (logger *Logger) Infoln(args ...interface{}) { - NewEntry(logger).Infoln(args...) + if logger.Level >= InfoLevel { + NewEntry(logger).Infoln(args...) + } } func (logger *Logger) Println(args ...interface{}) { @@ -141,21 +173,31 @@ func (logger *Logger) Println(args ...interface{}) { } func (logger *Logger) Warnln(args ...interface{}) { - NewEntry(logger).Warnln(args...) + if logger.Level >= WarnLevel { + NewEntry(logger).Warnln(args...) + } } func (logger *Logger) Warningln(args ...interface{}) { - NewEntry(logger).Warnln(args...) + if logger.Level >= WarnLevel { + NewEntry(logger).Warnln(args...) + } } func (logger *Logger) Errorln(args ...interface{}) { - NewEntry(logger).Errorln(args...) + if logger.Level >= ErrorLevel { + NewEntry(logger).Errorln(args...) + } } func (logger *Logger) Fatalln(args ...interface{}) { - NewEntry(logger).Fatalln(args...) + if logger.Level >= FatalLevel { + NewEntry(logger).Fatalln(args...) + } } func (logger *Logger) Panicln(args ...interface{}) { - NewEntry(logger).Panicln(args...) + if logger.Level >= PanicLevel { + NewEntry(logger).Panicln(args...) 
+ } } diff --git a/vendor/src/github.com/Sirupsen/logrus/terminal_openbsd.go b/vendor/src/github.com/Sirupsen/logrus/terminal_openbsd.go index d238bfa0b4..af609a53d6 100644 --- a/vendor/src/github.com/Sirupsen/logrus/terminal_openbsd.go +++ b/vendor/src/github.com/Sirupsen/logrus/terminal_openbsd.go @@ -1,4 +1,3 @@ - package logrus import "syscall" diff --git a/vendor/src/github.com/Sirupsen/logrus/text_formatter.go b/vendor/src/github.com/Sirupsen/logrus/text_formatter.go index 71dcb6617a..0a06a1105f 100644 --- a/vendor/src/github.com/Sirupsen/logrus/text_formatter.go +++ b/vendor/src/github.com/Sirupsen/logrus/text_formatter.go @@ -3,7 +3,6 @@ package logrus import ( "bytes" "fmt" - "regexp" "sort" "strings" "time" @@ -21,7 +20,6 @@ const ( var ( baseTimestamp time.Time isTerminal bool - noQuoteNeeded *regexp.Regexp ) func init() { diff --git a/vendor/src/github.com/Sirupsen/logrus/writer.go b/vendor/src/github.com/Sirupsen/logrus/writer.go index 90d3e01b45..1e30b1c753 100644 --- a/vendor/src/github.com/Sirupsen/logrus/writer.go +++ b/vendor/src/github.com/Sirupsen/logrus/writer.go @@ -6,7 +6,7 @@ import ( "runtime" ) -func (logger *Logger) Writer() (*io.PipeWriter) { +func (logger *Logger) Writer() *io.PipeWriter { reader, writer := io.Pipe() go logger.writerScanner(reader) diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go index f6c0d7d597..5cb8467c78 100644 --- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go +++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go @@ -99,12 +99,11 @@ func (m *Manager) Apply(pid int) error { // created then join consists of writing the process pids to cgroup.procs p, err := d.path(name) if err != nil { + if cgroups.IsNotFound(err) { + continue + } return err } - if !cgroups.PathExists(p) { - continue - } - paths[name] = p } m.Paths = paths diff --git 
a/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio.go index 01da5d7fc7..8e132643bb 100644 --- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio.go +++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio.go @@ -17,12 +17,8 @@ type BlkioGroup struct { func (s *BlkioGroup) Apply(d *data) error { dir, err := d.join("blkio") - if err != nil { - if cgroups.IsNotFound(err) { - return nil - } else { - return err - } + if err != nil && !cgroups.IsNotFound(err) { + return err } if err := s.Set(dir, d.c); err != nil { diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpu.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpu.go index 42386fd847..1fbf7b1540 100644 --- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpu.go +++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpu.go @@ -18,11 +18,7 @@ func (s *CpuGroup) Apply(d *data) error { // on a container basis dir, err := d.join("cpu") if err != nil { - if cgroups.IsNotFound(err) { - return nil - } else { - return err - } + return err } if err := s.Set(dir, d.c); err != nil { diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/devices.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/devices.go index fab8323e93..16e00b1c73 100644 --- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/devices.go +++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/devices.go @@ -11,11 +11,7 @@ type DevicesGroup struct { func (s *DevicesGroup) Apply(d *data) error { dir, err := d.join("devices") if err != nil { - if cgroups.IsNotFound(err) { - return nil - } else { - return err - } + return err } if err := s.Set(dir, d.c); err != nil { diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/freezer.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/freezer.go index 5e08e05302..fc8241d1bf 100644 --- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/freezer.go 
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/freezer.go @@ -13,12 +13,8 @@ type FreezerGroup struct { func (s *FreezerGroup) Apply(d *data) error { dir, err := d.join("freezer") - if err != nil { - if cgroups.IsNotFound(err) { - return nil - } else { - return err - } + if err != nil && !cgroups.IsNotFound(err) { + return err } if err := s.Set(dir, d.c); err != nil { diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/memory.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/memory.go index 68e930fdc5..b99f81687a 100644 --- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/memory.go +++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/memory.go @@ -16,12 +16,9 @@ type MemoryGroup struct { func (s *MemoryGroup) Apply(d *data) error { dir, err := d.join("memory") - if err != nil { - if cgroups.IsNotFound(err) { - return nil - } else { - return err - } + // only return an error for memory if it was specified + if err != nil && (d.c.Memory != 0 || d.c.MemoryReservation != 0 || d.c.MemorySwap != 0) { + return err } defer func() { if err != nil { diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go b/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go index f4358e1a64..f353640697 100644 --- a/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go +++ b/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go @@ -91,7 +91,7 @@ func UseSystemd() bool { ddf := newProp("DefaultDependencies", false) if _, err := theConn.StartTransientUnit("docker-systemd-test-default-dependencies.scope", "replace", ddf); err != nil { if dbusError, ok := err.(dbus.Error); ok { - if dbusError.Name == "org.freedesktop.DBus.Error.PropertyReadOnly" { + if strings.Contains(dbusError.Name, "org.freedesktop.DBus.Error.PropertyReadOnly") { hasTransientDefaultDependencies = false } } diff --git a/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux.go 
b/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux.go index 3ecb81fb78..c438ec300f 100644 --- a/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux.go +++ b/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux.go @@ -659,7 +659,7 @@ func networkSetNsAction(iface *net.Interface, rtattr *RtAttr) error { } // Move a particular network interface to a particular network namespace -// specified by PID. This is idential to running: ip link set dev $name netns $pid +// specified by PID. This is identical to running: ip link set dev $name netns $pid func NetworkSetNsPid(iface *net.Interface, nspid int) error { data := uint32Attr(syscall.IFLA_NET_NS_PID, uint32(nspid)) return networkSetNsAction(iface, data) @@ -673,7 +673,7 @@ func NetworkSetNsFd(iface *net.Interface, fd int) error { return networkSetNsAction(iface, data) } -// Rname a particular interface to a different name +// Rename a particular interface to a different name // !!! Note that you can't rename an active interface. You need to bring it down before renaming it. // This is identical to running: ip link set dev ${oldName} name ${newName} func NetworkChangeName(iface *net.Interface, newName string) error {