Merge branch 'master' of github.com:docker/docker into btrfs

Docker-DCO-1.1-Signed-off-by: Dan Walsh <dwalsh@redhat.com> (github: rhatdan)
This commit is contained in:
Dan Walsh 2015-03-24 14:13:56 -04:00
commit 25c4a92342
130 changed files with 4466 additions and 863 deletions

View File

@ -235,8 +235,8 @@ Docker platform to broaden its application and utility.
If you know of another project underway that should be listed here, please help
us keep this list up-to-date by submitting a PR.
* [Docker Registry](https://github.com/docker/docker-registry): Registry
server for Docker (hosting/delivering of repositories and images)
* [Docker Registry](https://github.com/docker/distribution): Registry
server for Docker (hosting/delivery of repositories and images)
* [Docker Machine](https://github.com/docker/machine): Machine management
for a container-centric world
* [Docker Swarm](https://github.com/docker/swarm): A Docker-native clustering

View File

@ -137,19 +137,12 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, keyFile string, proto, a
if tlsConfig != nil {
scheme = "https"
}
if in != nil {
if file, ok := in.(*os.File); ok {
inFd = file.Fd()
isTerminalIn = term.IsTerminal(inFd)
}
inFd, isTerminalIn = term.GetFdInfo(in)
}
if out != nil {
if file, ok := out.(*os.File); ok {
outFd = file.Fd()
isTerminalOut = term.IsTerminal(outFd)
}
outFd, isTerminalOut = term.GetFdInfo(out)
}
if err == nil {
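
For context, the `term.GetFdInfo` helper that replaces the inline probing above presumably looks something like this minimal sketch (inferred from the code it supersedes, not taken from this commit):

```go
package term

import "os"

// GetFdInfo returns the file descriptor backing an arbitrary stream and
// whether that descriptor refers to a terminal. Streams not backed by an
// *os.File (e.g. buffers in tests) yield (0, false).
func GetFdInfo(in interface{}) (uintptr, bool) {
	var fd uintptr
	var isTerminal bool
	if file, ok := in.(*os.File); ok {
		fd = file.Fd()
		isTerminal = IsTerminal(fd) // IsTerminal already exists in pkg/term
	}
	return fd, isTerminal
}
```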

View File

@ -37,11 +37,11 @@ import (
"github.com/docker/docker/pkg/fileutils"
"github.com/docker/docker/pkg/homedir"
flag "github.com/docker/docker/pkg/mflag"
"github.com/docker/docker/pkg/networkfs/resolvconf"
"github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/pkg/parsers/filters"
"github.com/docker/docker/pkg/progressreader"
"github.com/docker/docker/pkg/promise"
"github.com/docker/docker/pkg/resolvconf"
"github.com/docker/docker/pkg/signal"
"github.com/docker/docker/pkg/symlink"
"github.com/docker/docker/pkg/term"
@ -232,6 +232,13 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
return err
}
}
// windows: show error message about modified file permissions
// FIXME: this is not a valid warning when the daemon is running on Windows. Should be removed once the Docker engine for Windows can build.
if runtime.GOOS == "windows" {
log.Warn(`SECURITY WARNING: You are building a Docker image from Windows against a Linux Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`)
}
var body io.Reader
// Setup an upload progress bar
// FIXME: ProgressReader shouldn't be this annoying to use
@ -443,17 +450,18 @@ func (cli *DockerCli) CmdLogin(args ...string) error {
if err != nil {
return err
}
var out2 engine.Env
err = out2.Decode(stream)
if err != nil {
var response types.AuthResponse
if err := json.NewDecoder(stream).Decode(&response); err != nil {
cli.configFile, _ = registry.LoadConfig(homedir.Get())
return err
}
registry.SaveConfig(cli.configFile)
fmt.Fprintf(cli.out, "WARNING: login credentials saved in %s.\n", path.Join(homedir.Get(), registry.CONFIGFILE))
if out2.Get("Status") != "" {
fmt.Fprintf(cli.out, "%s\n", out2.Get("Status"))
if response.Status != "" {
fmt.Fprintf(cli.out, "%s\n", response.Status)
}
return nil
}
@ -762,18 +770,6 @@ func (cli *DockerCli) CmdStart(args ...string) error {
cmd.Require(flag.Min, 1)
utils.ParseFlags(cmd, args, true)
hijacked := make(chan io.Closer)
// Block the return until the chan gets closed
defer func() {
log.Debugf("CmdStart() returned, defer waiting for hijack to finish.")
if _, ok := <-hijacked; ok {
log.Errorf("Hijack did not finish (chan still open)")
}
if *openStdin || *attach {
cli.in.Close()
}
}()
if *attach || *openStdin {
if cmd.NArg() > 1 {
return fmt.Errorf("You cannot start and attach multiple containers at once.")
@ -809,26 +805,34 @@ func (cli *DockerCli) CmdStart(args ...string) error {
v.Set("stdout", "1")
v.Set("stderr", "1")
hijacked := make(chan io.Closer)
// Block the return until the chan gets closed
defer func() {
log.Debugf("CmdStart() returned, defer waiting for hijack to finish.")
if _, ok := <-hijacked; ok {
log.Errorf("Hijack did not finish (chan still open)")
}
cli.in.Close()
}()
cErr = promise.Go(func() error {
return cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), tty, in, cli.out, cli.err, hijacked, nil)
})
} else {
close(hijacked)
// Acknowledge the hijack before starting
select {
case closer := <-hijacked:
// Make sure that the hijack gets closed when returning (results
// in closing the hijack chan and freeing server's goroutines)
if closer != nil {
defer closer.Close()
}
case err := <-cErr:
if err != nil {
return err
}
}
}
// Acknowledge the hijack before starting
select {
case closer := <-hijacked:
// Make sure that the hijack gets closed when returning (results
// in closing the hijack chan and freeing server's goroutines)
if closer != nil {
defer closer.Close()
}
case err := <-cErr:
if err != nil {
return err
}
}
var encounteredError error
for _, name := range cmd.Args() {
_, _, err := readBody(cli.call("POST", "/containers/"+name+"/start", nil, false))
@ -2492,6 +2496,14 @@ func (cli *DockerCli) CmdRun(args ...string) error {
}
}
defer func() {
if *flAutoRemove {
if _, _, err = readBody(cli.call("DELETE", "/containers/"+createResponse.ID+"?v=1", nil, false)); err != nil {
log.Errorf("Error deleting container: %s", err)
}
}
}()
//start the container
if _, _, err = readBody(cli.call("POST", "/containers/"+createResponse.ID+"/start", nil, false)); err != nil {
return err
@ -2529,9 +2541,6 @@ func (cli *DockerCli) CmdRun(args ...string) error {
if _, status, err = getExitCode(cli, createResponse.ID); err != nil {
return err
}
if _, _, err := readBody(cli.call("DELETE", "/containers/"+createResponse.ID+"?v=1", nil, false)); err != nil {
return err
}
} else {
// No Autoremove: Simply retrieve the exit code
if !config.Tty {
@ -2669,12 +2678,15 @@ func (cli *DockerCli) CmdExec(args ...string) error {
return err
}
var execResult engine.Env
if err := execResult.Decode(stream); err != nil {
var response types.ContainerExecCreateResponse
if err := json.NewDecoder(stream).Decode(&response); err != nil {
return err
}
for _, warning := range response.Warnings {
fmt.Fprintf(cli.err, "WARNING: %s\n", warning)
}
execID := execResult.Get("Id")
execID := response.ID
if execID == "" {
fmt.Fprintf(cli.out, "exec ID empty")

View File

@ -192,7 +192,9 @@ func postAuth(eng *engine.Engine, version version.Version, w http.ResponseWriter
if status := engine.Tail(stdoutBuffer, 1); status != "" {
var env engine.Env
env.Set("Status", status)
return writeJSONEnv(w, http.StatusOK, env)
return writeJSON(w, http.StatusOK, &types.AuthResponse{
Status: status,
})
}
w.WriteHeader(http.StatusNoContent)
return nil
@ -1087,6 +1089,20 @@ func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWrite
job.Setenv("cpusetcpus", r.FormValue("cpusetcpus"))
job.Setenv("cpushares", r.FormValue("cpushares"))
// Job cancellation. Note: not all job types support this.
if closeNotifier, ok := w.(http.CloseNotifier); ok {
finished := make(chan struct{})
defer close(finished)
go func() {
select {
case <-finished:
case <-closeNotifier.CloseNotify():
log.Infof("Client disconnected, cancelling job: %v", job)
job.Cancel()
}
}()
}
if err := job.Run(); err != nil {
if !job.Stdout.Used() {
return err
@ -1141,10 +1157,11 @@ func postContainerExecCreate(eng *engine.Engine, version version.Version, w http
return nil
}
var (
out engine.Env
name = vars["name"]
job = eng.Job("execCreate", name)
stdoutBuffer = bytes.NewBuffer(nil)
outWarnings []string
warnings = bytes.NewBuffer(nil)
)
if err := job.DecodeEnv(r.Body); err != nil {
@ -1152,15 +1169,23 @@ func postContainerExecCreate(eng *engine.Engine, version version.Version, w http
}
job.Stdout.Add(stdoutBuffer)
// Read warnings from stderr
job.Stderr.Add(warnings)
// Register an instance of Exec in container.
if err := job.Run(); err != nil {
fmt.Fprintf(os.Stderr, "Error setting up exec command in container %s: %s\n", name, err)
return err
}
// Return the ID
out.Set("Id", engine.Tail(stdoutBuffer, 1))
// Parse warnings from stderr
scanner := bufio.NewScanner(warnings)
for scanner.Scan() {
outWarnings = append(outWarnings, scanner.Text())
}
return writeJSONEnv(w, http.StatusCreated, out)
return writeJSON(w, http.StatusCreated, &types.ContainerExecCreateResponse{
ID: engine.Tail(stdoutBuffer, 1),
Warnings: outWarnings,
})
}
// TODO(vishh): Refactor the code to avoid having to specify stream config as part of both create and start.
@ -1578,7 +1603,15 @@ func ServeApi(job *engine.Job) engine.Status {
chErrors <- err
return
}
chErrors <- srv.Serve()
job.Eng.OnShutdown(func() {
if err := srv.Close(); err != nil {
log.Error(err)
}
})
if err = srv.Serve(); err != nil && strings.Contains(err.Error(), "use of closed network connection") {
err = nil
}
chErrors <- err
}()
}
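
The job-cancellation hunk in `postBuild` above follows a general pattern: watch `http.CloseNotifier` from a goroutine and cancel the in-flight work when the client disconnects. A self-contained sketch of that pattern outside the job engine (the handler, route, and timing are illustrative assumptions):

```go
package main

import (
	"fmt"
	"log"
	"net/http"
	"time"
)

// handler cancels its (simulated) long-running work when the client
// disconnects, using the same CloseNotifier pattern as postBuild.
func handler(w http.ResponseWriter, r *http.Request) {
	cancelled := make(chan struct{})
	if cn, ok := w.(http.CloseNotifier); ok {
		finished := make(chan struct{})
		defer close(finished) // stops the watcher when the handler returns
		go func() {
			select {
			case <-finished: // handler completed normally
			case <-cn.CloseNotify(): // client went away
				close(cancelled)
			}
		}()
	}
	select {
	case <-time.After(10 * time.Second): // stand-in for the real job
		fmt.Fprintln(w, "done")
	case <-cancelled:
		log.Println("client disconnected, cancelling job")
	}
}

func main() {
	http.HandleFunc("/build", handler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```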

View File

@ -9,3 +9,18 @@ type ContainerCreateResponse struct {
// Warnings are any warnings encountered during the creation of the container.
Warnings []string `json:"Warnings"`
}
// POST /containers/{name:.*}/exec
type ContainerExecCreateResponse struct {
// ID is the exec ID.
ID string `json:"Id"`
// Warnings are any warnings encountered during the execution of the command.
Warnings []string `json:"Warnings"`
}
// POST /auth
type AuthResponse struct {
// Status is the authentication status
Status string `json:"Status"`
}

View File

@ -427,6 +427,10 @@ func volume(b *Builder, args []string, attributes map[string]bool, original stri
b.Config.Volumes = map[string]struct{}{}
}
for _, v := range args {
v = strings.TrimSpace(v)
if v == "" {
return fmt.Errorf("Volume specified can not be an empty string")
}
b.Config.Volumes[v] = struct{}{}
}
if err := b.commit("", b.Config.Cmd, fmt.Sprintf("VOLUME %v", args)); err != nil {

View File

@ -1,4 +1,4 @@
// builder is the evaluation step in the Dockerfile parse/evaluate pipeline.
// Package builder is the evaluation step in the Dockerfile parse/evaluate pipeline.
//
// It incorporates a dispatch table based on the parser.Node values (see the
// parser package for more information) that are yielded from the parser itself.
@ -20,7 +20,6 @@
package builder
import (
"errors"
"fmt"
"io"
"os"
@ -42,10 +41,6 @@ import (
"github.com/docker/docker/utils"
)
var (
ErrDockerfileEmpty = errors.New("Dockerfile cannot be empty")
)
// Environment variable interpolation will happen on these statements only.
var replaceEnvAllowed = map[string]struct{}{
command.Env: {},
@ -131,6 +126,8 @@ type Builder struct {
cpuShares int64
memory int64
memorySwap int64
cancelled <-chan struct{} // When closed, job was cancelled.
}
// Run the builder with the context. This is the lynchpin of this package. This
@ -166,6 +163,14 @@ func (b *Builder) Run(context io.Reader) (string, error) {
b.TmpContainers = map[string]struct{}{}
for i, n := range b.dockerfile.Children {
select {
case <-b.cancelled:
log.Debug("Builder: build cancelled!")
fmt.Fprintf(b.OutStream, "Build cancelled")
return "", fmt.Errorf("Build cancelled")
default:
// Not cancelled yet, keep going...
}
if err := b.dispatch(i, n); err != nil {
if b.ForceRemove {
b.clearTmp()
@ -215,7 +220,7 @@ func (b *Builder) readDockerfile() error {
return fmt.Errorf("Cannot locate specified Dockerfile: %s", origFile)
}
if fi.Size() == 0 {
return ErrDockerfileEmpty
return fmt.Errorf("The Dockerfile (%s) cannot be empty", origFile)
}
f, err := os.Open(filename)
@ -302,7 +307,11 @@ func (b *Builder) dispatch(stepN int, ast *parser.Node) error {
var str string
str = ast.Value
if _, ok := replaceEnvAllowed[cmd]; ok {
str = b.replaceEnv(ast.Value)
var err error
str, err = ProcessWord(ast.Value, b.Config.Env)
if err != nil {
return err
}
}
strList[i+l] = str
msgList[i] = ast.Value

View File

@ -581,6 +581,17 @@ func (b *Builder) run(c *daemon.Container) error {
return err
}
finished := make(chan struct{})
defer close(finished)
go func() {
select {
case <-b.cancelled:
log.Debugln("Build cancelled, killing container:", c.ID)
c.Kill()
case <-finished:
}
}()
if b.Verbose {
// Block on reading output from container, stop on err or chan closed
if err := <-errCh; err != nil {

View File

@ -153,6 +153,7 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
cpuSetCpus: cpuSetCpus,
memory: memory,
memorySwap: memorySwap,
cancelled: job.WaitCancelled(),
}
id, err := builder.Run(context)

View File

@ -90,7 +90,7 @@ func parseNameVal(rest string, key string) (*Node, map[string]bool, error) {
if blankOK || len(word) > 0 {
words = append(words, word)
// Look for = and if no there assume
// Look for = and if not there assume
// we're doing the old stuff and
// just read the rest of the line
if !strings.Contains(word, "=") {
@ -107,12 +107,15 @@ func parseNameVal(rest string, key string) (*Node, map[string]bool, error) {
quote = ch
blankOK = true
phase = inQuote
continue
}
if ch == '\\' {
if pos+1 == len(rest) {
continue // just skip \ at end
}
// If we're not quoted and we see a \, then always just
// add \ plus the char to the word, even if the char
// is a quote.
word += string(ch)
pos++
ch = rune(rest[pos])
}
@ -122,15 +125,17 @@ func parseNameVal(rest string, key string) (*Node, map[string]bool, error) {
if phase == inQuote {
if ch == quote {
phase = inWord
continue
}
if ch == '\\' {
// \ is special except for ' quotes - can't escape anything for '
if ch == '\\' && quote != '\'' {
if pos+1 == len(rest) {
phase = inWord
continue // just skip \ at end
}
pos++
ch = rune(rest[pos])
nextCh := rune(rest[pos])
word += string(ch)
ch = nextCh
}
word += string(ch)
}
@ -234,17 +239,18 @@ func parseJSON(rest string) (*Node, map[string]bool, error) {
var top, prev *Node
for _, str := range myJson {
if s, ok := str.(string); !ok {
s, ok := str.(string)
if !ok {
return nil, nil, errDockerfileNotStringArray
} else {
node := &Node{Value: s}
if prev == nil {
top = node
} else {
prev.Next = node
}
prev = node
}
node := &Node{Value: s}
if prev == nil {
top = node
} else {
prev.Next = node
}
prev = node
}
return top, map[string]bool{"json": true}, nil

View File

@ -7,6 +7,14 @@ ENV name=value\ value2
ENV name="value'quote space'value2"
ENV name='value"double quote"value2'
ENV name=value\ value2 name2=value2\ value3
ENV name="a\"b"
ENV name="a\'b"
ENV name='a\'b'
ENV name='a\'b''
ENV name='a\"b'
ENV name="''"
# don't put anything after the next line - it must be the last line of the
# Dockerfile and it must end with \
ENV name=value \
name1=value1 \
name2="value2a \

View File

@ -2,9 +2,15 @@
(env "name" "value")
(env "name" "value")
(env "name" "value" "name2" "value2")
(env "name" "value value1")
(env "name" "value value2")
(env "name" "value'quote space'value2")
(env "name" "value\"double quote\"value2")
(env "name" "value value2" "name2" "value2 value3")
(env "name" "value" "name1" "value1" "name2" "value2a value2b" "name3" "value3an\"value3b\"" "name4" "value4a\\nvalue4b")
(env "name" "\"value value1\"")
(env "name" "value\\ value2")
(env "name" "\"value'quote space'value2\"")
(env "name" "'value\"double quote\"value2'")
(env "name" "value\\ value2" "name2" "value2\\ value3")
(env "name" "\"a\\\"b\"")
(env "name" "\"a\\'b\"")
(env "name" "'a\\'b'")
(env "name" "'a\\'b''")
(env "name" "'a\\\"b'")
(env "name" "\"''\"")
(env "name" "value" "name1" "value1" "name2" "\"value2a value2b\"" "name3" "\"value3a\\n\\\"value3b\\\"\"" "name4" "\"value4a\\\\nvalue4b\"")

208
builder/shell_parser.go Normal file
View File

@ -0,0 +1,208 @@
package builder
// This will take a single word and an array of env variables and
// process all quotes (" and ') as well as $xxx and ${xxx} env variable
// tokens. It tries to mimic the bash shell's word processing.
// It doesn't support all flavors of ${xx:...} formats, but new ones can
// be added by adding code to the "special ${} format processing" section.
import (
"fmt"
"strings"
"unicode"
)
type shellWord struct {
word string
envs []string
pos int
}
func ProcessWord(word string, env []string) (string, error) {
sw := &shellWord{
word: word,
envs: env,
pos: 0,
}
return sw.process()
}
func (sw *shellWord) process() (string, error) {
return sw.processStopOn('\000')
}
// Process the word, starting at 'pos', and stop when we get to the
// end of the word or the 'stopChar' character
func (sw *shellWord) processStopOn(stopChar rune) (string, error) {
var result string
var charFuncMapping = map[rune]func() (string, error){
'\'': sw.processSingleQuote,
'"': sw.processDoubleQuote,
'$': sw.processDollar,
}
for sw.pos < len(sw.word) {
ch := sw.peek()
if stopChar != '\000' && ch == stopChar {
sw.next()
break
}
if fn, ok := charFuncMapping[ch]; ok {
// Call special processing func for certain chars
tmp, err := fn()
if err != nil {
return "", err
}
result += tmp
} else {
// Not special, just add it to the result
ch = sw.next()
if ch == '\\' {
// '\' escapes, except end of line
ch = sw.next()
if ch == '\000' {
continue
}
}
result += string(ch)
}
}
return result, nil
}
func (sw *shellWord) peek() rune {
if sw.pos == len(sw.word) {
return '\000'
}
return rune(sw.word[sw.pos])
}
func (sw *shellWord) next() rune {
if sw.pos == len(sw.word) {
return '\000'
}
ch := rune(sw.word[sw.pos])
sw.pos++
return ch
}
func (sw *shellWord) processSingleQuote() (string, error) {
// All chars between single quotes are taken as-is
// Note, you can't escape '
var result string
sw.next()
for {
ch := sw.next()
if ch == '\000' || ch == '\'' {
break
}
result += string(ch)
}
return result, nil
}
func (sw *shellWord) processDoubleQuote() (string, error) {
// All chars up to the next " are taken as-is, even ', except any $ chars
// But you can escape " with a \
var result string
sw.next()
for sw.pos < len(sw.word) {
ch := sw.peek()
if ch == '"' {
sw.next()
break
}
if ch == '$' {
tmp, err := sw.processDollar()
if err != nil {
return "", err
}
result += tmp
} else {
ch = sw.next()
if ch == '\\' {
chNext := sw.peek()
if chNext == '\000' {
// Ignore \ at end of word
continue
}
if chNext == '"' || chNext == '$' {
// \" and \$ can be escaped, all other \'s are left as-is
ch = sw.next()
}
}
result += string(ch)
}
}
return result, nil
}
func (sw *shellWord) processDollar() (string, error) {
sw.next()
ch := sw.peek()
if ch == '{' {
sw.next()
name := sw.processName()
ch = sw.peek()
if ch == '}' {
// Normal ${xx} case
sw.next()
return sw.getEnv(name), nil
}
return "", fmt.Errorf("Unsupported ${} substitution: %s", sw.word)
}
// $xxx case
name := sw.processName()
if name == "" {
return "$", nil
}
return sw.getEnv(name), nil
}
func (sw *shellWord) processName() string {
// Read in a name (alphanumeric or _)
// If it starts with a numeric then just return $#
var name string
for sw.pos < len(sw.word) {
ch := sw.peek()
if len(name) == 0 && unicode.IsDigit(ch) {
ch = sw.next()
return string(ch)
}
if !unicode.IsLetter(ch) && !unicode.IsDigit(ch) && ch != '_' {
break
}
ch = sw.next()
name += string(ch)
}
return name
}
func (sw *shellWord) getEnv(name string) string {
for _, env := range sw.envs {
i := strings.Index(env, "=")
if i < 0 {
if name == env {
// Should probably never get here, but just in case treat
// it like "var" and "var=" are the same
return ""
}
continue
}
if name != env[:i] {
continue
}
return env[i+1:]
}
return ""
}
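
To make the new parser's behavior concrete, a small driver for `ProcessWord` (the inputs and expected outputs mirror rows of the `builder/words` table added later in this commit; the standalone `main` wrapper is an illustration, not committed code):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/builder"
)

func main() {
	envs := []string{"PWD=/home", "SHELL=bash"}
	// Each input is a row from builder/words.
	for _, word := range []string{"he${PWD}", `"he'$PWD'"`, `'$PWD'`, `he\$PWD`} {
		result, err := builder.ProcessWord(word, envs)
		if err != nil {
			fmt.Printf("%s -> error: %v\n", word, err)
			continue
		}
		fmt.Printf("%s -> %s\n", word, result)
	}
	// Expected: he/home, he'/home', $PWD, he$PWD
}
```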

View File

@ -0,0 +1,51 @@
package builder
import (
"bufio"
"os"
"strings"
"testing"
)
func TestShellParser(t *testing.T) {
file, err := os.Open("words")
if err != nil {
t.Fatalf("Can't open 'words': %s", err)
}
defer file.Close()
scanner := bufio.NewScanner(file)
envs := []string{"PWD=/home", "SHELL=bash"}
for scanner.Scan() {
line := scanner.Text()
// Trim comments and blank lines
i := strings.Index(line, "#")
if i >= 0 {
line = line[:i]
}
line = strings.TrimSpace(line)
if line == "" {
continue
}
words := strings.Split(line, "|")
if len(words) != 2 {
t.Fatalf("Error in 'words' - should be 2 words:%q", words)
}
words[0] = strings.TrimSpace(words[0])
words[1] = strings.TrimSpace(words[1])
newWord, err := ProcessWord(words[0], envs)
if err != nil {
newWord = "error"
}
if newWord != words[1] {
t.Fatalf("Error. Src: %s Calc: %s Expected: %s", words[0], newWord, words[1])
}
}
}

View File

@ -1,50 +1,9 @@
package builder
import (
"regexp"
"strings"
)
var (
// `\\\\+|[^\\]|\b|\A` - match any number of "\\" (ie, properly-escaped backslashes), or a single non-backslash character, or a word boundary, or beginning-of-line
// `\$` - match literal $
// `[[:alnum:]_]+` - match things like `$SOME_VAR`
// `{[[:alnum:]_]+}` - match things like `${SOME_VAR}`
tokenEnvInterpolation = regexp.MustCompile(`(\\|\\\\+|[^\\]|\b|\A)\$([[:alnum:]_]+|{[[:alnum:]_]+})`)
// this intentionally punts on more exotic interpolations like ${SOME_VAR%suffix} and lets the shell handle those directly
)
// handle environment replacement. Used in dispatcher.
func (b *Builder) replaceEnv(str string) string {
for _, match := range tokenEnvInterpolation.FindAllString(str, -1) {
idx := strings.Index(match, "\\$")
if idx != -1 {
if idx+2 >= len(match) {
str = strings.Replace(str, match, "\\$", -1)
continue
}
prefix := match[:idx]
stripped := match[idx+2:]
str = strings.Replace(str, match, prefix+"$"+stripped, -1)
continue
}
match = match[strings.Index(match, "$"):]
matchKey := strings.Trim(match, "${}")
for _, keyval := range b.Config.Env {
tmp := strings.SplitN(keyval, "=", 2)
if tmp[0] == matchKey {
str = strings.Replace(str, match, tmp[1], -1)
break
}
}
}
return str
}
func handleJsonArgs(args []string, attributes map[string]bool) []string {
if len(args) == 0 {
return []string{}

43
builder/words Normal file
View File

@ -0,0 +1,43 @@
hello | hello
he'll'o | hello
he'llo | hello
he\'llo | he'llo
he\\'llo | he\llo
abc\tdef | abctdef
"abc\tdef" | abc\tdef
'abc\tdef' | abc\tdef
hello\ | hello
hello\\ | hello\
"hello | hello
"hello\" | hello"
"hel'lo" | hel'lo
'hello | hello
'hello\' | hello\
"''" | ''
$. | $.
$1 |
he$1x | hex
he$.x | he$.x
he$pwd. | he.
he$PWD | he/home
he\$PWD | he$PWD
he\\$PWD | he\/home
he\${} | he${}
he\${}xx | he${}xx
he${} | he
he${}xx | hexx
he${hi} | he
he${hi}xx | hexx
he${PWD} | he/home
he${.} | error
'he${XX}' | he${XX}
"he${PWD}" | he/home
"he'$PWD'" | he'/home'
"$PWD" | /home
'$PWD' | $PWD
'\$PWD' | \$PWD
'"hello"' | "hello"
he\$PWD | he$PWD
"he\$PWD" | he$PWD
'he\$PWD' | he\$PWD
he${PWD | error

View File

@ -325,7 +325,7 @@ _docker_cp() {
(( counter++ ))
if [ $cword -eq $counter ]; then
_filedir
_filedir -d
return
fi
;;

View File

@ -16,7 +16,7 @@
function __fish_docker_no_subcommand --description 'Test if docker has yet to be given the subcommand'
for i in (commandline -opc)
if contains -- $i attach build commit cp create diff events exec export history images import info insert inspect kill load login logout logs pause port ps pull push restart rm rmi run save search start stop tag top unpause version wait
if contains -- $i attach build commit cp create diff events exec export history images import info inspect kill load login logout logs pause port ps pull push rename restart rm rmi run save search start stop tag top unpause version wait
return 1
end
end

View File

@ -270,11 +270,6 @@ __docker_subcommand () {
{-q,--quiet}'[Only show numeric IDs]' \
':repository:__docker_repositories'
;;
(inspect)
_arguments \
{-f,--format=-}'[Format the output using the given go template]:template: ' \
'*:containers:__docker_containers'
;;
(import)
_arguments \
':URL:(- http:// file://)' \
@ -282,15 +277,10 @@ __docker_subcommand () {
;;
(info)
;;
(import)
(inspect)
_arguments \
':URL:(- http:// file://)' \
':repository:__docker_repositories_with_tags'
;;
(insert)
_arguments '1:containers:__docker_containers' \
'2:URL:(http:// file://)' \
'3:file:_files'
{-f,--format=-}'[Format the output using the given go template]:template: ' \
'*:containers:__docker_containers'
;;
(kill)
_arguments \

View File

@ -83,7 +83,7 @@ func (config *Config) InstallFlags() {
opts.LabelListVar(&config.Labels, []string{"-label"}, "Set key=value labels to the daemon")
config.Ulimits = make(map[string]*ulimit.Ulimit)
opts.UlimitMapVar(config.Ulimits, []string{"-default-ulimit"}, "Set default ulimits for containers")
flag.StringVar(&config.LogConfig.Type, []string{"-log-driver"}, "json-file", "Containers logging driver(json-file/none)")
flag.StringVar(&config.LogConfig.Type, []string{"-log-driver"}, "json-file", "Containers logging driver")
}
func getDefaultNetworkMtu() int {

View File

@ -23,6 +23,7 @@ import (
"github.com/docker/docker/daemon/execdriver"
"github.com/docker/docker/daemon/logger"
"github.com/docker/docker/daemon/logger/jsonfilelog"
"github.com/docker/docker/daemon/logger/syslog"
"github.com/docker/docker/engine"
"github.com/docker/docker/image"
"github.com/docker/docker/links"
@ -31,10 +32,10 @@ import (
"github.com/docker/docker/pkg/broadcastwriter"
"github.com/docker/docker/pkg/common"
"github.com/docker/docker/pkg/directory"
"github.com/docker/docker/pkg/etchosts"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/networkfs/etchosts"
"github.com/docker/docker/pkg/networkfs/resolvconf"
"github.com/docker/docker/pkg/promise"
"github.com/docker/docker/pkg/resolvconf"
"github.com/docker/docker/pkg/symlink"
"github.com/docker/docker/pkg/ulimit"
"github.com/docker/docker/runconfig"
@ -1223,6 +1224,7 @@ func (container *Container) initializeNetworking() error {
if err != nil {
return err
}
container.HostnamePath = nc.HostnamePath
container.HostsPath = nc.HostsPath
container.ResolvConfPath = nc.ResolvConfPath
container.Config.Hostname = nc.Config.Hostname
@ -1379,6 +1381,12 @@ func (container *Container) startLogging() error {
return err
}
l = dl
case "syslog":
dl, err := syslog.New(container.ID[:12])
if err != nil {
return err
}
l = dl
case "none":
return nil
default:

View File

@ -33,7 +33,7 @@ func (daemon *Daemon) ContainerCreate(job *engine.Job) engine.Status {
job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n")
hostConfig.Memory = 0
}
if hostConfig.Memory > 0 && !daemon.SystemConfig().SwapLimit {
if hostConfig.Memory > 0 && hostConfig.MemorySwap != -1 && !daemon.SystemConfig().SwapLimit {
job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n")
hostConfig.MemorySwap = -1
}

View File

@ -35,9 +35,9 @@ import (
"github.com/docker/docker/pkg/graphdb"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/namesgenerator"
"github.com/docker/docker/pkg/networkfs/resolvconf"
"github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/pkg/parsers/kernel"
"github.com/docker/docker/pkg/resolvconf"
"github.com/docker/docker/pkg/sysinfo"
"github.com/docker/docker/pkg/truncindex"
"github.com/docker/docker/runconfig"

View File

@ -1,4 +1,4 @@
// +build linux
// +build linux,!btrfs_noversion
package btrfs

View File

@ -0,0 +1,42 @@
package syslog
import (
"fmt"
"log/syslog"
"os"
"path"
"github.com/docker/docker/daemon/logger"
)
type Syslog struct {
writer *syslog.Writer
tag string
}
func New(tag string) (logger.Logger, error) {
log, err := syslog.New(syslog.LOG_USER, path.Base(os.Args[0]))
if err != nil {
return nil, err
}
return &Syslog{
writer: log,
tag: tag,
}, nil
}
func (s *Syslog) Log(msg *logger.Message) error {
logMessage := fmt.Sprintf("%s: %s", s.tag, msg.Line)
if msg.Source == "stderr" {
return s.writer.Err(logMessage)
}
return s.writer.Info(logMessage)
}
func (s *Syslog) Close() error {
return s.writer.Close()
}
func (s *Syslog) Name() string {
return "Syslog"
}
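
For illustration, a sketch of how a caller might exercise this driver directly. The `Line` and `Source` fields are the ones the driver reads above (`Line` is assumed to be `[]byte`, as in the other log drivers); the container ID is made up:

```go
package main

import (
	"github.com/docker/docker/daemon/logger"
	"github.com/docker/docker/daemon/logger/syslog"
)

func main() {
	// Tag with a truncated container ID, mirroring container.ID[:12]
	// in the daemon hunk above (the ID here is hypothetical).
	l, err := syslog.New("0123456789ab")
	if err != nil {
		panic(err)
	}
	defer l.Close()

	// Source selects the severity: "stderr" maps to Err, anything else to Info.
	l.Log(&logger.Message{Line: []byte("container started"), Source: "stdout"})
	l.Log(&logger.Message{Line: []byte("something went wrong"), Source: "stderr"})
}
```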

View File

@ -17,8 +17,8 @@ import (
"github.com/docker/docker/engine"
"github.com/docker/docker/nat"
"github.com/docker/docker/pkg/iptables"
"github.com/docker/docker/pkg/networkfs/resolvconf"
"github.com/docker/docker/pkg/parsers/kernel"
"github.com/docker/docker/pkg/resolvconf"
"github.com/docker/libcontainer/netlink"
)

View File

@ -50,12 +50,21 @@ var (
)
var (
mutex sync.Mutex
defaultIP = net.ParseIP("0.0.0.0")
globalMap = ipMapping{}
defaultIP = net.ParseIP("0.0.0.0")
defaultPortAllocator = New()
)
type PortAllocator struct {
mutex sync.Mutex
ipMap ipMapping
}
func New() *PortAllocator {
return &PortAllocator{
ipMap: ipMapping{},
}
}
type ErrPortAlreadyAllocated struct {
ip string
port int
@ -70,10 +79,11 @@ func NewErrPortAlreadyAllocated(ip string, port int) ErrPortAlreadyAllocated {
func init() {
const portRangeKernelParam = "/proc/sys/net/ipv4/ip_local_port_range"
portRangeFallback := fmt.Sprintf("using fallback port range %d-%d", beginPortRange, endPortRange)
file, err := os.Open(portRangeKernelParam)
if err != nil {
log.Warnf("Failed to read %s kernel parameter: %v", portRangeKernelParam, err)
log.Warnf("port allocator - %s due to error: %v", portRangeFallback, err)
return
}
var start, end int
@ -82,7 +92,7 @@ func init() {
if err == nil {
err = fmt.Errorf("unexpected count of parsed numbers (%d)", n)
}
log.Errorf("Failed to parse port range from %s: %v", portRangeKernelParam, err)
log.Errorf("port allocator - failed to parse system ephemeral port range from %s - %s: %v", portRangeKernelParam, portRangeFallback, err)
return
}
beginPortRange = start
@ -109,12 +119,9 @@ func (e ErrPortAlreadyAllocated) Error() string {
return fmt.Sprintf("Bind for %s:%d failed: port is already allocated", e.ip, e.port)
}
// RequestPort requests a new port from the global ports pool for the specified ip and proto.
// If port is 0 it returns the first free port. Otherwise it checks port availability
// in the pool and returns that port, or an error if the port is already busy.
func RequestPort(ip net.IP, proto string, port int) (int, error) {
mutex.Lock()
defer mutex.Unlock()
func (p *PortAllocator) RequestPort(ip net.IP, proto string, port int) (int, error) {
p.mutex.Lock()
defer p.mutex.Unlock()
if proto != "tcp" && proto != "udp" {
return 0, ErrUnknownProtocol
@ -124,10 +131,10 @@ func RequestPort(ip net.IP, proto string, port int) (int, error) {
ip = defaultIP
}
ipstr := ip.String()
protomap, ok := globalMap[ipstr]
protomap, ok := p.ipMap[ipstr]
if !ok {
protomap = newProtoMap()
globalMap[ipstr] = protomap
p.ipMap[ipstr] = protomap
}
mapping := protomap[proto]
if port > 0 {
@ -145,15 +152,22 @@ func RequestPort(ip net.IP, proto string, port int) (int, error) {
return port, nil
}
// RequestPort requests a new port from the global ports pool for the specified ip and proto.
// If port is 0 it returns the first free port. Otherwise it checks port availability
// in the pool and returns that port, or an error if the port is already busy.
func RequestPort(ip net.IP, proto string, port int) (int, error) {
return defaultPortAllocator.RequestPort(ip, proto, port)
}
// ReleasePort releases a port from the global ports pool for the specified ip and proto.
func ReleasePort(ip net.IP, proto string, port int) error {
mutex.Lock()
defer mutex.Unlock()
func (p *PortAllocator) ReleasePort(ip net.IP, proto string, port int) error {
p.mutex.Lock()
defer p.mutex.Unlock()
if ip == nil {
ip = defaultIP
}
protomap, ok := globalMap[ip.String()]
protomap, ok := p.ipMap[ip.String()]
if !ok {
return nil
}
@ -161,14 +175,22 @@ func ReleasePort(ip net.IP, proto string, port int) error {
return nil
}
func ReleasePort(ip net.IP, proto string, port int) error {
return defaultPortAllocator.ReleasePort(ip, proto, port)
}
// ReleaseAll releases all ports for all ips.
func ReleaseAll() error {
mutex.Lock()
globalMap = ipMapping{}
mutex.Unlock()
func (p *PortAllocator) ReleaseAll() error {
p.mutex.Lock()
p.ipMap = ipMapping{}
p.mutex.Unlock()
return nil
}
func ReleaseAll() error {
return defaultPortAllocator.ReleaseAll()
}
func (pm *portMap) findPort() (int, error) {
port := pm.last
for i := 0; i <= endPortRange-beginPortRange; i++ {
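
Summing up the refactor scattered across the hunks above: allocation state moves from package globals into a `PortAllocator` instance, while package-level functions delegate to a shared default so existing callers keep working. A brief usage sketch (the import path is assumed from the surrounding tree):

```go
package main

import (
	"fmt"
	"net"

	"github.com/docker/docker/daemon/networkdriver/portallocator"
)

func main() {
	// Isolated instance, as the rewritten tests use.
	p := portallocator.New()

	// Port 0 asks for the first free port in the ephemeral range.
	port, err := p.RequestPort(net.ParseIP("127.0.0.1"), "tcp", 0)
	if err != nil {
		panic(err)
	}
	fmt.Println("allocated:", port)
	if err := p.ReleasePort(net.ParseIP("127.0.0.1"), "tcp", port); err != nil {
		panic(err)
	}

	// Package-level wrappers still hit the shared default allocator
	// (a nil IP falls back to 0.0.0.0).
	port, _ = portallocator.RequestPort(nil, "tcp", 5000)
	fmt.Println("allocated on default:", port)
	portallocator.ReleaseAll()
}
```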

View File

@ -10,14 +10,10 @@ func init() {
endPortRange = DefaultPortRangeEnd
}
func reset() {
ReleaseAll()
}
func TestRequestNewPort(t *testing.T) {
defer reset()
p := New()
port, err := RequestPort(defaultIP, "tcp", 0)
port, err := p.RequestPort(defaultIP, "tcp", 0)
if err != nil {
t.Fatal(err)
}
@ -28,9 +24,9 @@ func TestRequestNewPort(t *testing.T) {
}
func TestRequestSpecificPort(t *testing.T) {
defer reset()
p := New()
port, err := RequestPort(defaultIP, "tcp", 5000)
port, err := p.RequestPort(defaultIP, "tcp", 5000)
if err != nil {
t.Fatal(err)
}
@ -40,9 +36,9 @@ func TestRequestSpecificPort(t *testing.T) {
}
func TestReleasePort(t *testing.T) {
defer reset()
p := New()
port, err := RequestPort(defaultIP, "tcp", 5000)
port, err := p.RequestPort(defaultIP, "tcp", 5000)
if err != nil {
t.Fatal(err)
}
@ -50,15 +46,15 @@ func TestReleasePort(t *testing.T) {
t.Fatalf("Expected port 5000 got %d", port)
}
if err := ReleasePort(defaultIP, "tcp", 5000); err != nil {
if err := p.ReleasePort(defaultIP, "tcp", 5000); err != nil {
t.Fatal(err)
}
}
func TestReuseReleasedPort(t *testing.T) {
defer reset()
p := New()
port, err := RequestPort(defaultIP, "tcp", 5000)
port, err := p.RequestPort(defaultIP, "tcp", 5000)
if err != nil {
t.Fatal(err)
}
@ -66,20 +62,20 @@ func TestReuseReleasedPort(t *testing.T) {
t.Fatalf("Expected port 5000 got %d", port)
}
if err := ReleasePort(defaultIP, "tcp", 5000); err != nil {
if err := p.ReleasePort(defaultIP, "tcp", 5000); err != nil {
t.Fatal(err)
}
port, err = RequestPort(defaultIP, "tcp", 5000)
port, err = p.RequestPort(defaultIP, "tcp", 5000)
if err != nil {
t.Fatal(err)
}
}
func TestReleaseUnreadledPort(t *testing.T) {
defer reset()
p := New()
port, err := RequestPort(defaultIP, "tcp", 5000)
port, err := p.RequestPort(defaultIP, "tcp", 5000)
if err != nil {
t.Fatal(err)
}
@ -87,7 +83,7 @@ func TestReleaseUnreadledPort(t *testing.T) {
t.Fatalf("Expected port 5000 got %d", port)
}
port, err = RequestPort(defaultIP, "tcp", 5000)
port, err = p.RequestPort(defaultIP, "tcp", 5000)
switch err.(type) {
case ErrPortAlreadyAllocated:
@ -97,18 +93,16 @@ func TestReleaseUnreadledPort(t *testing.T) {
}
func TestUnknowProtocol(t *testing.T) {
defer reset()
if _, err := RequestPort(defaultIP, "tcpp", 0); err != ErrUnknownProtocol {
if _, err := New().RequestPort(defaultIP, "tcpp", 0); err != ErrUnknownProtocol {
t.Fatalf("Expected error %s got %s", ErrUnknownProtocol, err)
}
}
func TestAllocateAllPorts(t *testing.T) {
defer reset()
p := New()
for i := 0; i <= endPortRange-beginPortRange; i++ {
port, err := RequestPort(defaultIP, "tcp", 0)
port, err := p.RequestPort(defaultIP, "tcp", 0)
if err != nil {
t.Fatal(err)
}
@ -118,21 +112,21 @@ func TestAllocateAllPorts(t *testing.T) {
}
}
if _, err := RequestPort(defaultIP, "tcp", 0); err != ErrAllPortsAllocated {
if _, err := p.RequestPort(defaultIP, "tcp", 0); err != ErrAllPortsAllocated {
t.Fatalf("Expected error %s got %s", ErrAllPortsAllocated, err)
}
_, err := RequestPort(defaultIP, "udp", 0)
_, err := p.RequestPort(defaultIP, "udp", 0)
if err != nil {
t.Fatal(err)
}
// release a port in the middle and ensure we get another tcp port
port := beginPortRange + 5
if err := ReleasePort(defaultIP, "tcp", port); err != nil {
if err := p.ReleasePort(defaultIP, "tcp", port); err != nil {
t.Fatal(err)
}
newPort, err := RequestPort(defaultIP, "tcp", 0)
newPort, err := p.RequestPort(defaultIP, "tcp", 0)
if err != nil {
t.Fatal(err)
}
@ -142,10 +136,10 @@ func TestAllocateAllPorts(t *testing.T) {
// now pm.last == newPort, release it so that it's the only free port of
// the range, and ensure we get it back
if err := ReleasePort(defaultIP, "tcp", newPort); err != nil {
if err := p.ReleasePort(defaultIP, "tcp", newPort); err != nil {
t.Fatal(err)
}
port, err = RequestPort(defaultIP, "tcp", 0)
port, err = p.RequestPort(defaultIP, "tcp", 0)
if err != nil {
t.Fatal(err)
}
@ -155,11 +149,11 @@ func TestAllocateAllPorts(t *testing.T) {
}
func BenchmarkAllocatePorts(b *testing.B) {
defer reset()
p := New()
for i := 0; i < b.N; i++ {
for i := 0; i <= endPortRange-beginPortRange; i++ {
port, err := RequestPort(defaultIP, "tcp", 0)
port, err := p.RequestPort(defaultIP, "tcp", 0)
if err != nil {
b.Fatal(err)
}
@ -168,21 +162,21 @@ func BenchmarkAllocatePorts(b *testing.B) {
b.Fatalf("Expected port %d got %d", expected, port)
}
}
reset()
p.ReleaseAll()
}
}
func TestPortAllocation(t *testing.T) {
defer reset()
p := New()
ip := net.ParseIP("192.168.0.1")
ip2 := net.ParseIP("192.168.0.2")
if port, err := RequestPort(ip, "tcp", 80); err != nil {
if port, err := p.RequestPort(ip, "tcp", 80); err != nil {
t.Fatal(err)
} else if port != 80 {
t.Fatalf("Acquire(80) should return 80, not %d", port)
}
port, err := RequestPort(ip, "tcp", 0)
port, err := p.RequestPort(ip, "tcp", 0)
if err != nil {
t.Fatal(err)
}
@ -190,41 +184,41 @@ func TestPortAllocation(t *testing.T) {
t.Fatalf("Acquire(0) should return a non-zero port")
}
if _, err := RequestPort(ip, "tcp", port); err == nil {
if _, err := p.RequestPort(ip, "tcp", port); err == nil {
t.Fatalf("Acquiring a port already in use should return an error")
}
if newPort, err := RequestPort(ip, "tcp", 0); err != nil {
if newPort, err := p.RequestPort(ip, "tcp", 0); err != nil {
t.Fatal(err)
} else if newPort == port {
t.Fatalf("Acquire(0) allocated the same port twice: %d", port)
}
if _, err := RequestPort(ip, "tcp", 80); err == nil {
if _, err := p.RequestPort(ip, "tcp", 80); err == nil {
t.Fatalf("Acquiring a port already in use should return an error")
}
if _, err := RequestPort(ip2, "tcp", 80); err != nil {
if _, err := p.RequestPort(ip2, "tcp", 80); err != nil {
t.Fatalf("It should be possible to allocate the same port on a different interface")
}
if _, err := RequestPort(ip2, "tcp", 80); err == nil {
if _, err := p.RequestPort(ip2, "tcp", 80); err == nil {
t.Fatalf("Acquiring a port already in use should return an error")
}
if err := ReleasePort(ip, "tcp", 80); err != nil {
if err := p.ReleasePort(ip, "tcp", 80); err != nil {
t.Fatal(err)
}
if _, err := RequestPort(ip, "tcp", 80); err != nil {
if _, err := p.RequestPort(ip, "tcp", 80); err != nil {
t.Fatal(err)
}
port, err = RequestPort(ip, "tcp", 0)
port, err = p.RequestPort(ip, "tcp", 0)
if err != nil {
t.Fatal(err)
}
port2, err := RequestPort(ip, "tcp", port+1)
port2, err := p.RequestPort(ip, "tcp", port+1)
if err != nil {
t.Fatal(err)
}
port3, err := RequestPort(ip, "tcp", 0)
port3, err := p.RequestPort(ip, "tcp", 0)
if err != nil {
t.Fatal(err)
}
@ -234,15 +228,15 @@ func TestPortAllocation(t *testing.T) {
}
func TestNoDuplicateBPR(t *testing.T) {
defer reset()
p := New()
if port, err := RequestPort(defaultIP, "tcp", beginPortRange); err != nil {
if port, err := p.RequestPort(defaultIP, "tcp", beginPortRange); err != nil {
t.Fatal(err)
} else if port != beginPortRange {
t.Fatalf("Expected port %d got %d", beginPortRange, port)
}
if port, err := RequestPort(defaultIP, "tcp", 0); err != nil {
if port, err := p.RequestPort(defaultIP, "tcp", 0); err != nil {
t.Fatal(err)
} else if port == beginPortRange {
t.Fatalf("Acquire(0) allocated the same port twice: %d", port)

View File

@ -24,6 +24,7 @@ type Mount struct {
Writable bool
copyData bool
from *Container
isBind bool
}
func (mnt *Mount) Export(resource string) (io.ReadCloser, error) {
@ -79,7 +80,7 @@ func (m *Mount) initialize() error {
if hostPath, exists := m.container.Volumes[m.MountToPath]; exists {
// If this is a bind-mount/volumes-from, maybe it was passed in at start instead of create
// We need to make sure bind-mounts/volumes-from passed on start can override existing ones.
if !m.volume.IsBindMount && m.from == nil {
if (!m.volume.IsBindMount && !m.isBind) && m.from == nil {
return nil
}
if m.volume.Path == hostPath {
@ -172,6 +173,7 @@ func (container *Container) parseVolumeMountConfig() (map[string]*Mount, error)
volume: vol,
MountToPath: mountToPath,
Writable: writable,
isBind: true, // in case the volume itself is a normal volume, but is being mounted in as a bindmount here
}
}

View File

@ -186,8 +186,9 @@ func mainDaemon() {
errAPI := <-serveAPIWait
// If we have an error here it is unique to API (as daemonErr would have
// exited the daemon process above)
if errAPI != nil {
log.Errorf("Shutting down due to ServeAPI error: %v", errAPI)
}
eng.Shutdown()
if errAPI != nil {
log.Fatalf("Shutting down due to ServeAPI error: %v", errAPI)
}
}

View File

@ -14,6 +14,7 @@ import (
"github.com/docker/docker/autogen/dockerversion"
flag "github.com/docker/docker/pkg/mflag"
"github.com/docker/docker/pkg/reexec"
"github.com/docker/docker/pkg/term"
"github.com/docker/docker/utils"
)
@ -29,6 +30,11 @@ func main() {
return
}
// Set terminal emulation based on platform as required.
stdin, stdout, stderr := term.StdStreams()
initLogging(stderr)
flag.Parse()
// FIXME: validate daemon flags here
@ -42,16 +48,16 @@ func main() {
if err != nil {
log.Fatalf("Unable to parse logging level: %s", *flLogLevel)
}
initLogging(lvl)
setLogLevel(lvl)
} else {
initLogging(log.InfoLevel)
setLogLevel(log.InfoLevel)
}
// -D, --debug, -l/--log-level=debug processing
// When/if -D is removed this block can be deleted
if *flDebug {
os.Setenv("DEBUG", "1")
initLogging(log.DebugLevel)
setLogLevel(log.DebugLevel)
}
if len(flHosts) == 0 {
@ -124,9 +130,9 @@ func main() {
}
if *flTls || *flTlsVerify {
cli = client.NewDockerCli(os.Stdin, os.Stdout, os.Stderr, *flTrustKey, protoAddrParts[0], protoAddrParts[1], &tlsConfig)
cli = client.NewDockerCli(stdin, stdout, stderr, *flTrustKey, protoAddrParts[0], protoAddrParts[1], &tlsConfig)
} else {
cli = client.NewDockerCli(os.Stdin, os.Stdout, os.Stderr, *flTrustKey, protoAddrParts[0], protoAddrParts[1], nil)
cli = client.NewDockerCli(stdin, stdout, stderr, *flTrustKey, protoAddrParts[0], protoAddrParts[1], nil)
}
if err := cli.Cmd(flag.Args()...); err != nil {

View File

@ -1,12 +1,14 @@
package main
import (
"os"
log "github.com/Sirupsen/logrus"
"io"
)
func initLogging(lvl log.Level) {
log.SetOutput(os.Stderr)
func setLogLevel(lvl log.Level) {
log.SetLevel(lvl)
}
func initLogging(stderr io.Writer) {
log.SetOutput(stderr)
}

View File

@ -1,156 +1,255 @@
# Docker Documentation
The source for Docker documentation is here under `sources/` and uses extended
Markdown, as implemented by [MkDocs](http://mkdocs.org).
The source for Docker documentation is in this directory under `sources/`. Our
documentation uses extended Markdown, as implemented by
[MkDocs](http://mkdocs.org). The current release of the Docker documentation
resides on [http://docs.docker.com](http://docs.docker.com).
The HTML files are built and hosted on
[http://docs.docker.com](http://docs.docker.com), and update automatically
after each change to the `docs` branch of [Docker on
GitHub](https://github.com/docker/docker) thanks to post-commit hooks.
## Understanding the documentation branches and processes
## Contributing
Be sure to follow the [contribution guidelines](../CONTRIBUTING.md).
In particular, [remember to sign your work!](../CONTRIBUTING.md#sign-your-work)
## Getting Started
Docker documentation builds are done in a Docker container, which installs all
the required tools, adds the local `docs/` directory and builds the HTML docs.
It then starts an HTTP server on port 8000 so that you can connect and see your
changes.
In the root of the `docker` source directory:
$ make docs
.... (lots of output) ....
docker run --rm -it -e AWS_S3_BUCKET -p 8000:8000 "docker-docs:master" mkdocs serve
Running at: http://0.0.0.0:8000/
Live reload enabled.
Hold ctrl+c to quit.
If you have any issues you need to debug, you can use `make docs-shell` and then
run `mkdocs serve`
## Testing the links
You can use `make docs-test` to generate a report of missing links that are referenced in
the documentation - there should be none.
## Adding a new document
New document (`.md`) files are added to the documentation builds by adding them
to the menu definition in the `docs/mkdocs.yml` file.
## Style guide
If you have questions about how to write for Docker's documentation (e.g.,
questions about grammar, syntax, formatting, styling, language, or tone) please
see the [style guide](sources/contributing/docs_style-guide.md). If something
isn't clear in the guide, please submit a PR to help us improve it.
## Working using GitHub's file editor
Alternatively, for small changes and typos you might want to use GitHub's
built-in file editor. It allows you to preview your changes right on-line (though
there can be some differences between GitHub Markdown and [MkDocs
Markdown](http://www.mkdocs.org/user-guide/writing-your-docs/)). Just be
careful not to create many commits. And you must still [sign your
work!](../CONTRIBUTING.md#sign-your-work)
## Branches
Docker has two primary branches for documentation:
| Branch | Description | URL (published via commit-hook) |
|----------|--------------------------------|------------------------------------------------------------------------------|
| `docs` | Official release documentation | [http://docs.docker.com](http://docs.docker.com) |
| `master` | Unreleased development work | [http://docs.master.dockerproject.com](http://docs.master.dockerproject.com) |
| `master` | Merged but unreleased development work | [http://docs.master.dockerproject.com](http://docs.master.dockerproject.com) |
**There are two branches related to editing docs**: `master` and `docs`. You
should always edit the documentation on a local branch of the `master` branch,
and send a PR against `master`. That way your fixes will automatically get
included in later releases, and docs maintainers can easily cherry-pick your
changes into the `docs` release branch. In the rare case where your change is
not forward-compatible, you may need to base your changes on the `docs` branch.
Additions and updates to upcoming releases are made in a feature branch off of
the `master` branch. The Docker maintainers also support a `docs` branch that
contains the last release of documentation.
Also, since there is a separate `docs` branch, we can keep
[http://docs.docker.com](http://docs.docker.com) up to date with any bugs found
between Docker code releases.
After a release, documentation updates are continually merged into `master` as
they occur. This work includes new documentation for forthcoming features, bug
fixes, and other updates. Docker's CI system automatically builds and updates
the `master` documentation after each merge and posts it to
[http://docs.master.dockerproject.com](http://docs.master.dockerproject.com).
## Publishing Documentation
Periodically, the Docker maintainers update `docs.docker.com` between official
releases of Docker. They do this by cherry-picking commits from `master`,
merging them into `docs`, and then publishing the result.
To publish a copy of the documentation you need to have Docker up and running on
your machine. You'll also need a `docs/awsconfig` file containing the settings
you need to access the AWS bucket you'll be deploying to.
In the rare case where a change is not forward-compatible, changes may be made
on other branches by special arrangement with the Docker maintainers.
The release script will create an S3 bucket if needed, and will then push the files to it.
### Quickstart for documentation contributors
[profile dowideit-docs]
aws_access_key_id = IHOIUAHSIDH234rwf....
aws_secret_access_key = OIUYSADJHLKUHQWIUHE......
region = ap-southeast-2
If you are a new or beginner contributor, we encourage you to read through
[our detailed contributors
guide](https://docs.docker.com/project/who-written-for/). The guide explains in
detail, with examples, how to contribute. If you are an experienced contributor,
this quickstart should be enough to get you started.
The `profile` name must be the same as the name of the bucket you are deploying
to - which you call from the `docker` directory:
The following is the essential workflow for contributing to the documentation:
make AWS_S3_BUCKET=dowideit-docs docs-release
1. Fork the `docker/docker` repository.
This will publish _only_ to the `http://bucket-url/v1.2/` version of the
documentation.
2. Clone the repository to your local machine.
If you're publishing the current release's documentation, you need to
also update the root docs pages by running
3. Select an issue from `docker/docker` to work on or submit a proposal of your
own.
make AWS_S3_BUCKET=dowideit-docs BUILD_ROOT=yes docs-release
4. Create a feature branch from `master` in which to work.
> **Note:**
> if you are using Boot2Docker on OSX and the above command returns an error,
> `Post http:///var/run/docker.sock/build?rm=1&t=docker-docs%3Apost-1.2.0-docs_update-2:
> dial unix /var/run/docker.sock: no such file or directory', you need to set the Docker
> host. Run `eval "$(boot2docker shellinit)"` to see the correct variable to set. The command
> will return the full `export` command, so you can just cut and paste.
By basing from `master` your work is automatically included in the next
release. It also allows docs maintainers to easily cherry-pick your changes
into the `docs` release branch.
5. Modify existing or add new `.md` files to the `docs/sources` directory.
If you add a new document (`.md`) file, you must also add it to the
appropriate section of the `docs/mkdocs.yml` file in this repository.
6. As you work, build the documentation site locally to see your changes.
The `docker/docker` repository contains a `Dockerfile` and a `Makefile`.
Together, these create a development environment in which you can build and
run a container running the Docker documentation website. To build the
documentation site, enter `make docs` at the root of your `docker/docker`
fork:
$ make docs
.... (lots of output) ....
docker run --rm -it -e AWS_S3_BUCKET -p 8000:8000 "docker-docs:master" mkdocs serve
Running at: http://0.0.0.0:8000/
Live reload enabled.
Hold ctrl+c to quit.
The build creates an image containing all the required tools, adds the local
`docs/` directory and generates the HTML files. Then, it runs a Docker
container with this image.
The container exposes port 8000 on the localhost so that you can connect and
see your changes. If you are running Boot2Docker, use the `boot2docker ip`
to get the address of your server.
7. Check your writing for style and mechanical errors.
Use our [documentation style
guide](https://docs.docker.com/project/doc-style/) to check style. There are
several [good grammar and spelling online
checkers](http://www.hemingwayapp.com/) that can check your writing
mechanics.
8. Squash your commits on your branch.
9. Make a pull request from your fork back to Docker's `master` branch.
10. Work with the reviewers until your change is approved and merged.
### Debugging and testing
If you have any issues you need to debug, you can use `make docs-shell` and then
run `mkdocs serve`. You can use `make docs-test` to generate a report of missing
links that are referenced in the documentation&mdash;there should be none.
## Style guide
If you have questions about how to write for Docker's documentation, please see
the [style guide](sources/project/doc-style.md). The style guide provides
guidance about grammar, syntax, formatting, styling, language, or tone. If
something isn't clear in the guide, please submit an issue to let us know or
submit a pull request to help us improve it.
## Publishing documentation (for Docker maintainers)
To publish Docker's documentation you need to have Docker up and running on your
machine. You'll also need a `docs/awsconfig` file containing the settings you
need to access the AWS bucket you'll be deploying to.
The process for publishing is to build first to an AWS bucket, verify the build,
and then publish the final release.
1. Have Docker installed and running on your machine.
2. Ask the core maintainers for the `awsconfig` file.
3. Copy the `awsconfig` file to the `docs/` directory.
The `awsconfig` file contains the profiles of the S3 buckets for our
documentation sites. (If needed, the release script creates an S3 bucket and
pushes the files to it.) Each profile has this format:
[profile dowideit-docs]
aws_access_key_id = IHOIUAHSIDH234rwf....
aws_secret_access_key = OIUYSADJHLKUHQWIUHE......
region = ap-southeast-2
The `profile` name must be the same as the name of the bucket you are
deploying to.
4. Call the `make` from the `docker` directory.
$ make AWS_S3_BUCKET=dowideit-docs docs-release
This publishes _only_ to the `http://bucket-url/v1.2/` version of the
documentation.
5. If you're publishing the current release's documentation, you need to also
update the root docs pages by running
$ make AWS_S3_BUCKET=dowideit-docs BUILD_ROOT=yes docs-release
### Errors publishing using Boot2Docker
Sometimes, in a Boot2Docker environment, the publishing procedure returns this
error:
Post http:///var/run/docker.sock/build?rm=1&t=docker-docs%3Apost-1.2.0-docs_update-2:
dial unix /var/run/docker.sock: no such file or directory.
If this happens, set the Docker host. Run the following command to set the
variables in your shell:
$ eval "$(boot2docker shellinit)"
## Cherry-picking documentation changes to update an existing release.
Whenever the core team makes a release, they publish the documentation based
on the `release` branch (which is copied into the `docs` branch). The
documentation team can make updates in the meantime, by cherry-picking changes
from `master` into any of the docs branches.
Whenever the core team makes a release, they publish the documentation based on
the `release` branch. At that time, the `release` branch is copied into the
`docs` branch. The documentation team makes updates between Docker releases by
cherry-picking changes from `master` into any of the documentation branches.
Typically, we cherry-pick into the `docs` branch.
For example, to update the current release's docs:
For example, to update the current release's docs, do the following:
git fetch upstream
git checkout -b post-1.2.0-docs-update-1 upstream/docs
# Then go through the Merge commit linked to PR's (making sure they apply
to that release)
# see https://github.com/docker/docker/commits/master
git cherry-pick -x fe845c4
# Repeat until you have cherry picked everything you will propose to be merged
git push upstream post-1.2.0-docs-update-1
1. Go to your `docker/docker` fork and get the latest from master.
Then make a pull request to merge into the `docs` branch, __NOT__ into master.
$ git fetch upstream
2. Checkout a new branch based on `upstream/docs`.
Once the PR has the needed `LGTM`s, merge it, then publish to our beta server
to test:
You should give your new branch a descriptive name.
git fetch upstream
git checkout docs
git reset --hard upstream/docs
make AWS_S3_BUCKET=beta-docs.docker.io BUILD_ROOT=yes docs-release
$ git checkout -b post-1.2.0-docs-update-1 upstream/docs
3. In a browser window, open [https://github.com/docker/docker/commits/master].
Then go to http://beta-docs.docker.io.s3-website-us-west-2.amazonaws.com/
to view your results and make sure what you published is what you wanted.
4. Locate the merges you want to publish.
When you're happy with it, publish the docs to our live site:
You should only cherry-pick individual commits; do not cherry-pick merge
commits. To minimize merge conflicts, start with the oldest commit and work
your way forward in time.
make AWS_S3_BUCKET=docs.docker.com BUILD_ROOT=yes DISTRIBUTION_ID=C2K6......FL2F docs-release
5. Copy the commit SHA from GitHub.
Test the uncached version of the live docs at http://docs.docker.com.s3-website-us-east-1.amazonaws.com/
6. Cherry-pick the commit.
$ git cherry-pick -x fe845c4
7. Repeat until you have cherry-picked everything you want to merge.
8. Push your changes to your fork.
$ git push origin post-1.2.0-docs-update-1
9. Make a pull request to merge into the `docs` branch.
Do __NOT__ merge into `master`.
10. Have maintainers review your pull request.
11. Once the PR has the needed "LGTMs", merge it on GitHub.
12. Return to your local fork and make sure you are still on the `docs` branch.
$ git checkout docs
13. Fetch your merged pull request from `docs`.
$ git fetch upstream docs
14. Ensure your branch is clean and set to the latest.
$ git reset --hard upstream/docs
Note that the new docs will not appear live on the site until the cache (a complex,
distributed CDN system) is flushed. The `make docs-release` command will do this
_if_ the `DISTRIBUTION_ID` is set to the Cloudfront distribution ID (ask the meta
team) - this will take at least 15 minutes to run and you can check its progress
with the CDN Cloudfront Chrome addin.
15. Copy the `awsconfig` file into the `docs` directory.
16. Make the beta documentation
$ make AWS_S3_BUCKET=beta-docs.docker.io BUILD_ROOT=yes docs-release
17. Open [the beta
website](http://beta-docs.docker.io.s3-website-us-west-2.amazonaws.com/)
and make sure what you published is correct.
18. When you're happy with your content, publish the docs to our live site:
$ make AWS_S3_BUCKET=docs.docker.com BUILD_ROOT=yes DISTRIBUTION_ID=C2K6......FL2F docs-release
19. Test the uncached version of the live docs at [http://docs.docker.com.s3-website-us-east-1.amazonaws.com/]
### Caching and the docs
New docs do not appear live on the site until the cache (a complex, distributed
CDN system) is flushed. The `make docs-release` command flushes the cache _if_
the `DISTRIBUTION_ID` is set to the Cloudfront distribution ID. The cache flush
can take at least 15 minutes to run and you can check its progress with the CDN
Cloudfront Purge Tool Chrome app.
## Removing files from the docs.docker.com site

View File

@ -273,8 +273,15 @@ A Dockerfile is similar to a Makefile.
**USER**
-- `USER daemon`
The **USER** instruction sets the username or UID that is used when running the
image.
Sets the username or UID used for running subsequent commands.
The **USER** instruction can optionally be used to set the group or GID. The
following examples are all valid:
USER [user | user:group | uid | uid:gid | user:gid | uid:group ]
Until the **USER** instruction is set, instructions will be run as root. The USER
instruction can be used any number of times in a Dockerfile, and will only affect
subsequent commands.
**WORKDIR**
-- `WORKDIR /path/to/workdir`

View File

@ -121,7 +121,7 @@ IMAGE [COMMAND] [ARG...]
**--lxc-conf**=[]
(lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
**--log-driver**="|*json-file*|*none*"
**--log-driver**="|*json-file*|*syslog*|*none*"
Logging driver for container. Default is defined by daemon `--log-driver` flag.
**Warning**: `docker logs` command works only for `json-file` logging driver.

View File

@ -17,6 +17,9 @@ Register or Login to a docker registry server, if no server is
specified "https://index.docker.io/v1/" is the default. If you want to
login to a private registry you can specify this by adding the server name.
This stores encoded credentials in `$HOME/.dockercfg` on Linux or `%USERPROFILE%/.dockercfg`
on Windows.
# OPTIONS
**-e**, **--email**=""
Email

View File

@ -222,7 +222,7 @@ which interface and port to use.
**--lxc-conf**=[]
(lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
**--log-driver**="|*json-file*|*none*"
**--log-driver**="|*json-file*|*syslog*|*none*"
Logging driver for container. Default is defined by daemon `--log-driver` flag.
**Warning**: `docker logs` command works only for `json-file` logging driver.
@ -341,7 +341,12 @@ The **-t** option is incompatible with a redirection of the docker client
standard input.
**-u**, **--user**=""
Username or UID
Sets the username or UID used and optionally the groupname or GID for the specified command.
The following examples are all valid:
--user [user | user:group | uid | uid:gid | user:gid | uid:group ]
Without this argument the command will be run as root in the container.
**-v**, **--volume**=[]
Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container)

View File

@ -89,7 +89,7 @@ unix://[/path/to/socket] to use.
**--label**="[]"
Set key=value labels to the daemon (displayed in `docker info`)
**--log-driver**="*json-file*|*none*"
**--log-driver**="*json-file*|*syslog*|*none*"
Container's logging driver. Default is `default`.
**Warning**: `docker logs` command works only for `json-file` logging driver.

View File

@ -45,6 +45,8 @@ pages:
- ['installation/google.md', 'Installation', 'Google Cloud Platform']
- ['installation/gentoolinux.md', 'Installation', 'Gentoo']
- ['installation/softlayer.md', 'Installation', 'IBM Softlayer']
- ['installation/joyent.md', 'Installation', 'Joyent Compute Service']
- ['installation/azure.md', 'Installation', 'Microsoft Azure']
- ['installation/rackspace.md', 'Installation', 'Rackspace Cloud']
- ['installation/rhel.md', 'Installation', 'Red Hat Enterprise Linux']
- ['installation/oracle.md', 'Installation', 'Oracle Linux']

View File

@ -63,13 +63,15 @@ public or private GitHub repositories with a `Dockerfile`.
### GitHub Submodules
If your GitHub repository contains links to private submodules, you'll
need to add a deploy key from your Docker Hub repository.
If your GitHub repository contains links to private submodules, you'll get an
error message in your build.
Your Docker Hub deploy key is located under the "Build Details"
menu on the Automated Build's main page in the Hub. Add this key
to your GitHub submodule by visiting the Settings page for the
repository on GitHub and selecting "Deploy keys".
Normally, the Docker Hub sets up a deploy key in your GitHub repository.
Unfortunately, GitHub only allows a repository deploy key to access a single repository.
To work around this, you need to create a dedicated user account in GitHub and attach
the automated build's deploy key to that account. This dedicated build account
can be limited to read-only access to just the repositories required to build.
<table class="table table-bordered">
<thead>
@ -82,15 +84,33 @@ repository on GitHub and selecting "Deploy keys".
<tbody>
<tr>
<td>1.</td>
<td><img src="/docker-hub/hub-images/deploy_key.png"></td>
<td>Your automated build's deploy key is in the "Build Details" menu
under "Deploy keys".</td>
<td><img src="/docker-hub/hub-images/gh_org_members.png"></td>
<td>First, create the new account in GitHub. It should be given read-only
access to the main repository and all submodules that are needed.</td>
</tr>
<tr>
<td>2.</td>
<td><img src="/docker-hub/hub-images/github_deploy_key.png"></td>
<td>In your GitHub submodule's repository Settings page, add the
deploy key from your Docker Hub Automated Build.</td>
<td><img src="/docker-hub/hub-images/gh_team_members.png"></td>
<td>This can be accomplished by adding the account to a read-only team in
the organization(s) where the main GitHub repository and all submodule
repositories are kept.</td>
</tr>
<tr>
<td>3.</td>
<td><img src="/docker-hub/hub-images/gh_repo_deploy_key.png"></td>
<td>Next, remove the deploy key from the main GitHub repository. This can be done in the GitHub repository's "Deploy keys" Settings section.</td>
</tr>
<tr>
<td>4.</td>
<td><img src="/docker-hub/hub-images/deploy_key.png"></td>
<td>Your automated build's deploy key is in the "Build Details" menu
under "Deploy keys".</td>
</tr>
<tr>
<td>5.</td>
<td><img src="/docker-hub/hub-images/gh_add_ssh_user_key.png"></td>
<td>In your dedicated GitHub User account, add the deploy key from your
Docker Hub Automated Build.</td>
</tr>
</tbody>
</table>

Binary file not shown. (image added, 37 KiB)
Binary file not shown. (image added, 26 KiB)
Binary file not shown. (image added, 32 KiB)
Binary file not shown. (image added, 35 KiB)
Binary file not shown. (image removed, 20 KiB)

View File

@ -23,4 +23,5 @@ techniques for installing Docker all the time.
- [Amazon EC2](amazon/)
- [Rackspace Cloud](rackspace/)
- [Google Cloud Platform](google/)
- [Joyent Compute Service](joyent/)
- [Binaries](binaries/)

View File

@ -0,0 +1,27 @@
page_title: Installation on Microsoft Azure Platform
page_description: Instructions for creating a Docker-ready virtual machine on Microsoft Azure cloud platform.
page_keywords: Docker, Docker documentation, installation, azure, microsoft
# Microsoft Azure
## Creating a Docker host machine on Azure
Please check out the following detailed tutorials on the [Microsoft Azure][0]
website to find out different ways to create a Docker-ready Linux virtual
machine on Azure:
* [Docker Virtual Machine Extensions on Azure][1]
* [How to use the Docker VM Extension from Azure Cross-Platform Interface][2]
* [How to use the Docker VM Extension with the Azure Portal][3]
* [Using Docker Machine with Azure][4]
## What next?
Continue with the [User Guide](/userguide/).
[0]: http://azure.microsoft.com/
[1]: http://azure.microsoft.com/en-us/documentation/articles/virtual-machines-docker-vm-extension/
[2]: http://azure.microsoft.com/documentation/articles/virtual-machines-docker-with-xplat-cli/
[3]: http://azure.microsoft.com/documentation/articles/virtual-machines-docker-with-portal/
[4]: http://azure.microsoft.com/en-us/documentation/articles/virtual-machines-docker-machine/

View File

@ -0,0 +1,23 @@
page_title: Install on Joyent Public Cloud
page_description: Installation instructions for Docker on the Joyent Compute Service.
page_keywords: Docker, Docker documentation, installation, joyent, Joyent Public Cloud, Joyent Compute Service, Joyent Container Service
## Install on Joyent Public Cloud
1. Sign in to the [Joyent customer portal](https://my.joyent.com/)
2. [Create a Docker host](https://docs.joyent.com/jpc/managing-docker-containers/creating-a-docker-host).
## Start and manage containers
1. [Start containers in the web UI](https://docs.joyent.com/jpc/managing-docker-containers/starting-a-container)
2. [Configure the Docker CLI on your laptop](https://docs.joyent.com/jpc/managing-docker-containers/access-your-jpc-docker-hosts-from-the-docker-cli) to connect to the remote host to launch and manage containers.
3. SSH into the Docker host.
4. Launch containers using the Docker CLI.
## Where to go next
Continue with the [Docker user guide](/userguide/), read Joyent's [getting started blog post](https://www.joyent.com/blog/first-steps-with-joyents-container-service), and [full documentation](https://docs.joyent.com/jpc/managing-docker-containers).

View File

@ -4,6 +4,20 @@ page_keywords: Docker, Docker documentation, requirements, boot2docker, VirtualB
# Install Docker on Mac OS X
You can install Docker using Boot2Docker to run `docker` commands at your command-line.
Choose this installation if you are familiar with the command-line or plan to
contribute to the Docker project on GitHub.
Alternatively, you may want to try <a id="inlinelink" href="https://kitematic.com/"
target="_blank">Kitematic</a>, an application that lets you set up Docker and
run containers using a graphical user interface (GUI).
<a id="graphic" href="https://kitematic.com/" target="_blank"><img
src="/installation/images/kitematic.png" alt="Download Kitematic"></a>
## Command-line Docker with Boot2Docker
Because the Docker daemon uses Linux-specific kernel features, you can't run
Docker natively in OS X. Instead, you must install the Boot2Docker application.
The application includes a VirtualBox Virtual Machine (VM), Docker itself, and the
@ -17,16 +31,7 @@ completely from RAM, is a small ~24MB download, and boots in approximately 5s.
Your Mac must be running OS X 10.6 "Snow Leopard" or newer to run Boot2Docker.
## How do you want to work with Docker?
You can set up Docker using the command line with Boot2Docker and the guide
below. Alternatively, you may want to try <a href="https://kitematic.com/" target="_blank">Kitematic</a>,
an application that lets you set up Docker and run containers using a graphical
user interface (GUI).
<a href="https://kitematic.com/" target="_blank"><img src="/installation/images/kitematic.png" alt="Download Kitematic"></a>
## Learn the key concepts before installing
### Learn the key concepts before installing
In a Docker installation on Linux, your machine is both the localhost and the
Docker host. In networking, localhost means your computer. The Docker host is
@ -50,7 +55,7 @@ When you start the `boot2docker` process, the VM is assigned an IP address. Unde
practice, work through the exercises on this page.
## Install Boot2Docker
### Install Boot2Docker
1. Go to the [boot2docker/osx-installer ](
https://github.com/boot2docker/osx-installer/releases/latest) release page.
@ -319,4 +324,4 @@ at [Boot2Docker repository](https://github.com/boot2docker/boot2docker).
Thanks to Chris Jones whose [blog](http://goo.gl/Be6cCk) inspired me to redo
this page.
Continue with the [Docker User Guide](/userguide/).
Continue with the [Docker User Guide](/userguide/).

View File

@ -1,6 +1,6 @@
page_title: Create a pull request (PR)
page_description: Basic workflow for Docker contributions
page_keywords: contribute, pull request, review, workflow, white-belt, black-belt, squash, commit
page_keywords: contribute, pull request, review, workflow, beginner, squash, commit
# Create a pull request (PR)

View File

@ -1,6 +1,6 @@
page_title: Make a project contribution
page_description: Basic workflow for Docker contributions
page_keywords: contribute, pull request, review, workflow, white-belt, black-belt, squash, commit
page_keywords: contribute, pull request, review, workflow, beginner, expert, squash, commit
<style type="text/css">
@ -8,7 +8,7 @@ page_keywords: contribute, pull request, review, workflow, white-belt, black-bel
.gh-label {
display: inline-block;
padding: 3px 4px;
font-size: 11px;
font-size: 12px;
font-weight: bold;
line-height: 1;
color: #fff;
@ -16,11 +16,25 @@ page_keywords: contribute, pull request, review, workflow, white-belt, black-bel
box-shadow: inset 0 -1px 0 rgba(0,0,0,0.12);
}
.gh-label.black-belt { background-color: #000000; color: #ffffff; }
.gh-label.bug { background-color: #fc2929; color: #ffffff; }
.gh-label.improvement { background-color: #bfe5bf; color: #2a332a; }
.gh-label.project-doc { background-color: #207de5; color: #ffffff; }
.gh-label.white-belt { background-color: #ffffff; color: #333333; }
/* Experience */
.gh-label.beginner { background-color: #B5E0B5; color: #333333; }
.gh-label.expert { background-color: #599898; color: #ffffff; }
.gh-label.master { background-color: #306481; color: #ffffff; }
.gh-label.novice { background-color: #D6F2AC; color: #333333; }
.gh-label.proficient { background-color: #8DC7A9; color: #333333; }
/* Kind */
.gh-label.bug { background-color: #FF9DA4; color: #333333; }
.gh-label.cleanup { background-color: #FFB7B3; color: #333333; }
.gh-label.content { background-color: #CDD3C2; color: #333333; }
.gh-label.feature { background-color: #B7BEB7; color: #333333; }
.gh-label.graphics { background-color: #E1EFCB; color: #333333; }
.gh-label.improvement { background-color: #EBD2BB; color: #333333; }
.gh-label.proposal { background-color: #FFD9C0; color: #333333; }
.gh-label.question { background-color: #EEF1D1; color: #333333; }
.gh-label.usecase { background-color: #F0E4C2; color: #333333; }
.gh-label.writing { background-color: #B5E9D5; color: #333333; }
</style>
@ -37,20 +51,44 @@ An existing issue is something reported by a Docker user. As issues come in,
our maintainers triage them. Triage is its own topic. For now, it is important
for you to know that triage includes ranking issues according to difficulty.
Triaged issues have either a <strong class="gh-label white-belt">white-belt</strong>
or <strong class="gh-label black-belt">black-belt</strong> label.
A <strong class="gh-label white-belt">white-belt</strong> issue is considered
an easier issue. Issues can have more than one label, for example,
<strong class="gh-label bug">bug</strong>,
<strong class="gh-label improvement">improvement</strong>,
<strong class="gh-label project-doc">project/doc</strong>, and so forth.
These other labels are there for filtering purposes but you might also find
them helpful.
Triaged issues have one of these labels:
<table class="tg">
<tr>
<td class="tg-031e">Level</td>
<td class="tg-031e">Experience level guideline</td>
</tr>
<tr>
<td class="tg-031e"><strong class="gh-label beginner">exp/beginner</strong></td>
<td class="tg-031e">You have made less than 10 contributions in your life time to any open source project.</td>
</tr>
<tr>
<td class="tg-031e"><strong class="gh-label novice">exp/novice</strong></td>
<td class="tg-031e">You have made more than 10 contributions to an open source project or at least 5 contributions to Docker. </td>
</tr>
<tr>
<td class="tg-031e"><strong class="gh-label proficient">exp/proficient</strong></td>
<td class="tg-031e">You have made more than 5 contributions to Docker which amount to at least 200 code lines or 1000 documentation lines. </td>
</tr>
<tr>
<td class="tg-031e"><strong class="gh-label expert">exp/expert</strong></td>
<td class="tg-031e">You have made less than 20 commits to Docker which amount to 500-1000 code lines or 1000-3000 documentation lines. </td>
</tr>
<tr>
<td class="tg-031e"><strong class="gh-label master">exp/master</strong></td>
<td class="tg-031e">You have made more than 20 commits to Docker and greater than 1000 code lines or 3000 documentation lines.</td>
</tr>
</table>
## Claim a white-belt issue
As the table states, these labels are meant as guidelines. You might have
written a whole plugin for Docker in a personal project and never contributed to
Docker. With that kind of experience, you could take on an <strong
class="gh-label expert">exp/expert</strong> or <strong class="gh-label
master">exp/master</strong> level task.
In this section, you find and claim an open white-belt issue.
## Claim a beginner or novice issue
In this section, you find and claim an open beginner or novice issue.
1. Go to the `docker/docker` <a
@ -62,11 +100,11 @@ In this section, you find and claim an open white-belt issue.
![Open issues](/project/images/issue_list.png)
3. Look for the <strong class="gh-label white-belt">white-belt</strong> items on the list.
3. Look for the <strong class="gh-label beginner">exp/beginner</strong> items on the list.
4. Click on the "labels" dropdown and select <strong class="gh-label white-belt">white-belt</strong>.
4. Click on the "labels" dropdown and select <strong class="gh-label beginner">exp/beginner</strong>.
The system filters to show only open <strong class="gh-label white-belt">white-belt</strong> issues.
The system filters to show only open <strong class="gh-label beginner">exp/beginner</strong> issues.
5. Open an issue that interests you.
@ -75,21 +113,18 @@ In this section, you find and claim an open white-belt issue.
6. Make sure that no other user has chosen to work on the issue.
We don't allow external contributors to assign issues to themselves, so you
need to read the comments to find if a user claimed an issue by saying:
- "I'd love to give this a try~"
- "I'll work on this!"
- "I'll take this."
The community is very good about claiming issues explicitly.
We don't allow external contributors to assign issues to themselves. So, you
need to read the comments to find if a user claimed the issue by leaving a
`#dibs` comment on the issue.
7. When you find an open issue that both interests you and is unclaimed, claim it yourself by adding a comment.
7. When you find an open issue that both interests you and is unclaimed, add a
`#dibs` comment.
![Easy issue](/project/images/easy_issue.png)
This example uses issue 11038. Your issue # will be different depending on
what you claimed.
what you claimed. After a moment, Gordon, the Docker bot, changes the issue
status to claimed.
8. Make a note of the issue number; you'll need it later.
@ -131,7 +166,7 @@ To sync your repository:
5. Fetch all the changes from the `upstream/master` branch.
$ git fetch upstream/master
$ git fetch upstream
remote: Counting objects: 141, done.
remote: Compressing objects: 100% (29/29), done.
remote: Total 141 (delta 52), reused 46 (delta 46), pack-reused 66

Binary file not shown. (image changed, 43 KiB before, 112 KiB after)

View File

@ -16,7 +16,7 @@ process simple so you'll want to contribute frequently.
## The basic contribution workflow
In this guide, you work through Docker's basic contribution workflow by fixing a
single *white-belt* issue in the `docker/docker` repository. The workflow
single *beginner* issue in the `docker/docker` repository. The workflow
for fixing simple issues looks like this:
![Simple process](/project/images/existing_issue.png)

View File

@ -1,6 +1,6 @@
page_title: Participate in the PR Review
page_description: Basic workflow for Docker contributions
page_keywords: contribute, pull request, review, workflow, white-belt, black-belt, squash, commit
page_keywords: contribute, pull request, review, workflow, beginner, squash, commit
# Participate in the PR Review
@ -117,8 +117,7 @@ see the GitHub help on deleting branches</a>.
## Where to go next
At this point, you have completed all the basic tasks in our contributors guide.
If you enjoyed contributing, let us know by completing another
<strong class="gh-label white-belt">white-belt</strong>
If you enjoyed contributing, let us know by completing another beginner
issue or two. We really appreciate the help.
If you are very experienced and want to make a major change, go on to

View File

@ -15,7 +15,7 @@ You use the `docker` repository and its `Dockerfile` to create a Docker image,
run a Docker container, and develop code in the container. Docker itself builds,
tests, and releases new Docker versions using this container.
If you followed the procedures that <a href="./set-up-prereqs" target="_blank">
If you followed the procedures that <a href="./software-required" target="_blank">
set up the prerequisites</a>, you should have a fork of the `docker/docker`
repository. You also created a branch called `dry-run-test`. In this section,
you continue working with your fork on this branch.

View File

@ -138,7 +138,7 @@ As you change code in your fork, you make your changes on a repository branch.
The branch name should reflect what you are working on. In this section, you
create a branch, make a change, and push it up to your fork.
This branch is just for testing your config for this guide. The changes arepart
This branch is just for testing your config for this guide. The changes are part
of a dry run so the branch name is going to be dry-run-test. To create and push
the branch to your fork on GitHub:

View File

@ -169,7 +169,7 @@ To run the same test inside your Docker development container, you do this:
root@5f8630b873fe:/go/src/github.com/docker/docker# TESTFLAGS='-run ^TestBuild$' hack/make.sh
## If test under Boot2Docker fail do to space errors
## If tests under Boot2Docker fail due to disk space errors
Running the tests requires about 2GB of memory. If you are running your
container on bare metal, that is you are not running with Boot2Docker, your

View File

@ -1,6 +1,6 @@
page_title: Work on your issue
page_description: Basic workflow for Docker contributions
page_keywords: contribute, pull request, review, workflow, white-belt, black-belt, squash, commit
page_keywords: contribute, pull request, review, workflow, beginner, squash, commit
# Work on your issue

View File

@ -76,6 +76,11 @@ Builds can now set resource constraints for all containers created for the build
(`CgroupParent`) can be passed in the host config to setup container cgroups under a specific cgroup.
`POST /build`
**New!**
Closing the HTTP request will now cause the build to be canceled.
## v1.17
### Full Documentation

View File

@ -259,7 +259,7 @@ Json Parameters:
`Ulimits: { "Name": "nofile", "Soft": 1024, "Hard", 2048 }}`
- **LogConfig** - Logging configuration to container, format
`{ "Type": "<driver_name>", "Config": {"key1": "val1"}}
Available types: `json-file`, `none`.
Available types: `json-file`, `syslog`, `none`.
**Warning**: `docker logs` command works only for `json-file` logging driver.
- **CgroupParent** - Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist.
@ -1144,6 +1144,9 @@ The archive may include any number of other files,
which will be accessible in the build context (See the [*ADD build
command*](/reference/builder/#dockerbuilder)).
The build will also be canceled if the client drops the connection by quitting
or being killed.
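For illustration, here is a minimal Go sketch of that behavior from the client side. The daemon address, API path, image tag, and `context.tar` file are assumptions for the example, not values taken from this document; the point is that dropping the connection mid-request cancels the build.

package main

import (
	"log"
	"net/http"
	"os"
	"time"
)

func main() {
	// Hypothetical tar archive of the build context.
	buildContext, err := os.Open("context.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer buildContext.Close()

	// Assumes a daemon listening on tcp://localhost:2375.
	req, err := http.NewRequest("POST", "http://localhost:2375/build?t=cancel-demo", buildContext)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Content-Type", "application/tar")

	tr := &http.Transport{}
	client := &http.Client{Transport: tr}

	// Drop the connection after five seconds; the daemon treats the closed
	// request as a cancellation and kills the in-flight build.
	go func() {
		time.Sleep(5 * time.Second)
		tr.CancelRequest(req)
	}()

	if _, err := client.Do(req); err != nil {
		log.Printf("build ended early, as expected: %v", err)
	}
}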
Query Parameters:
- **dockerfile** - path within the build context to the Dockerfile. This is

View File

@ -146,6 +146,17 @@ The instructions that handle environment variables in the `Dockerfile` are:
`ONBUILD` instructions are **NOT** supported for environment replacement, even
for the instructions above.
Environment variable substitution will use the same value for each variable
throughout the entire command. In other words, in this example:
ENV abc=hello
ENV abc=bye def=$abc
ENV ghi=$abc
will result in `def` having a value of `hello`, not `bye`. However,
`ghi` will have a value of `bye` because it is not part of the same command
that set `abc` to `bye`.
## The `.dockerignore` file
If a file named `.dockerignore` exists in the source repository, then it

View File

@ -599,6 +599,12 @@ in cases where the same set of files are used for multiple builds. The path
must be to a file within the build context. If a relative path is specified
then it must be relative to the current directory.
If the Docker client loses connection to the daemon, the build is canceled.
This happens if you interrupt the Docker client with `ctrl-c` or if the Docker
client is killed for any reason.
> **Note:** Currently only the "run" phase of the build can be canceled until
> pull cancelation is implemented).
See also:
@ -1514,14 +1520,6 @@ just a specific mapping:
$ sudo docker port test 7890
0.0.0.0:4321
## rename
Usage: docker rename OLD_NAME NEW_NAME
rename an existing container to a NEW_NAME
The `docker rename` command allows the container to be renamed to a different name.
## ps
Usage: docker ps [OPTIONS]
@ -1617,6 +1615,14 @@ use `docker pull`:
Use `docker push` to share your images to the [Docker Hub](https://hub.docker.com)
registry or to a self-hosted one.
## rename
Usage: docker rename OLD_NAME NEW_NAME
rename an existing container to a NEW_NAME
The `docker rename` command allows the container to be renamed to a different name.
## restart
Usage: docker restart [OPTIONS] CONTAINER [CONTAINER...]
@ -1655,6 +1661,8 @@ containers removing all network communication.
The main process inside the container referenced under the link `/redis` will receive
`SIGKILL`, then the container will be removed.
$ docker rm $(docker ps -a -q)
This command will delete all stopped containers. The command `docker ps
-a -q` will return all existing container IDs and pass them to the `rm`
command which will delete them. Any running containers will not be

View File

@ -102,9 +102,10 @@ specify to which of the three standard streams (`STDIN`, `STDOUT`,
$ sudo docker run -a stdin -a stdout -i -t ubuntu /bin/bash
For interactive processes (like a shell), you must use `-i -t` together in
order to allocate a tty for the container process. Specifying `-t` is however
forbidden when the client standard output is redirected or pipe, such as in:
`echo test | docker run -i busybox cat`.
order to allocate a tty for the container process. `-i -t` is often written `-it`
as you'll see in later examples. Specifying `-t` is forbidden when the client
standard output is redirected or piped, such as in:
`echo test | sudo docker run -i busybox cat`.
## Container identification
@ -289,7 +290,7 @@ running the `redis-cli` command and connecting to the Redis server over the
$ sudo docker run -d --name redis example/redis --bind 127.0.0.1
$ # use the redis container's network stack to access localhost
$ sudo docker run --rm -ti --net container:redis example/redis-cli -h 127.0.0.1
$ sudo docker run --rm -it --net container:redis example/redis-cli -h 127.0.0.1
### Managing /etc/hosts
@ -297,7 +298,7 @@ Your container will have lines in `/etc/hosts` which define the hostname of the
container itself as well as `localhost` and a few other common things. The
`--add-host` flag can be used to add additional lines to `/etc/hosts`.
$ /docker run -ti --add-host db-static:86.75.30.9 ubuntu cat /etc/hosts
$ sudo docker run -it --add-host db-static:86.75.30.9 ubuntu cat /etc/hosts
172.17.0.22 09d03f76bf2c
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
@ -656,6 +657,11 @@ this driver.
Default logging driver for Docker. Writes JSON messages to file. The `docker logs`
command is available only for this logging driver.
## Logging driver: syslog
Syslog logging driver for Docker. Writes log messages to syslog. The `docker logs`
command is not available for this logging driver.
## Overriding Dockerfile image defaults
When a developer builds an image from a [*Dockerfile*](/reference/builder)

View File

@ -52,6 +52,27 @@ This will create a new volume inside a container at `/webapp`.
> You can also use the `VOLUME` instruction in a `Dockerfile` to add one or
> more new volumes to any container created from that image.
### Locating a volume
You can locate the volume on the host by using the `docker inspect` command.
$ docker inspect web
The output provides details on the container's configuration, including its
volumes. The output should look similar to the following:
...
"Volumes": {
"/webapp": "/var/lib/docker/volumes/fac362...80535"
},
"VolumesRW": {
"/webapp": true
}
...
You will notice in the output above that `Volumes` specifies the location on the
host and `VolumesRW` specifies whether the volume is read/write.
### Mount a Host Directory as a Data Volume
In addition to creating a volume using the `-v` flag you can also mount a

View File

@ -298,7 +298,7 @@ and won't need it again. So let's remove it using the `docker rm` command.
Error: Impossible to remove a running container, please stop it first or use -f
2014/05/24 08:12:56 Error: failed to remove one or more containers
What's happened? We can't actually remove a running container. This protects
What happened? We can't actually remove a running container. This protects
you from accidentally removing a running container you might need. Let's try
this again by stopping the container first.

View File

@ -46,18 +46,19 @@ func unregister(name string) {
// It acts as a store for *containers*, and allows manipulation of these
// containers by executing *jobs*.
type Engine struct {
handlers map[string]Handler
catchall Handler
hack Hack // data for temporary hackery (see hack.go)
id string
Stdout io.Writer
Stderr io.Writer
Stdin io.Reader
Logging bool
tasks sync.WaitGroup
l sync.RWMutex // lock for shutdown
shutdown bool
onShutdown []func() // shutdown handlers
handlers map[string]Handler
catchall Handler
hack Hack // data for temporary hackery (see hack.go)
id string
Stdout io.Writer
Stderr io.Writer
Stdin io.Reader
Logging bool
tasks sync.WaitGroup
l sync.RWMutex // lock for shutdown
shutdownWait sync.WaitGroup
shutdown bool
onShutdown []func() // shutdown handlers
}
func (eng *Engine) Register(name string, handler Handler) error {
@ -123,6 +124,8 @@ func (eng *Engine) Job(name string, args ...string) *Job {
Stderr: NewOutput(),
env: &Env{},
closeIO: true,
cancelled: make(chan struct{}),
}
if eng.Logging {
job.Stderr.Add(ioutils.NopWriteCloser(eng.Stderr))
@ -143,6 +146,7 @@ func (eng *Engine) Job(name string, args ...string) *Job {
func (eng *Engine) OnShutdown(h func()) {
eng.l.Lock()
eng.onShutdown = append(eng.onShutdown, h)
eng.shutdownWait.Add(1)
eng.l.Unlock()
}
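As a usage sketch, assuming the package's `New()` constructor, this is how a hook participates in the new `shutdownWait` group; `Shutdown()` runs the hook, and concurrent `Shutdown()` callers now block until every hook has finished:

package main

import (
	"os"

	"github.com/docker/docker/engine"
)

func main() {
	eng := engine.New()

	// f stands in for any resource whose lifetime should end at shutdown.
	f, err := os.Open("/dev/null")
	if err != nil {
		panic(err)
	}

	// Each OnShutdown call does shutdownWait.Add(1); the matching Done()
	// happens when the hook returns during Shutdown().
	eng.OnShutdown(func() {
		f.Close()
	})

	eng.Shutdown()
}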
@ -156,6 +160,7 @@ func (eng *Engine) Shutdown() {
eng.l.Lock()
if eng.shutdown {
eng.l.Unlock()
eng.shutdownWait.Wait()
return
}
eng.shutdown = true
@ -180,17 +185,15 @@ func (eng *Engine) Shutdown() {
// Call shutdown handlers, if any.
// Timeout after 10 seconds.
var wg sync.WaitGroup
for _, h := range eng.onShutdown {
wg.Add(1)
go func(h func()) {
defer wg.Done()
h()
eng.shutdownWait.Done()
}(h)
}
done := make(chan struct{})
go func() {
wg.Wait()
eng.shutdownWait.Wait()
close(done)
}()
select {

View File

@ -3,12 +3,24 @@ package engine
import (
"bytes"
"encoding/json"
"math/rand"
"testing"
"time"
"github.com/docker/docker/pkg/testutils"
)
const chars = "abcdefghijklmnopqrstuvwxyz" +
"ABCDEFGHIJKLMNOPQRSTUVWXYZ" +
"~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` "
// RandomString returns a random string of the specified length
func RandomString(length int) string {
res := make([]byte, length)
for i := 0; i < length; i++ {
res[i] = chars[rand.Intn(len(chars))]
}
return string(res)
}
func TestEnvLenZero(t *testing.T) {
env := &Env{}
if env.Len() != 0 {
@ -185,7 +197,7 @@ func TestMultiMap(t *testing.T) {
func testMap(l int) [][2]string {
res := make([][2]string, l)
for i := 0; i < l; i++ {
t := [2]string{testutils.RandomString(5), testutils.RandomString(20)}
t := [2]string{RandomString(5), RandomString(20)}
res[i] = t
}
return res

View File

@ -5,6 +5,7 @@ import (
"fmt"
"io"
"strings"
"sync"
"time"
log "github.com/Sirupsen/logrus"
@ -34,6 +35,12 @@ type Job struct {
status Status
end time.Time
closeIO bool
// When closed, the job has been cancelled.
// Note: not all jobs implement cancellation.
// See Job.Cancel() and Job.WaitCancelled()
cancelled chan struct{}
cancelOnce sync.Once
}
type Status int
@ -248,3 +255,15 @@ func (job *Job) StatusCode() int {
func (job *Job) SetCloseIO(val bool) {
job.closeIO = val
}
// When called, causes the Job.WaitCancelled channel to unblock.
func (job *Job) Cancel() {
job.cancelOnce.Do(func() {
close(job.cancelled)
})
}
// Returns a channel which is closed ("never blocks") when the job is cancelled.
func (job *Job) WaitCancelled() <-chan struct{} {
return job.cancelled
}
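A hypothetical handler built on this API might look like the sketch below; `waitHandler` and the `wait` job name are illustrative, while `Register`, `Job`, `Run`, `Cancel`, and the status constants are the engine's existing surface:

package main

import (
	"time"

	"github.com/docker/docker/engine"
)

// waitHandler does periodic work but stops as soon as the job is cancelled,
// e.g. because the client hung up.
func waitHandler(job *engine.Job) engine.Status {
	done := time.After(30 * time.Second)
	for {
		select {
		case <-job.WaitCancelled():
			// Cancel() closed the channel; abandon the remaining work.
			return engine.StatusErr
		case <-done:
			return engine.StatusOK
		case <-time.After(time.Second):
			// ... one unit of cancellable work per tick ...
		}
	}
}

func main() {
	eng := engine.New()
	eng.Register("wait", waitHandler)

	job := eng.Job("wait")
	// Simulate a client disconnect two seconds in.
	go func() {
		time.Sleep(2 * time.Second)
		job.Cancel()
	}()
	job.Run()
}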

View File

@ -6,8 +6,10 @@ import (
"fmt"
log "github.com/Sirupsen/logrus"
"github.com/docker/distribution/digest"
"github.com/docker/docker/engine"
"github.com/docker/docker/registry"
"github.com/docker/docker/utils"
"github.com/docker/libtrust"
)
@ -16,7 +18,7 @@ import (
// contains no signatures by a trusted key for the name in the manifest, the
// image is not considered verified. The parsed manifest object and a boolean
// for whether the manifest is verified is returned.
func (s *TagStore) loadManifest(eng *engine.Engine, manifestBytes []byte) (*registry.ManifestData, bool, error) {
func (s *TagStore) loadManifest(eng *engine.Engine, manifestBytes []byte, dgst, ref string) (*registry.ManifestData, bool, error) {
sig, err := libtrust.ParsePrettySignature(manifestBytes, "signatures")
if err != nil {
return nil, false, fmt.Errorf("error parsing payload: %s", err)
@ -32,6 +34,31 @@ func (s *TagStore) loadManifest(eng *engine.Engine, manifestBytes []byte) (*regi
return nil, false, fmt.Errorf("error retrieving payload: %s", err)
}
var manifestDigest digest.Digest
if dgst != "" {
manifestDigest, err = digest.ParseDigest(dgst)
if err != nil {
return nil, false, fmt.Errorf("invalid manifest digest from registry: %s", err)
}
dgstVerifier, err := digest.NewDigestVerifier(manifestDigest)
if err != nil {
return nil, false, fmt.Errorf("unable to verify manifest digest from registry: %s", err)
}
dgstVerifier.Write(payload)
if !dgstVerifier.Verified() {
computedDigest, _ := digest.FromBytes(payload)
return nil, false, fmt.Errorf("unable to verify manifest digest: registry has %q, computed %q", manifestDigest, computedDigest)
}
}
if utils.DigestReference(ref) && ref != manifestDigest.String() {
return nil, false, fmt.Errorf("mismatching image manifest digest: got %q, expected %q", manifestDigest, ref)
}
var manifest registry.ManifestData
if err := json.Unmarshal(payload, &manifest); err != nil {
return nil, false, fmt.Errorf("error unmarshalling manifest: %s", err)

View File

@ -74,7 +74,7 @@ func (s *TagStore) CmdPull(job *engine.Job) engine.Status {
logName = utils.ImageReference(logName, tag)
}
if len(repoInfo.Index.Mirrors) == 0 && ((repoInfo.Official && repoInfo.Index.Official) || endpoint.Version == registry.APIVersion2) {
if len(repoInfo.Index.Mirrors) == 0 && (repoInfo.Index.Official || endpoint.Version == registry.APIVersion2) {
if repoInfo.Official {
j := job.Eng.Job("trust_update_base")
if err = j.Run(); err != nil {
@ -430,12 +430,15 @@ func (s *TagStore) pullV2Repository(eng *engine.Engine, r *registry.Session, out
func (s *TagStore) pullV2Tag(eng *engine.Engine, r *registry.Session, out io.Writer, endpoint *registry.Endpoint, repoInfo *registry.RepositoryInfo, tag string, sf *utils.StreamFormatter, parallel bool, auth *registry.RequestAuthorization) (bool, error) {
log.Debugf("Pulling tag from V2 registry: %q", tag)
manifestBytes, manifestDigest, err := r.GetV2ImageManifest(endpoint, repoInfo.RemoteName, tag, auth)
if err != nil {
return false, err
}
manifest, verified, err := s.loadManifest(eng, manifestBytes)
// loadManifest ensures that the manifest payload has the expected digest
// if the tag is a digest reference.
manifest, verified, err := s.loadManifest(eng, manifestBytes, manifestDigest, tag)
if err != nil {
return false, fmt.Errorf("error verifying manifest: %s", err)
}
@ -605,7 +608,7 @@ func (s *TagStore) pullV2Tag(eng *engine.Engine, r *registry.Session, out io.Wri
out.Write(sf.FormatStatus(utils.ImageReference(repoInfo.CanonicalName, tag), "The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security."))
}
if len(manifestDigest) > 0 {
if manifestDigest != "" {
out.Write(sf.FormatStatus("", "Digest: %s", manifestDigest))
}

View File

@ -1,7 +1,6 @@
package graph
import (
"bytes"
"crypto/sha256"
"encoding/json"
"errors"
@ -432,14 +431,12 @@ func (s *TagStore) pushV2Repository(r *registry.Session, localRepo Repository, o
log.Infof("Signed manifest for %s:%s using daemon's key: %s", repoInfo.LocalName, tag, s.trustKey.KeyID())
// push the manifest
digest, err := r.PutV2ImageManifest(endpoint, repoInfo.RemoteName, tag, bytes.NewReader(signedBody), auth)
digest, err := r.PutV2ImageManifest(endpoint, repoInfo.RemoteName, tag, signedBody, mBytes, auth)
if err != nil {
return err
}
if len(digest) > 0 {
out.Write(sf.FormatStatus("", "Digest: %s", digest))
}
out.Write(sf.FormatStatus("", "Digest: %s", digest))
}
return nil
}
@ -542,7 +539,7 @@ func (s *TagStore) CmdPush(job *engine.Job) engine.Status {
return job.Errorf("Repository does not exist: %s", repoInfo.LocalName)
}
if endpoint.Version == registry.APIVersion2 {
if repoInfo.Index.Official || endpoint.Version == registry.APIVersion2 {
err := s.pushV2Repository(r, localRepo, job.Stdout, repoInfo, tag, sf)
if err == nil {
return engine.StatusOK

View File

@ -53,7 +53,7 @@ clone hg code.google.com/p/gosqlite 74691fb6f837
clone git github.com/docker/libtrust 230dfd18c232
clone git github.com/Sirupsen/logrus v0.6.6
clone git github.com/Sirupsen/logrus v0.7.1
clone git github.com/go-fsnotify/fsnotify v1.0.4
@ -75,7 +75,7 @@ rm -rf src/github.com/docker/distribution
mkdir -p src/github.com/docker/distribution
mv tmp-digest src/github.com/docker/distribution/digest
clone git github.com/docker/libcontainer 4a72e540feb67091156b907c4700e580a99f5a9d
clone git github.com/docker/libcontainer fd0087d3acdc4c5865de1829d4accee5e3ebb658
# see src/github.com/docker/libcontainer/update-vendor.sh which is the "source of truth" for libcontainer deps (just like this file)
rm -rf src/github.com/docker/libcontainer/vendor
eval "$(grep '^clone ' src/github.com/docker/libcontainer/update-vendor.sh | grep -v 'github.com/codegangsta/cli' | grep -v 'github.com/Sirupsen/logrus')"

View File

@ -516,3 +516,40 @@ func TestBuildApiDockerfileSymlink(t *testing.T) {
logDone("container REST API - check build w/bad Dockerfile symlink path")
}
// #9981 - Allow a docker created volume (ie, one in /var/lib/docker/volumes) to be used to overwrite (via passing in Binds on api start) an existing volume
func TestPostContainerBindNormalVolume(t *testing.T) {
defer deleteAllContainers()
out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "create", "-v", "/foo", "--name=one", "busybox"))
if err != nil {
t.Fatal(err, out)
}
fooDir, err := inspectFieldMap("one", "Volumes", "/foo")
if err != nil {
t.Fatal(err)
}
out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "create", "-v", "/foo", "--name=two", "busybox"))
if err != nil {
t.Fatal(err, out)
}
bindSpec := map[string][]string{"Binds": {fooDir + ":/foo"}}
_, err = sockRequest("POST", "/containers/two/start", bindSpec)
if err != nil && !strings.Contains(err.Error(), "204 No Content") {
t.Fatal(err)
}
fooDir2, err := inspectFieldMap("two", "Volumes", "/foo")
if err != nil {
t.Fatal(err)
}
if fooDir2 != fooDir {
t.Fatal("expected volume path to be %s, got: %s", fooDir, fooDir2)
}
logDone("container REST API - can use path from normal volume as bind-mount to overwrite another volume")
}

View File

@ -2,6 +2,7 @@ package main
import (
"archive/tar"
"bufio"
"bytes"
"encoding/json"
"fmt"
@ -14,6 +15,7 @@ import (
"runtime"
"strconv"
"strings"
"sync"
"testing"
"text/template"
"time"
@ -239,9 +241,18 @@ func TestBuildEnvironmentReplacementEnv(t *testing.T) {
_, err := buildImage(name,
`
FROM scratch
ENV foo foo
FROM busybox
ENV foo zzz
ENV bar ${foo}
ENV abc1='$foo'
ENV env1=$foo env2=${foo} env3="$foo" env4="${foo}"
RUN [ "$abc1" = '$foo' ] && (echo "$abc1" | grep -q foo)
ENV abc2="\$foo"
RUN [ "$abc2" = '$foo' ] && (echo "$abc2" | grep -q foo)
ENV abc3 '$foo'
RUN [ "$abc3" = '$foo' ] && (echo "$abc3" | grep -q foo)
ENV abc4 "\$foo"
RUN [ "$abc4" = '$foo' ] && (echo "$abc4" | grep -q foo)
`, true)
if err != nil {
@ -260,13 +271,19 @@ func TestBuildEnvironmentReplacementEnv(t *testing.T) {
}
found := false
envCount := 0
for _, env := range envResult {
parts := strings.SplitN(env, "=", 2)
if parts[0] == "bar" {
found = true
if parts[1] != "foo" {
t.Fatalf("Could not find replaced var for env `bar`: got %q instead of `foo`", parts[1])
if parts[1] != "zzz" {
t.Fatalf("Could not find replaced var for env `bar`: got %q instead of `zzz`", parts[1])
}
} else if strings.HasPrefix(parts[0], "env") {
envCount++
if parts[1] != "zzz" {
t.Fatalf("%s should be 'foo' but instead its %q", parts[0], parts[1])
}
}
}
@ -275,6 +292,10 @@ func TestBuildEnvironmentReplacementEnv(t *testing.T) {
t.Fatal("Never found the `bar` env variable")
}
if envCount != 4 {
t.Fatalf("Didn't find all env vars - only saw %d\n%s", envCount, envResult)
}
logDone("build - env environment replacement")
}
@ -361,8 +382,8 @@ func TestBuildHandleEscapes(t *testing.T) {
t.Fatal(err)
}
if _, ok := result[`\\\\\\${FOO}`]; !ok {
t.Fatal(`Could not find volume \\\\\\${FOO} set from env foo in volumes table`)
if _, ok := result[`\\\${FOO}`]; !ok {
t.Fatal(`Could not find volume \\\${FOO} set from env foo in volumes table`, result)
}
logDone("build - handle escapes")
@ -1924,6 +1945,132 @@ func TestBuildForceRm(t *testing.T) {
logDone("build - ensure --force-rm doesn't leave containers behind")
}
// Test that an infinite sleep during a build is killed if the client disconnects.
// This test is fairly hairy because there are lots of ways to race.
// Strategy:
// * Monitor the output of docker events starting from before
// * Run a 1-year-long sleep from a docker build.
// * When docker events sees container start, close the "docker build" command
// * Wait for docker events to emit a dying event.
func TestBuildCancelationKillsSleep(t *testing.T) {
// TODO(jfrazelle): Make this work on Windows.
testRequires(t, SameHostDaemon)
name := "testbuildcancelation"
defer deleteImages(name)
// (Note: one year, will never finish)
ctx, err := fakeContext("FROM busybox\nRUN sleep 31536000", nil)
if err != nil {
t.Fatal(err)
}
defer ctx.Close()
var wg sync.WaitGroup
defer wg.Wait()
finish := make(chan struct{})
defer close(finish)
eventStart := make(chan struct{})
eventDie := make(chan struct{})
// Start one second ago, to avoid rounding problems
startEpoch := time.Now().Add(-1 * time.Second)
// Goroutine responsible for watching start/die events from `docker events`
wg.Add(1)
go func() {
defer wg.Done()
// Watch for events since epoch.
eventsCmd := exec.Command(dockerBinary, "events",
"-since", fmt.Sprint(startEpoch.Unix()))
stdout, err := eventsCmd.StdoutPipe()
if err != nil {
t.Fatalf("failed to get stdout pipe for 'docker events': %s", err)
}
err = eventsCmd.Start()
if err != nil {
t.Fatalf("failed to start 'docker events': %s", err)
}
go func() {
<-finish
eventsCmd.Process.Kill()
}()
var started, died bool
matchStart := regexp.MustCompile(" \\(from busybox\\:latest\\) start$")
matchDie := regexp.MustCompile(" \\(from busybox\\:latest\\) die$")
//
// Read lines of `docker events` looking for container start and stop.
//
scanner := bufio.NewScanner(stdout)
for scanner.Scan() {
if ok := matchStart.MatchString(scanner.Text()); ok {
if started {
t.Fatal("assertion fail: more than one container started")
}
close(eventStart)
started = true
}
if ok := matchDie.MatchString(scanner.Text()); ok {
if died {
t.Fatal("assertion fail: more than one container died")
}
close(eventDie)
died = true
}
}
err = eventsCmd.Wait()
if err != nil && !IsKilled(err) {
t.Fatalf("docker events had bad exit status: %s", err)
}
}()
buildCmd := exec.Command(dockerBinary, "build", "-t", name, ".")
buildCmd.Dir = ctx.Dir
buildCmd.Stdout = os.Stdout
err = buildCmd.Start()
if err != nil {
t.Fatalf("failed to run build: %s", err)
}
select {
case <-time.After(30 * time.Second):
t.Fatal("failed to observe build container start in timely fashion")
case <-eventStart:
// Proceeds from here when we see the container fly past in the
// output of "docker events".
// Now we know the container is running.
}
// Send a kill to the `docker build` command.
// Causes the underlying build to be cancelled due to socket close.
err = buildCmd.Process.Kill()
if err != nil {
t.Fatalf("error killing build command: %s", err)
}
// Get the exit status of `docker build`, check it exited because killed.
err = buildCmd.Wait()
if err != nil && !IsKilled(err) {
t.Fatalf("wait failed during build run: %T %s", err, err)
}
select {
case <-time.After(30 * time.Second):
// If we don't get here in a timely fashion, it wasn't killed.
t.Fatal("container cancel did not succeed")
case <-eventDie:
// We saw the container shut down in the `docker events` stream,
// as expected.
}
logDone("build - ensure canceled job finishes immediately")
}
func TestBuildRm(t *testing.T) {
name := "testbuildrm"
defer deleteImages(name)
@ -2128,7 +2275,7 @@ func TestBuildRelativeWorkdir(t *testing.T) {
func TestBuildWorkdirWithEnvVariables(t *testing.T) {
name := "testbuildworkdirwithenvvariables"
expected := "/test1/test2/$MISSING_VAR"
expected := "/test1/test2"
defer deleteImages(name)
_, err := buildImage(name,
`FROM busybox
@ -3357,7 +3504,7 @@ func TestBuildFailsDockerfileEmpty(t *testing.T) {
defer deleteImages(name)
_, err := buildImage(name, ``, true)
if err != nil {
if !strings.Contains(err.Error(), "Dockerfile cannot be empty") {
if !strings.Contains(err.Error(), "The Dockerfile (Dockerfile) cannot be empty") {
t.Fatalf("Wrong error %v, must be about empty Dockerfile", err)
}
} else {
@ -3897,9 +4044,9 @@ ENV abc=zzz TO=/docker/world/hello
ADD $FROM $TO
RUN [ "$(cat $TO)" = "hello" ]
ENV abc "zzz"
RUN [ $abc = \"zzz\" ]
RUN [ $abc = "zzz" ]
ENV abc 'yyy'
RUN [ $abc = \'yyy\' ]
RUN [ $abc = 'yyy' ]
ENV abc=
RUN [ "$abc" = "" ]
@ -3915,13 +4062,34 @@ RUN [ "$abc" = "'foo'" ]
ENV abc=\"foo\"
RUN [ "$abc" = "\"foo\"" ]
ENV abc "foo"
RUN [ "$abc" = "\"foo\"" ]
RUN [ "$abc" = "foo" ]
ENV abc 'foo'
RUN [ "$abc" = "'foo'" ]
RUN [ "$abc" = 'foo' ]
ENV abc \'foo\'
RUN [ "$abc" = "\\'foo\\'" ]
RUN [ "$abc" = "'foo'" ]
ENV abc \"foo\"
RUN [ "$abc" = "\\\"foo\\\"" ]
RUN [ "$abc" = '"foo"' ]
ENV e1=bar
ENV e2=$e1
ENV e3=$e11
ENV e4=\$e1
ENV e5=\$e11
RUN [ "$e0,$e1,$e2,$e3,$e4,$e5" = ',bar,bar,,$e1,$e11' ]
ENV ee1 bar
ENV ee2 $ee1
ENV ee3 $ee11
ENV ee4 \$ee1
ENV ee5 \$ee11
RUN [ "$ee1,$ee2,$ee3,$ee4,$ee5" = 'bar,bar,,$ee1,$ee11' ]
ENV eee1="foo"
ENV eee2='foo'
ENV eee3 "foo"
ENV eee4 'foo'
RUN [ "$eee1,$eee2,$eee3,$eee4" = 'foo,foo,foo,foo' ]
`
ctx, err := fakeContext(dockerfile, map[string]string{
"hello/docker/world": "hello",
@ -4585,14 +4753,29 @@ func TestBuildLabelsCache(t *testing.T) {
`FROM busybox
LABEL Vendor=Acme1`, true)
if err != nil || id1 == id2 {
t.Fatalf("Build 2 should have worked & NOT used cache(%s,%s): %v", id1, id2, err)
t.Fatalf("Build 3 should have worked & NOT used cache(%s,%s): %v", id1, id2, err)
}
id2, err = buildImage(name,
`FROM busybox
LABEL Vendor Acme`, true) // Note: " " and "=" should be same
if err != nil || id1 != id2 {
t.Fatalf("Build 3 should have worked & used cache(%s,%s): %v", id1, id2, err)
t.Fatalf("Build 4 should have worked & used cache(%s,%s): %v", id1, id2, err)
}
// Now make sure the cache isn't used by mistake
id1, err = buildImage(name,
`FROM busybox
LABEL f1=b1 f2=b2`, false)
if err != nil {
t.Fatalf("Build 5 should have worked: %q", err)
}
id2, err = buildImage(name,
`FROM busybox
LABEL f1="b1 f2=b2"`, true)
if err != nil || id1 == id2 {
t.Fatalf("Build 6 should have worked & NOT used the cache(%s,%s): %q", id1, id2, err)
}
logDone("build - label cache")
@ -4608,8 +4791,19 @@ func TestBuildStderr(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if stderr != "" {
t.Fatalf("Stderr should have been empty, instead its: %q", stderr)
if runtime.GOOS == "windows" {
// stderr might contain a security warning on windows
lines := strings.Split(stderr, "\n")
for _, v := range lines {
if v != "" && !strings.Contains(v, "SECURITY WARNING:") {
t.Fatalf("Stderr contains unexpected output line: %q", v)
}
}
} else {
if stderr != "" {
t.Fatalf("Stderr should have been empty, instead its: %q", stderr)
}
}
logDone("build - testing stderr")
}
@ -5098,9 +5292,13 @@ func TestBuildSpaces(t *testing.T) {
t.Fatal("Build 2 was supposed to fail, but didn't")
}
removeLogTimestamps := func(s string) string {
return regexp.MustCompile(`time="(.*?)"`).ReplaceAllString(s, `time=[TIMESTAMP]`)
}
// Skip over the times
e1 := err1.Error()[strings.Index(err1.Error(), `level=`):]
e2 := err2.Error()[strings.Index(err1.Error(), `level=`):]
e1 := removeLogTimestamps(err1.Error())
e2 := removeLogTimestamps(err2.Error())
// Ignore whitespace since that's what we're verifying doesn't change stuff
if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) {
@ -5113,8 +5311,8 @@ func TestBuildSpaces(t *testing.T) {
}
// Skip over the times
e1 = err1.Error()[strings.Index(err1.Error(), `level=`):]
e2 = err2.Error()[strings.Index(err1.Error(), `level=`):]
e1 = removeLogTimestamps(err1.Error())
e2 = removeLogTimestamps(err2.Error())
// Ignore whitespace since that's what we're verifying doesn't change stuff
if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) {
@ -5127,8 +5325,8 @@ func TestBuildSpaces(t *testing.T) {
}
// Skip over the times
e1 = err1.Error()[strings.Index(err1.Error(), `level=`):]
e2 = err2.Error()[strings.Index(err1.Error(), `level=`):]
e1 = removeLogTimestamps(err1.Error())
e2 = removeLogTimestamps(err2.Error())
// Ignore whitespace since that's what we're verifying doesn't change stuff
if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) {
@ -5289,7 +5487,7 @@ func TestBuildRUNoneJSON(t *testing.T) {
name := "testbuildrunonejson"
defer deleteAllContainers()
defer deleteImages(name)
defer deleteImages(name, "hello-world")
ctx, err := fakeContext(`FROM hello-world:frozen
RUN [ "/hello" ]`, map[string]string{})
@ -5315,7 +5513,7 @@ RUN [ "/hello" ]`, map[string]string{})
func TestBuildResourceConstraintsAreUsed(t *testing.T) {
name := "testbuildresourceconstraints"
defer deleteAllContainers()
defer deleteImages(name)
defer deleteImages(name, "hello-world")
ctx, err := fakeContext(`
FROM hello-world:frozen
@ -5382,3 +5580,19 @@ func TestBuildResourceConstraintsAreUsed(t *testing.T) {
logDone("build - resource constraints applied")
}
func TestBuildEmptyStringVolume(t *testing.T) {
name := "testbuildemptystringvolume"
defer deleteImages(name)
_, err := buildImage(name, `
FROM busybox
ENV foo=""
VOLUME $foo
`, false)
if err == nil {
t.Fatal("Should have failed to build")
}
logDone("build - empty string volume")
}

View File

@ -800,3 +800,31 @@ func TestDaemonDots(t *testing.T) {
logDone("daemon - test dots on INFO")
}
func TestDaemonUnixSockCleanedUp(t *testing.T) {
d := NewDaemon(t)
dir, err := ioutil.TempDir("", "socket-cleanup-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
sockPath := filepath.Join(dir, "docker.sock")
if err := d.Start("--host", "unix://"+sockPath); err != nil {
t.Fatal(err)
}
if _, err := os.Stat(sockPath); err != nil {
t.Fatal("socket does not exist")
}
if err := d.Stop(); err != nil {
t.Fatal(err)
}
if _, err := os.Stat(sockPath); err == nil || !os.IsNotExist(err) {
t.Fatal("unix socket is not cleaned up")
}
logDone("daemon - unix socket is cleaned up")
}

View File

@ -106,6 +106,8 @@ func TestPullNonExistingImage(t *testing.T) {
// pulling an image from the central registry using official names should work
// ensure all pulls result in the same image
func TestPullImageOfficialNames(t *testing.T) {
testRequires(t, Network)
names := []string{
"docker.io/hello-world",
"index.docker.io/hello-world",

View File

@ -20,7 +20,7 @@ import (
"time"
"github.com/docker/docker/nat"
"github.com/docker/docker/pkg/networkfs/resolvconf"
"github.com/docker/docker/pkg/resolvconf"
)
// "test123" should be printed by docker run
@ -412,6 +412,31 @@ func TestRunLinkToContainerNetMode(t *testing.T) {
logDone("run - link to a container which net mode is container success")
}
func TestRunModeNetContainerHostname(t *testing.T) {
defer deleteAllContainers()
cmd := exec.Command(dockerBinary, "run", "-i", "-d", "--name", "parent", "busybox", "top")
out, _, err := runCommandWithOutput(cmd)
if err != nil {
t.Fatalf("failed to run container: %v, output: %q", err, out)
}
cmd = exec.Command(dockerBinary, "exec", "parent", "cat", "/etc/hostname")
out, _, err = runCommandWithOutput(cmd)
if err != nil {
t.Fatalf("failed to exec command: %v, output: %q", err, out)
}
cmd = exec.Command(dockerBinary, "run", "--net=container:parent", "busybox", "cat", "/etc/hostname")
out1, _, err := runCommandWithOutput(cmd)
if err != nil {
t.Fatalf("failed to run container: %v, output: %q", err, out1)
}
if out1 != out {
t.Fatal("containers with shared net namespace should have same hostname")
}
logDone("run - containers with shared net namespace have same hostname")
}
// Regression test for #4741
func TestRunWithVolumesAsFiles(t *testing.T) {
defer deleteAllContainers()
@ -3323,3 +3348,46 @@ func TestRunVolumesFromRestartAfterRemoved(t *testing.T) {
logDone("run - can restart a volumes-from container after producer is removed")
}
// run container with --rm should remove container if exit code != 0
func TestRunContainerWithRmFlagExitCodeNotEqualToZero(t *testing.T) {
defer deleteAllContainers()
runCmd := exec.Command(dockerBinary, "run", "--rm", "busybox", "ls", "/notexists")
out, _, err := runCommandWithOutput(runCmd)
if err == nil {
t.Fatal("Expected docker run to fail", out, err)
}
out, err = getAllContainers()
if err != nil {
t.Fatal(out, err)
}
if out != "" {
t.Fatal("Expected not to have containers", out)
}
logDone("run - container is removed if run with --rm and exit code != 0")
}
func TestRunContainerWithRmFlagCannotStartContainer(t *testing.T) {
defer deleteAllContainers()
runCmd := exec.Command(dockerBinary, "run", "--rm", "busybox", "commandNotFound")
out, _, err := runCommandWithOutput(runCmd)
if err == nil {
t.Fatal("Expected docker run to fail", out, err)
}
out, err = getAllContainers()
if err != nil {
t.Fatal(out, err)
}
if out != "" {
t.Fatal("Expected not to have containers", out)
}
logDone("run - container is removed if run with --rm and cannot start")
}

View File

@ -109,19 +109,6 @@ func TestRunWithUlimits(t *testing.T) {
logDone("run - ulimits are set")
}
func getCgroupPaths(test string) map[string]string {
cgroupPaths := map[string]string{}
for _, line := range strings.Split(test, "\n") {
parts := strings.Split(line, ":")
if len(parts) != 3 {
fmt.Printf("unexpected file format for /proc/self/cgroup - %q\n", line)
continue
}
cgroupPaths[parts[1]] = parts[2]
}
return cgroupPaths
}
func TestRunContainerWithCgroupParent(t *testing.T) {
testRequires(t, NativeExecDriver)
defer deleteAllContainers()
@ -131,7 +118,7 @@ func TestRunContainerWithCgroupParent(t *testing.T) {
if err != nil {
t.Fatalf("failed to read '/proc/self/cgroup - %v", err)
}
selfCgroupPaths := getCgroupPaths(string(data))
selfCgroupPaths := parseCgroupPaths(string(data))
selfCpuCgroup, found := selfCgroupPaths["memory"]
if !found {
t.Fatalf("unable to find self cpu cgroup path. CgroupsPath: %v", selfCgroupPaths)
@ -141,7 +128,7 @@ func TestRunContainerWithCgroupParent(t *testing.T) {
if err != nil {
t.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err)
}
cgroupPaths := getCgroupPaths(string(out))
cgroupPaths := parseCgroupPaths(string(out))
if len(cgroupPaths) == 0 {
t.Fatalf("unexpected output - %q", string(out))
}
@ -169,7 +156,7 @@ func TestRunContainerWithCgroupParentAbsPath(t *testing.T) {
if err != nil {
t.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err)
}
cgroupPaths := getCgroupPaths(string(out))
cgroupPaths := parseCgroupPaths(string(out))
if len(cgroupPaths) == 0 {
t.Fatalf("unexpected output - %q", string(out))
}

View File

@ -240,3 +240,49 @@ func TestStartMultipleContainers(t *testing.T) {
logDone("start - start multiple containers continue on one failed")
}
func TestStartAttachMultipleContainers(t *testing.T) {
var cmd *exec.Cmd
defer deleteAllContainers()
// run multiple containers to test
for _, container := range []string{"test1", "test2", "test3"} {
cmd = exec.Command(dockerBinary, "run", "-d", "--name", container, "busybox", "top")
if out, _, err := runCommandWithOutput(cmd); err != nil {
t.Fatal(out, err)
}
}
// stop all the containers
for _, container := range []string{"test1", "test2", "test3"} {
cmd = exec.Command(dockerBinary, "stop", container)
if out, _, err := runCommandWithOutput(cmd); err != nil {
t.Fatal(out, err)
}
}
// try to start and attach to multiple containers at once; an error is expected
for _, option := range []string{"-a", "-i", "-ai"} {
cmd = exec.Command(dockerBinary, "start", option, "test1", "test2", "test3")
out, _, err := runCommandWithOutput(cmd)
if err == nil || !strings.Contains(out, "You cannot start and attach multiple containers at once.") {
t.Fatalf("Expected 'You cannot start and attach multiple containers at once.' error, got err=%v out=%q", err, out)
}
}
// confirm that all of the containers are stopped
for container, expected := range map[string]string{"test1": "false", "test2": "false", "test3": "false"} {
cmd = exec.Command(dockerBinary, "inspect", "-f", "{{.State.Running}}", container)
out, _, err := runCommandWithOutput(cmd)
if err != nil {
t.Fatal(out, err)
}
out = strings.Trim(out, "\r\n")
if out != expected {
t.Fatal("Container running state wrong")
}
}
logDone("start - error on start and attach multiple containers at once")
}

View File

@ -395,9 +395,8 @@ func getSliceOfPausedContainers() ([]string, error) {
if err == nil {
slice := strings.Split(strings.TrimSpace(out), "\n")
return slice, err
} else {
return []string{out}, err
}
return []string{out}, err
}
func unpauseContainer(container string) error {

View File

@ -4,6 +4,7 @@ import (
"encoding/json"
"fmt"
"log"
"net/http"
"os/exec"
"strings"
"testing"
@ -32,6 +33,16 @@ var (
func() bool { return supportsExec },
"Test requires 'docker exec' capabilities on the tested daemon.",
}
Network = TestRequirement{
func() bool {
resp, err := http.Get("http://hub.docker.com")
if resp != nil {
resp.Body.Close()
}
return err == nil
},
"Test requires network availability, environment variable set to none to run in a non-network enabled mode.",
}
RegistryHosting = TestRequirement{
func() bool {
// for now registry binary is built only if we're running inside

View File

@ -6,6 +6,6 @@ const (
// identifies if test suite is running on a unix platform
isUnixCli = false
// this is the expected file permission set on windows: gh#11047
expectedFileChmod = "-rwx------"
// this is the expected file permission set on windows: gh#11395
expectedFileChmod = "-rwxr-xr-x"
)

View File

@ -42,6 +42,18 @@ func processExitCode(err error) (exitCode int) {
return
}
func IsKilled(err error) bool {
if exitErr, ok := err.(*exec.ExitError); ok {
sys := exitErr.ProcessState.Sys()
status, ok := sys.(syscall.WaitStatus)
if !ok {
return false
}
return status.Signaled() && status.Signal() == os.Kill
}
return false
}
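A hedged, test-style sketch of the new helper in use; the sleep binary and the *testing.T context are assumptions, not part of this diff:

cmd := exec.Command("sleep", "10")
if err := cmd.Start(); err != nil {
    t.Fatal(err)
}
if err := cmd.Process.Kill(); err != nil { // delivers SIGKILL on Unix
    t.Fatal(err)
}
err := cmd.Wait() // *exec.ExitError whose WaitStatus reports Signaled()
if !IsKilled(err) {
    t.Fatalf("expected SIGKILL to be reported, got %v", err)
}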
func runCommandWithOutput(cmd *exec.Cmd) (output string, exitCode int, err error) {
exitCode = 0
out, err := cmd.CombinedOutput()
@ -328,3 +340,17 @@ func consumeWithSpeed(reader io.Reader, chunkSize int, interval time.Duration, s
}
}
}
// parseCgroupPaths parses 'procCgroupData', which is the output of '/proc/<pid>/cgroup', and
// returns a map with the cgroup subsystem name as key and its path as value.
func parseCgroupPaths(procCgroupData string) map[string]string {
cgroupPaths := map[string]string{}
for _, line := range strings.Split(procCgroupData, "\n") {
parts := strings.Split(line, ":")
if len(parts) != 3 {
continue
}
cgroupPaths[parts[1]] = parts[2]
}
return cgroupPaths
}
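A hedged usage sketch; the /proc/self/cgroup sample below is illustrative only:

data := "4:memory:/docker/abc123\n3:cpu,cpuacct:/docker/abc123\n"
paths := parseCgroupPaths(data)
// paths["memory"] == "/docker/abc123"
// paths["cpu,cpuacct"] == "/docker/abc123"
// lines without exactly three ':'-separated parts are skipped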

View File

@ -192,9 +192,8 @@ func ValidateMACAddress(val string) (string, error) {
_, err := net.ParseMAC(strings.TrimSpace(val))
if err != nil {
return "", err
} else {
return val, nil
}
return val, nil
}
// Validates domain for resolvconf search configuration.

View File

@ -28,10 +28,9 @@ func CanonicalTarNameForPath(p string) (string, error) {
// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.
func chmodTarEntry(perm os.FileMode) os.FileMode {
// Clear r/w on grp/others: no precise equivalent of group/others on NTFS.
perm &= 0711
perm &= 0755
// Add the x bit: make everything +x from windows
perm |= 0100
perm |= 0111
return perm
}
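For intuition, a hedged test-style snippet inside the archive package showing the new mapping; the values match the updated test table in the next file:

for _, perm := range []os.FileMode{0644, 0444, 0777} {
    fmt.Printf("%04o -> %04o\n", perm, chmodTarEntry(perm))
}
// prints: 0644 -> 0755, 0444 -> 0555, 0777 -> 0755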

View File

@ -51,11 +51,11 @@ func TestChmodTarEntry(t *testing.T) {
cases := []struct {
in, expected os.FileMode
}{
{0000, 0100},
{0777, 0711},
{0644, 0700},
{0755, 0711},
{0444, 0500},
{0000, 0111},
{0777, 0755},
{0644, 0755},
{0755, 0755},
{0444, 0555},
}
for _, v := range cases {
if out := chmodTarEntry(v.in); out != v.expected {

View File

@ -220,8 +220,8 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
oldStat.Gid() != newStat.Gid() ||
oldStat.Rdev() != newStat.Rdev() ||
// Don't look at size for dirs, it's not a good measure of change
(oldStat.Size() != newStat.Size() && oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR) ||
!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) ||
(oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR &&
(!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) ||
bytes.Compare(oldChild.capability, newChild.capability) != 0 {
change := Change{
Path: newChild.path(),

View File

@ -218,7 +218,6 @@ func TestChangesDirsMutated(t *testing.T) {
expectedChanges := []Change{
{"/dir1", ChangeDelete},
{"/dir2", ChangeModify},
{"/dir3", ChangeModify},
{"/dirnew", ChangeAdd},
{"/file1", ChangeDelete},
{"/file2", ChangeModify},

View File

@ -2,8 +2,11 @@ package ioutils
import (
"bytes"
"crypto/rand"
"io"
"math/big"
"sync"
"time"
)
type readCloserWrapper struct {
@ -42,20 +45,40 @@ func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader {
}
}
// bufReader allows the underlying reader to continue to produce
// output by pre-emptively reading from the wrapped reader.
// This is achieved by buffering this data in bufReader's
// expanding buffer.
type bufReader struct {
sync.Mutex
buf *bytes.Buffer
reader io.Reader
err error
wait sync.Cond
drainBuf []byte
buf *bytes.Buffer
reader io.Reader
err error
wait sync.Cond
drainBuf []byte
reuseBuf []byte
maxReuse int64
resetTimeout time.Duration
bufLenResetThreshold int64
maxReadDataReset int64
}
func NewBufReader(r io.Reader) *bufReader {
var timeout int
if randVal, err := rand.Int(rand.Reader, big.NewInt(120)); err == nil {
timeout = int(randVal.Int64()) + 180
} else {
timeout = 300
}
reader := &bufReader{
buf: &bytes.Buffer{},
drainBuf: make([]byte, 1024),
reader: r,
buf: &bytes.Buffer{},
drainBuf: make([]byte, 1024),
reuseBuf: make([]byte, 4096),
maxReuse: 1000,
resetTimeout: time.Second * time.Duration(timeout),
bufLenResetThreshold: 100 * 1024,
maxReadDataReset: 10 * 1024 * 1024,
reader: r,
}
reader.wait.L = &reader.Mutex
go reader.drain()
@ -74,14 +97,94 @@ func NewBufReaderWithDrainbufAndBuffer(r io.Reader, drainBuffer []byte, buffer *
}
func (r *bufReader) drain() {
var (
duration time.Duration
lastReset time.Time
now time.Time
reset bool
bufLen int64
dataSinceReset int64
maxBufLen int64
reuseBufLen int64
reuseCount int64
)
reuseBufLen = int64(len(r.reuseBuf))
lastReset = time.Now()
for {
n, err := r.reader.Read(r.drainBuf)
dataSinceReset += int64(n)
r.Lock()
bufLen = int64(r.buf.Len())
if bufLen > maxBufLen {
maxBufLen = bufLen
}
// Avoid unbounded growth of the buffer over time.
// This has proven to be the only non-intrusive
// solution to the buffer's unbounded growth.
// Alternative solutions such as compression, multiple
// buffers, and channels either reduced throughput and
// overall Docker performance or simply crashed Docker.
// This solution releases the buffer when specific
// conditions are met, avoiding continuous resizing
// of the buffer for long-lived containers.
//
// Move data to the front of the buffer if it's
// smaller than what reuseBuf can store
if bufLen > 0 && reuseBufLen >= bufLen {
n, _ := r.buf.Read(r.reuseBuf)
r.buf.Write(r.reuseBuf[0:n])
// Take action if the buffer has been reused too many
// times and there's still data in the buffer.
// The timeout is also used as a means to avoid doing
// these operations more or less often than
// required.
// The various conditions try to detect heavy activity
// in the buffer, which might indicate heavy
// growth of the buffer.
} else if reuseCount >= r.maxReuse && bufLen > 0 {
now = time.Now()
duration = now.Sub(lastReset)
timeoutReached := duration >= r.resetTimeout
// The timeout has been reached and the
// buffered data couldn't be moved to the front
// of the buffer, so the buffer gets reset.
if timeoutReached && bufLen > reuseBufLen {
reset = true
}
// The amount of buffered data is too high now,
// reset the buffer.
if timeoutReached && maxBufLen >= r.bufLenResetThreshold {
reset = true
}
// Reset the buffer if a certain amount of
// data has gone through the buffer since the
// last reset.
if timeoutReached && dataSinceReset >= r.maxReadDataReset {
reset = true
}
// Move the buffered data to a fresh buffer,
// swap the old buffer for the new one, and
// reset all counters.
if reset {
newbuf := &bytes.Buffer{}
newbuf.ReadFrom(r.buf)
r.buf = newbuf
lastReset = now
reset = false
dataSinceReset = 0
maxBufLen = 0
reuseCount = 0
}
}
if err != nil {
r.err = err
} else {
r.buf.Write(r.drainBuf[0:n])
}
reuseCount++
r.wait.Signal()
r.Unlock()
if err != nil {

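A minimal usage sketch, assuming bufReader's Read method (defined outside this hunk) and the pkg/ioutils import path; the producer and consumer names are hypothetical:

// The drain goroutine reads from the wrapped reader eagerly, so a slow
// consumer never blocks the producer.
br := ioutils.NewBufReader(containerStdout) // containerStdout: any io.Reader
buf := make([]byte, 4096)
for {
    n, err := br.Read(buf) // blocks until data is buffered or an error occurs
    consume(buf[:n])       // consume: hypothetical
    if err != nil {
        break // io.EOF once the producer is exhausted and the buffer drained
    }
}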
View File

@ -32,3 +32,61 @@ func TestBufReader(t *testing.T) {
t.Error(string(output))
}
}
type repeatedReader struct {
readCount int
maxReads int
data []byte
}
func newRepeatedReader(max int, data []byte) *repeatedReader {
return &repeatedReader{0, max, data}
}
func (r *repeatedReader) Read(p []byte) (int, error) {
if r.readCount >= r.maxReads {
return 0, io.EOF
}
r.readCount++
n := copy(p, r.data)
return n, nil
}
func testWithData(data []byte, reads int) {
reader := newRepeatedReader(reads, data)
bufReader := NewBufReader(reader)
io.Copy(ioutil.Discard, bufReader)
}
func Benchmark1M10BytesReads(b *testing.B) {
reads := 1000000
readSize := int64(10)
data := make([]byte, readSize)
b.SetBytes(readSize * int64(reads))
b.ResetTimer()
for i := 0; i < b.N; i++ {
testWithData(data, reads)
}
}
func Benchmark1M1024BytesReads(b *testing.B) {
reads := 1000000
readSize := int64(1024)
data := make([]byte, readSize)
b.SetBytes(readSize * int64(reads))
b.ResetTimer()
for i := 0; i < b.N; i++ {
testWithData(data, reads)
}
}
func Benchmark10k32KBytesReads(b *testing.B) {
reads := 10000
readSize := int64(32 * 1024)
data := make([]byte, readSize)
b.SetBytes(readSize * int64(reads))
b.ResetTimer()
for i := 0; i < b.N; i++ {
testWithData(data, reads)
}
}

View File

@ -1,87 +0,0 @@
// +build windows
package term
import (
"syscall"
"unsafe"
)
const (
// Consts for Get/SetConsoleMode function
// see http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx
ENABLE_ECHO_INPUT = 0x0004
ENABLE_INSERT_MODE = 0x0020
ENABLE_LINE_INPUT = 0x0002
ENABLE_MOUSE_INPUT = 0x0010
ENABLE_PROCESSED_INPUT = 0x0001
ENABLE_QUICK_EDIT_MODE = 0x0040
ENABLE_WINDOW_INPUT = 0x0008
// If parameter is a screen buffer handle, additional values
ENABLE_PROCESSED_OUTPUT = 0x0001
ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002
)
var kernel32DLL = syscall.NewLazyDLL("kernel32.dll")
var (
setConsoleModeProc = kernel32DLL.NewProc("SetConsoleMode")
getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo")
)
func GetConsoleMode(fileDesc uintptr) (uint32, error) {
var mode uint32
err := syscall.GetConsoleMode(syscall.Handle(fileDesc), &mode)
return mode, err
}
func SetConsoleMode(fileDesc uintptr, mode uint32) error {
r, _, err := setConsoleModeProc.Call(fileDesc, uintptr(mode), 0)
if r == 0 {
if err != nil {
return err
}
return syscall.EINVAL
}
return nil
}
// types for calling GetConsoleScreenBufferInfo
// see http://msdn.microsoft.com/en-us/library/windows/desktop/ms682093(v=vs.85).aspx
type (
SHORT int16
SMALL_RECT struct {
Left SHORT
Top SHORT
Right SHORT
Bottom SHORT
}
COORD struct {
X SHORT
Y SHORT
}
WORD uint16
CONSOLE_SCREEN_BUFFER_INFO struct {
dwSize COORD
dwCursorPosition COORD
wAttributes WORD
srWindow SMALL_RECT
dwMaximumWindowSize COORD
}
)
func GetConsoleScreenBufferInfo(fileDesc uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) {
var info CONSOLE_SCREEN_BUFFER_INFO
r, _, err := getConsoleScreenBufferInfoProc.Call(uintptr(fileDesc), uintptr(unsafe.Pointer(&info)), 0)
if r == 0 {
if err != nil {
return nil, err
}
return nil, syscall.EINVAL
}
return &info, nil
}

View File

@ -4,6 +4,7 @@ package term
import (
"errors"
"io"
"os"
"os/signal"
"syscall"
@ -25,6 +26,20 @@ type Winsize struct {
y uint16
}
func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
return os.Stdin, os.Stdout, os.Stderr
}
func GetFdInfo(in interface{}) (uintptr, bool) {
var inFd uintptr
var isTerminalIn bool
if file, ok := in.(*os.File); ok {
inFd = file.Fd()
isTerminalIn = IsTerminal(inFd)
}
return inFd, isTerminalIn
}
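Typical call site, sketched:

inFd, isTerminalIn := term.GetFdInfo(os.Stdin)
if isTerminalIn {
    // only now is it safe to query or change terminal modes on inFd
    _ = inFd
}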
func GetWinsize(fd uintptr) (*Winsize, error) {
ws := &Winsize{}
_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(ws)))

View File

@ -1,11 +1,19 @@
// +build windows
package term
import (
"io"
"os"
"github.com/docker/docker/pkg/term/winconsole"
)
// State holds the console mode for the terminal.
type State struct {
mode uint32
}
// Winsize is used for window size.
type Winsize struct {
Height uint16
Width uint16
@ -13,15 +21,17 @@ type Winsize struct {
y uint16
}
// GetWinsize gets the window size of the given terminal
func GetWinsize(fd uintptr) (*Winsize, error) {
ws := &Winsize{}
var info *CONSOLE_SCREEN_BUFFER_INFO
info, err := GetConsoleScreenBufferInfo(fd)
var info *winconsole.CONSOLE_SCREEN_BUFFER_INFO
info, err := winconsole.GetConsoleScreenBufferInfo(fd)
if err != nil {
return nil, err
}
ws.Height = uint16(info.srWindow.Right - info.srWindow.Left + 1)
ws.Width = uint16(info.srWindow.Bottom - info.srWindow.Top + 1)
ws.Width = uint16(info.Window.Right - info.Window.Left + 1)
ws.Height = uint16(info.Window.Bottom - info.Window.Top + 1)
ws.x = 0 // todo azlinux -- this is the pixel size of the Window, and not currently used by any caller
ws.y = 0
@ -29,37 +39,44 @@ func GetWinsize(fd uintptr) (*Winsize, error) {
return ws, nil
}
// SetWinsize sets the terminal connected to the given file descriptor to a
// given size.
func SetWinsize(fd uintptr, ws *Winsize) error {
return nil
}
// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal(fd uintptr) bool {
_, e := GetConsoleMode(fd)
_, e := winconsole.GetConsoleMode(fd)
return e == nil
}
// Restore restores the terminal connected to the given file descriptor to a
// RestoreTerminal restores the terminal connected to the given file descriptor to a
// previous state.
func RestoreTerminal(fd uintptr, state *State) error {
return SetConsoleMode(fd, state.mode)
return winconsole.SetConsoleMode(fd, state.mode)
}
// SaveState saves the state of the given console
func SaveState(fd uintptr) (*State, error) {
mode, e := GetConsoleMode(fd)
mode, e := winconsole.GetConsoleMode(fd)
if e != nil {
return nil, e
}
return &State{mode}, nil
}
// DisableEcho disables echo for the given file descriptor, updating the passed-in console mode state in place
// see http://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx for these flag settings
func DisableEcho(fd uintptr, state *State) error {
state.mode &^= (ENABLE_ECHO_INPUT)
state.mode |= (ENABLE_PROCESSED_INPUT | ENABLE_LINE_INPUT)
return SetConsoleMode(fd, state.mode)
state.mode &^= (winconsole.ENABLE_ECHO_INPUT)
state.mode |= (winconsole.ENABLE_PROCESSED_INPUT | winconsole.ENABLE_LINE_INPUT)
return winconsole.SetConsoleMode(fd, state.mode)
}
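A hedged sketch of the save/disable/restore pattern around a secret prompt; copying State is safe here because on Windows it only holds the mode word:

saved, err := SaveState(fd)
if err != nil {
    return err
}
working := *saved // DisableEcho mutates its argument, so keep the original
if err := DisableEcho(fd, &working); err != nil {
    return err
}
// ... read the password ...
return RestoreTerminal(fd, saved) // back to the originally saved mode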
// SetRawTerminal puts the terminal connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be
// restored.
func SetRawTerminal(fd uintptr) (*State, error) {
oldState, err := MakeRaw(fd)
if err != nil {
@ -79,11 +96,42 @@ func MakeRaw(fd uintptr) (*State, error) {
return nil, err
}
// see http://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx for these flag settings
state.mode &^= (ENABLE_ECHO_INPUT | ENABLE_PROCESSED_INPUT | ENABLE_LINE_INPUT)
err = SetConsoleMode(fd, state.mode)
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
// All three input modes, along with processed output mode, are designed to work together.
// It is best to either enable or disable all of these modes as a group.
// When all are enabled, the application is said to be in "cooked" mode, which means that most of the processing is handled for the application.
// When all are disabled, the application is in "raw" mode, which means that input is unfiltered and any processing is left to the application.
state.mode = 0
err = winconsole.SetConsoleMode(fd, state.mode)
if err != nil {
return nil, err
}
return state, nil
}
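The usual pairing for interactive sessions, sketched:

oldState, err := SetRawTerminal(fd)
if err != nil {
    return err
}
defer RestoreTerminal(fd, oldState)
// while raw, input arrives unfiltered and all processing is left to the application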
// GetFdInfo returns file descriptor and bool indicating whether the file is a terminal
func GetFdInfo(in interface{}) (uintptr, bool) {
return winconsole.GetHandleInfo(in)
}
func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
var shouldEmulateANSI bool
switch {
case os.Getenv("ConEmuANSI") == "ON":
// ConEmu shell; ANSI is emulated by default and ConEmu's
// emulation is quite good.
shouldEmulateANSI = false
case os.Getenv("MSYSTEM") != "":
// MSYS (mingw) cannot emulate ANSI sequences well and still shows escape
// characters, mostly because it is still running in a cmd.exe window.
shouldEmulateANSI = true
default:
shouldEmulateANSI = true
}
if shouldEmulateANSI {
return winconsole.StdStreams()
}
return os.Stdin, os.Stdout, os.Stderr
}
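A hedged probe of the branch above; setting ConEmuANSI like this is for illustration only:

os.Setenv("ConEmuANSI", "ON")
stdin, stdout, stderr := StdStreams() // native streams; ConEmu emulates ANSI itself
os.Setenv("ConEmuANSI", "")
stdin, stdout, stderr = StdStreams() // winconsole-emulated streams
_, _, _ = stdin, stdout, stderr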

File diff suppressed because it is too large

View File

@ -0,0 +1,232 @@
// +build windows
package winconsole
import (
"fmt"
"testing"
)
func helpsTestParseInt16OrDefault(t *testing.T, expectedValue int16, shouldFail bool, input string, defaultValue int16, format string, args ...interface{}) {
value, err := parseInt16OrDefault(input, defaultValue)
if nil != err && !shouldFail {
t.Errorf("Unexpected error returned %v", err)
t.Errorf(format, args...)
}
if nil == err && shouldFail {
t.Errorf("Should have failed as expected\n\tReturned value = %d", value)
t.Errorf(format, args...)
}
if expectedValue != value {
t.Errorf("The value returned does not match the expected\n\tExpected: %v\n\tActual: %v", expectedValue, value)
t.Errorf(format, args...)
}
}
func TestParseInt16OrDefault(t *testing.T) {
// empty string
helpsTestParseInt16OrDefault(t, 0, false, "", 0, "Empty string returns default")
helpsTestParseInt16OrDefault(t, 2, false, "", 2, "Empty string returns default")
// normal case
helpsTestParseInt16OrDefault(t, 0, false, "0", 0, "0 handled correctly")
helpsTestParseInt16OrDefault(t, 111, false, "111", 2, "Normal")
helpsTestParseInt16OrDefault(t, 111, false, "+111", 2, "+N")
helpsTestParseInt16OrDefault(t, -111, false, "-111", 2, "-N")
helpsTestParseInt16OrDefault(t, 0, false, "+0", 11, "+0")
helpsTestParseInt16OrDefault(t, 0, false, "-0", 12, "-0")
// ill formed strings
helpsTestParseInt16OrDefault(t, 0, true, "abc", 0, "Invalid string")
helpsTestParseInt16OrDefault(t, 42, true, "+= 23", 42, "Invalid string")
helpsTestParseInt16OrDefault(t, 42, true, "123.45", 42, "float like")
}
func helpsTestGetNumberOfChars(t *testing.T, expected uint32, fromCoord COORD, toCoord COORD, screenSize COORD, format string, args ...interface{}) {
actual := getNumberOfChars(fromCoord, toCoord, screenSize)
mesg := fmt.Sprintf(format, args...)
assertTrue(t, expected == actual, fmt.Sprintf("%s Expected=%d, Actual=%d, Parameters = { fromCoord=%+v, toCoord=%+v, screenSize=%+v", mesg, expected, actual, fromCoord, toCoord, screenSize))
}
func TestGetNumberOfChars(t *testing.T) {
// Note: the columns and lines are 0-based.
// The interval is inclusive: it contains both the start and the end characters.
// This test only checks the number of characters being written.
// all four corners
maxWindow := COORD{X: 80, Y: 50}
leftTop := COORD{X: 0, Y: 0}
rightTop := COORD{X: 79, Y: 0}
leftBottom := COORD{X: 0, Y: 49}
rightBottom := COORD{X: 79, Y: 49}
// same position
helpsTestGetNumberOfChars(t, 1, COORD{X: 1, Y: 14}, COORD{X: 1, Y: 14}, COORD{X: 80, Y: 50}, "Same position random line")
// four corners
helpsTestGetNumberOfChars(t, 1, leftTop, leftTop, maxWindow, "Same position- leftTop")
helpsTestGetNumberOfChars(t, 1, rightTop, rightTop, maxWindow, "Same position- rightTop")
helpsTestGetNumberOfChars(t, 1, leftBottom, leftBottom, maxWindow, "Same position- leftBottom")
helpsTestGetNumberOfChars(t, 1, rightBottom, rightBottom, maxWindow, "Same position- rightBottom")
// from this char to next char on same line
helpsTestGetNumberOfChars(t, 2, COORD{X: 0, Y: 0}, COORD{X: 1, Y: 0}, maxWindow, "Next position on same line")
helpsTestGetNumberOfChars(t, 2, COORD{X: 1, Y: 14}, COORD{X: 2, Y: 14}, maxWindow, "Next position on same line")
// from this char to next 10 chars on same line
helpsTestGetNumberOfChars(t, 11, COORD{X: 0, Y: 0}, COORD{X: 10, Y: 0}, maxWindow, "Next position on same line")
helpsTestGetNumberOfChars(t, 11, COORD{X: 1, Y: 14}, COORD{X: 11, Y: 14}, maxWindow, "Next position on same line")
helpsTestGetNumberOfChars(t, 5, COORD{X: 3, Y: 11}, COORD{X: 7, Y: 11}, maxWindow, "To and from on same line")
helpsTestGetNumberOfChars(t, 8, COORD{X: 0, Y: 34}, COORD{X: 7, Y: 34}, maxWindow, "Start of line to middle")
helpsTestGetNumberOfChars(t, 4, COORD{X: 76, Y: 34}, COORD{X: 79, Y: 34}, maxWindow, "Middle to end of line")
// multiple lines - 1
helpsTestGetNumberOfChars(t, 81, COORD{X: 0, Y: 0}, COORD{X: 0, Y: 1}, maxWindow, "one line below same X")
helpsTestGetNumberOfChars(t, 81, COORD{X: 10, Y: 10}, COORD{X: 10, Y: 11}, maxWindow, "one line below same X")
// multiple lines - 2
helpsTestGetNumberOfChars(t, 161, COORD{X: 0, Y: 0}, COORD{X: 0, Y: 2}, maxWindow, "one line below same X")
helpsTestGetNumberOfChars(t, 161, COORD{X: 10, Y: 10}, COORD{X: 10, Y: 12}, maxWindow, "one line below same X")
// multiple lines - 3
helpsTestGetNumberOfChars(t, 241, COORD{X: 0, Y: 0}, COORD{X: 0, Y: 3}, maxWindow, "one line below same X")
helpsTestGetNumberOfChars(t, 241, COORD{X: 10, Y: 10}, COORD{X: 10, Y: 13}, maxWindow, "one line below same X")
// full line
helpsTestGetNumberOfChars(t, 80, COORD{X: 0, Y: 0}, COORD{X: 79, Y: 0}, maxWindow, "Full line - first")
helpsTestGetNumberOfChars(t, 80, COORD{X: 0, Y: 23}, COORD{X: 79, Y: 23}, maxWindow, "Full line - random")
helpsTestGetNumberOfChars(t, 80, COORD{X: 0, Y: 49}, COORD{X: 79, Y: 49}, maxWindow, "Full line - last")
// full screen
helpsTestGetNumberOfChars(t, 80*50, leftTop, rightBottom, maxWindow, "full screen")
helpsTestGetNumberOfChars(t, 80*50-1, COORD{X: 1, Y: 0}, rightBottom, maxWindow, "dropping first char to, end of screen")
helpsTestGetNumberOfChars(t, 80*50-2, COORD{X: 2, Y: 0}, rightBottom, maxWindow, "dropping first two char to, end of screen")
helpsTestGetNumberOfChars(t, 80*50-1, leftTop, COORD{X: 78, Y: 49}, maxWindow, "from start of screen, till last char-1")
helpsTestGetNumberOfChars(t, 80*50-2, leftTop, COORD{X: 77, Y: 49}, maxWindow, "from start of screen, till last char-2")
helpsTestGetNumberOfChars(t, 80*50-5, COORD{X: 4, Y: 0}, COORD{X: 78, Y: 49}, COORD{X: 80, Y: 50}, "from start of screen+4, till last char-1")
helpsTestGetNumberOfChars(t, 80*50-6, COORD{X: 4, Y: 0}, COORD{X: 77, Y: 49}, COORD{X: 80, Y: 50}, "from start of screen+4, till last char-2")
}
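Every expectation above is consistent with a simple row-major, endpoint-inclusive count; a hedged reconstruction (the real implementation lives in the suppressed console file):

func getNumberOfCharsSketch(from, to, screenSize COORD) uint32 {
    rows := int32(to.Y) - int32(from.Y)
    cols := int32(to.X) - int32(from.X)
    return uint32(rows*int32(screenSize.X) + cols + 1)
}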
var allForeground = []int16{
ANSI_FOREGROUND_BLACK,
ANSI_FOREGROUND_RED,
ANSI_FOREGROUND_GREEN,
ANSI_FOREGROUND_YELLOW,
ANSI_FOREGROUND_BLUE,
ANSI_FOREGROUND_MAGENTA,
ANSI_FOREGROUND_CYAN,
ANSI_FOREGROUND_WHITE,
ANSI_FOREGROUND_DEFAULT,
}
var allBackground = []int16{
ANSI_BACKGROUND_BLACK,
ANSI_BACKGROUND_RED,
ANSI_BACKGROUND_GREEN,
ANSI_BACKGROUND_YELLOW,
ANSI_BACKGROUND_BLUE,
ANSI_BACKGROUND_MAGENTA,
ANSI_BACKGROUND_CYAN,
ANSI_BACKGROUND_WHITE,
ANSI_BACKGROUND_DEFAULT,
}
func maskForeground(flag WORD) WORD {
return flag & FOREGROUND_MASK_UNSET
}
func onlyForeground(flag WORD) WORD {
return flag & FOREGROUND_MASK_SET
}
func maskBackground(flag WORD) WORD {
return flag & BACKGROUND_MASK_UNSET
}
func onlyBackground(flag WORD) WORD {
return flag & BACKGROUND_MASK_SET
}
func helpsTestGetWindowsTextAttributeForAnsiValue(t *testing.T, oldValue WORD /*, expected WORD*/, ansi int16, onlyMask WORD, restMask WORD) WORD {
actual, err := getWindowsTextAttributeForAnsiValue(oldValue, FOREGROUND_MASK_SET, ansi)
assertTrue(t, nil == err, "Should be no error")
// assert that other bits are not affected
if 0 != oldValue {
assertTrue(t, (actual&restMask) == (oldValue&restMask), "The operation should not have affected other bits actual=%X oldValue=%X ansi=%d", actual, oldValue, ansi)
}
return actual
}
func TestBackgroundForAnsiValue(t *testing.T) {
// Check that nothing else changes
// background changes
for _, state1 := range allBackground {
for _, state2 := range allBackground {
flag := WORD(0)
flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state1, BACKGROUND_MASK_SET, BACKGROUND_MASK_UNSET)
flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state2, BACKGROUND_MASK_SET, BACKGROUND_MASK_UNSET)
}
}
// cumulative background changes
for _, state1 := range allBackground {
flag := WORD(0)
for _, state2 := range allBackground {
flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state1, BACKGROUND_MASK_SET, BACKGROUND_MASK_UNSET)
flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state2, BACKGROUND_MASK_SET, BACKGROUND_MASK_UNSET)
}
}
// change background after foreground
for _, state1 := range allForeground {
for _, state2 := range allBackground {
flag := WORD(0)
flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state1, FOREGROUND_MASK_SET, FOREGROUND_MASK_UNSET)
flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state2, BACKGROUND_MASK_SET, BACKGROUND_MASK_UNSET)
}
}
// change background after change cumulative
for _, state1 := range allForeground {
flag := WORD(0)
for _, state2 := range allBackground {
flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state1, FOREGROUND_MASK_SET, FOREGROUND_MASK_UNSET)
flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state2, BACKGROUND_MASK_SET, BACKGROUND_MASK_UNSET)
}
}
}
func TestForegroundForAnsiValue(t *testing.T) {
// Check that nothing else changes
for _, state1 := range allForeground {
for _, state2 := range allForeground {
flag := WORD(0)
flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state1, FOREGROUND_MASK_SET, FOREGROUND_MASK_UNSET)
flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state2, FOREGROUND_MASK_SET, FOREGROUND_MASK_UNSET)
}
}
for _, state1 := range allForeground {
flag := WORD(0)
for _, state2 := range allForeground {
flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state1, FOREGROUND_MASK_SET, FOREGROUND_MASK_UNSET)
flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state2, FOREGROUND_MASK_SET, FOREGROUND_MASK_UNSET)
}
}
for _, state1 := range allBackground {
for _, state2 := range allForeground {
flag := WORD(0)
flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state1, BACKGROUND_MASK_SET, BACKGROUND_MASK_UNSET)
flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state2, FOREGROUND_MASK_SET, FOREGROUND_MASK_UNSET)
}
}
for _, state1 := range allBackground {
flag := WORD(0)
for _, state2 := range allForeground {
flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state1, BACKGROUND_MASK_SET, BACKGROUND_MASK_UNSET)
flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state2, FOREGROUND_MASK_SET, FOREGROUND_MASK_UNSET)
}
}
}

View File

@ -0,0 +1,218 @@
package winconsole
import (
"io"
"strconv"
"strings"
)
// http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html
const (
ANSI_ESCAPE_PRIMARY = 0x1B
ANSI_ESCAPE_SECONDARY = 0x5B
ANSI_COMMAND_FIRST = 0x40
ANSI_COMMAND_LAST = 0x7E
ANSI_PARAMETER_SEP = ";"
ANSI_CMD_G0 = '('
ANSI_CMD_G1 = ')'
ANSI_CMD_G2 = '*'
ANSI_CMD_G3 = '+'
ANSI_CMD_DECPNM = '>'
ANSI_CMD_DECPAM = '='
ANSI_CMD_OSC = ']'
ANSI_CMD_STR_TERM = '\\'
ANSI_BEL = 0x07
KEY_EVENT = 1
)
// terminalEmulator is the interface implemented by terminal emulation handlers
type terminalEmulator interface {
HandleOutputCommand(fd uintptr, command []byte) (n int, err error)
HandleInputSequence(fd uintptr, command []byte) (n int, err error)
WriteChars(fd uintptr, w io.Writer, p []byte) (n int, err error)
ReadChars(fd uintptr, w io.Reader, p []byte) (n int, err error)
}
type terminalWriter struct {
wrappedWriter io.Writer
emulator terminalEmulator
command []byte
inSequence bool
fd uintptr
}
type terminalReader struct {
wrappedReader io.ReadCloser
emulator terminalEmulator
command []byte
inSequence bool
fd uintptr
}
// http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html
func isAnsiCommandChar(b byte) bool {
switch {
case ANSI_COMMAND_FIRST <= b && b <= ANSI_COMMAND_LAST && b != ANSI_ESCAPE_SECONDARY:
return true
case b == ANSI_CMD_G1 || b == ANSI_CMD_OSC || b == ANSI_CMD_DECPAM || b == ANSI_CMD_DECPNM:
// non-CSI escape sequence terminator
return true
case b == ANSI_CMD_STR_TERM || b == ANSI_BEL:
// String escape sequence terminator
return true
}
return false
}
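For example, in the CSI color sequence ESC [ 3 1 m only the trailing 'm' terminates command collection:

_ = isAnsiCommandChar('m') // true: 0x6D is in [0x40, 0x7E] and is not '['
_ = isAnsiCommandChar(';') // false: parameter separator, sequence continues
_ = isAnsiCommandChar('[') // false: ANSI_ESCAPE_SECONDARY is excluded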
func isCharacterSelectionCmdChar(b byte) bool {
return (b == ANSI_CMD_G0 || b == ANSI_CMD_G1 || b == ANSI_CMD_G2 || b == ANSI_CMD_G3)
}
func isXtermOscSequence(command []byte, current byte) bool {
return (len(command) >= 2 && command[0] == ANSI_ESCAPE_PRIMARY && command[1] == ANSI_CMD_OSC && current != ANSI_BEL)
}
// Write writes len(p) bytes from p to the underlying data stream.
// http://golang.org/pkg/io/#Writer
func (tw *terminalWriter) Write(p []byte) (n int, err error) {
if len(p) == 0 {
return 0, nil
}
if tw.emulator == nil {
return tw.wrappedWriter.Write(p)
}
// Emulate terminal by extracting commands and executing them
totalWritten := 0
start := 0 // indicates start of the next chunk
end := len(p)
for current := 0; current < end; current++ {
if tw.inSequence {
// inside escape sequence
tw.command = append(tw.command, p[current])
if isAnsiCommandChar(p[current]) {
if !isXtermOscSequence(tw.command, p[current]) {
// found the last command character.
// Now we have a complete command.
nchar, err := tw.emulator.HandleOutputCommand(tw.fd, tw.command)
totalWritten += nchar
if err != nil {
return totalWritten, err
}
// clear the command
// don't include current character again
tw.command = tw.command[:0]
start = current + 1
tw.inSequence = false
}
}
} else {
if p[current] == ANSI_ESCAPE_PRIMARY {
// entering escape sequence
tw.inSequence = true
// the escape byte ends the "normal" run; write out whatever we have so far
if len(p[start:current]) > 0 {
nw, err := tw.emulator.WriteChars(tw.fd, tw.wrappedWriter, p[start:current])
totalWritten += nw
if err != nil {
return totalWritten, err
}
}
// include the current character as part of the next sequence
tw.command = append(tw.command, p[current])
}
}
}
// So far, only the start of an escape sequence has triggered writing bytes out to the console.
// The part _after_ the last escape sequence has not been written out yet, so write it now.
if !tw.inSequence {
// we cannot be inside a sequence here, so the pending command buffer should be empty
if len(p[start:]) > 0 {
nw, err := tw.emulator.WriteChars(tw.fd, tw.wrappedWriter, p[start:])
totalWritten += nw
if err != nil {
return totalWritten, err
}
}
}
return totalWritten, nil
}
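A hedged, test-style sketch of the chunking behavior; passthroughEmulator is invented here purely to satisfy the terminalEmulator interface (assumes bytes, fmt, and io imports):

type passthroughEmulator struct{}

func (passthroughEmulator) HandleOutputCommand(fd uintptr, command []byte) (int, error) {
    return len(command), nil // pretend the ANSI command was rendered
}
func (passthroughEmulator) HandleInputSequence(fd uintptr, command []byte) (int, error) {
    return 0, nil
}
func (passthroughEmulator) WriteChars(fd uintptr, w io.Writer, p []byte) (int, error) {
    return w.Write(p)
}
func (passthroughEmulator) ReadChars(fd uintptr, r io.Reader, p []byte) (int, error) {
    return r.Read(p)
}

func ExampleTerminalWriterSketch() {
    var buf bytes.Buffer
    tw := &terminalWriter{wrappedWriter: &buf, emulator: passthroughEmulator{}}
    tw.Write([]byte("plain \x1b[31mred\x1b[0m"))
    fmt.Println(buf.String()) // "plain red"
}

Both color sequences are routed to HandleOutputCommand rather than written through, so only the plain text reaches the wrapped writer.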
// Read reads up to len(p) bytes into p.
// http://golang.org/pkg/io/#Reader
func (tr *terminalReader) Read(p []byte) (n int, err error) {
// Implementations of Read are discouraged from returning a zero byte count
// with a nil error, except when len(p) == 0.
if len(p) == 0 {
return 0, nil
}
if nil == tr.emulator {
return tr.readFromWrappedReader(p)
}
return tr.emulator.ReadChars(tr.fd, tr.wrappedReader, p)
}
// Close the underlying stream
func (tr *terminalReader) Close() (err error) {
return tr.wrappedReader.Close()
}
func (tr *terminalReader) readFromWrappedReader(p []byte) (n int, err error) {
return tr.wrappedReader.Read(p)
}
type ansiCommand struct {
CommandBytes []byte
Command string
Parameters []string
IsSpecial bool
}
func parseAnsiCommand(command []byte) *ansiCommand {
if isCharacterSelectionCmdChar(command[1]) {
// this is a character set selection command
return &ansiCommand{
CommandBytes: command,
Command: string(command),
IsSpecial: true,
}
}
// last char is command character
lastCharIndex := len(command) - 1
retValue := &ansiCommand{
CommandBytes: command,
Command: string(command[lastCharIndex]),
IsSpecial: false,
}
// more than a single escape
if lastCharIndex != 0 {
start := 1
// skip if double char escape sequence
if command[0] == ANSI_ESCAPE_PRIMARY && command[1] == ANSI_ESCAPE_SECONDARY {
start++
}
// convert this to GetNextParam method
retValue.Parameters = strings.Split(string(command[start:lastCharIndex]), ANSI_PARAMETER_SEP)
}
return retValue
}
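Worked example: the CSI sequence ESC [ 1 ; 3 4 m parses as follows (a sketch, not part of this diff):

cmd := parseAnsiCommand([]byte("\x1b[1;34m"))
// cmd.Command == "m"
// cmd.Parameters == []string{"1", "34"}
// cmd.getParam(0) == "1"; cmd.getParam(5) == "" (out of range)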
func (c *ansiCommand) getParam(index int) string {
if len(c.Parameters) > index {
return c.Parameters[index]
}
return ""
}
func parseInt16OrDefault(s string, defaultValue int16) (n int16, err error) {
if s == "" {
return defaultValue, nil
}
parsedValue, err := strconv.ParseInt(s, 10, 16)
if err != nil {
return defaultValue, err
}
return int16(parsedValue), nil
}

Some files were not shown because too many files have changed in this diff.