diff --git a/CHANGELOG.md b/CHANGELOG.md index cfbd86cd75..968c6696f2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,105 @@ # Changelog +## 0.6.1 (2013-08-23) +* Registry: Pass "meta" headers in API calls to the registry +- Packaging: Use correct upstart script with new build tool +- Packaging: Use libffi-dev, don't build it from sources +- Packaging: Removed duplicate mercurial install command + +## 0.6.0 (2013-08-22) +- Runtime: Load authConfig only when needed and fix useless WARNING ++ Runtime: Add lxc-conf flag to allow custom lxc options +- Runtime: Fix race conditions in parallel pull +- Runtime: Improve CMD, ENTRYPOINT, and attach docs. +* Documentation: Small fix to docs regarding adding docker groups +* Documentation: Add MongoDB image example ++ Builder: Add USER instruction do Dockerfile +* Documentation: updated default -H docs +* Remote API: Sort Images by most recent creation date. ++ Builder: Add workdir support for the Buildfile ++ Runtime: Add an option to set the working directory +- Runtime: Show tag used when image is missing +* Documentation: Update readme with dependencies for building +* Documentation: Add instructions for creating and using the docker group +* Remote API: Reworking opaque requests in registry module +- Runtime: Fix Graph ByParent() to generate list of child images per parent image. 
+* Runtime: Add Image name to LogEvent tests +* Documentation: Add sudo to examples and installation to documentation ++ Hack: Bash Completion: Limit commands to containers of a relevant state +* Remote API: Add image name in /events +* Runtime: Apply volumes-from before creating volumes +- Runtime: Make docker run handle SIGINT/SIGTERM +- Runtime: Prevent crash when .dockercfg not readable +* Hack: Add docker dependencies coverage testing into docker-ci ++ Runtime: Add -privileged flag and relevant tests, docs, and examples ++ Packaging: Docker-brew 0.5.2 support and memory footprint reduction +- Runtime: Install script should be fetched over https, not http. +* Packaging: Add new docker dependencies into docker-ci +* Runtime: Use Go 1.1.2 for dockerbuilder +* Registry: Improve auth push +* Runtime: API, issue 1471: Use groups for socket permissions +* Documentation: PostgreSQL service example in documentation +* Contrib: bash completion script +* Tests: Improve TestKillDifferentUser to prevent timeout on buildbot +* Documentation: Fix typo in docs for docker run -dns +* Documentation: Adding a reference to ps -a +- Runtime: Correctly detect IPv4 forwarding +- Packaging: Revert "docker.upstart: avoid spawning a `sh` process" +* Runtime: Use ranged for loop on channels +- Runtime: Fix typo: fmt.Sprint -> fmt.Sprintf +- Tests: Fix typo in TestBindMounts (runContainer called without image) +* Runtime: add websocket support to /container//attach/ws +* Runtime: Mount /dev/shm as a tmpfs +- Builder: Only count known instructions as build steps +- Builder: Fix docker build and docker events output +- Runtime: switch from http to https for get.docker.io +* Tests: Improve TestGetContainersTop so it does not rely on sleep ++ Packaging: Docker-brew and Docker standard library +* Testing: Add some tests in server and utils ++ Packaging: Release docker with docker +- Builder: Make sure ENV instruction within build perform a commit each time +* Packaging: Fix the upstart script 
generated by get.docker.io +- Runtime: fix small \n error un docker build +* Runtime: Let userland proxy handle container-bound traffic +* Runtime: Updated the Docker CLI to specify a value for the "Host" header. +* Runtime: Add warning when net.ipv4.ip_forwarding = 0 +* Registry: Registry unit tests + mock registry +* Runtime: fixed #910. print user name to docker info output +- Builder: Forbid certain paths within docker build ADD +- Runtime: change network range to avoid conflict with EC2 DNS +* Tests: Relax the lo interface test to allow iface index != 1 +* Documentation: Suggest installing linux-headers by default. +* Documentation: Change the twitter handle +* Client: Add docker cp command and copy api endpoint to copy container files/folders to the host +* Remote API: Use mime pkg to parse Content-Type +- Runtime: Reduce connect and read timeout when pinging the registry +* Documentation: Update amazon.rst to explain that Vagrant is not necessary for running Docker on ec2 +* Packaging: Enabled the docs to generate manpages. +* Runtime: Parallel pull +- Runtime: Handle ip route showing mask-less IP addresses +* Documentation: Clarify Amazon EC2 installation +* Documentation: 'Base' image is deprecated and should no longer be referenced in the docs. +* Runtime: Fix to "Inject dockerinit at /.dockerinit" +* Runtime: Allow ENTRYPOINT without CMD +- Runtime: Always consider localhost as a domain name when parsing the FQN repos name +* Remote API: 650 http utils and user agent field +* Documentation: fix a typo in the ubuntu installation guide +- Builder: Repository name (and optionally a tag) in build usage +* Documentation: Move note about officially supported kernel +* Packaging: Revert "Bind daemon to 0.0.0.0 in Vagrant. 
+* Builder: Add no cache for docker build +* Runtime: Add hostname to environment +* Runtime: Add last stable version in `docker version` +- Builder: Make sure ADD will create everything in 0755 +* Documentation: Add ufw doc +* Tests: Add registry functional test to docker-ci +- Documentation: Solved the logo being squished in Safari +- Runtime: Use utils.ParseRepositoryTag instead of strings.Split(name, ":") in server.ImageDelete +* Runtime: Refactor checksum +- Runtime: Improve connect message with socket error +* Documentation: Added information about Docker's high level tools over LXC. +* Don't read from stdout when only attached to stdin + ## 0.5.3 (2013-08-13) * Runtime: Use docker group for socket permissions - Runtime: Spawn shell within upstart script diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7d90e28ca8..d5438c3eae 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -23,7 +23,7 @@ that feature *on top of* docker. ### Discuss your design on the mailing list We recommend discussing your plans [on the mailing -list](https://groups.google.com/forum/?fromgroups#!forum/docker-club) +list](https://groups.google.com/forum/?fromgroups#!forum/docker-dev) before starting to code - especially for more ambitious contributions. 
This gives other contributors a chance to point you in the right direction, give feedback on your design, and maybe point out if someone diff --git a/Dockerfile b/Dockerfile index a7a7724ce5..8694f07c37 100644 --- a/Dockerfile +++ b/Dockerfile @@ -3,32 +3,35 @@ docker-version 0.4.2 from ubuntu:12.04 maintainer Solomon Hykes # Build dependencies +run echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt/sources.list +run apt-get update run apt-get install -y -q curl run apt-get install -y -q git +run apt-get install -y -q mercurial # Install Go -run curl -s https://go.googlecode.com/files/go1.1.1.linux-amd64.tar.gz | tar -v -C /usr/local -xz +run curl -s https://go.googlecode.com/files/go1.1.2.linux-amd64.tar.gz | tar -v -C /usr/local -xz env PATH /usr/local/go/bin:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin env GOPATH /go env CGO_ENABLED 0 run cd /tmp && echo 'package main' > t.go && go test -a -i -v +# Ubuntu stuff +run apt-get install -y -q ruby1.9.3 rubygems libffi-dev +run gem install fpm +run apt-get install -y -q reprepro dpkg-sig +# Install s3cmd 1.0.1 (earlier versions don't support env variables in the config) +run apt-get install -y -q python-pip +run pip install s3cmd +run pip install python-magic +run /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY\n' > /.s3cfg # Download dependencies run PKG=github.com/kr/pty REV=27435c699; git clone http://$PKG /go/src/$PKG && cd /go/src/$PKG && git checkout -f $REV run PKG=github.com/gorilla/context/ REV=708054d61e5; git clone http://$PKG /go/src/$PKG && cd /go/src/$PKG && git checkout -f $REV run PKG=github.com/gorilla/mux/ REV=9b36453141c; git clone http://$PKG /go/src/$PKG && cd /go/src/$PKG && git checkout -f $REV run PKG=github.com/dotcloud/tar/ REV=d06045a6d9; git clone http://$PKG /go/src/$PKG && cd /go/src/$PKG && git checkout -f $REV -# Run dependencies -run apt-get install -y iptables -# lxc requires updating ubuntu sources -run echo 
'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt/sources.list -run apt-get update -run apt-get install -y lxc -run apt-get install -y aufs-tools -# Docker requires code.google.com/p/go.net/websocket -run apt-get install -y -q mercurial -run PKG=code.google.com/p/go.net REV=78ad7f42aa2e; hg clone https://$PKG /go/src/$PKG && cd /go/src/$PKG && hg checkout -r $REV +run PKG=code.google.com/p/go.net/ REV=84a4013f96e0; hg clone http://$PKG /go/src/$PKG && cd /go/src/$PKG && hg checkout $REV # Upload docker source add . /go/src/github.com/dotcloud/docker +run ln -s /go/src/github.com/dotcloud/docker /src # Build the binary -run cd /go/src/github.com/dotcloud/docker/docker && go install -ldflags "-X main.GITCOMMIT '??' -d -w" -env PATH /usr/local/go/bin:/go/bin:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin -cmd ["docker"] +run cd /go/src/github.com/dotcloud/docker && hack/release/make.sh +cmd cd /go/src/github.com/dotcloud/docker && hack/release/release.sh diff --git a/Makefile b/Makefile deleted file mode 100644 index dd365dc30e..0000000000 --- a/Makefile +++ /dev/null @@ -1,95 +0,0 @@ -DOCKER_PACKAGE := github.com/dotcloud/docker -RELEASE_VERSION := $(shell git tag | grep -E "v[0-9\.]+$$" | sort -nr | head -n 1) -SRCRELEASE := docker-$(RELEASE_VERSION) -BINRELEASE := docker-$(RELEASE_VERSION).tgz -BUILD_SRC := build_src -BUILD_PATH := ${BUILD_SRC}/src/${DOCKER_PACKAGE} - -GIT_ROOT := $(shell git rev-parse --show-toplevel) -BUILD_DIR := $(CURDIR)/.gopath - -GOPATH ?= $(BUILD_DIR) -export GOPATH - -GO_OPTIONS ?= -a -ldflags='-w -d' -ifeq ($(VERBOSE), 1) -GO_OPTIONS += -v -endif - -GIT_COMMIT = $(shell git rev-parse --short HEAD) -GIT_STATUS = $(shell test -n "`git status --porcelain`" && echo "+CHANGES") - -BUILD_OPTIONS = -a -ldflags "-X main.GITCOMMIT $(GIT_COMMIT)$(GIT_STATUS) -d -w" - -SRC_DIR := $(GOPATH)/src - -DOCKER_DIR := $(SRC_DIR)/$(DOCKER_PACKAGE) -DOCKER_MAIN := $(DOCKER_DIR)/docker - -DOCKER_BIN_RELATIVE := bin/docker 
-DOCKER_BIN := $(CURDIR)/$(DOCKER_BIN_RELATIVE) - -.PHONY: all clean test hack release srcrelease $(BINRELEASE) $(SRCRELEASE) $(DOCKER_BIN) $(DOCKER_DIR) - -all: $(DOCKER_BIN) - -$(DOCKER_BIN): $(DOCKER_DIR) - @mkdir -p $(dir $@) - @(cd $(DOCKER_MAIN); CGO_ENABLED=0 go build $(GO_OPTIONS) $(BUILD_OPTIONS) -o $@) - @echo $(DOCKER_BIN_RELATIVE) is created. - -$(DOCKER_DIR): - @mkdir -p $(dir $@) - @if [ -h $@ ]; then rm -f $@; fi; ln -sf $(CURDIR)/ $@ - @(cd $(DOCKER_MAIN); go get -d $(GO_OPTIONS)) - -whichrelease: - echo $(RELEASE_VERSION) - -release: $(BINRELEASE) - s3cmd -P put $(BINRELEASE) s3://get.docker.io/builds/`uname -s`/`uname -m`/docker-$(RELEASE_VERSION).tgz - s3cmd -P put docker-latest.tgz s3://get.docker.io/builds/`uname -s`/`uname -m`/docker-latest.tgz - s3cmd -P put $(SRCRELEASE)/bin/docker s3://get.docker.io/builds/`uname -s`/`uname -m`/docker - echo $(RELEASE_VERSION) > latest ; s3cmd -P put latest s3://get.docker.io/latest ; rm latest - -srcrelease: $(SRCRELEASE) -deps: $(DOCKER_DIR) - -# A clean checkout of $RELEASE_VERSION, with vendored dependencies -$(SRCRELEASE): - rm -fr $(SRCRELEASE) - git clone $(GIT_ROOT) $(SRCRELEASE) - cd $(SRCRELEASE); git checkout -q $(RELEASE_VERSION) - -# A binary release ready to be uploaded to a mirror -$(BINRELEASE): $(SRCRELEASE) - rm -f $(BINRELEASE) - cd $(SRCRELEASE); make; cp -R bin docker-$(RELEASE_VERSION); tar -f ../$(BINRELEASE) -zv -c docker-$(RELEASE_VERSION) - cd $(SRCRELEASE); cp -R bin docker-latest; tar -f ../docker-latest.tgz -zv -c docker-latest -clean: - @rm -rf $(dir $(DOCKER_BIN)) -ifeq ($(GOPATH), $(BUILD_DIR)) - @rm -rf $(BUILD_DIR) -else ifneq ($(DOCKER_DIR), $(realpath $(DOCKER_DIR))) - @rm -f $(DOCKER_DIR) -endif - -test: - # Copy docker source and dependencies for testing - rm -rf ${BUILD_SRC}; mkdir -p ${BUILD_PATH} - tar --exclude=${BUILD_SRC} -cz . 
| tar -xz -C ${BUILD_PATH} - GOPATH=${CURDIR}/${BUILD_SRC} go get -d - # Do the test - sudo -E GOPATH=${CURDIR}/${BUILD_SRC} CGO_ENABLED=0 go test ${GO_OPTIONS} - -testall: all - @(cd $(DOCKER_DIR); CGO_ENABLED=0 sudo -E go test ./... $(GO_OPTIONS)) - -fmt: - @gofmt -s -l -w . - -hack: - cd $(CURDIR)/hack && vagrant up - -ssh-dev: - cd $(CURDIR)/hack && vagrant ssh diff --git a/README.md b/README.md index 89767a9cce..ddf3f4d805 100644 --- a/README.md +++ b/README.md @@ -163,29 +163,6 @@ supported. * [Windows (with Vagrant)](http://docs.docker.io/en/latest/installation/windows/) * [Amazon EC2 (with Vagrant)](http://docs.docker.io/en/latest/installation/amazon/) -Installing from source ----------------------- - -1. Install Dependencies - * [Go language 1.1.x](http://golang.org/doc/install) - * [git](http://git-scm.com) - * [lxc](http://lxc.sourceforge.net) - * [aufs-tools](http://aufs.sourceforge.net) - -2. Checkout the source code - - ```bash - git clone http://github.com/dotcloud/docker - ``` - -3. Build the ``docker`` binary - - ```bash - cd docker - make VERBOSE=1 - sudo cp ./bin/docker /usr/local/bin/docker - ``` - Usage examples ============== @@ -305,8 +282,7 @@ Contributing to Docker ====================== Want to hack on Docker? Awesome! There are instructions to get you -started on the website: -http://docs.docker.io/en/latest/contributing/contributing/ +started [here](CONTRIBUTING.md). They are probably not perfect, please let us know if anything feels wrong or incomplete. diff --git a/VERSION b/VERSION new file mode 100644 index 0000000000..2feed2fef8 --- /dev/null +++ b/VERSION @@ -0,0 +1 @@ +0.6.1-dev diff --git a/Vagrantfile b/Vagrantfile index 18aa5b5afb..4cee3a04d0 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -17,11 +17,12 @@ Vagrant::Config.run do |config| # Provision docker and new kernel if deployment was not done if Dir.glob("#{File.dirname(__FILE__)}/.vagrant/machines/default/*/id").empty? 
# Add lxc-docker package - pkg_cmd = "apt-get update -qq; apt-get install -q -y python-software-properties; " \ - "add-apt-repository -y ppa:dotcloud/lxc-docker; apt-get update -qq; " \ - "apt-get install -q -y lxc-docker; " + pkg_cmd = "wget -q -O - http://get.docker.io/gpg | apt-key add -;" \ + "echo deb https://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list;" \ + "apt-get update -qq; apt-get install -q -y --force-yes lxc-docker; " # Add X.org Ubuntu backported 3.8 kernel - pkg_cmd << "add-apt-repository -y ppa:ubuntu-x-swat/r-lts-backport; " \ + pkg_cmd << "apt-get update -qq; apt-get install -q -y python-software-properties; " \ + "add-apt-repository -y ppa:ubuntu-x-swat/r-lts-backport; " \ "apt-get update -qq; apt-get install -q -y linux-image-3.8.0-19-generic; " # Add guest additions if local vbox VM is_vbox = true diff --git a/api.go b/api.go index 18f42c446d..9e094231b5 100644 --- a/api.go +++ b/api.go @@ -101,7 +101,7 @@ func postAuth(srv *Server, version float64, w http.ResponseWriter, r *http.Reque if err != nil { return err } - status, err := auth.Login(authConfig, srv.HTTPRequestFactory()) + status, err := auth.Login(authConfig, srv.HTTPRequestFactory(nil)) if err != nil { return err } @@ -399,7 +399,13 @@ func postImagesCreate(srv *Server, version float64, w http.ResponseWriter, r *ht } sf := utils.NewStreamFormatter(version > 1.0) if image != "" { //pull - if err := srv.ImagePull(image, tag, w, sf, &auth.AuthConfig{}, version > 1.3); err != nil { + metaHeaders := map[string][]string{} + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + if err := srv.ImagePull(image, tag, w, sf, &auth.AuthConfig{}, metaHeaders, version > 1.3); err != nil { if sf.Used() { w.Write(sf.FormatError(err)) return nil @@ -468,6 +474,12 @@ func postImagesInsert(srv *Server, version float64, w http.ResponseWriter, r *ht func postImagesPush(srv *Server, version float64, w http.ResponseWriter, r 
*http.Request, vars map[string]string) error { authConfig := &auth.AuthConfig{} + metaHeaders := map[string][]string{} + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil { return err } @@ -483,7 +495,7 @@ func postImagesPush(srv *Server, version float64, w http.ResponseWriter, r *http w.Header().Set("Content-Type", "application/json") } sf := utils.NewStreamFormatter(version > 1.0) - if err := srv.ImagePush(name, w, sf, authConfig); err != nil { + if err := srv.ImagePush(name, w, sf, authConfig, metaHeaders); err != nil { if sf.Used() { w.Write(sf.FormatError(err)) return nil @@ -526,7 +538,7 @@ func postContainersCreate(srv *Server, version float64, w http.ResponseWriter, r out.Warnings = append(out.Warnings, "Your kernel does not support memory swap capabilities. Limitation discarded.") } - if !srv.runtime.capabilities.IPv4Forwarding { + if srv.runtime.capabilities.IPv4ForwardingDisabled { log.Println("Warning: IPv4 forwarding is disabled.") out.Warnings = append(out.Warnings, "IPv4 forwarding is disabled.") } diff --git a/auth/auth.go b/auth/auth.go index 003a6e737c..91314877c7 100644 --- a/auth/auth.go +++ b/auth/auth.go @@ -76,7 +76,7 @@ func LoadConfig(rootPath string) (*ConfigFile, error) { configFile := ConfigFile{Configs: make(map[string]AuthConfig), rootPath: rootPath} confFile := path.Join(rootPath, CONFIGFILE) if _, err := os.Stat(confFile); err != nil { - return &configFile, ErrConfigFileMissing + return &configFile, nil //missing file is not an error } b, err := ioutil.ReadFile(confFile) if err != nil { @@ -86,13 +86,13 @@ func LoadConfig(rootPath string) (*ConfigFile, error) { if err := json.Unmarshal(b, &configFile.Configs); err != nil { arr := strings.Split(string(b), "\n") if len(arr) < 2 { - return nil, fmt.Errorf("The Auth config file is empty") + return &configFile, fmt.Errorf("The Auth config file is empty") } authConfig := 
AuthConfig{} origAuth := strings.Split(arr[0], " = ") authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1]) if err != nil { - return nil, err + return &configFile, err } origEmail := strings.Split(arr[1], " = ") authConfig.Email = origEmail[1] @@ -101,7 +101,7 @@ func LoadConfig(rootPath string) (*ConfigFile, error) { for k, authConfig := range configFile.Configs { authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth) if err != nil { - return nil, err + return &configFile, err } authConfig.Auth = "" configFile.Configs[k] = authConfig diff --git a/buildfile.go b/buildfile.go index 0666ba4388..4c8db2c60e 100644 --- a/buildfile.go +++ b/buildfile.go @@ -56,7 +56,7 @@ func (b *buildFile) CmdFrom(name string) error { if err != nil { if b.runtime.graph.IsNotExist(err) { remote, tag := utils.ParseRepositoryTag(name) - if err := b.srv.ImagePull(remote, tag, b.out, utils.NewStreamFormatter(false), nil, true); err != nil { + if err := b.srv.ImagePull(remote, tag, b.out, utils.NewStreamFormatter(false), nil, nil, true); err != nil { return err } image, err = b.runtime.repositories.LookupImage(name) @@ -197,6 +197,11 @@ func (b *buildFile) CmdExpose(args string) error { return b.commit("", b.config.Cmd, fmt.Sprintf("EXPOSE %v", ports)) } +func (b *buildFile) CmdUser(args string) error { + b.config.User = args + return b.commit("", b.config.Cmd, fmt.Sprintf("USER %v", args)) +} + func (b *buildFile) CmdInsert(args string) error { return fmt.Errorf("INSERT has been deprecated. 
Please use ADD instead") } diff --git a/buildfile_test.go b/buildfile_test.go index d89c40d16c..14986161d8 100644 --- a/buildfile_test.go +++ b/buildfile_test.go @@ -270,6 +270,17 @@ func TestBuildMaintainer(t *testing.T) { } } +func TestBuildUser(t *testing.T) { + img := buildImage(testContextTemplate{` + from {IMAGE} + user dockerio + `, nil, nil}, t, nil, true) + + if img.Config.User != "dockerio" { + t.Fail() + } +} + func TestBuildEnv(t *testing.T) { img := buildImage(testContextTemplate{` from {IMAGE} diff --git a/commands.go b/commands.go index 899a8e2f1a..236fb65d10 100644 --- a/commands.go +++ b/commands.go @@ -19,6 +19,7 @@ import ( "os/signal" "path/filepath" "reflect" + "runtime" "strconv" "strings" "syscall" @@ -27,10 +28,9 @@ import ( "unicode" ) -const VERSION = "0.5.3-dev" - var ( GITCOMMIT string + VERSION string ) func (cli *DockerCli) getMethod(name string) (reflect.Method, bool) { @@ -72,7 +72,7 @@ func (cli *DockerCli) CmdHelp(args ...string) error { return nil } } - help := fmt.Sprintf("Usage: docker [OPTIONS] COMMAND [arg...]\n -H=[tcp://%s:%d]: tcp://host:port to bind/connect to or unix://path/to/socket to use\n\nA self-sufficient runtime for linux containers.\n\nCommands:\n", DEFAULTHTTPHOST, DEFAULTHTTPPORT) + help := fmt.Sprintf("Usage: docker [OPTIONS] COMMAND [arg...]\n -H=[unix://%s]: tcp://host:port to bind/connect to or unix://path/to/socket to use\n\nA self-sufficient runtime for linux containers.\n\nCommands:\n", DEFAULTUNIXSOCKET) for _, command := range [][]string{ {"attach", "Attach to a running container"}, {"build", "Build a container from a Dockerfile"}, @@ -303,6 +303,8 @@ func (cli *DockerCli) CmdLogin(args ...string) error { return nil } + cli.LoadConfigFile() + var oldState *term.State if *flUsername == "" || *flPassword == "" || *flEmail == "" { oldState, err = term.SetRawTerminal(cli.terminalFd) @@ -433,6 +435,12 @@ func (cli *DockerCli) CmdVersion(args ...string) error { return nil } + fmt.Fprintf(cli.out, "Client 
version: %s\n", VERSION) + fmt.Fprintf(cli.out, "Go version (client): %s\n", runtime.Version()) + if GITCOMMIT != "" { + fmt.Fprintf(cli.out, "Git commit (client): %s\n", GITCOMMIT) + } + body, _, err := cli.call("GET", "/version", nil) if err != nil { return err @@ -444,13 +452,12 @@ func (cli *DockerCli) CmdVersion(args ...string) error { utils.Debugf("Error unmarshal: body: %s, err: %s\n", body, err) return err } - fmt.Fprintf(cli.out, "Client version: %s\n", VERSION) fmt.Fprintf(cli.out, "Server version: %s\n", out.Version) if out.GitCommit != "" { - fmt.Fprintf(cli.out, "Git commit: %s\n", out.GitCommit) + fmt.Fprintf(cli.out, "Git commit (server): %s\n", out.GitCommit) } if out.GoVersion != "" { - fmt.Fprintf(cli.out, "Go version: %s\n", out.GoVersion) + fmt.Fprintf(cli.out, "Go version (server): %s\n", out.GoVersion) } release := utils.GetReleaseVersion() @@ -498,6 +505,7 @@ func (cli *DockerCli) CmdInfo(args ...string) error { } if len(out.IndexServerAddress) != 0 { + cli.LoadConfigFile() u := cli.configFile.Configs[out.IndexServerAddress].Username if len(u) > 0 { fmt.Fprintf(cli.out, "Username: %v\n", u) @@ -576,15 +584,17 @@ func (cli *DockerCli) CmdStart(args ...string) error { return nil } + var encounteredError error for _, name := range args { _, _, err := cli.call("POST", "/containers/"+name+"/start", nil) if err != nil { fmt.Fprintf(cli.err, "%s\n", err) + encounteredError = fmt.Errorf("Error: failed to start one or more containers") } else { fmt.Fprintf(cli.out, "%s\n", name) } } - return nil + return encounteredError } func (cli *DockerCli) CmdInspect(args ...string) error { @@ -838,12 +848,18 @@ func (cli *DockerCli) CmdPush(args ...string) error { return nil } + cli.LoadConfigFile() + // If we're not using a custom registry, we know the restrictions // applied to repository names and can warn the user in advance. // Custom repositories can have different rules, and we must also // allow pushing by image ID. 
if len(strings.SplitN(name, "/", 2)) == 1 { - return fmt.Errorf("Impossible to push a \"root\" repository. Please rename your repository in / (ex: %s/%s)", cli.configFile.Configs[auth.IndexServerAddress()].Username, name) + username := cli.configFile.Configs[auth.IndexServerAddress()].Username + if username == "" { + username = "" + } + return fmt.Errorf("Impossible to push a \"root\" repository. Please rename your repository in / (ex: %s/%s)", username, name) } v := url.Values{} @@ -1467,15 +1483,18 @@ func (cli *DockerCli) CmdRun(args ...string) error { v := url.Values{} v.Set("logs", "1") v.Set("stream", "1") + var out io.Writer if config.AttachStdin { v.Set("stdin", "1") } if config.AttachStdout { v.Set("stdout", "1") + out = cli.out } if config.AttachStderr { v.Set("stderr", "1") + out = cli.out } signals := make(chan os.Signal, 1) @@ -1489,7 +1508,7 @@ func (cli *DockerCli) CmdRun(args ...string) error { } }() - if err := cli.hijack("POST", "/containers/"+runResult.ID+"/attach?"+v.Encode(), config.Tty, cli.in, cli.out); err != nil { + if err := cli.hijack("POST", "/containers/"+runResult.ID+"/attach?"+v.Encode(), config.Tty, cli.in, out); err != nil { utils.Debugf("Error hijack: %s", err) return err } @@ -1515,6 +1534,10 @@ func (cli *DockerCli) CmdCp(args ...string) error { var copyData APICopy info := strings.Split(cmd.Arg(0), ":") + if len(info) != 2 { + return fmt.Errorf("Error: Resource not specified") + } + copyData.Resource = info[1] copyData.HostPath = cmd.Arg(1) @@ -1661,11 +1684,14 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea rwc, br := clientconn.Hijack() defer rwc.Close() - receiveStdout := utils.Go(func() error { - _, err := io.Copy(out, br) - utils.Debugf("[hijack] End of stdout") - return err - }) + var receiveStdout (chan error) + if out != nil { + receiveStdout = utils.Go(func() error { + _, err := io.Copy(out, br) + utils.Debugf("[hijack] End of stdout") + return err + }) + } if in != nil && 
setRawTerminal && cli.isTerminal && os.Getenv("NORAW") == "" { oldState, err := term.SetRawTerminal(cli.terminalFd) @@ -1693,9 +1719,11 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea return nil }) - if err := <-receiveStdout; err != nil { - utils.Debugf("Error receiveStdout: %s", err) - return err + if out != nil { + if err := <-receiveStdout; err != nil { + utils.Debugf("Error receiveStdout: %s", err) + return err + } } if !cli.isTerminal { @@ -1761,6 +1789,14 @@ func Subcmd(name, signature, description string) *flag.FlagSet { return flags } +func (cli *DockerCli) LoadConfigFile() (err error) { + cli.configFile, err = auth.LoadConfig(os.Getenv("HOME")) + if err != nil { + fmt.Fprintf(cli.err, "WARNING: %s\n", err) + } + return err +} + func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string) *DockerCli { var ( isTerminal = false @@ -1777,15 +1813,9 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string) *Doc if err == nil { err = out } - - configFile, e := auth.LoadConfig(os.Getenv("HOME")) - if e != nil { - fmt.Fprintf(err, "WARNING: %s\n", e) - } return &DockerCli{ proto: proto, addr: addr, - configFile: configFile, in: in, out: out, err: err, diff --git a/commands_test.go b/commands_test.go index db344d7043..25e4804361 100644 --- a/commands_test.go +++ b/commands_test.go @@ -318,7 +318,7 @@ func TestRunAttachStdin(t *testing.T) { ch := make(chan struct{}) go func() { defer close(ch) - cli.CmdRun("-i", "-a", "stdin", unitTestImageID, "sh", "-c", "echo hello && cat") + cli.CmdRun("-i", "-a", "stdin", unitTestImageID, "sh", "-c", "echo hello && cat && sleep 5") }() // Send input to the command, close stdin @@ -346,12 +346,10 @@ func TestRunAttachStdin(t *testing.T) { // wait for CmdRun to return setTimeout(t, "Waiting for CmdRun timed out", 5*time.Second, func() { - // Unblock hijack end - stdout.Read([]byte{}) <-ch }) - setTimeout(t, "Waiting for command to exit timed out", 
5*time.Second, func() { + setTimeout(t, "Waiting for command to exit timed out", 10*time.Second, func() { container.Wait() }) diff --git a/container.go b/container.go index 472ae3990d..9099d90f6f 100644 --- a/container.go +++ b/container.go @@ -20,6 +20,7 @@ import ( "strings" "syscall" "time" + "net" ) type Container struct { @@ -86,6 +87,7 @@ type Config struct { type HostConfig struct { Binds []string ContainerIDFile string + LxcConf []KeyValuePair } type BindMap struct { @@ -98,6 +100,11 @@ var ( ErrInvaidWorikingDirectory = errors.New("The working directory is invalid. It needs to be an absolute path.") ) +type KeyValuePair struct { + Key string + Value string +} + func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig, *flag.FlagSet, error) { cmd := Subcmd("run", "[OPTIONS] IMAGE [COMMAND] [ARG...]", "Run a command in a new container") if len(args) > 0 && args[0] != "--help" { @@ -140,6 +147,9 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig, flVolumesFrom := cmd.String("volumes-from", "", "Mount volumes from the specified container") flEntrypoint := cmd.String("entrypoint", "", "Overwrite the default entrypoint of the image") + var flLxcOpts ListOpts + cmd.Var(&flLxcOpts, "lxc-conf", "Add custom lxc options -lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"") + if err := cmd.Parse(args); err != nil { return nil, nil, cmd, err } @@ -187,6 +197,12 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig, entrypoint = []string{*flEntrypoint} } + var lxcConf []KeyValuePair + lxcConf, err := parseLxcConfOpts(flLxcOpts) + if err != nil { + return nil, nil, cmd, err + } + config := &Config{ Hostname: *flHostname, PortSpecs: flPorts, @@ -212,6 +228,7 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig, hostConfig := &HostConfig{ Binds: binds, ContainerIDFile: *flContainerIDFile, + LxcConf: lxcConf, } if capabilities != nil && *flMemory > 0 && 
!capabilities.SwapLimit { @@ -315,7 +332,7 @@ func (container *Container) SaveHostConfig(hostConfig *HostConfig) (err error) { return ioutil.WriteFile(container.hostConfigPath(), data, 0666) } -func (container *Container) generateLXCConfig() error { +func (container *Container) generateLXCConfig(hostConfig *HostConfig) error { fo, err := os.Create(container.lxcConfigPath()) if err != nil { return err @@ -324,6 +341,11 @@ func (container *Container) generateLXCConfig() error { if err := LxcTemplateCompiled.Execute(fo, container); err != nil { return err } + if hostConfig != nil { + if err := LxcHostConfigTemplateCompiled.Execute(fo, hostConfig); err != nil { + return err + } + } return nil } @@ -520,7 +542,7 @@ func (container *Container) Start(hostConfig *HostConfig) error { container.State.Lock() defer container.State.Unlock() - if len(hostConfig.Binds) == 0 { + if len(hostConfig.Binds) == 0 && len(hostConfig.LxcConf) == 0 { hostConfig, _ = container.ReadHostConfig() } @@ -548,7 +570,7 @@ func (container *Container) Start(hostConfig *HostConfig) error { container.Config.MemorySwap = -1 } - if !container.runtime.capabilities.IPv4Forwarding { + if container.runtime.capabilities.IPv4ForwardingDisabled { log.Printf("WARNING: IPv4 forwarding is disabled. 
Networking will not work") } @@ -645,7 +667,7 @@ func (container *Container) Start(hostConfig *HostConfig) error { } } - if err := container.generateLXCConfig(); err != nil { + if err := container.generateLXCConfig(hostConfig); err != nil { return err } @@ -778,14 +800,44 @@ func (container *Container) allocateNetwork() error { return nil } - iface, err := container.runtime.networkManager.Allocate() - if err != nil { - return err + var iface *NetworkInterface + var err error + if !container.State.Ghost { + iface, err = container.runtime.networkManager.Allocate() + if err != nil { + return err + } + } else { + manager := container.runtime.networkManager + if manager.disabled { + iface = &NetworkInterface{disabled: true} + } else { + iface = &NetworkInterface{ + IPNet: net.IPNet{IP: net.ParseIP(container.NetworkSettings.IPAddress), Mask: manager.bridgeNetwork.Mask}, + Gateway: manager.bridgeNetwork.IP, + manager: manager, + } + ipNum := ipToInt(iface.IPNet.IP) + manager.ipAllocator.inUse[ipNum] = struct{}{} + } } + + var portSpecs []string + if !container.State.Ghost { + portSpecs = container.Config.PortSpecs + } else { + for backend, frontend := range container.NetworkSettings.PortMapping["Tcp"] { + portSpecs = append(portSpecs, fmt.Sprintf("%s:%s/tcp",frontend, backend)) + } + for backend, frontend := range container.NetworkSettings.PortMapping["Udp"] { + portSpecs = append(portSpecs, fmt.Sprintf("%s:%s/udp",frontend, backend)) + } + } + container.NetworkSettings.PortMapping = make(map[string]PortMapping) container.NetworkSettings.PortMapping["Tcp"] = make(PortMapping) container.NetworkSettings.PortMapping["Udp"] = make(PortMapping) - for _, spec := range container.Config.PortSpecs { + for _, spec := range portSpecs { nat, err := iface.AllocatePort(spec) if err != nil { iface.Release() diff --git a/container_test.go b/container_test.go index 3752615a3c..ba48ceb47a 100644 --- a/container_test.go +++ b/container_test.go @@ -1070,7 +1070,7 @@ func TestLXCConfig(t 
*testing.T) { t.Fatal(err) } defer runtime.Destroy(container) - container.generateLXCConfig() + container.generateLXCConfig(nil) grepFile(t, container.lxcConfigPath(), "lxc.utsname = foobar") grepFile(t, container.lxcConfigPath(), fmt.Sprintf("lxc.cgroup.memory.limit_in_bytes = %d", mem)) @@ -1078,6 +1078,36 @@ func TestLXCConfig(t *testing.T) { fmt.Sprintf("lxc.cgroup.memory.memsw.limit_in_bytes = %d", mem*2)) } +func TestCustomLxcConfig(t *testing.T) { + runtime := mkRuntime(t) + defer nuke(runtime) + container, err := NewBuilder(runtime).Create(&Config{ + Image: GetTestImage(runtime).ID, + Cmd: []string{"/bin/true"}, + + Hostname: "foobar", + }, + ) + if err != nil { + t.Fatal(err) + } + defer runtime.Destroy(container) + hostConfig := &HostConfig{LxcConf: []KeyValuePair{ + { + Key: "lxc.utsname", + Value: "docker", + }, + { + Key: "lxc.cgroup.cpuset.cpus", + Value: "0,1", + }, + }} + + container.generateLXCConfig(hostConfig) + grepFile(t, container.lxcConfigPath(), "lxc.utsname = docker") + grepFile(t, container.lxcConfigPath(), "lxc.cgroup.cpuset.cpus = 0,1") +} + func BenchmarkRunSequencial(b *testing.B) { runtime := mkRuntime(b) defer nuke(runtime) diff --git a/contrib/docker.bash b/contrib/docker.bash index 6719ef6a92..32f2b5f8f1 100644 --- a/contrib/docker.bash +++ b/contrib/docker.bash @@ -115,7 +115,7 @@ _docker_build() case "$cur" in -*) - COMPREPLY=( $( compgen -W "-t -q" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "-no-cache -t -q" -- "$cur" ) ) ;; *) _filedir @@ -138,11 +138,37 @@ _docker_commit() COMPREPLY=( $( compgen -W "-author -m -run" -- "$cur" ) ) ;; *) - __docker_containers_all + local counter=$cpos + while [ $counter -le $cword ]; do + case "${words[$counter]}" in + -author|-m|-run) + (( counter++ )) + ;; + -*) + ;; + *) + break + ;; + esac + (( counter++ )) + done + + if [ $counter -eq $cword ]; then + __docker_containers_all + fi ;; esac } +_docker_cp() +{ + if [ $cpos -eq $cword ]; then + __docker_containers_all + else + _filedir + fi +} + 
_docker_diff() { if [ $cpos -eq $cword ]; then @@ -152,7 +178,21 @@ _docker_diff() _docker_events() { - COMPREPLY=( $( compgen -W "-since" -- "$cur" ) ) + case "$prev" in + -since) + return + ;; + *) + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "-since" -- "$cur" ) ) + ;; + *) + ;; + esac } _docker_export() @@ -231,7 +271,21 @@ _docker_kill() _docker_login() { - COMPREPLY=( $( compgen -W "-e -p -u" -- "$cur" ) ) + case "$prev" in + -e|-p|-u) + return + ;; + *) + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "-e -p -u" -- "$cur" ) ) + ;; + *) + ;; + esac } _docker_logs() @@ -250,12 +304,40 @@ _docker_port() _docker_ps() { - COMPREPLY=( $( compgen -W "-a -beforeId -l -n -notrunc -q -s -sinceId" -- "$cur" ) ) + case "$prev" in + -beforeId|-n|-sinceId) + return + ;; + *) + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "-a -beforeId -l -n -notrunc -q -s -sinceId" -- "$cur" ) ) + ;; + *) + ;; + esac } _docker_pull() { - COMPREPLY=( $( compgen -W "-t" -- "$cur" ) ) + case "$prev" in + -t) + return + ;; + *) + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "-t" -- "$cur" ) ) + ;; + *) + ;; + esac } _docker_push() @@ -309,7 +391,7 @@ _docker_run() -volumes-from) __docker_containers_all ;; - -a|-c|-dns|-e|-entrypoint|-h|-m|-p|-u|-v) + -a|-c|-dns|-e|-entrypoint|-h|-lxc-conf|-m|-p|-u|-v|-w) return ;; *) @@ -318,34 +400,27 @@ _docker_run() case "$cur" in -*) - COMPREPLY=( $( compgen -W "-a -c -cidfile -d -dns -e -entrypoint -h -i -m -n -p -t -u -v -volumes-from" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "-a -c -cidfile -d -dns -e -entrypoint -h -i -lxc-conf -m -n -p -privileged -t -u -v -volumes-from -w" -- "$cur" ) ) ;; *) - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "-a -notrunc -q -viz" -- "$cur" ) ) - ;; - *) - local counter=$cpos - while [ $counter -le $cword ]; do - case "${words[$counter]}" in - -a|-c|-cidfile|-dns|-e|-entrypoint|-h|-m|-p|-u|-v|-volumes-from) - (( counter++ )) - ;; - -*) - ;; - 
*) - break - ;; - esac + local counter=$cpos + while [ $counter -le $cword ]; do + case "${words[$counter]}" in + -a|-c|-cidfile|-dns|-e|-entrypoint|-h|-lxc-conf|-m|-p|-u|-v|-volumes-from|-w) (( counter++ )) - done + ;; + -*) + ;; + *) + break + ;; + esac + (( counter++ )) + done - if [ $counter -eq $cword ]; then - __docker_image_repos_and_tags - fi - ;; - esac + if [ $counter -eq $cword ]; then + __docker_image_repos_and_tags + fi ;; esac } @@ -409,6 +484,7 @@ _docker() attach build commit + cp diff events export diff --git a/contrib/install.sh b/contrib/install.sh index 04340e2acb..3cf7169a07 100755 --- a/contrib/install.sh +++ b/contrib/install.sh @@ -35,10 +35,10 @@ else fi fi -echo "Downloading docker binary and uncompressing into /usr/local/bin..." -curl -s https://get.docker.io/builds/$(uname -s)/$(uname -m)/docker-latest.tgz | -tar -C /usr/local/bin --strip-components=1 -zxf- \ -docker-latest/docker +echo "Downloading docker binary to /usr/local/bin..." +curl -s https://get.docker.io/builds/$(uname -s)/$(uname -m)/docker-latest \ + > /usr/local/bin/docker +chmod +x /usr/local/bin/docker if [ -f /etc/init/dockerd.conf ] then @@ -50,7 +50,7 @@ description "Docker daemon" start on filesystem or runlevel [2345] stop on runlevel [!2345] respawn -exec env LANG="en_US.UTF-8" /usr/local/bin/docker -d +exec /usr/local/bin/docker -d EOF fi diff --git a/docker/docker.go b/docker/docker.go index a48865bfa3..6ac0c9379d 100644 --- a/docker/docker.go +++ b/docker/docker.go @@ -16,6 +16,7 @@ import ( var ( GITCOMMIT string + VERSION string ) func main() { @@ -25,6 +26,7 @@ func main() { return } // FIXME: Switch d and D ? 
(to be more sshd like) + flVersion := flag.Bool("v", false, "Print version information and quit") flDaemon := flag.Bool("d", false, "Daemon mode") flDebug := flag.Bool("D", false, "Debug mode") flAutoRestart := flag.Bool("r", false, "Restart previously running containers") @@ -36,6 +38,10 @@ func main() { flHosts := docker.ListOpts{fmt.Sprintf("unix://%s", docker.DEFAULTUNIXSOCKET)} flag.Var(&flHosts, "H", "tcp://host:port to bind/connect to or unix://path/to/socket to use") flag.Parse() + if *flVersion { + showVersion() + return + } if len(flHosts) > 1 { flHosts = flHosts[1:] //trick to display a nice default value in the usage } @@ -52,6 +58,7 @@ func main() { os.Setenv("DEBUG", "1") } docker.GITCOMMIT = GITCOMMIT + docker.VERSION = VERSION if *flDaemon { if flag.NArg() != 0 { flag.Usage() @@ -74,6 +81,10 @@ func main() { } } +func showVersion() { + fmt.Printf("Docker version %s, build %s\n", VERSION, GITCOMMIT) +} + func createPidFile(pidfile string) error { if pidString, err := ioutil.ReadFile(pidfile); err == nil { pid, err := strconv.Atoi(string(pidString)) diff --git a/docs/sources/api/docker_remote_api.rst b/docs/sources/api/docker_remote_api.rst index 17fe3b82e3..82db83d507 100644 --- a/docs/sources/api/docker_remote_api.rst +++ b/docs/sources/api/docker_remote_api.rst @@ -165,7 +165,7 @@ Initial version Docker Remote API Client Libraries ================================== -These libraries have been not tested by the Docker Maintainers for +These libraries have not been tested by the Docker Maintainers for compatibility. Please file issues with the library owners. If you find more library implementations, please list them in Docker doc bugs and we will add the libraries here. 
diff --git a/docs/sources/api/docker_remote_api_v1.4.rst b/docs/sources/api/docker_remote_api_v1.4.rst index 14dbfd7c3f..d512de9ca3 100644 --- a/docs/sources/api/docker_remote_api_v1.4.rst +++ b/docs/sources/api/docker_remote_api_v1.4.rst @@ -356,7 +356,8 @@ Start a container Content-Type: application/json { - "Binds":["/tmp:/tmp"] + "Binds":["/tmp:/tmp"], + "LxcConf":{"lxc.utsname":"docker"} } **Example response**: diff --git a/docs/sources/commandline/cli.rst b/docs/sources/commandline/cli.rst index 6d96886473..9f8c296fe6 100644 --- a/docs/sources/commandline/cli.rst +++ b/docs/sources/commandline/cli.rst @@ -15,7 +15,7 @@ To list available commands, either run ``docker`` with no parameters or execute $ sudo docker Usage: docker [OPTIONS] COMMAND [arg...] - -H=[tcp://127.0.0.1:4243]: tcp://host:port to bind/connect to or unix://path/to/socket to use + -H=[unix:///var/run/docker.sock]: tcp://host:port to bind/connect to or unix://path/to/socket to use A self-sufficient runtime for linux containers. diff --git a/docs/sources/commandline/command/attach.rst b/docs/sources/commandline/command/attach.rst index 4c4c189d8f..12ed802a02 100644 --- a/docs/sources/commandline/command/attach.rst +++ b/docs/sources/commandline/command/attach.rst @@ -10,4 +10,50 @@ Usage: docker attach CONTAINER - Attach to a running container + Attach to a running container. + +You can detach from the container again (and leave it running) with +``CTRL-c`` (for a quiet exit) or ``CTRL-\`` to get a stacktrace of +the Docker client when it quits. + +To stop a container, use ``docker stop`` + +To kill the container, use ``docker kill`` + +Examples: +--------- + +.. 
code-block:: bash + + $ ID=$(sudo docker run -d ubuntu /usr/bin/top -b) + $ sudo docker attach $ID + top - 02:05:52 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 + Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie + Cpu(s): 0.1%us, 0.2%sy, 0.0%ni, 99.7%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st + Mem: 373572k total, 355560k used, 18012k free, 27872k buffers + Swap: 786428k total, 0k used, 786428k free, 221740k cached + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + 1 root 20 0 17200 1116 912 R 0 0.3 0:00.03 top + + top - 02:05:55 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 + Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie + Cpu(s): 0.0%us, 0.2%sy, 0.0%ni, 99.8%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st + Mem: 373572k total, 355244k used, 18328k free, 27872k buffers + Swap: 786428k total, 0k used, 786428k free, 221776k cached + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + 1 root 20 0 17208 1144 932 R 0 0.3 0:00.03 top + + + top - 02:05:58 up 3:06, 0 users, load average: 0.01, 0.02, 0.05 + Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie + Cpu(s): 0.2%us, 0.3%sy, 0.0%ni, 99.5%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st + Mem: 373572k total, 355780k used, 17792k free, 27880k buffers + Swap: 786428k total, 0k used, 786428k free, 221776k cached + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + 1 root 20 0 17208 1144 932 R 0 0.3 0:00.03 top + ^C$ + $ sudo docker stop $ID + diff --git a/docs/sources/commandline/command/cp.rst b/docs/sources/commandline/command/cp.rst index 14b5061ef7..ea84fa1f90 100644 --- a/docs/sources/commandline/command/cp.rst +++ b/docs/sources/commandline/command/cp.rst @@ -2,12 +2,13 @@ :description: Copy files/folders from the containers filesystem to the host path :keywords: cp, docker, container, documentation, copy -=========================================================== +============================================================================ ``cp`` -- Copy files/folders from the 
containers filesystem to the host path -=========================================================== +============================================================================ :: Usage: docker cp CONTAINER:RESOURCE HOSTPATH - Copy files/folders from the containers filesystem to the host path. Paths are relative to the root of the filesystem. + Copy files/folders from the containers filesystem to the host + path. Paths are relative to the root of the filesystem. diff --git a/docs/sources/commandline/command/run.rst b/docs/sources/commandline/command/run.rst index bd78ea473f..cd283669e6 100644 --- a/docs/sources/commandline/command/run.rst +++ b/docs/sources/commandline/command/run.rst @@ -30,7 +30,7 @@ -volumes-from="": Mount all volumes from the given container. -entrypoint="": Overwrite the default entrypoint set by the image. -w="": Working directory inside the container - + -lxc-conf=[]: Add custom lxc options -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" Examples -------- diff --git a/docs/sources/concepts/images/lego_docker.jpg b/docs/sources/concepts/images/lego_docker.jpg deleted file mode 100644 index b3039a2cb5..0000000000 Binary files a/docs/sources/concepts/images/lego_docker.jpg and /dev/null differ diff --git a/docs/sources/concepts/index.rst b/docs/sources/concepts/index.rst deleted file mode 100644 index e1cb8cd1a9..0000000000 --- a/docs/sources/concepts/index.rst +++ /dev/null @@ -1,16 +0,0 @@ -:title: Overview -:description: Docker documentation summary -:keywords: concepts, documentation, docker, containers - - - -Overview -======== - -Contents: - -.. toctree:: - :maxdepth: 1 - - ../index - manifesto diff --git a/docs/sources/concepts/manifesto.rst b/docs/sources/concepts/manifesto.rst deleted file mode 100644 index 7dd4b4bdda..0000000000 --- a/docs/sources/concepts/manifesto.rst +++ /dev/null @@ -1,129 +0,0 @@ -:title: Manifesto -:description: An overview of Docker and standard containers -:keywords: containers, lxc, concepts, explanation - -.. 
_dockermanifesto: - -Docker Manifesto ----------------- - -Docker complements LXC with a high-level API which operates at the -process level. It runs unix processes with strong guarantees of -isolation and repeatability across servers. - -Docker is a great building block for automating distributed systems: -large-scale web deployments, database clusters, continuous deployment -systems, private PaaS, service-oriented architectures, etc. - -- **Heterogeneous payloads** Any combination of binaries, libraries, - configuration files, scripts, virtualenvs, jars, gems, tarballs, you - name it. No more juggling between domain-specific tools. Docker can - deploy and run them all. -- **Any server** Docker can run on any x64 machine with a modern linux - kernel - whether it's a laptop, a bare metal server or a VM. This - makes it perfect for multi-cloud deployments. -- **Isolation** docker isolates processes from each other and from the - underlying host, using lightweight containers. -- **Repeatability** Because containers are isolated in their own - filesystem, they behave the same regardless of where, when, and - alongside what they run. - -.. image:: images/lego_docker.jpg - :target: http://bricks.argz.com/ins/7823-1/12 - -What is a Standard Container? -............................. - -Docker defines a unit of software delivery called a Standard -Container. The goal of a Standard Container is to encapsulate a -software component and all its dependencies in a format that is -self-describing and portable, so that any compliant runtime can run it -without extra dependency, regardless of the underlying machine and the -contents of the container. - -The spec for Standard Containers is currently work in progress, but it -is very straightforward. It mostly defines 1) an image format, 2) a -set of standard operations, and 3) an execution environment. - -A great analogy for this is the shipping container. 
Just like Standard -Containers are a fundamental unit of software delivery, shipping -containers are a fundamental unit of physical delivery. - -Standard operations -~~~~~~~~~~~~~~~~~~~ - -Just like shipping containers, Standard Containers define a set of -STANDARD OPERATIONS. Shipping containers can be lifted, stacked, -locked, loaded, unloaded and labelled. Similarly, standard containers -can be started, stopped, copied, snapshotted, downloaded, uploaded and -tagged. - - -Content-agnostic -~~~~~~~~~~~~~~~~~~~ - -Just like shipping containers, Standard Containers are -CONTENT-AGNOSTIC: all standard operations have the same effect -regardless of the contents. A shipping container will be stacked in -exactly the same way whether it contains Vietnamese powder coffee or -spare Maserati parts. Similarly, Standard Containers are started or -uploaded in the same way whether they contain a postgres database, a -php application with its dependencies and application server, or Java -build artifacts. - -Infrastructure-agnostic -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Both types of containers are INFRASTRUCTURE-AGNOSTIC: they can be -transported to thousands of facilities around the world, and -manipulated by a wide variety of equipment. A shipping container can -be packed in a factory in Ukraine, transported by truck to the nearest -routing center, stacked onto a train, loaded into a German boat by an -Australian-built crane, stored in a warehouse at a US facility, -etc. Similarly, a standard container can be bundled on my laptop, -uploaded to S3, downloaded, run and snapshotted by a build server at -Equinix in Virginia, uploaded to 10 staging servers in a home-made -Openstack cluster, then sent to 30 production instances across 3 EC2 -regions. 
- - -Designed for automation -~~~~~~~~~~~~~~~~~~~~~~~ - -Because they offer the same standard operations regardless of content -and infrastructure, Standard Containers, just like their physical -counterpart, are extremely well-suited for automation. In fact, you -could say automation is their secret weapon. - -Many things that once required time-consuming and error-prone human -effort can now be programmed. Before shipping containers, a bag of -powder coffee was hauled, dragged, dropped, rolled and stacked by 10 -different people in 10 different locations by the time it reached its -destination. 1 out of 50 disappeared. 1 out of 20 was damaged. The -process was slow, inefficient and cost a fortune - and was entirely -different depending on the facility and the type of goods. - -Similarly, before Standard Containers, by the time a software -component ran in production, it had been individually built, -configured, bundled, documented, patched, vendored, templated, tweaked -and instrumented by 10 different people on 10 different -computers. Builds failed, libraries conflicted, mirrors crashed, -post-it notes were lost, logs were misplaced, cluster updates were -half-broken. The process was slow, inefficient and cost a fortune - -and was entirely different depending on the language and -infrastructure provider. - -Industrial-grade delivery -~~~~~~~~~~~~~~~~~~~~~~~~~ - -There are 17 million shipping containers in existence, packed with -every physical good imaginable. Every single one of them can be loaded -on the same boats, by the same cranes, in the same facilities, and -sent anywhere in the World with incredible efficiency. It is -embarrassing to think that a 30 ton shipment of coffee can safely -travel half-way across the World in *less time* than it takes a -software team to deliver its code from one datacenter to another -sitting 10 miles away. 
- -With Standard Containers we can put an end to that embarrassment, by -making INDUSTRIAL-GRADE DELIVERY of software a reality. diff --git a/docs/sources/examples/index.rst b/docs/sources/examples/index.rst index 58da18e344..2664b95e54 100644 --- a/docs/sources/examples/index.rst +++ b/docs/sources/examples/index.rst @@ -21,3 +21,4 @@ Contents: running_ssh_service couchdb_data_volumes postgresql_service + mongodb diff --git a/docs/sources/examples/mongodb.rst b/docs/sources/examples/mongodb.rst new file mode 100644 index 0000000000..5527fc00c7 --- /dev/null +++ b/docs/sources/examples/mongodb.rst @@ -0,0 +1,100 @@ +:title: Building a Docker Image with MongoDB +:description: How to build a Docker image with MongoDB pre-installed +:keywords: docker, example, package installation, networking, mongodb + +.. _mongodb_image: + +Building an Image with MongoDB +============================== + +.. include:: example_header.inc + +The goal of this example is to show how you can build your own +docker images with MongoDB preinstalled. We will do that by +constructing a Dockerfile that downloads a base image, adds an +apt source and installs the database software on Ubuntu. + +Creating a ``Dockerfile`` ++++++++++++++++++++++++++ + +Create an empty file called ``Dockerfile``: + +.. code-block:: bash + + touch Dockerfile + +Next, define the parent image you want to use to build your own image on top of. +Here, we’ll use `Ubuntu `_ (tag: ``latest``) +available on the `docker index `_: + +.. code-block:: bash + + FROM ubuntu:latest + +Since we want to be running the latest version of MongoDB we'll need to add the +10gen repo to our apt sources list. + +.. 
code-block:: bash + + # Add 10gen official apt source to the sources list + RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10 + RUN echo 'deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen' | tee /etc/apt/sources.list.d/10gen.list + +Then, we don't want Ubuntu to complain about init not being available so we'll +divert /sbin/initctl to /bin/true so it thinks everything is working. + +.. code-block:: bash + + # Hack for initctl not being available in Ubuntu + RUN dpkg-divert --local --rename --add /sbin/initctl + RUN ln -s /bin/true /sbin/initctl + +Afterwards we'll be able to update our apt repositories and install MongoDB + +.. code-block:: bash + + # Install MongoDB + RUN apt-get update + RUN apt-get install mongodb-10gen + +To run MongoDB we'll have to create the default data directory (because we want it to +run without needing to provide a special configuration file) + +.. code-block:: bash + + # Create the MongoDB data directory + RUN mkdir -p /data/db + +Finally, we'll expose the standard port that MongoDB runs on (27017) as well as +define an ENTRYPOINT for the container. + +.. code-block:: bash + + EXPOSE 27017 + ENTRYPOINT ["/usr/bin/mongod"] + +Now, let's build the image which will go through the ``Dockerfile`` we made and +run all of the commands. + +.. code-block:: bash + + docker build -t /mongodb . + +Now you should be able to run ``mongod`` as a daemon and be able to connect on +the local port! + +.. code-block:: bash + + # Regular style + MONGO_ID=$(docker run -d /mongodb) + + # Lean and mean + MONGO_ID=$(docker run -d /mongodb --noprealloc --smallfiles) + + # Check the logs out + docker logs $MONGO_ID + + # Connect and play around + mongo --port + +Sweet! 
diff --git a/docs/sources/examples/nodejs_web_app.rst b/docs/sources/examples/nodejs_web_app.rst index cb580c43d9..67584d1794 100644 --- a/docs/sources/examples/nodejs_web_app.rst +++ b/docs/sources/examples/nodejs_web_app.rst @@ -93,7 +93,7 @@ To install the right package for CentOS, we’ll use the instructions from the # Enable EPEL for Node.js RUN rpm -Uvh http://download.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm # Install Node.js and npm - RUN yum install -y npm-1.2.17-5.el6 + RUN yum install -y npm To bundle your app’s source code inside the docker image, use the ``ADD`` command: @@ -137,7 +137,7 @@ Your ``Dockerfile`` should now look like this: # Enable EPEL for Node.js RUN rpm -Uvh http://download.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm # Install Node.js and npm - RUN yum install -y npm-1.2.17-5.el6 + RUN yum install -y npm # Bundle app source ADD . /src diff --git a/docs/sources/index.rst b/docs/sources/index.rst index 8dfffa718b..2800310e5e 100644 --- a/docs/sources/index.rst +++ b/docs/sources/index.rst @@ -1,11 +1,11 @@ -:title: Welcome to the Docker Documentation +:title: Docker Documentation :description: An overview of the Docker Documentation :keywords: containers, lxc, concepts, explanation -Welcome -======= +.. image:: static_files/dockerlogo-h.png -.. image:: concepts/images/dockerlogo-h.png +Introduction +------------ ``docker``, the Linux Container Runtime, runs Unix processes with strong guarantees of isolation across servers. 
Your software runs diff --git a/docs/sources/installation/images/win/hp_bios_vm.JPG b/docs/sources/installation/images/win/hp_bios_vm.JPG new file mode 100644 index 0000000000..b44cabeac1 Binary files /dev/null and b/docs/sources/installation/images/win/hp_bios_vm.JPG differ diff --git a/docs/sources/installation/images/win/ts_go_bios.JPG b/docs/sources/installation/images/win/ts_go_bios.JPG new file mode 100644 index 0000000000..05f2310ad3 Binary files /dev/null and b/docs/sources/installation/images/win/ts_go_bios.JPG differ diff --git a/docs/sources/installation/images/win/ts_no_docker.JPG b/docs/sources/installation/images/win/ts_no_docker.JPG new file mode 100644 index 0000000000..51fa766711 Binary files /dev/null and b/docs/sources/installation/images/win/ts_no_docker.JPG differ diff --git a/docs/sources/installation/ubuntulinux.rst b/docs/sources/installation/ubuntulinux.rst index d1103544fd..4142a9c373 100644 --- a/docs/sources/installation/ubuntulinux.rst +++ b/docs/sources/installation/ubuntulinux.rst @@ -2,12 +2,15 @@ :description: Please note this project is currently under heavy development. It should not be used in production. :keywords: Docker, Docker documentation, requirements, virtualbox, vagrant, git, ssh, putty, cygwin, linux +**These instructions have changed for 0.6. If you are upgrading from an earlier version, you will need to follow them again.** + .. _ubuntu_linux: Ubuntu Linux ============ - **Please note this project is currently under heavy development. It should not be used in production.** + **Please note this project is currently under heavy development. It should not be used in production.** + Right now, the officially supported distribution are: @@ -35,8 +38,8 @@ Dependencies **Linux kernel 3.8** Due to a bug in LXC, docker works best on the 3.8 kernel. Precise -comes with a 3.2 kernel, so we need to upgrade it. The kernel we -install comes with AUFS built in. 
We also include the generic headers +comes with a 3.2 kernel, so we need to upgrade it. The kernel you'll install when following these steps +comes with AUFS built in. We also include the generic headers to enable packages that depend on them, like ZFS and the VirtualBox guest additions. If you didn't install the headers for your "precise" kernel, then you can skip these headers for the "raring" kernel. But @@ -56,14 +59,18 @@ it is safer to include them if you're not sure. Installation ------------ -Docker is available as a Ubuntu PPA (Personal Package Archive), -`hosted on launchpad `_ -which makes installing Docker on Ubuntu very easy. +Docker is available as a Debian package, which makes installation easy. + +*Please note that these instructions have changed for 0.6. If you are upgrading from an earlier version, you will need +to follow them again.* .. code-block:: bash - # Add the PPA sources to your apt sources list. - sudo apt-get install python-software-properties && sudo add-apt-repository ppa:dotcloud/lxc-docker + # Add the Docker repository key to your local keychain + sudo sh -c "curl https://get.docker.io/gpg | apt-key add -" + + # Add the Docker repository to your apt sources list. + sudo sh -c "echo deb https://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list" # Update your sources sudo apt-get update @@ -101,30 +108,22 @@ have AUFS filesystem support enabled, so we need to install it. sudo apt-get update sudo apt-get install linux-image-extra-`uname -r` -**add-apt-repository support** - -Some installations of Ubuntu 13.04 require ``software-properties-common`` to be -installed before being able to use add-apt-repository. - -.. code-block:: bash - - sudo apt-get install software-properties-common - Installation ------------ -Docker is available as a Ubuntu PPA (Personal Package Archive), -`hosted on launchpad `_ -which makes installing Docker on Ubuntu very easy. 
+Docker is available as a Debian package, which makes installation easy. - -Add the custom package sources to your apt sources list. +*Please note that these instructions have changed for 0.6. If you are upgrading from an earlier version, you will need +to follow them again.* .. code-block:: bash - # add the sources to your apt - sudo add-apt-repository ppa:dotcloud/lxc-docker + # Add the Docker repository key to your local keychain + sudo sh -c "curl http://get.docker.io/gpg | apt-key add -" + + # Add the Docker repository to your apt sources list. + sudo sh -c "echo deb https://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list" # update sudo apt-get update diff --git a/docs/sources/installation/vagrant.rst b/docs/sources/installation/vagrant.rst index 28b2a4e22d..568ec584ec 100644 --- a/docs/sources/installation/vagrant.rst +++ b/docs/sources/installation/vagrant.rst @@ -2,6 +2,8 @@ :description: This guide will setup a new virtualbox virtual machine with docker installed on your computer. :keywords: Docker, Docker documentation, virtualbox, vagrant, git, ssh, putty, cygwin +**Vagrant installation is temporarily out of date, it will be updated for 0.6 soon.** + .. _install_using_vagrant: Using Vagrant (Mac, Linux) diff --git a/docs/sources/installation/windows.rst b/docs/sources/installation/windows.rst index 7830106020..889db4c670 100644 --- a/docs/sources/installation/windows.rst +++ b/docs/sources/installation/windows.rst @@ -2,6 +2,8 @@ :description: Docker's tutorial to run docker on Windows :keywords: Docker, Docker documentation, Windows, requirements, virtualbox, vagrant, git, ssh, putty, cygwin +**Vagrant installation is temporarily out of date, it will be updated for 0.6 soon.** + .. _windows: Using Vagrant (Windows) @@ -47,6 +49,8 @@ This should open a cmd prompt window. Alternatively, you can also use a Cygwin terminal, or Git Bash (or any other command line program you are usually using). The next steps would be the same. +.. 
_launch_ubuntu: + Launch an Ubuntu virtual server ------------------------------- @@ -166,3 +170,27 @@ You are now ready for the docker’s “hello world” example. Run All done! Now you can continue with the :ref:`hello_world` example. + +Troubleshooting +--------------- + +VM does not boot +```````````````` + +.. image:: images/win/ts_go_bios.JPG + +If you run into this error message "The VM failed to remain in the 'running' +state while attempting to boot", please check that your computer has virtualization +technology available and activated by going to the BIOS. Here's an example for an HP +computer (System configuration / Device configuration) + +.. image:: images/win/hp_bios_vm.JPG + + +Docker is not installed +``````````````````````` + +.. image:: images/win/ts_no_docker.JPG + +If you run into this error message "The program 'docker' is currently not installed", +try deleting the docker folder and restart from :ref:`launch_ubuntu` diff --git a/docs/sources/concepts/images/dockerlogo-h.png b/docs/sources/static_files/dockerlogo-h.png similarity index 100% rename from docs/sources/concepts/images/dockerlogo-h.png rename to docs/sources/static_files/dockerlogo-h.png diff --git a/docs/sources/concepts/images/dockerlogo-v.png b/docs/sources/static_files/dockerlogo-v.png similarity index 100% rename from docs/sources/concepts/images/dockerlogo-v.png rename to docs/sources/static_files/dockerlogo-v.png diff --git a/docs/sources/toctree.rst b/docs/sources/toctree.rst index 3c319863e2..647257a0dd 100644 --- a/docs/sources/toctree.rst +++ b/docs/sources/toctree.rst @@ -10,7 +10,7 @@ This documentation has the following resources: .. 
toctree:: :titlesonly: - concepts/index + Introduction installation/index use/index examples/index @@ -19,6 +19,3 @@ This documentation has the following resources: api/index terms/index faq - - - diff --git a/docs/sources/use/baseimages.rst b/docs/sources/use/baseimages.rst new file mode 100644 index 0000000000..6c8d4fbe2c --- /dev/null +++ b/docs/sources/use/baseimages.rst @@ -0,0 +1,43 @@ +:title: Base Image Creation +:description: How to create base images +:keywords: Examples, Usage, base image, docker, documentation, examples + +.. _base_image_creation: + +Base Image Creation +=================== + +So you want to create your own :ref:`base_image_def`? Great! + +The specific process will depend heavily on the Linux distribution you +want to package. We have some examples below, and you are encouraged +to submit pull requests to contribute new ones. + +Getting Started +............... + +In general, you'll want to start with a working machine that is +running the distribution you'd like to package as a base image, though +that is not required for some tools like Debian's `Debootstrap +`_, which you can also use to +build Ubuntu images. + +It can be as simple as this to create an Ubuntu base image:: + + $ sudo debootstrap raring raring > /dev/null + $ sudo tar -C raring -c . 
| sudo docker import - raring + a29c15f1bf7a + $ sudo docker run raring cat /etc/lsb-release + DISTRIB_ID=Ubuntu + DISTRIB_RELEASE=13.04 + DISTRIB_CODENAME=raring + DISTRIB_DESCRIPTION="Ubuntu 13.04" + +There are more example scripts for creating base images in the +Docker Github Repo: + +* `BusyBox `_ +* `CentOS + `_ +* `Debian + `_ diff --git a/docs/sources/use/basics.rst b/docs/sources/use/basics.rst index 411e1c30d1..acae031f09 100644 --- a/docs/sources/use/basics.rst +++ b/docs/sources/use/basics.rst @@ -33,6 +33,8 @@ Running an interactive shell # Run an interactive shell in the ubuntu image, # allocate a tty, attach stdin and stdout + # To detach the tty without exiting the shell, + # use the escape sequence Ctrl-p + Ctrl-q sudo docker run -i -t ubuntu /bin/bash Why ``sudo``? @@ -41,7 +43,7 @@ Why ``sudo``? The ``docker`` daemon always runs as root, and since ``docker`` version 0.5.2, ``docker`` binds to a Unix socket instead of a TCP port. By default that Unix socket is owned by the user *root*, and so, -by default, you can access it with ``sudo``. +by default, you can access it with ``sudo``. Starting in version 0.5.3, if you create a Unix group called *docker* and add users to it, then the ``docker`` daemon will make the @@ -56,6 +58,8 @@ you don't need to add ``sudo`` to all the client commands. sudo groupadd docker # Add the ubuntu user to the docker group + # You may have to logout and log back in again for + # this to take effect sudo gpasswd -a ubuntu docker # Restart the docker daemon diff --git a/docs/sources/use/builder.rst b/docs/sources/use/builder.rst index de765a5467..7a985e766b 100644 --- a/docs/sources/use/builder.rst +++ b/docs/sources/use/builder.rst @@ -4,13 +4,13 @@ .. 
_dockerbuilder: -================== -Dockerfile Builder -================== +====================== +Dockerfiles for Images +====================== **Docker can act as a builder** and read instructions from a text -Dockerfile to automate the steps you would otherwise make manually to -create an image. Executing ``docker build`` will run your steps and +``Dockerfile`` to automate the steps you would otherwise take manually +to create an image. Executing ``docker build`` will run your steps and commit them along the way, giving you a final image. .. contents:: Table of Contents @@ -35,6 +35,8 @@ build succeeds: Docker will run your steps one-by-one, committing the result if necessary, before finally outputting the ID of your new image. +When you're done with your build, you're ready to look into :ref:`image_push`. + 2. Format ========= @@ -48,9 +50,9 @@ The Dockerfile format is quite simple: The Instruction is not case-sensitive, however convention is for them to be UPPERCASE in order to distinguish them from arguments more easily. -Docker evaluates the instructions in a Dockerfile in order. **The first -instruction must be `FROM`** in order to specify the base image from -which you are building. +Docker evaluates the instructions in a Dockerfile in order. **The +first instruction must be `FROM`** in order to specify the +:ref:`base_image_def` from which you are building. Docker will ignore **comment lines** *beginning* with ``#``. A comment marker anywhere in the rest of the line will be treated as an argument. @@ -68,7 +70,9 @@ building images. The ``FROM`` instruction sets the :ref:`base_image_def` for subsequent instructions. As such, a valid Dockerfile must have ``FROM`` as its -first instruction. +first instruction. The image can be any valid image -- it is +especially easy to start by **pulling an image** from the +:ref:`using_public_repositories`. ``FROM`` must be the first non-comment instruction in the ``Dockerfile``. @@ -102,11 +106,50 @@ control. 
3.4 CMD ------- - ``CMD `` +CMD has three forms: -The ``CMD`` instruction sets the command to be executed when running -the image. This is functionally equivalent to running ``docker commit --run '{"Cmd": }'`` outside the builder. +* ``CMD ["executable","param1","param2"]`` (like an *exec*, preferred form) +* ``CMD ["param1","param2"]`` (as *default parameters to ENTRYPOINT*) +* ``CMD command param1 param2`` (as a *shell*) + +There can only be one CMD in a Dockerfile. If you list more than one +CMD then only the last CMD will take effect. + +**The main purpose of a CMD is to provide defaults for an executing +container.** These defaults can include an executable, or they can +omit the executable, in which case you must specify an ENTRYPOINT as +well. + +When used in the shell or exec formats, the ``CMD`` instruction sets +the command to be executed when running the image. This is +functionally equivalent to running ``docker commit -run '{"Cmd": +}'`` outside the builder. + +If you use the *shell* form of the CMD, then the ```` will +execute in ``/bin/sh -c``: + +.. code-block:: bash + + FROM ubuntu + CMD echo "This is a test." | wc - + +If you want to **run your** ```` **without a shell** then you +must express the command as a JSON array and give the full path to the +executable. **This array form is the preferred format of CMD.** Any +additional parameters must be individually expressed as strings in the +array: + +.. code-block:: bash + + FROM ubuntu + CMD ["/usr/bin/wc","--help"] + +If you would like your container to run the same executable every +time, then you should consider using ``ENTRYPOINT`` in combination +with ``CMD``. See :ref:`entrypoint_def`. + +If the user specifies arguments to ``docker run`` then they will +override the default specified in CMD. .. note:: Don't confuse ``RUN`` with ``CMD``. ``RUN`` actually runs a @@ -121,7 +164,7 @@ the image. 
This is functionally equivalent to running ``docker commit The ``EXPOSE`` instruction sets ports to be publicly exposed when running the image. This is functionally equivalent to running ``docker commit -run '{"PortSpecs": ["", ""]}'`` outside the -builder. +builder. Take a look at :ref:`port_redirection` for more information. 3.6 ENV ------- @@ -186,16 +229,55 @@ The copy obeys the following rules: directories in its path. All new files and directories are created with mode 0755, uid and gid 0. +.. _entrypoint_def: + 3.8 ENTRYPOINT -------------- - ``ENTRYPOINT ["/bin/echo"]`` +ENTRYPOINT has two forms: -The ``ENTRYPOINT`` instruction adds an entry command that will not be -overwritten when arguments are passed to docker run, unlike the +* ``ENTRYPOINT ["executable", "param1", "param2"]`` (like an *exec*, + preferred form) +* ``ENTRYPOINT command param1 param2`` (as a *shell*) + +There can only be one ``ENTRYPOINT`` in a Dockerfile. If you have more +than one ``ENTRYPOINT``, then only the last one in the Dockerfile will +have an effect. + +An ``ENTRYPOINT`` helps you to configure a container that you can run +as an executable. That is, when you specify an ``ENTRYPOINT``, then +the whole container runs as if it was just that executable. + +The ``ENTRYPOINT`` instruction adds an entry command that will **not** +be overwritten when arguments are passed to ``docker run``, unlike the behavior of ``CMD``. This allows arguments to be passed to the -entrypoint. i.e. ``docker run -d`` will pass the "-d" argument -to the entrypoint. +entrypoint. i.e. ``docker run -d`` will pass the "-d" +argument to the ENTRYPOINT. + +You can specify parameters either in the ENTRYPOINT JSON array (as in +"like an exec" above), or by using a CMD statement. Parameters in the +ENTRYPOINT will not be overridden by the ``docker run`` arguments, but +parameters specified via CMD will be overridden by ``docker run`` +arguments. 
+ +Like a ``CMD``, you can specify a plain string for the ENTRYPOINT and +it will execute in ``/bin/sh -c``: + +.. code-block:: bash + + FROM ubuntu + ENTRYPOINT wc -l - + +For example, that Dockerfile's image will *always* take stdin as input +("-") and print the number of lines ("-l"). If you wanted to make +this optional but default, you could use a CMD: + +.. code-block:: bash + + FROM ubuntu + CMD ["-l", "-"] + ENTRYPOINT ["/usr/bin/wc"] + 3.9 VOLUME ---------- @@ -205,14 +287,23 @@ to the entrypoint. The ``VOLUME`` instruction will add one or more new volumes to any container created from the image. -3.10 WORKDIR --------------- +3.10 USER +--------- + + ``USER daemon`` + +The ``USER`` instruction sets the username or UID to use when running +the image. + +3.11 WORKDIR +------------ ``WORKDIR /path/to/workdir`` The ``WORKDIR`` instruction sets the working directory in which the command given by ``CMD`` is executed. + 4. Dockerfile Examples ====================== diff --git a/docs/sources/use/index.rst b/docs/sources/use/index.rst index 2f74f60718..d0a40159e9 100644 --- a/docs/sources/use/index.rst +++ b/docs/sources/use/index.rst @@ -13,8 +13,8 @@ Contents: :maxdepth: 1 basics - workingwithrepository - port_redirection builder + workingwithrepository + baseimages + port_redirection puppet - diff --git a/docs/sources/use/port_redirection.rst b/docs/sources/use/port_redirection.rst index e16a27c0eb..b19673af27 100644 --- a/docs/sources/use/port_redirection.rst +++ b/docs/sources/use/port_redirection.rst @@ -3,6 +3,8 @@ :keywords: Usage, basic port, docker, documentation, examples +.. _port_redirection: + Port redirection ================ diff --git a/docs/sources/use/workingwithrepository.rst b/docs/sources/use/workingwithrepository.rst index 1cb4c9aa90..cbb0e073bb 100644 --- a/docs/sources/use/workingwithrepository.rst +++ b/docs/sources/use/workingwithrepository.rst @@ -28,12 +28,18 @@ repositories. You can host your own Registry too! 
Docker acts as a client for these services via ``docker search, pull, login`` and ``push``. -Top-level, User, and Your Own Repositories ------------------------------------------- +.. _using_public_repositories: + +Public Repositories +------------------- There are two types of public repositories: *top-level* repositories which are controlled by the Docker team, and *user* repositories -created by individual contributors. +created by individual contributors. Anyone can read from these +repositories -- they really help people get started quickly! You could +also use :ref:`using_private_repositories` if you need to keep control +of who accesses your images, but we will only refer to public +repositories in these examples. * Top-level repositories can easily be recognized by **not** having a ``/`` (slash) in their name. These repositories can generally be @@ -46,28 +52,6 @@ created by individual contributors. * User images are not checked, it is therefore up to you whether or not you trust the creator of this image. -Right now (version 0.5), private repositories are only possible by -hosting `your own registry -`_. To push or pull to a -repository on your own registry, you must prefix the tag with the -address of the registry's host, like this: - -.. code-block:: bash - - # Tag to create a repository with the full registry location. - # The location (e.g. localhost.localdomain:5000) becomes - # a permanent part of the repository name - sudo docker tag 0u812deadbeef localhost.localdomain:5000/repo_name - - # Push the new repository to its home location on localhost - sudo docker push localhost.localdomain:5000/repo_name - -Once a repository has your registry's host name as part of the tag, -you can push and pull it like any other repository, but it will -**not** be searchable (or indexed at all) in the Central Index, and -there will be no user name checking performed. Your registry will -function completely independently from the Central Index. 
- Find public images available on the Central Index ------------------------------------------------- @@ -105,6 +89,7 @@ If your username does not exist it will prompt you to also enter a password and your e-mail address. It will then automatically log you in. +.. _container_commit: Committing a container to a named image --------------------------------------- @@ -117,16 +102,45 @@ your container to an image within your username namespace. # for example docker commit $CONTAINER_ID dhrp/kickassapp sudo docker commit / +.. _image_push: -Pushing a container to its repository -------------------------------------- +Pushing an image to its repository +---------------------------------- In order to push an image to its repository you need to have committed your container to a named image (see above) -Now you can commit this image to the repository +Now you can commit this image to the repository designated by its name +or tag. .. code-block:: bash # for example docker push dhrp/kickassapp sudo docker push / + +.. _using_private_repositories: + +Private Repositories +-------------------- + +Right now (version 0.5), private repositories are only possible by +hosting `your own registry +`_. To push or pull to a +repository on your own registry, you must prefix the tag with the +address of the registry's host, like this: + +.. code-block:: bash + + # Tag to create a repository with the full registry location. + # The location (e.g. localhost.localdomain:5000) becomes + # a permanent part of the repository name + sudo docker tag 0u812deadbeef localhost.localdomain:5000/repo_name + + # Push the new repository to its home location on localhost + sudo docker push localhost.localdomain:5000/repo_name + +Once a repository has your registry's host name as part of the tag, +you can push and pull it like any other repository, but it will +**not** be searchable (or indexed at all) in the Central Index, and +there will be no user name checking performed. 
Your registry will +function completely independently from the Central Index. diff --git a/docs/theme/docker/layout.html b/docs/theme/docker/layout.html index e90663d2b6..30f158641f 100755 --- a/docs/theme/docker/layout.html +++ b/docs/theme/docker/layout.html @@ -213,7 +213,9 @@ } // attached handler on click - $('.sidebar > ul > li > a').not(':last').click(function(){ + // Do not attach to first element or last (intro, faq) so that + // first and last link directly instead of accordian + $('.sidebar > ul > li > a').not(':last').not(':first').click(function(){ var index = $.inArray(this.href, openmenus) diff --git a/hack/RELEASE.md b/hack/RELEASE.md deleted file mode 100644 index 5cf407745a..0000000000 --- a/hack/RELEASE.md +++ /dev/null @@ -1,133 +0,0 @@ -## A maintainer's guide to releasing Docker - -So you're in charge of a docker release? Cool. Here's what to do. - -If your experience deviates from this document, please document the changes to keep it -up-to-date. - - -### 1. Pull from master and create a release branch - - ```bash - $ git checkout master - $ git pull - $ git checkout -b bump_$VERSION - ``` - -### 2. Update CHANGELOG.md - - You can run this command for reference: - - ```bash - LAST_VERSION=$(git tag | grep -E "v[0-9\.]+$" | sort -nr | head -n 1) - git log $LAST_VERSION..HEAD - ``` - - Each change should be formatted as ```BULLET CATEGORY: DESCRIPTION``` - - * BULLET is either ```-```, ```+``` or ```*```, to indicate a bugfix, - new feature or upgrade, respectively. - - * CATEGORY should describe which part of the project is affected. - Valid categories are: - * Builder - * Documentation - * Hack - * Packaging - * Remote API - * Runtime - - * DESCRIPTION: a concise description of the change that is relevant to the end-user, - using the present tense. - Changes should be described in terms of how they affect the user, for example "new feature - X which allows Y", "fixed bug which caused X", "increased performance of Y". 
- - EXAMPLES: - - ``` - + Builder: 'docker build -t FOO' applies the tag FOO to the newly built container. - * Runtime: improve detection of kernel version - - Remote API: fix a bug in the optional unix socket transport - ``` - -### 3. Change VERSION in commands.go - -### 4. Run all tests - - ```bash - $ make test - ``` - -### 5. Commit and create a pull request - - ```bash - $ git add commands.go CHANGELOG.md - $ git commit -m "Bump version to $VERSION" - $ git push origin bump_$VERSION - ``` - -### 6. Get 2 other maintainers to validate the pull request - -### 7. Merge the pull request and apply tags - - ```bash - $ git checkout master - $ git merge bump_$VERSION - $ git tag -a v$VERSION # Don't forget the v! - $ git tag -f -a latest - $ git push - $ git push --tags - ``` - -### 8. Publish binaries - - To run this you will need access to the release credentials. - Get them from [the infrastructure maintainers](https://github.com/dotcloud/docker/blob/master/hack/infrastructure/MAINTAINERS). - - ```bash - $ RELEASE_IMAGE=image_provided_by_infrastructure_maintainers - $ BUILD=$(docker run -d -e RELEASE_PPA=0 $RELEASE_IMAGE) - ``` - - This will do 2 things: - - * It will build and upload the binaries on http://get.docker.io - * It will *test* the release on our Ubuntu PPA (a PPA is a community repository for ubuntu packages) - - Wait for the build to complete. - - ```bash - $ docker wait $BUILD # This should print 0. If it doesn't, your build failed. - ``` - - Check that the output looks OK. Here's an example of a correct output: - - ```bash - $ docker logs 2>&1 b4e7c8299d73 | grep -e 'Public URL' -e 'Successfully uploaded' - Public URL of the object is: http://get.docker.io.s3.amazonaws.com/builds/Linux/x86_64/docker-v0.4.7.tgz - Public URL of the object is: http://get.docker.io.s3.amazonaws.com/builds/Linux/x86_64/docker-latest.tgz - Successfully uploaded packages. - ``` - - If you don't see 3 lines similar to this, something might be wrong. 
Check the full logs and try again. - - -### 9. Publish Ubuntu packages - - If everything went well in the previous step, you can finalize the release by submitting the Ubuntu - packages. - - ```bash - $ RELEASE_IMAGE=image_provided_by_infrastructure_maintainers - $ docker run -e RELEASE_PPA=1 $RELEASE_IMAGE - ``` - - If that goes well, Ubuntu Precise package is in its way. It will take anywhere from 0.5 to 30 hours - for the builders to complete their job depending on builder demand at this time. At this point, Quantal - and Raring packages need to be created using the Launchpad interface: - https://launchpad.net/~dotcloud/+archive/lxc-docker/+packages - - Notify [the packager maintainers](https://github.com/dotcloud/docker/blob/master/packaging/MAINTAINERS) - who will ensure PPA is ready. - - Congratulations! You're done diff --git a/hack/dockerbuilder/Dockerfile b/hack/dockerbuilder/Dockerfile deleted file mode 100644 index 496ee45e7a..0000000000 --- a/hack/dockerbuilder/Dockerfile +++ /dev/null @@ -1,36 +0,0 @@ -# DESCRIPTION Build a container capable of producing official binary and -# PPA packages and uploading them to S3 and Launchpad -# VERSION 1.2 -# DOCKER_VERSION 0.4 -# AUTHOR Solomon Hykes -# Daniel Mizyrycki -# BUILD_CMD docker build -t dockerbuilder . 
-# RUN_CMD docker run -e AWS_ID="$AWS_ID" -e AWS_KEY="$AWS_KEY" -e GPG_KEY="$GPG_KEY" -e PUBLISH_PPA="$PUBLISH_PPA" dockerbuilder -# -# ENV_VARIABLES AWS_ID, AWS_KEY: S3 credentials for uploading Docker binary and tarball -# GPG_KEY: Signing key for docker package -# PUBLISH_PPA: 0 for staging release, 1 for production release -# -from ubuntu:12.04 -maintainer Solomon Hykes -# Workaround the upstart issue -run dpkg-divert --local --rename --add /sbin/initctl -run ln -s /bin/true /sbin/initctl -# Enable universe and gophers PPA -run DEBIAN_FRONTEND=noninteractive apt-get install -y -q python-software-properties -run add-apt-repository "deb http://archive.ubuntu.com/ubuntu $(lsb_release -sc) universe" -run add-apt-repository -y ppa:dotcloud/docker-golang/ubuntu -run apt-get update -# Packages required to checkout, build and upload docker -run DEBIAN_FRONTEND=noninteractive apt-get install -y -q s3cmd curl -run curl -s -o /go.tar.gz https://go.googlecode.com/files/go1.1.2.linux-amd64.tar.gz -run tar -C /usr/local -xzf /go.tar.gz -run echo "export PATH=/usr/local/go/bin:$PATH" > /.bashrc -run echo "export PATH=/usr/local/go/bin:$PATH" > /.bash_profile -run DEBIAN_FRONTEND=noninteractive apt-get install -y -q git build-essential -# Packages required to build an ubuntu package -run DEBIAN_FRONTEND=noninteractive apt-get install -y -q golang-stable debhelper autotools-dev devscripts -# Copy dockerbuilder files into the container -add . 
/src -run cp /src/dockerbuilder /usr/local/bin/ && chmod +x /usr/local/bin/dockerbuilder -cmd ["dockerbuilder"] diff --git a/hack/dockerbuilder/MAINTAINERS b/hack/dockerbuilder/MAINTAINERS deleted file mode 100644 index 5dfc881420..0000000000 --- a/hack/dockerbuilder/MAINTAINERS +++ /dev/null @@ -1 +0,0 @@ -Daniel Mizyrycki (@mzdaniel) diff --git a/hack/dockerbuilder/dockerbuilder b/hack/dockerbuilder/dockerbuilder deleted file mode 100644 index 9fa05ce11e..0000000000 --- a/hack/dockerbuilder/dockerbuilder +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/sh -set -x -set -e - -export PATH=/usr/local/go/bin:$PATH - -PACKAGE=github.com/dotcloud/docker - -if [ $# -gt 1 ]; then - echo "Usage: $0 [REVISION]" - exit 1 -fi - -export REVISION=$1 - -if [ -z "$AWS_ID" -o -z "$AWS_KEY" ]; then - echo "Warning: either AWS_ID or AWS_KEY environment variable not set. Won't upload to S3." -else - /bin/echo -e "[default]\naccess_key = $AWS_ID\nsecret_key = $AWS_KEY\n" > /.s3cfg -fi - -if [ -z "$GPG_KEY" ]; then - echo "Warning: environment variable GPG_KEY is not set. Ubuntu package upload will not succeed." - NO_UBUNTU=1 -fi - -rm -fr docker-release -git clone https://github.com/dotcloud/docker docker-release -cd docker-release -if [ -z "$REVISION" ]; then - make release -else - make release RELEASE_VERSION=$REVISION -fi - -# Remove credentials from container -rm -f /.s3cfg - -if [ -z "$NO_UBUNTU" ]; then - export PATH=`echo $PATH | sed 's#/usr/local/go/bin:##g'` - (cd packaging/ubuntu && make ubuntu) -fi diff --git a/hack/release/README.md b/hack/release/README.md new file mode 100644 index 0000000000..f01bb3a60b --- /dev/null +++ b/hack/release/README.md @@ -0,0 +1,106 @@ +## A maintainer's guide to releasing Docker + +So you're in charge of a Docker release? Cool. Here's what to do. + +If your experience deviates from this document, please document the changes +to keep it up-to-date. + + +### 1. 
Pull from master and create a release branch + +```bash +git checkout master +git pull +git checkout -b bump_$VERSION +``` + +### 2. Update CHANGELOG.md + +You can run this command for reference: + +```bash +LAST_VERSION=$(git tag | grep -E "v[0-9\.]+$" | sort -nr | head -n 1) +git log $LAST_VERSION..HEAD +``` + +Each change should be formatted as ```BULLET CATEGORY: DESCRIPTION``` + +* BULLET is either ```-```, ```+``` or ```*```, to indicate a bugfix, + new feature or upgrade, respectively. + +* CATEGORY should describe which part of the project is affected. + Valid categories are: + * Builder + * Documentation + * Hack + * Packaging + * Remote API + * Runtime + +* DESCRIPTION: a concise description of the change that is relevant to the + end-user, using the present tense. Changes should be described in terms + of how they affect the user, for example "new feature X which allows Y", + "fixed bug which caused X", "increased performance of Y". + +EXAMPLES: + +``` ++ Builder: 'docker build -t FOO' applies the tag FOO to the newly built + container. +* Runtime: improve detection of kernel version +- Remote API: fix a bug in the optional unix socket transport +``` + +### 3. Change the contents of the VERSION file + +### 4. Run all tests + +```bash +go test +``` + +### 5. Commit and create a pull request + +```bash +git add CHANGELOG.md +git commit -m "Bump version to $VERSION" +git push origin bump_$VERSION +``` + +### 6. Get 2 other maintainers to validate the pull request + +### 7. Merge the pull request and apply tags + +```bash +git checkout master +git merge bump_$VERSION +git tag -a v$VERSION # Don't forget the v! +git tag -f -a latest +git push +git push --tags +``` + +### 8. Publish binaries + +To run this you will need access to the release credentials. +Get them from [the infrastructure maintainers]( +https://github.com/dotcloud/docker/blob/master/hack/infrastructure/MAINTAINERS). + +```bash +docker build -t releasedocker . 
+docker run \ + -e AWS_S3_BUCKET=get-nightly.docker.io \ + -e AWS_ACCESS_KEY=$(cat ~/.aws/access_key) \ + -e AWS_SECRET_KEY=$(cat ~/.aws/secret_key) \ + -e GPG_PASSPHRASE=supersecretsesame \ + releasedocker +``` + +It will build and upload the binaries on the specified bucket (you should +use get-nightly.docker.io for general testing, and once everything is fine, +switch to get.docker.io). + + +### 9. Rejoice! + +Congratulations! You're done. diff --git a/hack/release/make.sh b/hack/release/make.sh new file mode 100755 index 0000000000..7792297861 --- /dev/null +++ b/hack/release/make.sh @@ -0,0 +1,179 @@ +#!/bin/sh + +# This script builds various binary artifacts from a checkout of the docker +# source code. +# +# Requirements: +# - The current directory should be a checkout of the docker source code +# (http://github.com/dotcloud/docker). Whatever version is checked out +# will be built. +# - The VERSION file, at the root of the repository, should exist, and +# will be used as Docker binary version and package version. +# - The hash of the git commit will also be included in the Docker binary, +# with the suffix -dirty if the repository isn't clean. +# - The script is intended to be run as part of a docker build, as defined +# in the Dockerfile at the root of the source. In other words: +# DO NOT CALL THIS SCRIPT DIRECTLY. +# - The right way to call this script is to invoke "docker build ." from +# your checkout of the Docker repository. +# + +set -e + +# We're a nice, sexy, little shell script, and people might try to run us; +# but really, they shouldn't. We want to be in a container! +RESOLVCONF=$(readlink --canonicalize /etc/resolv.conf) +grep -q "$RESOLVCONF" /proc/mounts || { +	echo "# I will only run within a container." +	echo "# Try this instead:" +	echo "docker build ." 
+ exit 1 +} + +VERSION=$(cat ./VERSION) +PKGVERSION="$VERSION" +GITCOMMIT=$(git rev-parse --short HEAD) +if test -n "$(git status --porcelain)" +then + GITCOMMIT="$GITCOMMIT-dirty" + PKGVERSION="$PKGVERSION-$(date +%Y%m%d%H%M%S)-$GITCOMMIT" +fi + +PACKAGE_ARCHITECTURE="$(dpkg-architecture -qDEB_HOST_ARCH)" +PACKAGE_URL="http://www.docker.io/" +PACKAGE_MAINTAINER="docker@dotcloud.com" +PACKAGE_DESCRIPTION="lxc-docker is a Linux container runtime +Docker complements LXC with a high-level API which operates at the process +level. It runs unix processes with strong guarantees of isolation and +repeatability across servers. +Docker is a great building block for automating distributed systems: +large-scale web deployments, database clusters, continuous deployment systems, +private PaaS, service-oriented architectures, etc." + +UPSTART_SCRIPT='description "Docker daemon" + +start on filesystem or runlevel [2345] +stop on runlevel [!2345] + +respawn + +script + /usr/bin/docker -d +end script +' + +# Each "bundle" is a different type of build artefact: static binary, Ubuntu +# package, etc. + +# Build Docker as a static binary file +bundle_binary() { + mkdir -p bundles/$VERSION/binary + go build -o bundles/$VERSION/binary/docker-$VERSION \ + -ldflags "-X main.GITCOMMIT $GITCOMMIT -X main.VERSION $VERSION -d -w" \ + ./docker +} + + +# Build Docker's test suite as a collection of binary files (one per +# sub-package to test) +bundle_test() { + mkdir -p bundles/$VERSION/test + for test_dir in $(find_test_dirs); do + test_binary=$( + cd $test_dir + go test -c -v -ldflags "-X main.GITCOMMIT $GITCOMMIT -X main.VERSION $VERSION -d -w" >&2 + find . -maxdepth 1 -type f -name '*.test' -executable + ) + cp $test_dir/$test_binary bundles/$VERSION/test/ + done +} + +# Build docker as an ubuntu package using FPM and REPREPRO (sue me). +# bundle_binary must be called first. 
+bundle_ubuntu() { + mkdir -p bundles/$VERSION/ubuntu + + DIR=$(pwd)/bundles/$VERSION/ubuntu/build + + # Generate an upstart config file (ubuntu-specific) + mkdir -p $DIR/etc/init + echo "$UPSTART_SCRIPT" > $DIR/etc/init/docker.conf + + # Copy the binary + mkdir -p $DIR/usr/bin + cp bundles/$VERSION/binary/docker-$VERSION $DIR/usr/bin/docker + + # Generate postinstall/prerm scripts + cat >/tmp/postinstall </tmp/prerm </dev/null || true + # Check access to the bucket. + # s3cmd has no useful exit status, so we cannot check that. + # Instead, we check if it outputs anything on standard output. + # (When there are problems, it uses standard error instead.) + s3cmd info s3://$BUCKET | grep -q . + # Make the bucket accessible through website endpoints. + s3cmd ws-create --ws-index index --ws-error error s3://$BUCKET +} + +# write_to_s3 uploads the contents of standard input to the specified S3 url. +write_to_s3() { + DEST=$1 + F=`mktemp` + cat > $F + s3cmd --acl-public put $F $DEST + rm -f $F +} + +s3_url() { + echo "http://$BUCKET.s3.amazonaws.com" +} + +# Upload the 'ubuntu' bundle to S3: +# 1. A full APT repository is published at $BUCKET/ubuntu/ +# 2. Instructions for using the APT repository are uploaded at $BUCKET/ubuntu/info +release_ubuntu() { + # Make sure that we have our keys + mkdir -p /.gnupg/ + s3cmd sync s3://$BUCKET/ubuntu/.gnupg/ /.gnupg/ || true + gpg --list-keys releasedocker >/dev/null || { + gpg --gen-key --batch < $APTDIR/conf/distributions < bundles/$VERSION/ubuntu/gpg + s3cmd --acl-public put bundles/$VERSION/ubuntu/gpg s3://$BUCKET/gpg + + # Upload repo + s3cmd --acl-public sync $APTDIR/ s3://$BUCKET/ubuntu/ + cat < /etc/apt/sources.list.d/docker.list +# Then import the repository key +curl $(s3_url $BUCKET)/gpg | apt-key add - +# Install docker +apt-get update ; apt-get install -y lxc-docker +EOF + echo "APT repository uploaded. 
Instructions available at $(s3_url $BUCKET)/ubuntu/info" +} + +# Upload a static binary to S3 +release_binary() { + [ -e bundles/$VERSION ] + S3DIR=s3://$BUCKET/builds/Linux/x86_64 + s3cmd --acl-public put bundles/$VERSION/binary/docker-$VERSION $S3DIR/docker-$VERSION + cat < 0 && content[0] == '1' - if !runtime.capabilities.IPv4Forwarding && !quiet { + runtime.capabilities.IPv4ForwardingDisabled = err3 != nil || len(content) == 0 || content[0] != '1' + if runtime.capabilities.IPv4ForwardingDisabled && !quiet { log.Printf("WARNING: IPv4 forwarding is disabled.") } } diff --git a/runtime_test.go b/runtime_test.go index 8819df2221..83ada6dd21 100644 --- a/runtime_test.go +++ b/runtime_test.go @@ -101,7 +101,7 @@ func init() { // If the unit test is not found, try to download it. if img, err := globalRuntime.repositories.LookupImage(unitTestImageName); err != nil || img.ID != unitTestImageID { // Retrieve the Image - if err := srv.ImagePull(unitTestImageName, "", os.Stdout, utils.NewStreamFormatter(false), nil, true); err != nil { + if err := srv.ImagePull(unitTestImageName, "", os.Stdout, utils.NewStreamFormatter(false), nil, nil, true); err != nil { panic(err) } } diff --git a/server.go b/server.go index e312827915..646cb44877 100644 --- a/server.go +++ b/server.go @@ -102,7 +102,7 @@ func (srv *Server) ContainerExport(name string, out io.Writer) error { } func (srv *Server) ImagesSearch(term string) ([]APISearch, error) { - r, err := registry.NewRegistry(srv.runtime.root, nil, srv.HTTPRequestFactory()) + r, err := registry.NewRegistry(srv.runtime.root, nil, srv.HTTPRequestFactory(nil)) if err != nil { return nil, err } @@ -271,7 +271,7 @@ func (srv *Server) DockerInfo() *APIInfo { Images: imgcount, MemoryLimit: srv.runtime.capabilities.MemoryLimit, SwapLimit: srv.runtime.capabilities.SwapLimit, - IPv4Forwarding: srv.runtime.capabilities.IPv4Forwarding, + IPv4Forwarding: !srv.runtime.capabilities.IPv4ForwardingDisabled, Debug: os.Getenv("DEBUG") != "", NFd: 
utils.GetTotalUsedFds(), NGoroutines: runtime.NumGoroutine(), @@ -419,19 +419,30 @@ func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoin if err != nil { return err } - + out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pulling", "dependend layers")) // FIXME: Try to stream the images? // FIXME: Launch the getRemoteImage() in goroutines + for _, id := range history { + + // ensure no two downloads of the same layer happen at the same time + if err := srv.poolAdd("pull", "layer:"+id); err != nil { + utils.Debugf("Image (id: %s) pull is already running, skipping: %v", id, err) + return nil + } + defer srv.poolRemove("pull", "layer:"+id) + if !srv.runtime.graph.Exists(id) { out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling", "metadata")) imgJSON, imgSize, err := r.GetRemoteImageJSON(id, endpoint, token) if err != nil { + out.Write(sf.FormatProgress(utils.TruncateID(id), "Error", "pulling dependend layers")) // FIXME: Keep going in case of error? return err } img, err := NewImgJSON(imgJSON) if err != nil { + out.Write(sf.FormatProgress(utils.TruncateID(id), "Error", "pulling dependend layers")) return fmt.Errorf("Failed to parse json: %s", err) } @@ -439,13 +450,17 @@ func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoin out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling", "fs layer")) layer, err := r.GetRemoteImageLayer(img.ID, endpoint, token) if err != nil { + out.Write(sf.FormatProgress(utils.TruncateID(id), "Error", "pulling dependend layers")) return err } defer layer.Close() if err := srv.runtime.graph.Register(imgJSON, utils.ProgressReader(layer, imgSize, out, sf.FormatProgress(utils.TruncateID(id), "Downloading", "%8v/%v (%v)"), sf, false), img); err != nil { + out.Write(sf.FormatProgress(utils.TruncateID(id), "Error", "downloading dependend layers")) return err } } + out.Write(sf.FormatProgress(utils.TruncateID(id), "Download", "complete")) + } return nil } @@ -493,29 +508,57 @@ func 
(srv *Server) pullRepository(r *registry.Registry, out io.Writer, localName downloadImage := func(img *registry.ImgData) { if askedTag != "" && img.Tag != askedTag { utils.Debugf("(%s) does not match %s (id: %s), skipping", img.Tag, askedTag, img.ID) - errors <- nil + if parallel { + errors <- nil + } return } if img.Tag == "" { utils.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID) - errors <- nil + if parallel { + errors <- nil + } return } + + // ensure no two downloads of the same image happen at the same time + if err := srv.poolAdd("pull", "img:"+img.ID); err != nil { + utils.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err) + if parallel { + errors <- nil + } + return + } + defer srv.poolRemove("pull", "img:"+img.ID) + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Pulling", fmt.Sprintf("image (%s) from %s", img.Tag, localName))) success := false + var lastErr error for _, ep := range repoData.Endpoints { + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Pulling", fmt.Sprintf("image (%s) from %s, endpoint: %s", img.Tag, localName, ep))) if err := srv.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil { - out.Write(sf.FormatStatus(utils.TruncateID(img.ID), "Error while retrieving image for tag: %s (%s); checking next endpoint", askedTag, err)) + // Its not ideal that only the last error is returned, it would be better to concatenate the errors. + // As the error is also given to the output stream the user will see the error. 
+ lastErr = err + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Error pulling", fmt.Sprintf("image (%s) from %s, endpoint: %s, %s", img.Tag, localName, ep, err))) continue } success = true break } if !success { - errors <- fmt.Errorf("Could not find repository on any of the indexed registries.") + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Error pulling", fmt.Sprintf("image (%s) from %s, %s", img.Tag, localName, lastErr))) + if parallel { + errors <- fmt.Errorf("Could not find repository on any of the indexed registries.") + return + } + } + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download", "complete")) + + if parallel { + errors <- nil } - errors <- nil } if parallel { @@ -524,15 +567,18 @@ func (srv *Server) pullRepository(r *registry.Registry, out io.Writer, localName downloadImage(image) } } - if parallel { + var lastError error for i := 0; i < len(repoData.ImgList); i++ { if err := <-errors; err != nil { - return err + lastError = err } } - } + if lastError != nil { + return lastError + } + } for tag, id := range tagsList { if askedTag != "" && tag != askedTag { continue @@ -586,8 +632,8 @@ func (srv *Server) poolRemove(kind, key string) error { return nil } -func (srv *Server) ImagePull(localName string, tag string, out io.Writer, sf *utils.StreamFormatter, authConfig *auth.AuthConfig, parallel bool) error { - r, err := registry.NewRegistry(srv.runtime.root, authConfig, srv.HTTPRequestFactory()) +func (srv *Server) ImagePull(localName string, tag string, out io.Writer, sf *utils.StreamFormatter, authConfig *auth.AuthConfig, metaHeaders map[string][]string, parallel bool) error { + r, err := registry.NewRegistry(srv.runtime.root, authConfig, srv.HTTPRequestFactory(metaHeaders)) if err != nil { return err } @@ -734,7 +780,7 @@ func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID, } // FIXME: Allow to interrupt current push when new push of same image is done. 
-func (srv *Server) ImagePush(localName string, out io.Writer, sf *utils.StreamFormatter, authConfig *auth.AuthConfig) error { +func (srv *Server) ImagePush(localName string, out io.Writer, sf *utils.StreamFormatter, authConfig *auth.AuthConfig, metaHeaders map[string][]string) error { if err := srv.poolAdd("push", localName); err != nil { return err } @@ -748,7 +794,7 @@ func (srv *Server) ImagePush(localName string, out io.Writer, sf *utils.StreamFo out = utils.NewWriteFlusher(out) img, err := srv.runtime.graph.Get(localName) - r, err2 := registry.NewRegistry(srv.runtime.root, authConfig, srv.HTTPRequestFactory()) + r, err2 := registry.NewRegistry(srv.runtime.root, authConfig, srv.HTTPRequestFactory(metaHeaders)) if err2 != nil { return err2 } @@ -1221,10 +1267,13 @@ func NewServer(flGraphPath string, autoRestart, enableCors bool, dns ListOpts) ( return srv, nil } -func (srv *Server) HTTPRequestFactory() *utils.HTTPRequestFactory { +func (srv *Server) HTTPRequestFactory(metaHeaders map[string][]string) *utils.HTTPRequestFactory { if srv.reqFactory == nil { ud := utils.NewHTTPUserAgentDecorator(srv.versionInfos()...) 
- factory := utils.NewHTTPRequestFactory(ud) + md := &utils.HTTPMetaHeadersDecorator{ + Headers: metaHeaders, + } + factory := utils.NewHTTPRequestFactory(ud, md) srv.reqFactory = factory } return srv.reqFactory diff --git a/utils.go b/utils.go index b8f264fdb4..aed8ffdd76 100644 --- a/utils.go +++ b/utils.go @@ -1,6 +1,7 @@ package docker import ( + "fmt" "strings" ) @@ -146,3 +147,23 @@ func MergeConfig(userConf, imageConf *Config) { } } } + +func parseLxcConfOpts(opts ListOpts) ([]KeyValuePair, error) { + out := make([]KeyValuePair, len(opts)) + for i, o := range opts { + k, v, err := parseLxcOpt(o) + if err != nil { + return nil, err + } + out[i] = KeyValuePair{Key: k, Value: v} + } + return out, nil +} + +func parseLxcOpt(opt string) (string, string, error) { + parts := strings.SplitN(opt, "=", 2) + if len(parts) != 2 { + return "", "", fmt.Errorf("Unable to parse lxc conf option: %s", opt) + } + return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil +} diff --git a/utils/http.go b/utils/http.go index 8c1e4b7a79..1332ce816d 100644 --- a/utils/http.go +++ b/utils/http.go @@ -93,6 +93,20 @@ func (self *HTTPUserAgentDecorator) ChangeRequest(req *http.Request) (newReq *ht return req, nil } +type HTTPMetaHeadersDecorator struct { + Headers map[string][]string +} + +func (self *HTTPMetaHeadersDecorator) ChangeRequest(req *http.Request) (newReq *http.Request, err error) { + if self.Headers == nil { + return req, nil + } + for k, v := range self.Headers { + req.Header[k] = v + } + return req, nil +} + // HTTPRequestFactory creates an HTTP request // and applies a list of decorators on the request. 
type HTTPRequestFactory struct { diff --git a/utils/utils.go b/utils/utils.go index 6a5beb8e48..441f71a126 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -515,9 +515,7 @@ func FindCgroupMountpoint(cgroupType string) (string, error) { func GetKernelVersion() (*KernelVersionInfo, error) { var ( - flavor string - kernel, major, minor int - err error + err error ) uts, err := uname() @@ -536,8 +534,18 @@ func GetKernelVersion() (*KernelVersionInfo, error) { // Remove the \x00 from the release for Atoi to parse correctly release = release[:bytes.IndexByte(release, 0)] - tmp := strings.SplitN(string(release), "-", 2) - tmp2 := strings.SplitN(tmp[0], ".", 3) + return ParseRelease(string(release)) +} + +func ParseRelease(release string) (*KernelVersionInfo, error) { + var ( + flavor string + kernel, major, minor int + err error + ) + + tmp := strings.SplitN(release, "-", 2) + tmp2 := strings.Split(tmp[0], ".") if len(tmp2) > 0 { kernel, err = strconv.Atoi(tmp2[0]) diff --git a/utils/utils_test.go b/utils/utils_test.go index 1030b2902a..20a5820dd5 100644 --- a/utils/utils_test.go +++ b/utils/utils_test.go @@ -337,3 +337,21 @@ search dotcloud.net`: true, } } } + +func assertParseRelease(t *testing.T, release string, b *KernelVersionInfo, result int) { + var ( + a *KernelVersionInfo + ) + a, _ = ParseRelease(release) + + if r := CompareKernelVersion(a, b); r != result { + t.Fatalf("Unexpected kernel version comparison result. 
Found %d, expected %d", r, result) + } +} + +func TestParseRelease(t *testing.T) { + assertParseRelease(t, "3.8.0", &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, 0) + assertParseRelease(t, "3.4.54.longterm-1", &KernelVersionInfo{Kernel: 3, Major: 4, Minor: 54}, 0) + assertParseRelease(t, "3.4.54.longterm-1", &KernelVersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: "1"}, 0) + assertParseRelease(t, "3.8.0-19-generic", &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "19-generic"}, 0) +} diff --git a/utils_test.go b/utils_test.go index 5c37e9e8ec..e8aae17186 100644 --- a/utils_test.go +++ b/utils_test.go @@ -301,3 +301,20 @@ func TestMergeConfigPublicPortNotHonored(t *testing.T) { t.Fail() } } + +func TestParseLxcConfOpt(t *testing.T) { + opts := []string{"lxc.utsname=docker", "lxc.utsname = docker "} + + for _, o := range opts { + k, v, err := parseLxcOpt(o) + if err != nil { + t.FailNow() + } + if k != "lxc.utsname" { + t.Fail() + } + if v != "docker" { + t.Fail() + } + } +}