Commit fe6406b0f2 (mirror of https://github.com/moby/moby.git)
227 changed files with 14,021 additions and 6,908 deletions
Changed paths (from the commit's file tree):

.mailmap, .travis.yml, AUTHORS, CHANGELOG.md, CONTRIBUTING.md, Dockerfile, MAINTAINERS, Makefile, NOTICE, README.md, REMOTE_TODO.md, VERSION, Vagrantfile
api/, api_params.go, api_unit_test.go, archive/
buildfile.go, cgroups/, commands.go, config.go, container.go
contrib/: completion/, init/systemd/, mkimage-arch-pacman.conf, mkimage-arch.sh, mkimage-busybox.sh, mkimage-crux.sh, mkimage-debootstrap.sh, mkimage-rinse.sh, mkimage-unittest.sh, mkimage-yum.sh, mkseccomp.pl, mkseccomp.sample, prepare-commit-msg.hook
docker/
docs/: Dockerfile, MAINTAINERS, requirements.txt, toctree.rst
docs/sources/: api/, articles/, conf.py, faq.rst, index.rst
docs/sources/examples/: couchdb_data_volumes.rst, hello_world.rst, python_web_app.rst, running_redis_service.rst, running_ssh_service.rst, using_supervisord.rst
docs/sources/installation/: archlinux.rst, binaries.rst, frugalware.rst, gentoolinux.rst, google.rst, index.rst, kernel.rst, mac.rst, openSUSE.rst, ubuntulinux.rst, upgrading.rst, vagrant.rst, windows.rst
docs/sources/reference/: builder.rst, commandline/, index.rst, run.rst
docs/sources/reference/api/: README.md, docker_remote_api.rst, docker_remote_api_v1.0.rst, docker_remote_api_v1.1.rst, docker_remote_api_v1.2.rst, docker_remote_api_v1.3.rst, docker_remote_api_v1.4.rst, docker_remote_api_v1.5.rst, docker_remote_api_v1.6.rst, docker_remote_api_v1.7.rst, docker_remote_api_v1.8.rst, docker_remote_api_v1.9.rst, index.rst, index_api.rst, registry_api.rst, registry_index_spec.rst, remote_api_client_libraries.rst
docs/sources/use/
.mailmap (32 lines changed)
@@ -1,9 +1,11 @@
# Generate AUTHORS: git log --format='%aN <%aE>' | sort -uf | grep -v vagrant-ubuntu-12
<charles.hooper@dotcloud.com> <chooper@plumata.com>
# Generate AUTHORS: git log --format='%aN <%aE>' | sort -uf
<charles.hooper@dotcloud.com> <chooper@plumata.com>
<daniel.mizyrycki@dotcloud.com> <daniel@dotcloud.com>
<daniel.mizyrycki@dotcloud.com> <mzdaniel@glidelink.net>
Guillaume J. Charmes <guillaume.charmes@dotcloud.com> <charmes.guillaume@gmail.com>
<guillaume.charmes@dotcloud.com> <guillaume@dotcloud.com>
Guillaume J. Charmes <guillaume.charmes@docker.com> <charmes.guillaume@gmail.com>
<guillaume.charmes@docker.com> <guillaume@dotcloud.com>
<guillaume.charmes@docker.com> <guillaume@docker.com>
<guillaume.charmes@docker.com> <guillaume.charmes@dotcloud.com>
<kencochrane@gmail.com> <KenCochrane@gmail.com>
<sridharr@activestate.com> <github@srid.name>
Thatcher Peskens <thatcher@dotcloud.com> dhrp <thatcher@dotcloud.com>

@@ -15,8 +17,11 @@ Joffrey F <joffrey@dotcloud.com>
Tim Terhorst <mynamewastaken+git@gmail.com>
Andy Smith <github@anarkystic.com>
<kalessin@kalessin.fr> <louis@dotcloud.com>
<victor.vieux@dotcloud.com> <victor@dotcloud.com>
<victor.vieux@dotcloud.com> <dev@vvieux.com>
<victor.vieux@docker.com> <victor.vieux@dotcloud.com>
<victor.vieux@docker.com> <victor@dotcloud.com>
<victor.vieux@docker.com> <dev@vvieux.com>
<victor.vieux@docker.com> <victor@docker.com>
<victor.vieux@docker.com> <vieux@docker.com>
<dominik@honnef.co> <dominikh@fork-bomb.org>
Thatcher Peskens <thatcher@dotcloud.com>
<ehanchrow@ine.com> <eric.hanchrow@gmail.com>

@@ -38,3 +43,18 @@ Jean-Baptiste Barth <jeanbaptiste.barth@gmail.com>
Matthew Mueller <mattmuelle@gmail.com>
<mosoni@ebay.com> <mohitsoni1989@gmail.com>
Shih-Yuan Lee <fourdollars@gmail.com>
Daniel Mizyrycki <daniel.mizyrycki@dotcloud.com> root <root@vagrant-ubuntu-12.10.vagrantup.com>
Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
<proppy@google.com> <proppy@aminche.com>
<michael@crosbymichael.com> <crosby.michael@gmail.com>
<github@metaliveblog.com> <github@developersupport.net>
<brandon@ifup.org> <brandon@ifup.co>
<dano@spotify.com> <daniel.norberg@gmail.com>
<danny@codeaholics.org> <Danny.Yates@mailonline.co.uk>
<gurjeet@singh.im> <singh.gurjeet@gmail.com>
<shawn@churchofgit.com> <shawnlandden@gmail.com>
<sjoerd-github@linuxonly.nl> <sjoerd@byte.nl>
<solomon@dotcloud.com> <solomon.hykes@dotcloud.com>
<SvenDowideit@home.org.au> <SvenDowideit@fosiki.com>
Sven Dowideit <SvenDowideit@home.org.au> ¨Sven <¨SvenDowideit@home.org.au¨>
unclejack <unclejacksons@gmail.com> <unclejack@users.noreply.github.com>
.travis.yml

@@ -13,11 +13,18 @@ before_script:
  - sudo apt-get update -qq
  - sudo apt-get install -qq python-yaml
  - git remote add upstream git://github.com/dotcloud/docker.git
  - git fetch --append --no-tags upstream refs/heads/master:refs/remotes/upstream/master
  - upstream=master;
    if [ "$TRAVIS_PULL_REQUEST" != false ]; then
      upstream=$TRAVIS_BRANCH;
    fi;
    git fetch --append --no-tags upstream refs/heads/$upstream:refs/remotes/upstream/$upstream
  # sometimes we have upstream master already as origin/master (PRs), but other times we don't, so let's just make sure we have a completely unambiguous way to specify "upstream master" from here out
  # but if it's a PR against non-master, we need that upstream branch instead :)
  - sudo pip install -r docs/requirements.txt

script:
  - hack/travis/dco.py
  - hack/travis/gofmt.py
  - make -sC docs SPHINXOPTS=-q docs man

# vim:set sw=2 ts=2:
AUTHORS (172 lines changed)
@@ -3,29 +3,42 @@
#
# For a list of active project maintainers, see the MAINTAINERS file.
#
Al Tobey <al@ooyala.com>
Alex Gaynor <alex.gaynor@gmail.com>
Aanand Prasad <aanand.prasad@gmail.com>
Aaron Feng <aaron.feng@gmail.com>
Abel Muiño <amuino@gmail.com>
Alexander Larsson <alexl@redhat.com>
Alexey Shamrin <shamrin@gmail.com>
Alex Gaynor <alex.gaynor@gmail.com>
Alexis THOMAS <fr.alexisthomas@gmail.com>
Al Tobey <al@ooyala.com>
Andrea Luzzardi <aluzzardi@gmail.com>
Andreas Savvides <andreas@editd.com>
Andreas Tiefenthaler <at@an-ti.eu>
Andrew Duckworth <grillopress@gmail.com>
Andrew Macgregor <andrew.macgregor@agworld.com.au>
Andrew Munsell <andrew@wizardapps.net>
Andrews Medina <andrewsmedina@gmail.com>
Andy Chambers <anchambers@paypal.com>
andy diller <dillera@gmail.com>
Andy Rothfusz <github@metaliveblog.com>
Andy Smith <github@anarkystic.com>
Anthony Bishopric <git@anthonybishopric.com>
Anton Nikitin <anton.k.nikitin@gmail.com>
Antony Messerli <amesserl@rackspace.com>
apocas <petermdias@gmail.com>
Asbjørn Enge <asbjorn@hanafjedle.net>
Barry Allard <barry.allard@gmail.com>
Bartłomiej Piotrowski <b@bpiotrowski.pl>
Benoit Chesneau <bchesneau@gmail.com>
Ben Sargent <ben@brokendigits.com>
Ben Toews <mastahyeti@gmail.com>
Ben Wiklund <ben@daisyowl.com>
Benoit Chesneau <bchesneau@gmail.com>
Bhiraj Butala <abhiraj.butala@gmail.com>
Bouke Haarsma <bouke@webatoom.nl>
Brandon Liu <bdon@bdon.org>
Brandon Philips <brandon@ifup.co>
Brandon Philips <brandon@ifup.org>
Brian Dorsey <brian@dorseys.org>
Brian Goff <cpuguy83@gmail.com>
Brian McCallister <brianm@skife.org>
Brian Olsen <brian@maven-group.org>
Brian Shumate <brian@couchbase.com>
@@ -33,169 +46,298 @@ Briehan Lombaard <briehan.lombaard@gmail.com>
Bruno Bigras <bigras.bruno@gmail.com>
Caleb Spare <cespare@gmail.com>
Calen Pennington <cale@edx.org>
Carl X. Su <bcbcarl@gmail.com>
Charles Hooper <charles.hooper@dotcloud.com>
Charles Lindsay <chaz@chazomatic.us>
Chia-liang Kao <clkao@clkao.org>
Chris St. Pierre <chris.a.st.pierre@gmail.com>
Christopher Currie <codemonkey+github@gmail.com>
Christopher Rigor <crigor@gmail.com>
Christophe Troestler <christophe.Troestler@umons.ac.be>
Clayton Coleman <ccoleman@redhat.com>
Colin Dunklau <colin.dunklau@gmail.com>
Colin Rice <colin@daedrum.net>
Cory Forsyth <cory.forsyth@gmail.com>
cressie176 <github@stephen-cresswell.net>
Dan Buch <d.buch@modcloth.com>
Dan Hirsch <thequux@upstandinghackers.com>
Daniel Exner <dex@dragonslave.de>
Daniel Garcia <daniel@danielgarcia.info>
Daniel Gasienica <daniel@gasienica.ch>
Daniel Mizyrycki <daniel.mizyrycki@dotcloud.com>
Daniel Norberg <dano@spotify.com>
Daniel Nordberg <dnordberg@gmail.com>
Daniel Robinson <gottagetmac@gmail.com>
Daniel Von Fange <daniel@leancoder.com>
Daniel YC Lin <dlin.tw@gmail.com>
Danny Yates <danny@codeaholics.org>
Darren Coxall <darren@darrencoxall.com>
David Anderson <dave@natulte.net>
David Calavera <david.calavera@gmail.com>
David Mcanulty <github@hellspark.com>
David Sissitka <me@dsissitka.com>
Deni Bertovic <deni@kset.org>
Dinesh Subhraveti <dineshs@altiscale.com>
dkumor <daniel@dkumor.com>
Dmitry Demeshchuk <demeshchuk@gmail.com>
Dominik Honnef <dominik@honnef.co>
Don Spaulding <donspauldingii@gmail.com>
Dr Nic Williams <drnicwilliams@gmail.com>
Dražen Lučanin <kermit666@gmail.com>
Dr Nic Williams <drnicwilliams@gmail.com>
Dustin Sallings <dustin@spy.net>
Edmund Wagner <edmund-wagner@web.de>
Elias Probst <mail@eliasprobst.eu>
Emil Hernvall <emil@quench.at>
Emily Rose <emily@contactvibe.com>
Eric Hanchrow <ehanchrow@ine.com>
Eric Lee <thenorthsecedes@gmail.com>
Eric Myhre <hash@exultant.us>
Erno Hopearuoho <erno.hopearuoho@gmail.com>
eugenkrizo <eugen.krizo@gmail.com>
Evan Krall <krall@yelp.com>
Evan Phoenix <evan@fallingsnow.net>
Evan Wies <evan@neomantra.net>
Eystein Måløy Stenberg <eystein.maloy.stenberg@cfengine.com>
ezbercih <cem.ezberci@gmail.com>
Fabio Falci <fabiofalci@gmail.com>
Fabio Rehm <fgrehm@gmail.com>
Fabrizio Regini <freegenie@gmail.com>
Faiz Khan <faizkhan00@gmail.com>
Fareed Dudhia <fareeddudhia@googlemail.com>
Fernando <fermayo@gmail.com>
Flavio Castelli <fcastelli@suse.com>
Francisco Souza <f@souza.cc>
Frank Macreery <frank@macreery.com>
Frederick F. Kautz IV <fkautz@alumni.cmu.edu>
Frederik Loeffert <frederik@zitrusmedia.de>
Freek Kalter <freek@kalteronline.org>
Gabe Rosenhouse <gabe@missionst.com>
Gabriel Monroy <gabriel@opdemand.com>
Galen Sampson <galen.sampson@gmail.com>
Gareth Rushgrove <gareth@morethanseven.net>
Gereon Frey <gereon.frey@dynport.de>
Gert van Valkenhoef <g.h.m.van.valkenhoef@rug.nl>
Graydon Hoare <graydon@pobox.com>
Greg Thornton <xdissent@me.com>
Guillaume J. Charmes <guillaume.charmes@dotcloud.com>
grunny <mwgrunny@gmail.com>
Guillaume J. Charmes <guillaume.charmes@docker.com>
Gurjeet Singh <gurjeet@singh.im>
Guruprasad <lgp171188@gmail.com>
Harley Laue <losinggeneration@gmail.com>
Hector Castro <hectcastro@gmail.com>
Hunter Blanks <hunter@twilio.com>
inglesp <peter.inglesby@gmail.com>
Isaac Dupree <antispam@idupree.com>
Isao Jonas <isao.jonas@gmail.com>
Jake Moshenko <jake@devtable.com>
James Allen <jamesallen0108@gmail.com>
James Carr <james.r.carr@gmail.com>
James Mills <prologic@shortcircuit.net.au>
James Turnbull <james@lovedthanlost.net>
jaseg <jaseg@jaseg.net>
Jason McVetta <jason.mcvetta@gmail.com>
Jean-Baptiste Barth <jeanbaptiste.barth@gmail.com>
Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
Jeff Lindsay <progrium@gmail.com>
Jeremy Grosser <jeremy@synack.me>
Jérôme Petazzoni <jerome.petazzoni@dotcloud.com>
Jesse Dubay <jesse@thefortytwo.net>
Jim Alateras <jima@comware.com.au>
Jimmy Cuadra <jimmy@jimmycuadra.com>
Joe Beda <joe.github@bedafamily.com>
Joe Van Dyk <joe@tanga.com>
Joffrey F <joffrey@dotcloud.com>
Johan Euphrosine <proppy@google.com>
Johannes 'fish' Ziemke <github@freigeist.org>
Johan Rydberg <johan.rydberg@gmail.com>
John Costa <john.costa@gmail.com>
Jon Wedaman <jweede@gmail.com>
John Feminella <jxf@jxf.me>
John Gardiner Myers <jgmyers@proofpoint.com>
John Warwick <jwarwick@gmail.com>
Jonas Pfenniger <jonas@pfenniger.name>
Jonathan Mueller <j.mueller@apoveda.ch>
Jonathan Rudenberg <jonathan@titanous.com>
Jon Wedaman <jweede@gmail.com>
Joost Cassee <joost@cassee.net>
Jordan Arentsen <blissdev@gmail.com>
Jordan Sissel <jls@semicomplete.com>
Joseph Anthony Pasquale Holsten <joseph@josephholsten.com>
Joseph Hager <ajhager@gmail.com>
Josh Hawn <josh.hawn@docker.com>
Josh Poimboeuf <jpoimboe@redhat.com>
JP <jpellerin@leapfrogonline.com>
Julien Barbier <write0@gmail.com>
Jérôme Petazzoni <jerome.petazzoni@dotcloud.com>
Julien Dubois <julien.dubois@gmail.com>
Justin Force <justin.force@gmail.com>
Justin Plock <jplock@users.noreply.github.com>
Karan Lyons <karan@karanlyons.com>
Karl Grzeszczak <karl@karlgrz.com>
Karl Grzeszczak <karlgrz@gmail.com>
Kawsar Saiyeed <kawsar.saiyeed@projiris.com>
Keli Hu <dev@keli.hu>
Ken Cochrane <kencochrane@gmail.com>
Kevin Clark <kevin.clark@gmail.com>
Kevin J. Lynagh <kevin@keminglabs.com>
Keyvan Fatehi <keyvanfatehi@gmail.com>
kim0 <email.ahmedkamal@googlemail.com>
Kim BKC Carlbacker <kim.carlbacker@gmail.com>
Kimbro Staken <kstaken@kstaken.com>
Kiran Gangadharan <kiran.daredevil@gmail.com>
Konstantin Pelykh <kpelykh@zettaset.com>
Kyle Conroy <kyle.j.conroy@gmail.com>
Laurie Voss <github@seldo.com>
Liang-Chi Hsieh <viirya@gmail.com>
Lokesh Mandvekar <lsm5@redhat.com>
Louis Opter <kalessin@kalessin.fr>
lukaspustina <lukas.pustina@centerdevice.com>
Mahesh Tiyyagura <tmahesh@gmail.com>
Manuel Meurer <manuel@krautcomputing.com>
Manuel Woelker <docker@manuel.woelker.org>
Manuel Woelker <github@manuel.woelker.org>
Marc Kuo <kuomarc2@gmail.com>
Marco Hennings <marco.hennings@freiheit.com>
Marcus Farkas <toothlessgear@finitebox.com>
Marcus Ramberg <marcus@nordaaker.com>
Marek Goldmann <marek.goldmann@gmail.com>
Mark Allen <mrallen1@yahoo.com>
Mark McGranaghan <mmcgrana@gmail.com>
Marko Mikulicic <mmikulicic@gmail.com>
Markus Fix <lispmeister@gmail.com>
Martijn van Oosterhout <kleptog@svana.org>
Martin Redmond <martin@tinychat.com>
Matt Apperson <me@mattapperson.com>
Mathieu Le Marec - Pasquet <kiorky@cryptelium.net>
Matt Apperson <me@mattapperson.com>
Matt Bachmann <bachmann.matt@gmail.com>
Matt Haggard <haggardii@gmail.com>
Matthew Mueller <mattmuelle@gmail.com>
mattymo <raytrac3r@gmail.com>
Maxime Petazzoni <max@signalfuse.com>
Maxim Treskin <zerthurd@gmail.com>
meejah <meejah@meejah.ca>
Michael Crosby <crosby.michael@gmail.com>
Michael Crosby <michael@crosbymichael.com>
Michael Gorsuch <gorsuch@github.com>
Michael Stapelberg <michael+gh@stapelberg.de>
Miguel Angel Fernández <elmendalerenda@gmail.com>
Mike Gaffney <mike@uberu.com>
Mike Naberezny <mike@naberezny.com>
Mikhail Sobolev <mss@mawhrin.net>
Mohit Soni <mosoni@ebay.com>
Morten Siebuhr <sbhr@sbhr.dk>
Nan Monnand Deng <monnand@gmail.com>
Nate Jones <nate@endot.org>
Nathan Kleyn <nathan@nathankleyn.com>
Nelson Chen <crazysim@gmail.com>
Niall O'Higgins <niallo@unworkable.org>
Nick Payne <nick@kurai.co.uk>
Nick Stenning <nick.stenning@digital.cabinet-office.gov.uk>
Nick Stinemates <nick@stinemates.org>
Nicolas Dudebout <nicolas.dudebout@gatech.edu>
Nicolas Kaiser <nikai@nikai.net>
Nolan Darilek <nolan@thewordnerd.info>
odk- <github@odkurzacz.org>
Oguz Bilgic <fisyonet@gmail.com>
Ole Reifschneider <mail@ole-reifschneider.de>
O.S.Tezer <ostezer@gmail.com>
pandrew <letters@paulnotcom.se>
Pascal Borreli <pascal@borreli.com>
pattichen <craftsbear@gmail.com>
Paul Bowsher <pbowsher@globalpersonals.co.uk>
Paul Hammond <paul@paulhammond.org>
Paul Liétar <paul@lietar.net>
Paul Lietar <paul@lietar.net>
Paul Morie <pmorie@gmail.com>
Paul Nasrat <pnasrat@gmail.com>
Paul <paul9869@gmail.com>
Peter Braden <peterbraden@peterbraden.co.uk>
Peter Waller <peter@scraperwiki.com>
Phil Spitler <pspitler@gmail.com>
Piergiuliano Bossi <pgbossi@gmail.com>
Pierre-Alain RIVIERE <pariviere@ippon.fr>
Piotr Bogdan <ppbogdan@gmail.com>
pysqz <randomq@126.com>
Quentin Brossard <qbrossard@gmail.com>
Rafal Jeczalik <rjeczalik@gmail.com>
Ramkumar Ramachandra <artagnon@gmail.com>
Ramon van Alteren <ramon@vanalteren.nl>
Renato Riccieri Santos Zannon <renato.riccieri@gmail.com>
rgstephens <greg@udon.org>
Rhys Hiltner <rhys@twitch.tv>
Richo Healey <richo@psych0tik.net>
Rick Bradley <rick@users.noreply.github.com>
Robert Obryk <robryk@gmail.com>
Roberto G. Hashioka <roberto.hashioka@docker.com>
Roberto Hashioka <roberto_hashioka@hotmail.com>
Rodrigo Vaz <rodrigo.vaz@gmail.com>
Roel Van Nyen <roel.vannyen@gmail.com>
Roger Peppe <rogpeppe@gmail.com>
Ryan Fowler <rwfowler@gmail.com>
Ryan O'Donnell <odonnellryanc@gmail.com>
Ryan Seto <ryanseto@yak.net>
Sam Alba <sam.alba@gmail.com>
Sam J Sharpe <sam.sharpe@digital.cabinet-office.gov.uk>
Samuel Andaya <samuel@andaya.net>
Scott Bessler <scottbessler@gmail.com>
Sean Cronin <seancron@gmail.com>
Sean P. Kane <skane@newrelic.com>
Shawn Landden <shawn@churchofgit.com>
Shawn Siefkas <shawn.siefkas@meredith.com>
Shih-Yuan Lee <fourdollars@gmail.com>
shin- <joffrey@docker.com>
Silas Sewell <silas@sewell.org>
Simon Taranto <simon.taranto@gmail.com>
Sjoerd Langkemper <sjoerd-github@linuxonly.nl>
Solomon Hykes <solomon@dotcloud.com>
Song Gao <song@gao.io>
Sridatta Thatipamala <sthatipamala@gmail.com>
Sridhar Ratnakumar <sridharr@activestate.com>
Steeve Morin <steeve.morin@gmail.com>
Stefan Praszalowicz <stefan@greplin.com>
Sven Dowideit <SvenDowideit@home.org.au>
sudosurootdev <sudosurootdev@gmail.com>
Sven Dowideit <svendowideit@home.org.au>
Sylvain Bellemare <sylvain.bellemare@ezeep.com>
tang0th <tang0th@gmx.com>
Tatsuki Sugiura <sugi@nemui.org>
Tehmasp Chaudhri <tehmasp@gmail.com>
Thatcher Peskens <thatcher@dotcloud.com>
Thermionix <bond711@gmail.com>
Thijs Terlouw <thijsterlouw@gmail.com>
Thomas Bikeev <thomas.bikeev@mac.com>
Thomas Frössman <thomasf@jossystem.se>
Thomas Hansen <thomas.hansen@gmail.com>
Thomas LEVEIL <thomasleveil@gmail.com>
Tianon Gravi <admwiggin@gmail.com>
Tim Bosse <maztaim@users.noreply.github.com>
Tim Terhorst <mynamewastaken+git@gmail.com>
Tobias Bieniek <Tobias.Bieniek@gmx.de>
Tobias Schmidt <ts@soundcloud.com>
Tobias Schwab <tobias.schwab@dynport.de>
Todd Lunter <tlunter@gmail.com>
Tom Hulihan <hulihan.tom159@gmail.com>
Tommaso Visconti <tommaso.visconti@gmail.com>
Travis Cline <travis.cline@gmail.com>
Tyler Brock <tyler.brock@gmail.com>
Tzu-Jung Lee <roylee17@gmail.com>
Ulysse Carion <ulyssecarion@gmail.com>
unclejack <unclejacksons@gmail.com>
vgeta <gopikannan.venugopalsamy@gmail.com>
Victor Coisne <victor.coisne@dotcloud.com>
Victor Lyuboslavsky <victor@victoreda.com>
Victor Vieux <victor.vieux@dotcloud.com>
Victor Vieux <victor.vieux@docker.com>
Vincent Batts <vbatts@redhat.com>
Vincent Bernat <bernat@luffy.cx>
Vincent Woo <me@vincentwoo.com>
Vinod Kulkarni <vinod.kulkarni@gmail.com>
Vitor Monteiro <vmrmonteiro@gmail.com>
Vivek Agarwal <me@vivek.im>
Vladimir Kirillov <proger@wilab.org.ua>
Vladimir Rutsky <iamironbob@gmail.com>
Walter Stanish <walter@pratyeka.org>
WarheadsSE <max@warheads.net>
Wes Morgan <cap10morgan@gmail.com>
Will Dietz <w@wdtz.org>
William Delanoue <william.delanoue@gmail.com>
Will Rouesnel <w.rouesnel@gmail.com>
Xiuming Chen <cc@cxm.cc>
Yang Bai <hamo.by@gmail.com>
Yurii Rashkovskii <yrashk@gmail.com>
Zain Memon <zain@inzain.net>
Zaiste! <oh@zaiste.net>
Zilin Du <zilin.du@gmail.com>
zimbatm <zimbatm@zimbatm.com>
CHANGELOG.md (20 lines changed)
@@ -1,5 +1,25 @@
# Changelog

## 0.8.0 (2014-02-04)

#### Notable features since 0.7.0

* Images and containers can be removed much faster
* Building an image from source with docker build is now much faster
* The Docker daemon starts and stops much faster
* The memory footprint of many common operations has been reduced, by streaming files instead of buffering them in memory, fixing memory leaks, and fixing various suboptimal memory allocations
* Several race conditions were fixed, making Docker more stable and less likely to crash under very high concurrency load
* All packaging operations are now built on the Go language’s standard tar implementation, which is bundled with Docker itself. This makes packaging more portable across host distributions, and solves several issues caused by quirks and incompatibilities between different distributions of tar
* Docker can now create, remove and modify larger numbers of containers and images gracefully, thanks to more aggressive releasing of system resources. For example the storage driver API now allows Docker to do reference counting on mounts created by the drivers
* Many components have been separated into smaller sub-packages, each with a dedicated test suite. As a result the code is better-tested, more readable and easier to change. With the ongoing changes to the networking and execution subsystems of Docker, testing these areas has been a focus of the refactoring: by moving these subsystems into separate packages we can test, analyze, and monitor their coverage and quality

* The ADD instruction now supports caching, which avoids unnecessarily re-uploading the same source content again and again when it hasn’t changed
* The new ONBUILD instruction adds to your image a “trigger” instruction to be executed at a later time, when the image is used as the base for another build
* Docker now ships with an experimental storage driver which uses the BTRFS filesystem for copy-on-write
* Docker is officially supported on Mac OS X
* The Docker daemon supports systemd socket activation

## 0.7.6 (2014-01-14)

#### Builder

CONTRIBUTING.md
@@ -7,7 +7,7 @@ feels wrong or incomplete.
## Reporting Issues

When reporting [issues](https://github.com/dotcloud/docker/issues)
on Github please include your host OS ( Ubuntu 12.04, Fedora 19, etc... )
on GitHub please include your host OS ( Ubuntu 12.04, Fedora 19, etc... )
and the output of `docker version` along with the output of `docker info` if possible.
This information will help us review and fix your issue faster.

@@ -45,7 +45,7 @@ else is working on the same thing.

### Create issues...

Any significant improvement should be documented as [a github
Any significant improvement should be documented as [a GitHub
issue](https://github.com/dotcloud/docker/issues) before anybody
starts working on it.

@@ -115,16 +115,28 @@ can certify the below:
```
Docker Developer Grant and Certificate of Origin 1.1

By making a contribution to the Docker Project ("Project"), I represent and warrant that:
By making a contribution to the Docker Project ("Project"), I represent and
warrant that:

a. The contribution was created in whole or in part by me and I have the right to submit the contribution on my own behalf or on behalf of a third party who has authorized me to submit this contribution to the Project; or
a. The contribution was created in whole or in part by me and I have the right
to submit the contribution on my own behalf or on behalf of a third party who
has authorized me to submit this contribution to the Project; or

b. The contribution is based upon previous work that, to the best of my knowledge, is covered under an appropriate open source license and I have the right and authorization to submit that work with modifications, whether created in whole or in part by me, under the same open source license (unless I am permitted to submit under a different license) that I have identified in the contribution; or
b. The contribution is based upon previous work that, to the best of my
knowledge, is covered under an appropriate open source license and I have the
right and authorization to submit that work with modifications, whether
created in whole or in part by me, under the same open source license (unless
I am permitted to submit under a different license) that I have identified in
the contribution; or

c. The contribution was provided directly to me by some other person who represented and warranted (a) or (b) and I have not modified it.

d. I understand and agree that this Project and the contribution are publicly known and that a record of the contribution (including all personal information I submit with it, including my sign-off record) is maintained indefinitely and may be redistributed consistent with this Project or the open source license(s) involved.
c. The contribution was provided directly to me by some other person who
represented and warranted (a) or (b) and I have not modified it.

d. I understand and agree that this Project and the contribution are publicly
known and that a record of the contribution (including all personal
information I submit with it, including my sign-off record) is maintained
indefinitely and may be redistributed consistent with this Project or the open
source license(s) involved.
```

then you just add a line to every git commit message:
@@ -134,20 +146,14 @@ then you just add a line to every git commit message:
using your real name (sorry, no pseudonyms or anonymous contributions.)
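For illustration, a sign-off line in the format produced by the hook below would look like this (the name, email, and GitHub user here are hypothetical):

```
Docker-DCO-1.1-Signed-off-by: Jane Doe <jane.doe@example.com> (github: janedoe)
```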
One way to automate this, is customise your git ``commit.template`` by adding
the following to your ``.git/hooks/prepare-commit-msg`` script (needs
``chmod 755 .git/hooks/prepare-commit-msg`` ) in the docker checkout:
a ``prepare-commit-msg`` hook to your docker checkout:

```
#!/bin/sh
# Auto sign all commits to allow them to be used by the Docker project.
# see https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md#sign-your-work
#
GH_USER=$(git config --get github.user)
SOB=$(git var GIT_AUTHOR_IDENT | sed -n "s/^\(.*>\).*$/Docker-DCO-1.1-Signed-off-by: \1 \(github: $GH_USER\)/p")
grep -qs "^$SOB" "$1" || echo "\n$SOB" >> "$1"

curl -o .git/hooks/prepare-commit-msg https://raw.github.com/dotcloud/docker/master/contrib/prepare-commit-msg.hook && chmod +x .git/hooks/prepare-commit-msg
```

* Note: the above script expects to find your GitHub user name in ``git config --get github.user``

If you have any questions, please refer to the FAQ in the [docs](http://docs.docker.io)
Dockerfile (28 lines changed)
@@ -24,23 +24,23 @@
#

docker-version 0.6.1
FROM stackbrew/ubuntu:12.04
FROM ubuntu:13.10
MAINTAINER Tianon Gravi <admwiggin@gmail.com> (@tianon)

# Add precise-backports to get s3cmd >= 1.1.0 (so we get ENV variable support in our .s3cfg)
RUN echo 'deb http://archive.ubuntu.com/ubuntu precise-backports main universe' > /etc/apt/sources.list.d/backports.list

# Packaged dependencies
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \
	apt-utils \
	aufs-tools \
	automake \
	btrfs-tools \
	build-essential \
	curl \
	dpkg-sig \
	git \
	iptables \
	libapparmor-dev \
	libcap-dev \
	libsqlite3-dev \
	lxc \
	mercurial \
	reprepro \
	ruby1.9.1 \

@@ -48,10 +48,14 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \
	s3cmd=1.1.0* \
	--no-install-recommends

# Get and compile LXC 0.8 (since it is the most stable)
RUN git clone --no-checkout https://github.com/lxc/lxc.git /usr/local/lxc && cd /usr/local/lxc && git checkout -q lxc-0.8.0
RUN cd /usr/local/lxc && ./autogen.sh && ./configure --disable-docs && make && make install

# Get lvm2 source for compiling statically
RUN git clone https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103
RUN git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103
# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags
# note: we can't use "git clone -b" above because it requires at least git 1.7.10 to be able to use that on a tag instead of a branch and we only have 1.7.9.5
# note: we don't use "git clone -b" above because it then spews big nasty warnings about 'detached HEAD' state that we can't silence as easily as we can silence them using "git checkout" directly

# Compile and install lvm2
RUN cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper

@@ -64,19 +68,23 @@ ENV GOPATH /go:/go/src/github.com/dotcloud/docker/vendor
RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1

# Compile Go for cross compilation
ENV DOCKER_CROSSPLATFORMS darwin/amd64 darwin/386
# TODO add linux/386 and linux/arm
ENV DOCKER_CROSSPLATFORMS linux/386 linux/arm darwin/amd64 darwin/386
# (set an explicit GOARM of 5 for maximum compatibility)
ENV GOARM 5
RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'

# Grab Go's cover tool for dead-simple code coverage testing
RUN go get code.google.com/p/go.tools/cmd/cover

# TODO replace FPM with some very minimal debhelper stuff
RUN gem install --no-rdoc --no-ri fpm --version 1.0.1
RUN gem install --no-rdoc --no-ri fpm --version 1.0.2

# Setup s3cmd config
RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg

# Set user.email so crosbymichael's in-container merge commits go smoothly
RUN git config --global user.email 'docker-dummy@example.com'

VOLUME /var/lib/docker
WORKDIR /go/src/github.com/dotcloud/docker

MAINTAINERS
@@ -6,4 +6,4 @@ Michael Crosby <michael@crosbymichael.com> (@crosbymichael)
api.go: Victor Vieux <victor@dotcloud.com> (@vieux)
Dockerfile: Tianon Gravi <admwiggin@gmail.com> (@tianon)
Makefile: Tianon Gravi <admwiggin@gmail.com> (@tianon)
Vagrantfile: Daniel Mizyrycki <daniel@dotcloud.com> (@mzdaniel)
Vagrantfile: Cristian Staretu <cristian.staretu@gmail.com> (@unclejack)

Makefile (14 lines changed)
@@ -1,4 +1,4 @@
.PHONY: all binary build cross default docs shell test
.PHONY: all binary build cross default docs docs-build docs-shell shell test test-integration

GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD)
DOCKER_IMAGE := docker:$(GIT_BRANCH)

@@ -16,18 +16,26 @@ binary: build
cross: build
	$(DOCKER_RUN_DOCKER) hack/make.sh binary cross

docs:
	docker build -rm -t "$(DOCKER_DOCS_IMAGE)" docs
docs: docs-build
	docker run -rm -i -t -p 8000:8000 "$(DOCKER_DOCS_IMAGE)"

docs-shell: docs-build
	docker run -rm -i -t -p 8000:8000 "$(DOCKER_DOCS_IMAGE)" bash

test: build
	$(DOCKER_RUN_DOCKER) hack/make.sh test test-integration

test-integration: build
	$(DOCKER_RUN_DOCKER) hack/make.sh test-integration

shell: build
	$(DOCKER_RUN_DOCKER) bash

build: bundles
	docker build -rm -t "$(DOCKER_IMAGE)" .

docs-build:
	docker build -rm -t "$(DOCKER_DOCS_IMAGE)" docs

bundles:
	mkdir bundles
NOTICE (2 lines changed)
@@ -1,5 +1,5 @@
Docker
Copyright 2012-2013 Docker, Inc.
Copyright 2012-2014 Docker, Inc.

This product includes software developed at Docker, Inc. (http://www.docker.com).

README.md
@@ -22,7 +22,7 @@ hundreds of thousands of applications and databases.

## Better than VMs

A common method for distributing applications and sandbox their
A common method for distributing applications and sandboxing their
execution is to use virtual machines, or VMs. Typical VM formats are
VMWare's vmdk, Oracle Virtualbox's vdi, and Amazon EC2's ami. In
theory these formats should allow every developer to automatically

REMOTE_TODO.md (deleted)
@@ -1,46 +0,0 @@
```
**GET**
        send objects  deprecate  multi-stream
TODO    "/events":                         getEvents,             N
ok      "/info":                           getInfo,               1
ok      "/version":                        getVersion,            1
...     "/images/json":                    getImagesJSON,         N
TODO    "/images/viz":                     getImagesViz,          0  yes
TODO    "/images/search":                  getImagesSearch,       N
#3490   "/images/{name:.*}/get":           getImagesGet,          0
TODO    "/images/{name:.*}/history":       getImagesHistory,      N
TODO    "/images/{name:.*}/json":          getImagesByName,       1
TODO    "/containers/ps":                  getContainersJSON,     N
TODO    "/containers/json":                getContainersJSON,     1
ok      "/containers/{name:.*}/export":    getContainersExport,   0
TODO    "/containers/{name:.*}/changes":   getContainersChanges,  N
TODO    "/containers/{name:.*}/json":      getContainersByName,   1
TODO    "/containers/{name:.*}/top":       getContainersTop,      N
#3512   "/containers/{name:.*}/attach/ws": wsContainersAttach,    0  yes

**POST**
TODO    "/auth":                           postAuth,              0  yes
ok      "/commit":                         postCommit,            0
TODO    "/build":                          postBuild,             0  yes
TODO    "/images/create":                  postImagesCreate,      N  yes  yes (pull)
TODO    "/images/{name:.*}/insert":        postImagesInsert,      N  yes  yes
TODO    "/images/load":                    postImagesLoad,        1  yes (stdin)
TODO    "/images/{name:.*}/push":          postImagesPush,        N  yes
ok      "/images/{name:.*}/tag":           postImagesTag,         0
ok      "/containers/create":              postContainersCreate,  0
ok      "/containers/{name:.*}/kill":      postContainersKill,    0
#3476   "/containers/{name:.*}/restart":   postContainersRestart, 0
ok      "/containers/{name:.*}/start":     postContainersStart,   0
ok      "/containers/{name:.*}/stop":      postContainersStop,    0
ok      "/containers/{name:.*}/wait":      postContainersWait,    0
ok      "/containers/{name:.*}/resize":    postContainersResize,  0
#3512   "/containers/{name:.*}/attach":    postContainersAttach,  0  yes
TODO    "/containers/{name:.*}/copy":      postContainersCopy,    0  yes

**DELETE**
#3180   "/containers/{name:.*}":           deleteContainers,      0
TODO    "/images/{name:.*}":               deleteImages,          N

**OPTIONS**
ok      "":                                optionsHandler,        0
```
VERSION (1 line changed)
@@ -1 +1 @@
0.7.6
0.8.0
Vagrantfile (vendored; 40 lines changed)
@@ -8,10 +8,20 @@ AWS_BOX_URI = ENV['BOX_URI'] || "https://github.com/mitchellh/vagrant-aws/raw/ma
AWS_REGION = ENV['AWS_REGION'] || "us-east-1"
AWS_AMI = ENV['AWS_AMI'] || "ami-69f5a900"
AWS_INSTANCE_TYPE = ENV['AWS_INSTANCE_TYPE'] || 't1.micro'
SSH_PRIVKEY_PATH = ENV['SSH_PRIVKEY_PATH']
PRIVATE_NETWORK = ENV['PRIVATE_NETWORK']

# Boolean that forwards the Docker dynamic ports 49000-49900
# See http://docs.docker.io/en/latest/use/port_redirection/ for more
# $ FORWARD_DOCKER_PORTS=1 vagrant [up|reload]
FORWARD_DOCKER_PORTS = ENV['FORWARD_DOCKER_PORTS']
VAGRANT_RAM = ENV['VAGRANT_RAM'] || 512
VAGRANT_CORES = ENV['VAGRANT_CORES'] || 1

SSH_PRIVKEY_PATH = ENV["SSH_PRIVKEY_PATH"]
# You may also provide a comma-separated list of ports
# for Vagrant to forward. For example:
# $ FORWARD_PORTS=8080,27017 vagrant [up|reload]
FORWARD_PORTS = ENV['FORWARD_PORTS']

# A script to upgrade from the 12.04 kernel to the raring backport kernel (3.8)
# and install docker.

@@ -23,6 +33,10 @@ if [ -z "$user" ]; then
user=vagrant
fi

# Enable memory cgroup and swap accounting
sed -i 's/GRUB_CMDLINE_LINUX=""/GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"/g' /etc/default/grub
update-grub

# Adding an apt gpg key is idempotent.
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9

@@ -152,6 +166,8 @@ Vagrant::VERSION >= "1.1.0" and Vagrant.configure("2") do |config|
    override.vm.provision :shell, :inline => $vbox_script
    vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
    vb.customize ["modifyvm", :id, "--natdnsproxy1", "on"]
    vb.customize ["modifyvm", :id, "--memory", VAGRANT_RAM]
    vb.customize ["modifyvm", :id, "--cpus", VAGRANT_CORES]
  end
end

@@ -161,16 +177,30 @@ Vagrant::VERSION < "1.1.0" and Vagrant::Config.run do |config|
  config.vm.provision :shell, :inline => $vbox_script
end

if !FORWARD_DOCKER_PORTS.nil?
# Setup port forwarding per loaded environment variables
forward_ports = FORWARD_DOCKER_PORTS.nil? ? [] : [*49153..49900]
forward_ports += FORWARD_PORTS.split(',').map{|i| i.to_i } if FORWARD_PORTS
if forward_ports.any?
  Vagrant::VERSION < "1.1.0" and Vagrant::Config.run do |config|
    (49000..49900).each do |port|
    forward_ports.each do |port|
      config.vm.forward_port port, port
    end
  end

  Vagrant::VERSION >= "1.1.0" and Vagrant.configure("2") do |config|
    (49000..49900).each do |port|
    config.vm.network :forwarded_port, :host => port, :guest => port
    forward_ports.each do |port|
      config.vm.network :forwarded_port, :host => port, :guest => port, auto_correct: true
    end
  end
end

if !PRIVATE_NETWORK.nil?
  Vagrant::VERSION < "1.1.0" and Vagrant::Config.run do |config|
    config.vm.network :hostonly, PRIVATE_NETWORK
  end

  Vagrant::VERSION >= "1.1.0" and Vagrant.configure("2") do |config|
    config.vm.network "private_network", ip: PRIVATE_NETWORK
  end
end
(Diff for one file suppressed because it is too large.)

api/api_unit_test.go
@@ -1,4 +1,4 @@
package docker
package api

import (
	"fmt"

@@ -7,6 +7,20 @@ import (
	"testing"
)

func TestJsonContentType(t *testing.T) {
	if !MatchesContentType("application/json", "application/json") {
		t.Fail()
	}

	if !MatchesContentType("application/json; charset=utf-8", "application/json") {
		t.Fail()
	}

	if MatchesContentType("dockerapplication/json", "application/json") {
		t.Fail()
	}
}

func TestGetBoolParam(t *testing.T) {
	if ret, err := getBoolParam("true"); err != nil || !ret {
		t.Fatalf("true -> true, nil | got %t %s", ret, err)
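The tests above pin down the expected behavior of MatchesContentType: exact media-type equality, parameters such as `; charset=utf-8` ignored, and no substring false positives. A minimal sketch with those properties, using Go's standard mime package (an illustration satisfying the tests, not necessarily the implementation this commit ships):

```go
package api

import "mime"

// MatchesContentType reports whether contentType's media type equals
// expectedType, ignoring parameters such as "; charset=utf-8".
// Sketch only; the shipped implementation may differ.
func MatchesContentType(contentType, expectedType string) bool {
	mimetype, _, err := mime.ParseMediaType(contentType)
	return err == nil && mimetype == expectedType
}
```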
api_params.go (deleted; 131 lines)
@@ -1,131 +0,0 @@
package docker

import "strings"

type (
	APIHistory struct {
		ID        string   `json:"Id"`
		Tags      []string `json:",omitempty"`
		Created   int64
		CreatedBy string `json:",omitempty"`
		Size      int64
	}

	APIImages struct {
		ID          string   `json:"Id"`
		RepoTags    []string `json:",omitempty"`
		Created     int64
		Size        int64
		VirtualSize int64
		ParentId    string `json:",omitempty"`
	}

	APIImagesOld struct {
		Repository  string `json:",omitempty"`
		Tag         string `json:",omitempty"`
		ID          string `json:"Id"`
		Created     int64
		Size        int64
		VirtualSize int64
	}

	APITop struct {
		Titles    []string
		Processes [][]string
	}

	APIRmi struct {
		Deleted  string `json:",omitempty"`
		Untagged string `json:",omitempty"`
	}

	APIContainers struct {
		ID         string `json:"Id"`
		Image      string
		Command    string
		Created    int64
		Status     string
		Ports      []APIPort
		SizeRw     int64
		SizeRootFs int64
		Names      []string
	}

	APIContainersOld struct {
		ID         string `json:"Id"`
		Image      string
		Command    string
		Created    int64
		Status     string
		Ports      string
		SizeRw     int64
		SizeRootFs int64
	}

	APIID struct {
		ID string `json:"Id"`
	}

	APIRun struct {
		ID       string   `json:"Id"`
		Warnings []string `json:",omitempty"`
	}

	APIPort struct {
		PrivatePort int64
		PublicPort  int64
		Type        string
		IP          string
	}

	APIWait struct {
		StatusCode int
	}

	APIAuth struct {
		Status string
	}

	APIImageConfig struct {
		ID string `json:"Id"`
		*Config
	}

	APICopy struct {
		Resource string
		HostPath string
	}

	APIContainer struct {
		*Container
		HostConfig *HostConfig
	}
)

func (api APIImages) ToLegacy() []APIImagesOld {
	outs := []APIImagesOld{}
	for _, repotag := range api.RepoTags {
		components := strings.SplitN(repotag, ":", 2)
		outs = append(outs, APIImagesOld{
			ID:          api.ID,
			Repository:  components[0],
			Tag:         components[1],
			Created:     api.Created,
			Size:        api.Size,
			VirtualSize: api.VirtualSize,
		})
	}
	return outs
}

func (api APIContainers) ToLegacy() *APIContainersOld {
	return &APIContainersOld{
		ID:         api.ID,
		Image:      api.Image,
		Command:    api.Command,
		Created:    api.Created,
		Status:     api.Status,
		Ports:      displayablePorts(api.Ports),
		SizeRw:     api.SizeRw,
		SizeRootFs: api.SizeRootFs,
	}
}
api_unit_test.go (deleted; 19 lines)

@@ -1,19 +0,0 @@
package docker

import (
	"testing"
)

func TestJsonContentType(t *testing.T) {
	if !matchesContentType("application/json", "application/json") {
		t.Fail()
	}

	if !matchesContentType("application/json; charset=utf-8", "application/json") {
		t.Fail()
	}

	if matchesContentType("dockerapplication/json", "application/json") {
		t.Fail()
	}
}

archive/archive.go
@@ -13,6 +13,8 @@ import (
	"os/exec"
	"path"
	"path/filepath"
	"strings"
	"syscall"
)

type Archive io.Reader

@@ -21,10 +23,7 @@ type Compression int

type TarOptions struct {
	Includes    []string
	Excludes    []string
	Recursive   bool
	Compression Compression
	CreateFiles []string
}

const (

@@ -64,7 +63,7 @@ func DetectCompression(source []byte) Compression {
func xzDecompress(archive io.Reader) (io.Reader, error) {
	args := []string{"xz", "-d", "-c", "-q"}

	return CmdStream(exec.Command(args[0], args[1:]...), archive, nil)
	return CmdStream(exec.Command(args[0], args[1:]...), archive)
}

func DecompressStream(archive io.Reader) (io.Reader, error) {

@@ -98,16 +97,20 @@ func DecompressStream(archive io.Reader) (io.Reader, error) {
	}
}

func (compression *Compression) Flag() string {
	switch *compression {
	case Bzip2:
		return "j"
func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) {

	switch compression {
	case Uncompressed:
		return dest, nil
	case Gzip:
		return "z"
	case Xz:
		return "J"
		return gzip.NewWriter(dest), nil
	case Bzip2, Xz:
		// archive/bzip2 does not support writing, and there is no xz support at all
		// However, this is not a problem as docker only currently generates gzipped tars
		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
	default:
		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
	}
	return ""
}

func (compression *Compression) Extension() string {
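As a usage note for the new CompressStream: it wraps an io.WriteCloser, so a caller that wants a compressed *reader* pairs it with an io.Pipe, exactly as the TarFilter rewrite further down does. A hedged sketch (the helper name is this example's own, not part of the commit):

```go
package archive

import "io"

// compressedReader returns a reader yielding `data` gzip-compressed, by
// writing through CompressStream into one end of an io.Pipe.
// (Illustrative helper; not part of this commit.)
func compressedReader(data []byte) io.Reader {
	pipeReader, pipeWriter := io.Pipe()
	go func() {
		compressWriter, err := CompressStream(pipeWriter, Gzip)
		if err != nil {
			pipeWriter.CloseWithError(err)
			return
		}
		compressWriter.Write(data) // payload to compress
		compressWriter.Close()     // flush the gzip footer
		pipeWriter.Close()         // signal EOF to the reader side
	}()
	return pipeReader
}
```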
@@ -124,10 +127,145 @@ func (compression *Compression) Extension() string {
	return ""
}

func addTarFile(path, name string, tw *tar.Writer) error {
	fi, err := os.Lstat(path)
	if err != nil {
		return err
	}

	link := ""
	if fi.Mode()&os.ModeSymlink != 0 {
		if link, err = os.Readlink(path); err != nil {
			return err
		}
	}

	hdr, err := tar.FileInfoHeader(fi, link)
	if err != nil {
		return err
	}

	if fi.IsDir() && !strings.HasSuffix(name, "/") {
		name = name + "/"
	}

	hdr.Name = name

	stat, ok := fi.Sys().(*syscall.Stat_t)
	if ok {
		// Currently go does not fill in the major/minors
		if stat.Mode&syscall.S_IFBLK == syscall.S_IFBLK ||
			stat.Mode&syscall.S_IFCHR == syscall.S_IFCHR {
			hdr.Devmajor = int64(major(uint64(stat.Rdev)))
			hdr.Devminor = int64(minor(uint64(stat.Rdev)))
		}
	}

	if err := tw.WriteHeader(hdr); err != nil {
		return err
	}

	if hdr.Typeflag == tar.TypeReg {
		if file, err := os.Open(path); err != nil {
			return err
		} else {
			_, err := io.Copy(tw, file)
			if err != nil {
				return err
			}
			file.Close()
		}
	}

	return nil
}

func createTarFile(path, extractDir string, hdr *tar.Header, reader *tar.Reader) error {
	switch hdr.Typeflag {
	case tar.TypeDir:
		// Create directory unless it exists as a directory already.
		// In that case we just want to merge the two
		if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
			if err := os.Mkdir(path, os.FileMode(hdr.Mode)); err != nil {
				return err
			}
		}

	case tar.TypeReg, tar.TypeRegA:
		// Source is regular file
		file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, os.FileMode(hdr.Mode))
		if err != nil {
			return err
		}
		if _, err := io.Copy(file, reader); err != nil {
			file.Close()
			return err
		}
		file.Close()

	case tar.TypeBlock, tar.TypeChar, tar.TypeFifo:
		mode := uint32(hdr.Mode & 07777)
		switch hdr.Typeflag {
		case tar.TypeBlock:
			mode |= syscall.S_IFBLK
		case tar.TypeChar:
			mode |= syscall.S_IFCHR
		case tar.TypeFifo:
			mode |= syscall.S_IFIFO
		}

		if err := syscall.Mknod(path, mode, int(mkdev(hdr.Devmajor, hdr.Devminor))); err != nil {
			return err
		}

	case tar.TypeLink:
		if err := os.Link(filepath.Join(extractDir, hdr.Linkname), path); err != nil {
			return err
		}

	case tar.TypeSymlink:
		if err := os.Symlink(hdr.Linkname, path); err != nil {
			return err
		}

	case tar.TypeXGlobalHeader:
		utils.Debugf("PAX Global Extended Headers found and ignored")
		return nil

	default:
		return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag)
	}

	if err := syscall.Lchown(path, hdr.Uid, hdr.Gid); err != nil {
		return err
	}

	// There is no LChmod, so ignore mode for symlink. Also, this
	// must happen after chown, as that can modify the file mode
	if hdr.Typeflag != tar.TypeSymlink {
		if err := syscall.Chmod(path, uint32(hdr.Mode&07777)); err != nil {
			return err
		}
	}

	ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
	// syscall.UtimesNano doesn't support a NOFOLLOW flag atm, and
	if hdr.Typeflag != tar.TypeSymlink {
		if err := syscall.UtimesNano(path, ts); err != nil {
			return err
		}
	} else {
		if err := LUtimesNano(path, ts); err != nil {
			return err
		}
	}
	return nil
}
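addTarFile and createTarFile call major, minor, and mkdev helpers that are not part of this hunk. Assuming they follow the usual Linux device-number encoding, they would look roughly like this (a sketch; the real helpers live elsewhere in the package):

```go
package archive

// Device-number helpers assumed by addTarFile/createTarFile above,
// following the common Linux encoding (as in glibc's makedev).
// These shapes are assumptions; the real definitions are not in this diff.
func major(device uint64) uint64 {
	return (device >> 8) & 0xfff
}

func minor(device uint64) uint64 {
	return (device & 0xff) | ((device >> 12) & 0xfff00)
}

func mkdev(major int64, minor int64) uint32 {
	return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
}
```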
// Tar creates an archive from the directory at `path`, and returns it as a
// stream of bytes.
func Tar(path string, compression Compression) (io.Reader, error) {
	return TarFilter(path, &TarOptions{Recursive: true, Compression: compression})
	return TarFilter(path, &TarOptions{Compression: compression})
}

func escapeName(name string) string {
@@ -148,57 +286,55 @@ func escapeName(name string) string {

// Tar creates an archive from the directory at `path`, only including files whose relative
// paths are included in `filter`. If `filter` is nil, then all files are included.
func TarFilter(path string, options *TarOptions) (io.Reader, error) {
	args := []string{"tar", "--numeric-owner", "-f", "-", "-C", path, "-T", "-"}
	if options.Includes == nil {
		options.Includes = []string{"."}
	}
	args = append(args, "-c"+options.Compression.Flag())
func TarFilter(srcPath string, options *TarOptions) (io.Reader, error) {
	pipeReader, pipeWriter := io.Pipe()

	for _, exclude := range options.Excludes {
		args = append(args, fmt.Sprintf("--exclude=%s", exclude))
	compressWriter, err := CompressStream(pipeWriter, options.Compression)
	if err != nil {
		return nil, err
	}

	if !options.Recursive {
		args = append(args, "--no-recursion")
	}
	tw := tar.NewWriter(compressWriter)

	files := ""
	for _, f := range options.Includes {
		files = files + escapeName(f) + "\n"
	}
	go func() {
		// In general we log errors here but ignore them because
		// during e.g. a diff operation the container can continue
		// mutating the filesystem and we can see transient errors
		// from this

	tmpDir := ""

	if options.CreateFiles != nil {
		var err error // Can't use := here or we override the outer tmpDir
		tmpDir, err = ioutil.TempDir("", "docker-tar")
		if err != nil {
			return nil, err
		if options.Includes == nil {
			options.Includes = []string{"."}
		}

		files = files + "-C" + tmpDir + "\n"
		for _, f := range options.CreateFiles {
			path := filepath.Join(tmpDir, f)
			err := os.MkdirAll(filepath.Dir(path), 0600)
			if err != nil {
				return nil, err
			}
		for _, include := range options.Includes {
			filepath.Walk(filepath.Join(srcPath, include), func(filePath string, f os.FileInfo, err error) error {
				if err != nil {
					utils.Debugf("Tar: Can't stat file %s to tar: %s\n", srcPath, err)
					return nil
				}

			if file, err := os.OpenFile(path, os.O_CREATE, 0600); err != nil {
				return nil, err
			} else {
				file.Close()
			}
			files = files + escapeName(f) + "\n"
		}
	}
				relFilePath, err := filepath.Rel(srcPath, filePath)
				if err != nil {
					return nil
				}

	return CmdStream(exec.Command(args[0], args[1:]...), bytes.NewBufferString(files), func() {
		if tmpDir != "" {
			_ = os.RemoveAll(tmpDir)
				if err := addTarFile(filePath, relFilePath, tw); err != nil {
					utils.Debugf("Can't add file %s to tar: %s\n", srcPath, err)
				}
				return nil
			})
		}
	})

		// Make sure to check the error on Close.
		if err := tw.Close(); err != nil {
			utils.Debugf("Can't close tar writer: %s\n", err)
		}
		if err := compressWriter.Close(); err != nil {
			utils.Debugf("Can't close compress writer: %s\n", err)
		}
	}()

	return pipeReader, nil
}

// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
@ -206,54 +342,88 @@ func TarFilter(path string, options *TarOptions) (io.Reader, error) {
|
|||
// The archive may be compressed with one of the following algorithms:
|
||||
// identity (uncompressed), gzip, bzip2, xz.
|
||||
// FIXME: specify behavior when target path exists vs. doesn't exist.
|
||||
func Untar(archive io.Reader, path string, options *TarOptions) error {
|
||||
func Untar(archive io.Reader, dest string, options *TarOptions) error {
|
||||
if archive == nil {
|
||||
return fmt.Errorf("Empty archive")
|
||||
}
|
||||
|
||||
buf := make([]byte, 10)
|
||||
totalN := 0
|
||||
for totalN < 10 {
|
||||
n, err := archive.Read(buf[totalN:])
|
||||
archive, err := DecompressStream(archive)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tr := tar.NewReader(archive)
|
||||
|
||||
var dirs []*tar.Header
|
||||
|
||||
// Iterate through the files in the archive.
|
||||
for {
|
||||
hdr, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
// end of tar archive
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
return fmt.Errorf("Tarball too short")
|
||||
}
|
||||
return err
|
||||
}
|
||||
totalN += n
|
||||
utils.Debugf("[tar autodetect] n: %d", n)
|
||||
}
|
||||
|
||||
compression := DetectCompression(buf)
|
||||
// Normalize name, for safety and for a simple is-root check
|
||||
hdr.Name = filepath.Clean(hdr.Name)
|
||||
|
||||
utils.Debugf("Archive compression detected: %s", compression.Extension())
|
||||
args := []string{"--numeric-owner", "-f", "-", "-C", path, "-x" + compression.Flag()}
|
||||
if !strings.HasSuffix(hdr.Name, "/") {
|
||||
// Not the root directory, ensure that the parent directory exists
|
||||
parent := filepath.Dir(hdr.Name)
|
||||
parentPath := filepath.Join(dest, parent)
|
||||
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
|
||||
err = os.MkdirAll(parentPath, 600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if options != nil {
|
||||
for _, exclude := range options.Excludes {
|
||||
args = append(args, fmt.Sprintf("--exclude=%s", exclude))
|
||||
path := filepath.Join(dest, hdr.Name)
|
||||
|
||||
// If path exits we almost always just want to remove and replace it
|
||||
// The only exception is when it is a directory *and* the file from
|
||||
// the layer is also a directory. Then we want to merge them (i.e.
|
||||
// just apply the metadata from the layer).
|
||||
if fi, err := os.Lstat(path); err == nil {
|
||||
if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
|
||||
if err := os.RemoveAll(path); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := createTarFile(path, dest, hdr, tr); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Directory mtimes must be handled at the end to avoid further
|
||||
// file creation in them to modify the directory mtime
|
||||
if hdr.Typeflag == tar.TypeDir {
|
||||
dirs = append(dirs, hdr)
|
||||
}
|
||||
}
|
||||
|
||||
cmd := exec.Command("tar", args...)
|
||||
cmd.Stdin = io.MultiReader(bytes.NewReader(buf), archive)
|
||||
// Hardcode locale environment for predictable outcome regardless of host configuration.
|
||||
// (see https://github.com/dotcloud/docker/issues/355)
|
||||
cmd.Env = []string{"LANG=en_US.utf-8", "LC_ALL=en_US.utf-8"}
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: %s", err, output)
|
||||
for _, hdr := range dirs {
|
||||
path := filepath.Join(dest, hdr.Name)
|
||||
ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
|
||||
if err := syscall.UtimesNano(path, ts); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
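
The rewritten Untar no longer shells out to tar(1); DecompressStream sniffs the compression and Go's tar reader does the extraction. A minimal sketch of driving it (the input file name and target directory are hypothetical; a nil options argument mirrors the call in buildfile.go):

package main

import (
	"os"

	"github.com/dotcloud/docker/archive"
)

func main() {
	// Untar detects gzip/bzip2/xz itself via DecompressStream,
	// so no compression flags are needed.
	f, err := os.Open("layer.tar.gz") // hypothetical input
	if err != nil {
		panic(err)
	}
	defer f.Close()
	if err := archive.Untar(f, "/tmp/rootfs", nil); err != nil {
		panic(err)
	}
}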

// TarUntar is a convenience function which calls Tar and Untar, with
// the output of one piped into the other. If either Tar or Untar fails,
// TarUntar aborts and returns the error.
func TarUntar(src string, filter []string, dst string) error {
utils.Debugf("TarUntar(%s %s %s)", src, filter, dst)
archive, err := TarFilter(src, &TarOptions{Compression: Uncompressed, Includes: filter, Recursive: true})
func TarUntar(src string, dst string) error {
utils.Debugf("TarUntar(%s %s)", src, dst)
archive, err := TarFilter(src, &TarOptions{Compression: Uncompressed})
if err != nil {
return err
}

@@ -290,7 +460,7 @@ func CopyWithTar(src, dst string) error {
return err
}
utils.Debugf("Calling TarUntar(%s, %s)", src, dst)
return TarUntar(src, nil, dst)
return TarUntar(src, dst)
}

// CopyFileWithTar emulates the behavior of the 'cp' command-line

@@ -353,13 +523,10 @@ func CopyFileWithTar(src, dst string) (err error) {
// CmdStream executes a command, and returns its stdout as a stream.
// If the command fails to run or doesn't complete successfully, an error
// will be returned, including anything written on stderr.
func CmdStream(cmd *exec.Cmd, input io.Reader, atEnd func()) (io.Reader, error) {
func CmdStream(cmd *exec.Cmd, input io.Reader) (io.Reader, error) {
if input != nil {
stdin, err := cmd.StdinPipe()
if err != nil {
if atEnd != nil {
atEnd()
}
return nil, err
}
// Write stdin if any

@@ -370,16 +537,10 @@ func CmdStream(cmd *exec.Cmd, input io.Reader, atEnd func()) (io.Reader, error)
}
stdout, err := cmd.StdoutPipe()
if err != nil {
if atEnd != nil {
atEnd()
}
return nil, err
}
stderr, err := cmd.StderrPipe()
if err != nil {
if atEnd != nil {
atEnd()
}
return nil, err
}
pipeR, pipeW := io.Pipe()

@@ -404,9 +565,6 @@ func CmdStream(cmd *exec.Cmd, input io.Reader, atEnd func()) (io.Reader, error)
} else {
pipeW.Close()
}
if atEnd != nil {
atEnd()
}
}()
// Run the command and return the pipe
if err := cmd.Start(); err != nil {

@@ -1,6 +1,7 @@
package archive

import (
"archive/tar"
"bytes"
"fmt"
"io"

@@ -14,7 +15,7 @@ import (

func TestCmdStreamLargeStderr(t *testing.T) {
cmd := exec.Command("/bin/sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello")
out, err := CmdStream(cmd, nil, nil)
out, err := CmdStream(cmd, nil)
if err != nil {
t.Fatalf("Failed to start command: %s", err)
}

@@ -35,7 +36,7 @@ func TestCmdStreamLargeStderr(t *testing.T) {

func TestCmdStreamBad(t *testing.T) {
badCmd := exec.Command("/bin/sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1")
out, err := CmdStream(badCmd, nil, nil)
out, err := CmdStream(badCmd, nil)
if err != nil {
t.Fatalf("Failed to start command: %s", err)
}

@@ -50,7 +51,7 @@ func TestCmdStreamBad(t *testing.T) {

func TestCmdStreamGood(t *testing.T) {
cmd := exec.Command("/bin/sh", "-c", "echo hello; exit 0")
out, err := CmdStream(cmd, nil, nil)
out, err := CmdStream(cmd, nil)
if err != nil {
t.Fatal(err)
}

@@ -89,6 +90,16 @@ func tarUntar(t *testing.T, origin string, compression Compression) error {
if _, err := os.Stat(tmp); err != nil {
return err
}

changes, err := ChangesDirs(origin, tmp)
if err != nil {
return err
}

if len(changes) != 0 {
t.Fatalf("Unexpected differences after tarUntar: %v", changes)
}

return nil
}

@@ -108,11 +119,20 @@ func TestTarUntar(t *testing.T) {
for _, c := range []Compression{
Uncompressed,
Gzip,
Bzip2,
Xz,
} {
if err := tarUntar(t, origin, c); err != nil {
t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err)
}
}
}

// Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz
// use PAX Global Extended Headers.
// Failing prevents the archives from being uncompressed during ADD
func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) {
hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader}
err := createTarFile("pax_global_header", "some_dir", &hdr, nil)
if err != nil {
t.Fatal(err)
}
}

@@ -1,7 +1,10 @@
package archive

import (
"archive/tar"
"fmt"
"github.com/dotcloud/docker/utils"
"io"
"os"
"path/filepath"
"strings"

@@ -310,24 +313,51 @@ func ChangesSize(newDir string, changes []Change) int64 {
return size
}

func ExportChanges(dir string, changes []Change) (Archive, error) {
files := make([]string, 0)
deletions := make([]string, 0)
for _, change := range changes {
if change.Kind == ChangeModify || change.Kind == ChangeAdd {
files = append(files, change.Path)
}
if change.Kind == ChangeDelete {
base := filepath.Base(change.Path)
dir := filepath.Dir(change.Path)
deletions = append(deletions, filepath.Join(dir, ".wh."+base))
}
}
// FIXME: Why do we create whiteout files inside Tar code ?
return TarFilter(dir, &TarOptions{
Compression: Uncompressed,
Includes: files,
Recursive: false,
CreateFiles: deletions,
})
func major(device uint64) uint64 {
return (device >> 8) & 0xfff
}

func minor(device uint64) uint64 {
return (device & 0xff) | ((device >> 12) & 0xfff00)
}
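
major and minor undo the dev_t packing used for tar'd device nodes. A hedged sketch of the inverse packing, consistent with the two helpers above (the package's real packer is the mkdev referenced in diff.go; its body is not shown in this commit, so this is illustrative only):

package archive

// mkdevSketch packs a device number so that major() and minor() above
// recover the inputs: minor bits 0-7 in bits 0-7, major in bits 8-19,
// minor bits 8-19 in bits 20-31. Illustrative, not the package's helper.
func mkdevSketch(major, minor uint64) uint64 {
	return ((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)
}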

func ExportChanges(dir string, changes []Change) (Archive, error) {
reader, writer := io.Pipe()
tw := tar.NewWriter(writer)

go func() {
// In general we log errors here but ignore them because
// during e.g. a diff operation the container can continue
// mutating the filesystem and we can see transient errors
// from this
for _, change := range changes {
if change.Kind == ChangeDelete {
whiteOutDir := filepath.Dir(change.Path)
whiteOutBase := filepath.Base(change.Path)
whiteOut := filepath.Join(whiteOutDir, ".wh."+whiteOutBase)
hdr := &tar.Header{
Name: whiteOut[1:],
Size: 0,
ModTime: time.Now(),
AccessTime: time.Now(),
ChangeTime: time.Now(),
}
if err := tw.WriteHeader(hdr); err != nil {
utils.Debugf("Can't write whiteout header: %s\n", err)
}
} else {
path := filepath.Join(dir, change.Path)
if err := addTarFile(path, change.Path[1:], tw); err != nil {
utils.Debugf("Can't add file %s to tar: %s\n", path, err)
}
}
}

// Make sure to check the error on Close.
if err := tw.Close(); err != nil {
utils.Debugf("Can't close layer: %s\n", err)
}
writer.Close()
}()
return reader, nil
}
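
The rewritten ExportChanges encodes deletions as AUFS-style whiteout entries: an empty tar entry named after the deleted path, with a ".wh." prefix on the base name and the leading slash stripped. A small sketch of that naming rule (the sample path is illustrative):

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	deleted := "/etc/foo" // change.Path for a ChangeDelete
	whiteOut := filepath.Join(filepath.Dir(deleted), ".wh."+filepath.Base(deleted))
	// The leading "/" is stripped when the header is written (whiteOut[1:]).
	fmt.Println(whiteOut[1:]) // etc/.wh.foo
}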

@@ -2,7 +2,6 @@ package archive

import (
"archive/tar"
"github.com/dotcloud/docker/utils"
"io"
"os"
"path/filepath"

@@ -89,95 +88,22 @@ func ApplyLayer(dest string, layer Archive) error {
// The only exception is when it is a directory *and* the file from
// the layer is also a directory. Then we want to merge them (i.e.
// just apply the metadata from the layer).
hasDir := false
if fi, err := os.Lstat(path); err == nil {
if fi.IsDir() && hdr.Typeflag == tar.TypeDir {
hasDir = true
} else {
if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
if err := os.RemoveAll(path); err != nil {
return err
}
}
}

switch hdr.Typeflag {
case tar.TypeDir:
if !hasDir {
err = os.Mkdir(path, os.FileMode(hdr.Mode))
if err != nil {
return err
}
}
dirs = append(dirs, hdr)

case tar.TypeReg, tar.TypeRegA:
// Source is regular file
file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, os.FileMode(hdr.Mode))
if err != nil {
return err
}
if _, err := io.Copy(file, tr); err != nil {
file.Close()
return err
}
file.Close()

case tar.TypeBlock, tar.TypeChar, tar.TypeFifo:
mode := uint32(hdr.Mode & 07777)
switch hdr.Typeflag {
case tar.TypeBlock:
mode |= syscall.S_IFBLK
case tar.TypeChar:
mode |= syscall.S_IFCHR
case tar.TypeFifo:
mode |= syscall.S_IFIFO
}

if err := syscall.Mknod(path, mode, int(mkdev(hdr.Devmajor, hdr.Devminor))); err != nil {
return err
}

case tar.TypeLink:
if err := os.Link(filepath.Join(dest, hdr.Linkname), path); err != nil {
return err
}

case tar.TypeSymlink:
if err := os.Symlink(hdr.Linkname, path); err != nil {
return err
}

default:
utils.Debugf("unhandled type %d\n", hdr.Typeflag)
}

if err = syscall.Lchown(path, hdr.Uid, hdr.Gid); err != nil {
if err := createTarFile(path, dest, hdr, tr); err != nil {
return err
}

// There is no LChmod, so ignore mode for symlink. Also, this
// must happen after chown, as that can modify the file mode
if hdr.Typeflag != tar.TypeSymlink {
err = syscall.Chmod(path, uint32(hdr.Mode&07777))
if err != nil {
return err
}
}

// Directories must be handled at the end to avoid further
// file creation in them to modify the mtime
if hdr.Typeflag != tar.TypeDir {
ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
// syscall.UtimesNano doesn't support a NOFOLLOW flag atm, and
if hdr.Typeflag != tar.TypeSymlink {
if err := syscall.UtimesNano(path, ts); err != nil {
return err
}
} else {
if err := LUtimesNano(path, ts); err != nil {
return err
}
}
// Directory mtimes must be handled at the end to avoid further
// file creation in them to modify the directory mtime
if hdr.Typeflag == tar.TypeDir {
dirs = append(dirs, hdr)
}
}
}

@@ -1,3 +1,5 @@
// +build !linux !amd64

package archive

import "syscall"

101 buildfile.go

@@ -8,6 +8,7 @@ import (
"fmt"
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/auth"
"github.com/dotcloud/docker/registry"
"github.com/dotcloud/docker/utils"
"io"
"io/ioutil"

@@ -47,6 +48,7 @@ type buildFile struct {
rm bool

authConfig *auth.AuthConfig
configFile *auth.ConfigFile

tmpContainers map[string]struct{}
tmpImages map[string]struct{}

@@ -72,7 +74,22 @@ func (b *buildFile) CmdFrom(name string) error {
if err != nil {
if b.runtime.graph.IsNotExist(err) {
remote, tag := utils.ParseRepositoryTag(name)
if err := b.srv.ImagePull(remote, tag, b.outOld, b.sf, b.authConfig, nil, true); err != nil {
pullRegistryAuth := b.authConfig
if len(b.configFile.Configs) > 0 {
// The request came with a full auth config file, we prefer to use that
endpoint, _, err := registry.ResolveRepositoryName(remote)
if err != nil {
return err
}
resolvedAuth := b.configFile.ResolveAuthConfig(endpoint)
pullRegistryAuth = &resolvedAuth
}
job := b.srv.Eng.Job("pull", remote, tag)
job.SetenvBool("json", b.sf.Json())
job.SetenvBool("parallel", true)
job.SetenvJson("authConfig", pullRegistryAuth)
job.Stdout.Add(b.outOld)
if err := job.Run(); err != nil {
return err
}
image, err = b.runtime.repositories.LookupImage(name)

@@ -91,9 +108,26 @@ func (b *buildFile) CmdFrom(name string) error {
if b.config.Env == nil || len(b.config.Env) == 0 {
b.config.Env = append(b.config.Env, "HOME=/", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin")
}
// Process ONBUILD triggers if they exist
if nTriggers := len(b.config.OnBuild); nTriggers != 0 {
fmt.Fprintf(b.errStream, "# Executing %d build triggers\n", nTriggers)
}
for n, step := range b.config.OnBuild {
if err := b.BuildStep(fmt.Sprintf("onbuild-%d", n), step); err != nil {
return err
}
}
b.config.OnBuild = []string{}
return nil
}

// The ONBUILD command declares a build instruction to be executed in any future build
// using the current image as a base.
func (b *buildFile) CmdOnbuild(trigger string) error {
b.config.OnBuild = append(b.config.OnBuild, trigger)
return b.commit("", b.config.Cmd, fmt.Sprintf("ONBUILD %s", trigger))
}
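
CmdOnbuild only records the trigger on the image; CmdFrom (above) replays any recorded triggers as the first steps of a child build and then clears them. A standalone sketch of that record/replay cycle (the type and names here are illustrative, not docker's):

package main

import "fmt"

type image struct{ onBuild []string }

func main() {
	base := image{}
	// Build 1: ONBUILD lines are stored, not executed (CmdOnbuild).
	base.onBuild = append(base.onBuild, "ADD . /app/src")
	// Build 2, FROM base: each stored step runs first (CmdFrom).
	for n, step := range base.onBuild {
		fmt.Printf("Step onbuild-%d : %s\n", n, step)
	}
}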

func (b *buildFile) CmdMaintainer(name string) error {
b.maintainer = name
return b.commit("", b.config.Cmd, fmt.Sprintf("MAINTAINER %s", name))

@@ -124,7 +158,7 @@ func (b *buildFile) CmdRun(args string) error {
if b.image == "" {
return fmt.Errorf("Please provide a source image with `from` prior to run")
}
config, _, _, err := ParseRun([]string{b.image, "/bin/sh", "-c", args}, nil)
config, _, _, err := ParseRun(append([]string{b.image}, b.buildCmdFromJson(args)...), nil)
if err != nil {
return err
}

@@ -311,7 +345,7 @@ func (b *buildFile) checkPathForAddition(orig string) error {
func (b *buildFile) addContext(container *Container, orig, dest string) error {
var (
origPath = path.Join(b.contextPath, orig)
destPath = path.Join(container.RootfsPath(), dest)
destPath = path.Join(container.BasefsPath(), dest)
)
// Preserve the trailing '/'
if strings.HasSuffix(dest, "/") {

@@ -476,7 +510,7 @@ func (b *buildFile) CmdAdd(args string) error {
}
b.tmpContainers[container.ID] = struct{}{}

if err := container.EnsureMounted(); err != nil {
if err := container.Mount(); err != nil {
return err
}
defer container.Unmount()

@@ -598,7 +632,7 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
b.tmpContainers[container.ID] = struct{}{}
fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(container.ID))
id = container.ID
if err := container.EnsureMounted(); err != nil {
if err := container.Mount(); err != nil {
return err
}
defer container.Unmount()

@@ -630,7 +664,13 @@ func (b *buildFile) Build(context io.Reader) (string, error) {
if err != nil {
return "", err
}
b.context = &utils.TarSum{Reader: context, DisableCompression: true}

decompressedStream, err := archive.DecompressStream(context)
if err != nil {
return "", err
}

b.context = &utils.TarSum{Reader: decompressedStream, DisableCompression: true}
if err := archive.Untar(b.context, tmpdirPath, nil); err != nil {
return "", err
}

@@ -657,28 +697,11 @@ func (b *buildFile) Build(context io.Reader) (string, error) {
if len(line) == 0 || line[0] == '#' {
continue
}
tmp := strings.SplitN(line, " ", 2)
if len(tmp) != 2 {
return "", fmt.Errorf("Invalid Dockerfile format")
if err := b.BuildStep(fmt.Sprintf("%d", stepN), line); err != nil {
return "", err
}
instruction := strings.ToLower(strings.Trim(tmp[0], " "))
arguments := strings.Trim(tmp[1], " ")

method, exists := reflect.TypeOf(b).MethodByName("Cmd" + strings.ToUpper(instruction[:1]) + strings.ToLower(instruction[1:]))
if !exists {
fmt.Fprintf(b.errStream, "# Skipping unknown instruction %s\n", strings.ToUpper(instruction))
continue
}

stepN += 1
fmt.Fprintf(b.outStream, "Step %d : %s %s\n", stepN, strings.ToUpper(instruction), arguments)

ret := method.Func.Call([]reflect.Value{reflect.ValueOf(b), reflect.ValueOf(arguments)})[0].Interface()
if ret != nil {
return "", ret.(error)
}

fmt.Fprintf(b.outStream, " ---> %s\n", utils.TruncateID(b.image))
}
if b.image != "" {
fmt.Fprintf(b.outStream, "Successfully built %s\n", utils.TruncateID(b.image))

@@ -690,7 +713,32 @@ func (b *buildFile) Build(context io.Reader) (string, error) {
return "", fmt.Errorf("No image was generated. This may be because the Dockerfile does not, like, do anything.\n")
}

func NewBuildFile(srv *Server, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, outOld io.Writer, sf *utils.StreamFormatter, auth *auth.AuthConfig) BuildFile {
// BuildStep parses a single build step from `instruction` and executes it in the current context.
func (b *buildFile) BuildStep(name, expression string) error {
fmt.Fprintf(b.outStream, "Step %s : %s\n", name, expression)
tmp := strings.SplitN(expression, " ", 2)
if len(tmp) != 2 {
return fmt.Errorf("Invalid Dockerfile format")
}
instruction := strings.ToLower(strings.Trim(tmp[0], " "))
arguments := strings.Trim(tmp[1], " ")

method, exists := reflect.TypeOf(b).MethodByName("Cmd" + strings.ToUpper(instruction[:1]) + strings.ToLower(instruction[1:]))
if !exists {
fmt.Fprintf(b.errStream, "# Skipping unknown instruction %s\n", strings.ToUpper(instruction))
return nil
}

ret := method.Func.Call([]reflect.Value{reflect.ValueOf(b), reflect.ValueOf(arguments)})[0].Interface()
if ret != nil {
return ret.(error)
}

fmt.Fprintf(b.outStream, " ---> %s\n", utils.TruncateID(b.image))
return nil
}
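
BuildStep maps an instruction word to a method named Cmd plus the title-cased instruction via reflection, so "run foo" dispatches to CmdRun. A self-contained sketch of that MethodByName dispatch (the builder type here is illustrative):

package main

import (
	"fmt"
	"reflect"
	"strings"
)

type builder struct{}

func (b *builder) CmdRun(args string) error {
	fmt.Println("run:", args)
	return nil
}

func main() {
	b := &builder{}
	instruction, arguments := "run", "echo hello"
	name := "Cmd" + strings.ToUpper(instruction[:1]) + strings.ToLower(instruction[1:])
	method, ok := reflect.TypeOf(b).MethodByName(name)
	if !ok {
		fmt.Println("# Skipping unknown instruction", strings.ToUpper(instruction))
		return
	}
	ret := method.Func.Call([]reflect.Value{reflect.ValueOf(b), reflect.ValueOf(arguments)})[0].Interface()
	if ret != nil {
		fmt.Println("error:", ret.(error))
	}
}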

func NewBuildFile(srv *Server, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, outOld io.Writer, sf *utils.StreamFormatter, auth *auth.AuthConfig, authConfigFile *auth.ConfigFile) BuildFile {
return &buildFile{
runtime: srv.runtime,
srv: srv,

@@ -704,6 +752,7 @@ func NewBuildFile(srv *Server, outStream, errStream io.Writer, verbose, utilizeC
rm: rm,
sf: sf,
authConfig: auth,
configFile: authConfigFile,
outOld: outOld,
}
}

@@ -1,101 +0,0 @@
package cgroups

import (
"bufio"
"fmt"
"github.com/dotcloud/docker/mount"
"io"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
)

// https://www.kernel.org/doc/Documentation/cgroups/cgroups.txt

func FindCgroupMountpoint(subsystem string) (string, error) {
mounts, err := mount.GetMounts()
if err != nil {
return "", err
}

for _, mount := range mounts {
if mount.Fstype == "cgroup" {
for _, opt := range strings.Split(mount.VfsOpts, ",") {
if opt == subsystem {
return mount.Mountpoint, nil
}
}
}
}

return "", fmt.Errorf("cgroup mountpoint not found for %s", subsystem)
}

// Returns the relative path to the cgroup docker is running in.
func getThisCgroupDir(subsystem string) (string, error) {
f, err := os.Open("/proc/self/cgroup")
if err != nil {
return "", err
}
defer f.Close()

return parseCgroupFile(subsystem, f)
}

func parseCgroupFile(subsystem string, r io.Reader) (string, error) {
s := bufio.NewScanner(r)

for s.Scan() {
if err := s.Err(); err != nil {
return "", err
}
text := s.Text()
parts := strings.Split(text, ":")
if parts[1] == subsystem {
return parts[2], nil
}
}
return "", fmt.Errorf("cgroup '%s' not found in /proc/self/cgroup", subsystem)
}
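
parseCgroupFile splits each /proc/self/cgroup line on ":"; the three fields are the hierarchy id, the subsystem list, and the cgroup path. A sketch with a made-up sample line:

package main

import (
	"fmt"
	"strings"
)

func main() {
	line := "4:memory:/lxc/abcd1234" // illustrative /proc/self/cgroup entry
	parts := strings.Split(line, ":")
	if len(parts) == 3 && parts[1] == "memory" {
		fmt.Println("cgroup dir:", parts[2]) // /lxc/abcd1234
	}
}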

// Returns a list of pids for the given container.
func GetPidsForContainer(id string) ([]int, error) {
pids := []int{}

// memory is chosen randomly, any cgroup used by docker works
subsystem := "memory"

cgroupRoot, err := FindCgroupMountpoint(subsystem)
if err != nil {
return pids, err
}

cgroupDir, err := getThisCgroupDir(subsystem)
if err != nil {
return pids, err
}

filename := filepath.Join(cgroupRoot, cgroupDir, id, "tasks")
if _, err := os.Stat(filename); os.IsNotExist(err) {
// With more recent lxc versions, the cgroup will be in lxc/
filename = filepath.Join(cgroupRoot, cgroupDir, "lxc", id, "tasks")
}

output, err := ioutil.ReadFile(filename)
if err != nil {
return pids, err
}
for _, p := range strings.Split(string(output), "\n") {
if len(p) == 0 {
continue
}
pid, err := strconv.Atoi(p)
if err != nil {
return pids, fmt.Errorf("Invalid pid '%s': %s", p, err)
}
pids = append(pids, pid)
}
return pids, nil
}

630 commands.go
File diff suppressed because it is too large

59 config.go

@@ -1,8 +1,15 @@
package docker

import (
"github.com/dotcloud/docker/engine"
"net"

"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/networkdriver"
)

const (
defaultNetworkMtu = 1500
DisableNetworkBridge = "none"
)

// FIXME: separate runtime configuration from http api configuration

@@ -10,42 +17,48 @@ type DaemonConfig struct {
Pidfile string
Root string
AutoRestart bool
EnableCors bool
Dns []string
EnableIptables bool
BridgeIface string
BridgeIp string
EnableIpForward bool
DefaultIp net.IP
BridgeIface string
BridgeIP string
InterContainerCommunication bool
GraphDriver string
Mtu int
DisableNetwork bool
}

// ConfigFromJob creates and returns a new DaemonConfig object
// by parsing the contents of a job's environment.
func ConfigFromJob(job *engine.Job) *DaemonConfig {
var config DaemonConfig
config.Pidfile = job.Getenv("Pidfile")
config.Root = job.Getenv("Root")
config.AutoRestart = job.GetenvBool("AutoRestart")
config.EnableCors = job.GetenvBool("EnableCors")
func DaemonConfigFromJob(job *engine.Job) *DaemonConfig {
config := &DaemonConfig{
Pidfile: job.Getenv("Pidfile"),
Root: job.Getenv("Root"),
AutoRestart: job.GetenvBool("AutoRestart"),
EnableIptables: job.GetenvBool("EnableIptables"),
EnableIpForward: job.GetenvBool("EnableIpForward"),
BridgeIP: job.Getenv("BridgeIP"),
DefaultIp: net.ParseIP(job.Getenv("DefaultIp")),
InterContainerCommunication: job.GetenvBool("InterContainerCommunication"),
GraphDriver: job.Getenv("GraphDriver"),
}
if dns := job.GetenvList("Dns"); dns != nil {
config.Dns = dns
}
config.EnableIptables = job.GetenvBool("EnableIptables")
if br := job.Getenv("BridgeIface"); br != "" {
config.BridgeIface = br
} else {
config.BridgeIface = DefaultNetworkBridge
}
config.BridgeIp = job.Getenv("BridgeIp")
config.DefaultIp = net.ParseIP(job.Getenv("DefaultIp"))
config.InterContainerCommunication = job.GetenvBool("InterContainerCommunication")
config.GraphDriver = job.Getenv("GraphDriver")
if mtu := job.GetenvInt("Mtu"); mtu != -1 {
if mtu := job.GetenvInt("Mtu"); mtu != 0 {
config.Mtu = mtu
} else {
config.Mtu = DefaultNetworkMtu
config.Mtu = GetDefaultNetworkMtu()
}
return &config
config.DisableNetwork = job.Getenv("BridgeIface") == DisableNetworkBridge

return config
}

func GetDefaultNetworkMtu() int {
if iface, err := networkdriver.GetDefaultRouteIface(); err == nil {
return iface.MTU
}
return defaultNetworkMtu
}

698 container.go
File diff suppressed because it is too large

@@ -21,64 +21,88 @@
# If the docker daemon is using a unix socket for communication your user
# must have access to the socket for the completions to function correctly

__docker_q() {
docker 2>/dev/null "$@"
}

__docker_containers_all()
{
local containers
containers="$( docker ps -a -q )"
names="$( docker inspect -format '{{.Name}}' $containers | sed 's,^/,,' )"
local containers="$( __docker_q ps -a -q )"
local names="$( __docker_q inspect --format '{{.Name}}' $containers | sed 's,^/,,' )"
COMPREPLY=( $( compgen -W "$names $containers" -- "$cur" ) )
}

__docker_containers_running()
{
local containers
containers="$( docker ps -q )"
names="$( docker inspect -format '{{.Name}}' $containers | sed 's,^/,,' )"
local containers="$( __docker_q ps -q )"
local names="$( __docker_q inspect --format '{{.Name}}' $containers | sed 's,^/,,' )"
COMPREPLY=( $( compgen -W "$names $containers" -- "$cur" ) )
}

__docker_containers_stopped()
{
local containers
containers="$( comm -13 <(docker ps -q | sort -u) <(docker ps -a -q | sort -u) )"
names="$( docker inspect -format '{{.Name}}' $containers | sed 's,^/,,' )"
local containers="$( { __docker_q ps -a -q; __docker_q ps -q; } | sort | uniq -u )"
local names="$( __docker_q inspect --format '{{.Name}}' $containers | sed 's,^/,,' )"
COMPREPLY=( $( compgen -W "$names $containers" -- "$cur" ) )
}

__docker_image_repos()
{
local repos
repos="$( docker images | awk 'NR>1{print $1}' )"
local repos="$( __docker_q images | awk 'NR>1{print $1}' | grep -v '^<none>$' )"
COMPREPLY=( $( compgen -W "$repos" -- "$cur" ) )
}

__docker_images()
{
local images
images="$( docker images | awk 'NR>1{print $1":"$2}' )"
COMPREPLY=( $( compgen -W "$images" -- "$cur" ) )
__ltrim_colon_completions "$cur"
}

__docker_image_repos_and_tags()
{
local repos images
repos="$( docker images | awk 'NR>1{print $1}' )"
images="$( docker images | awk 'NR>1{print $1":"$2}' )"
local repos="$( __docker_q images | awk 'NR>1{print $1}' | grep -v '^<none>$' )"
local images="$( __docker_q images | awk 'NR>1{print $1":"$2}' | grep -v '^<none>:' )"
COMPREPLY=( $( compgen -W "$repos $images" -- "$cur" ) )
__ltrim_colon_completions "$cur"
}

__docker_image_repos_and_tags_and_ids()
{
local repos="$( __docker_q images | awk 'NR>1{print $1}' | grep -v '^<none>$' )"
local images="$( __docker_q images | awk 'NR>1{print $1":"$2}' | grep -v '^<none>:' )"
local ids="$( __docker_q images -a -q )"
COMPREPLY=( $( compgen -W "$repos $images $ids" -- "$cur" ) )
__ltrim_colon_completions "$cur"
}

__docker_containers_and_images()
{
local containers images
containers="$( docker ps -a -q )"
names="$( docker inspect -format '{{.Name}}' $containers | sed 's,^/,,' )"
images="$( docker images | awk 'NR>1{print $1":"$2}' )"
COMPREPLY=( $( compgen -W "$images $names $containers" -- "$cur" ) )
local containers="$( __docker_q ps -a -q )"
local names="$( __docker_q inspect --format '{{.Name}}' $containers | sed 's,^/,,' )"
local repos="$( __docker_q images | awk 'NR>1{print $1}' | grep -v '^<none>$' )"
local images="$( __docker_q images | awk 'NR>1{print $1":"$2}' | grep -v '^<none>:' )"
local ids="$( __docker_q images -a -q )"
COMPREPLY=( $( compgen -W "$containers $names $repos $images $ids" -- "$cur" ) )
__ltrim_colon_completions "$cur"
}

__docker_pos_first_nonflag()
{
local argument_flags=$1

local counter=$cpos
while [ $counter -le $cword ]; do
if [ -n "$argument_flags" ] && eval "case '${words[$counter]}' in $argument_flags) true ;; *) false ;; esac"; then
(( counter++ ))
else
case "${words[$counter]}" in
-*)
;;
*)
break
;;
esac
fi
(( counter++ ))
done

echo $counter
}

_docker_docker()
{
case "$prev" in

@@ -101,15 +125,24 @@ _docker_docker()

_docker_attach()
{
if [ $cpos -eq $cword ]; then
__docker_containers_running
fi
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "--no-stdin --sig-proxy" -- "$cur" ) )
;;
*)
local counter="$(__docker_pos_first_nonflag)"
if [ $cword -eq $counter ]; then
__docker_containers_running
fi
;;
esac
}

_docker_build()
{
case "$prev" in
-t)
-t|--tag)
__docker_image_repos_and_tags
return
;;
*)

@@ -118,10 +151,13 @@ _docker_build()

case "$cur" in
-*)
COMPREPLY=( $( compgen -W "-no-cache -t -q -rm" -- "$cur" ) )
COMPREPLY=( $( compgen -W "-t --tag -q --quiet --no-cache --rm" -- "$cur" ) )
;;
*)
_filedir
local counter="$(__docker_pos_first_nonflag '-t|--tag')"
if [ $cword -eq $counter ]; then
_filedir
fi
;;
esac
}

@@ -129,7 +165,7 @@ _docker_build()
_docker_commit()
{
case "$prev" in
-author|-m|-run)
-m|--message|-a|--author|--run)
return
;;
*)

@@ -138,26 +174,20 @@ _docker_commit()

case "$cur" in
-*)
COMPREPLY=( $( compgen -W "-author -m -run" -- "$cur" ) )
COMPREPLY=( $( compgen -W "-m --message -a --author --run" -- "$cur" ) )
;;
*)
local counter=$cpos
while [ $counter -le $cword ]; do
case "${words[$counter]}" in
-author|-m|-run)
(( counter++ ))
;;
-*)
;;
*)
break
;;
esac
(( counter++ ))
done
local counter=$(__docker_pos_first_nonflag '-m|--message|-a|--author|--run')

if [ $counter -eq $cword ]; then
if [ $cword -eq $counter ]; then
__docker_containers_all
return
fi
(( counter++ ))

if [ $cword -eq $counter ]; then
__docker_image_repos_and_tags
return
fi
;;
esac

@@ -165,16 +195,32 @@ _docker_commit()

_docker_cp()
{
if [ $cpos -eq $cword ]; then
__docker_containers_all
else
local counter=$(__docker_pos_first_nonflag)
if [ $cword -eq $counter ]; then
case "$cur" in
*:)
return
;;
*)
__docker_containers_all
COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) )
compopt -o nospace
return
;;
esac
fi
(( counter++ ))

if [ $cword -eq $counter ]; then
_filedir
return
fi
}

_docker_diff()
{
if [ $cpos -eq $cword ]; then
local counter=$(__docker_pos_first_nonflag)
if [ $cword -eq $counter ]; then
__docker_containers_all
fi
}

@@ -182,7 +228,7 @@ _docker_diff()
_docker_events()
{
case "$prev" in
-since)
--since)
return
;;
*)

@@ -191,7 +237,7 @@ _docker_events()

case "$cur" in
-*)
COMPREPLY=( $( compgen -W "-since" -- "$cur" ) )
COMPREPLY=( $( compgen -W "--since" -- "$cur" ) )
;;
*)
;;

@@ -200,45 +246,44 @@ _docker_events()

_docker_export()
{
if [ $cpos -eq $cword ]; then
local counter=$(__docker_pos_first_nonflag)
if [ $cword -eq $counter ]; then
__docker_containers_all
fi
}

_docker_help()
{
if [ $cpos -eq $cword ]; then
local counter=$(__docker_pos_first_nonflag)
if [ $cword -eq $counter ]; then
COMPREPLY=( $( compgen -W "$commands" -- "$cur" ) )
fi
}

_docker_history()
{
if [ $cpos -eq $cword ]; then
__docker_image_repos_and_tags
fi
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "-q --quiet --no-trunc" -- "$cur" ) )
;;
*)
local counter=$(__docker_pos_first_nonflag)
if [ $cword -eq $counter ]; then
__docker_image_repos_and_tags_and_ids
fi
;;
esac
}

_docker_images()
{
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "-a -notrunc -q -viz" -- "$cur" ) )
COMPREPLY=( $( compgen -W "-q --quiet -a --all --no-trunc -v --viz -t --tree" -- "$cur" ) )
;;
*)
local counter=$cpos
while [ $counter -le $cword ]; do
case "${words[$counter]}" in
-*)
;;
*)
break
;;
esac
(( counter++ ))
done

if [ $counter -eq $cword ]; then
local counter=$(__docker_pos_first_nonflag)
if [ $cword -eq $counter ]; then
__docker_image_repos
fi
;;

@@ -247,7 +292,16 @@ _docker_images()

_docker_import()
{
return
local counter=$(__docker_pos_first_nonflag)
if [ $cword -eq $counter ]; then
return
fi
(( counter++ ))

if [ $cword -eq $counter ]; then
__docker_image_repos_and_tags
return
fi
}

_docker_info()

@@ -257,25 +311,16 @@ _docker_info()

_docker_insert()
{
if [ $cpos -eq $cword ]; then
__docker_image_repos_and_tags
local counter=$(__docker_pos_first_nonflag)
if [ $cword -eq $counter ]; then
__docker_image_repos_and_tags_and_ids
fi
}

_docker_inspect()
{
__docker_containers_and_images
}

_docker_kill()
{
__docker_containers_running
}

_docker_login()
{
case "$prev" in
-e|-p|-u)
-f|--format)
return
;;
*)

@@ -284,7 +329,37 @@ _docker_login()

case "$cur" in
-*)
COMPREPLY=( $( compgen -W "-e -p -u" -- "$cur" ) )
COMPREPLY=( $( compgen -W "-f --format" -- "$cur" ) )
;;
*)
__docker_containers_and_images
;;
esac
}

_docker_kill()
{
__docker_containers_running
}

_docker_load()
{
return
}

_docker_login()
{
case "$prev" in
-u|--username|-p|--password|-e|--email)
return
;;
*)
;;
esac

case "$cur" in
-*)
COMPREPLY=( $( compgen -W "-u --username -p --password -e --email" -- "$cur" ) )
;;
*)
;;

@@ -293,14 +368,23 @@ _docker_login()

_docker_logs()
{
if [ $cpos -eq $cword ]; then
__docker_containers_all
fi
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "-f --follow" -- "$cur" ) )
;;
*)
local counter=$(__docker_pos_first_nonflag)
if [ $cword -eq $counter ]; then
__docker_containers_all
fi
;;
esac
}

_docker_port()
{
if [ $cpos -eq $cword ]; then
local counter=$(__docker_pos_first_nonflag)
if [ $cword -eq $counter ]; then
__docker_containers_all
fi
}

@@ -308,7 +392,13 @@ _docker_port()
_docker_ps()
{
case "$prev" in
-beforeId|-n|-sinceId)
--since-id|--before-id)
COMPREPLY=( $( compgen -W "$( __docker_q ps -a -q )" -- "$cur" ) )
# TODO replace this with __docker_containers_all
# see https://github.com/dotcloud/docker/issues/3565
return
;;
-n)
return
;;
*)

@@ -317,7 +407,7 @@ _docker_ps()

case "$cur" in
-*)
COMPREPLY=( $( compgen -W "-a -beforeId -l -n -notrunc -q -s -sinceId" -- "$cur" ) )
COMPREPLY=( $( compgen -W "-q --quiet -s --size -a --all --no-trunc -l --latest --since-id --before-id -n" -- "$cur" ) )
;;
*)
;;

@@ -327,7 +417,7 @@ _docker_ps()
_docker_pull()
{
case "$prev" in
-t)
-t|--tag)
return
;;
*)

@@ -336,22 +426,31 @@ _docker_pull()

case "$cur" in
-*)
COMPREPLY=( $( compgen -W "-t" -- "$cur" ) )
COMPREPLY=( $( compgen -W "-t --tag" -- "$cur" ) )
;;
*)
local counter=$(__docker_pos_first_nonflag '-t|--tag')
if [ $cword -eq $counter ]; then
__docker_image_repos_and_tags
fi
;;
esac
}

_docker_push()
{
__docker_image_repos
local counter=$(__docker_pos_first_nonflag)
if [ $cword -eq $counter ]; then
__docker_image_repos
# TODO replace this with __docker_image_repos_and_tags
# see https://github.com/dotcloud/docker/issues/3411
fi
}

_docker_restart()
{
case "$prev" in
-t)
-t|--time)
return
;;
*)

@@ -360,7 +459,7 @@ _docker_restart()

case "$cur" in
-*)
COMPREPLY=( $( compgen -W "-t" -- "$cur" ) )
COMPREPLY=( $( compgen -W "-t --time" -- "$cur" ) )
;;
*)
__docker_containers_all

@@ -372,7 +471,7 @@ _docker_rm()
{
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "-v" -- "$cur" ) )
COMPREPLY=( $( compgen -W "-v --volumes -l --link" -- "$cur" ) )
;;
*)
__docker_containers_stopped

@@ -382,19 +481,27 @@ _docker_rm()

_docker_rmi()
{
__docker_image_repos_and_tags
__docker_image_repos_and_tags_and_ids
}

_docker_run()
{
case "$prev" in
-cidfile)
--cidfile)
_filedir
;;
-volumes-from)
--volumes-from)
__docker_containers_all
;;
-a|-c|-dns|-e|-entrypoint|-h|-lxc-conf|-m|-p|-u|-v|-w)
-v|--volume)
# TODO something magical with colons and _filedir ?
return
;;
-e|--env)
COMPREPLY=( $( compgen -e -- "$cur" ) )
return
;;
--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf)
return
;;
*)

@@ -403,45 +510,30 @@ _docker_run()

case "$cur" in
-*)
COMPREPLY=( $( compgen -W "-a -c -cidfile -d -dns -e -entrypoint -h -i -lxc-conf -m -n -p -privileged -t -u -v -volumes-from -w" -- "$cur" ) )
COMPREPLY=( $( compgen -W "--rm -d --detach -n --networking --privileged -P --publish-all -i --interactive -t --tty --cidfile --entrypoint -h --hostname -m --memory -u --user -w --workdir -c --cpu-shares --sig-proxy --name -a --attach -v --volume --link -e --env -p --publish --expose --dns --volumes-from --lxc-conf" -- "$cur" ) )
;;
*)
local counter=$cpos
while [ $counter -le $cword ]; do
case "${words[$counter]}" in
-a|-c|-cidfile|-dns|-e|-entrypoint|-h|-lxc-conf|-m|-p|-u|-v|-volumes-from|-w)
(( counter++ ))
;;
-*)
;;
*)
break
;;
esac
(( counter++ ))
done
local counter=$(__docker_pos_first_nonflag '--cidfile|--volumes-from|-v|--volume|-e|--env|--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf')

if [ $counter -eq $cword ]; then
__docker_image_repos_and_tags
if [ $cword -eq $counter ]; then
__docker_image_repos_and_tags_and_ids
fi
;;
esac
}

_docker_save()
{
local counter=$(__docker_pos_first_nonflag)
if [ $cword -eq $counter ]; then
__docker_image_repos_and_tags_and_ids
fi
}

_docker_search()
{
COMPREPLY=( $( compgen -W "-notrunc" "-stars" "-trusted" -- "$cur" ) )
}

_docker_start()
{
__docker_containers_stopped
}

_docker_stop()
{
case "$prev" in
-t)
-s|--stars)
return
;;
*)

@@ -450,7 +542,38 @@ _docker_stop()

case "$cur" in
-*)
COMPREPLY=( $( compgen -W "-t" -- "$cur" ) )
COMPREPLY=( $( compgen -W "--no-trunc -t --trusted -s --stars" -- "$cur" ) )
;;
*)
;;
esac
}

_docker_start()
{
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "-a --attach -i --interactive" -- "$cur" ) )
;;
*)
__docker_containers_stopped
;;
esac
}

_docker_stop()
{
case "$prev" in
-t|--time)
return
;;
*)
;;
esac

case "$cur" in
-*)
COMPREPLY=( $( compgen -W "-t --time" -- "$cur" ) )
;;
*)
__docker_containers_running

@@ -460,12 +583,31 @@ _docker_stop()

_docker_tag()
{
COMPREPLY=( $( compgen -W "-f" -- "$cur" ) )
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "-f --force" -- "$cur" ) )
;;
*)
local counter=$(__docker_pos_first_nonflag)

if [ $cword -eq $counter ]; then
__docker_image_repos_and_tags
return
fi
(( counter++ ))

if [ $cword -eq $counter ]; then
__docker_image_repos_and_tags
return
fi
;;
esac
}

_docker_top()
{
if [ $cpos -eq $cword ]; then
local counter=$(__docker_pos_first_nonflag)
if [ $cword -eq $counter ]; then
__docker_containers_running
fi
}

@@ -482,7 +624,6 @@ _docker_wait()

_docker()
{
local cur prev words cword command="docker" counter=1 word cpos
local commands="
attach
build

@@ -498,6 +639,7 @@ _docker()
insert
inspect
kill
load
login
logs
port

@@ -508,6 +650,7 @@ _docker()
rm
rmi
run
save
search
start
stop

@@ -518,18 +661,20 @@ _docker()
"

COMPREPLY=()
local cur prev words cword
_get_comp_words_by_ref -n : cur prev words cword

local command='docker'
local counter=1
while [ $counter -lt $cword ]; do
word="${words[$counter]}"
case "$word" in
case "${words[$counter]}" in
-H)
(( counter++ ))
;;
-*)
;;
*)
command="$word"
command="${words[$counter]}"
cpos=$counter
(( cpos++ ))
break

@@ -174,7 +174,7 @@ __docker_subcommand () {
(ps)
_arguments '-a[Show all containers. Only running containers are shown by default]' \
'-h[Show help]' \
'-beforeId=-[Show only container created before Id, include non-running one]:containers:__docker_containers' \
'-before-id=-[Show only container created before Id, include non-running one]:containers:__docker_containers' \
'-n=-[Show n last created containers, include non-running one]:n:(1 5 10 25 50)'
;;
(tag)

@@ -189,9 +189,9 @@ __docker_subcommand () {
'-a=-[Attach to stdin, stdout or stderr]:toggle:(true false)' \
'-c=-[CPU shares (relative weight)]:CPU shares: ' \
'-d[Detached mode: leave the container running in the background]' \
'*-dns=[Set custom dns servers]:dns server: ' \
'*--dns=[Set custom dns servers]:dns server: ' \
'*-e=[Set environment variables]:environment variable: ' \
'-entrypoint=-[Overwrite the default entrypoint of the image]:entry point: ' \
'--entrypoint=-[Overwrite the default entrypoint of the image]:entry point: ' \
'-h=-[Container host name]:hostname:_hosts' \
'-i[Keep stdin open even if not attached]' \
'-m=-[Memory limit (in bytes)]:limit: ' \

@@ -199,7 +199,7 @@ __docker_subcommand () {
'-t=-[Allocate a pseudo-tty]:toggle:(true false)' \
'-u=-[Username or UID]:user:_users' \
'*-v=-[Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)]:volume: '\
'-volumes-from=-[Mount volumes from the specified container]:volume: ' \
'--volumes-from=-[Mount volumes from the specified container]:volume: ' \
'(-):images:__docker_images' \
'(-):command: _command_names -e' \
'*::arguments: _normal'

@@ -1,11 +1,11 @@
[Unit]
Description=Docker Application Container Engine
Documentation=http://docs.docker.io
After=network.target

[Service]
ExecStartPre=/bin/mount --make-rprivate /
ExecStart=/usr/bin/docker -d
Restart=on-failure

[Install]
WantedBy=multi-user.target

11 contrib/init/systemd/socket-activation/docker.service Normal file

@@ -0,0 +1,11 @@
[Unit]
Description=Docker Application Container Engine
Documentation=http://docs.docker.io
After=network.target

[Service]
ExecStart=/usr/bin/docker -d -H fd://
Restart=on-failure

[Install]
WantedBy=multi-user.target

8 contrib/init/systemd/socket-activation/docker.socket Normal file

@@ -0,0 +1,8 @@
[Unit]
Description=Docker Socket for the API

[Socket]
ListenStream=/var/run/docker.sock

[Install]
WantedBy=sockets.target
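
The socket unit plus "-H fd://" relies on systemd socket activation: the daemon starts with the listening socket already open. A hedged sketch of the receiving side, assuming systemd's documented convention of passing the first socket as file descriptor 3 (SD_LISTEN_FDS_START); it is not docker's actual implementation:

package main

import (
	"fmt"
	"net"
	"os"
)

func main() {
	f := os.NewFile(3, "docker.sock") // fd 3 is the first systemd-passed socket
	l, err := net.FileListener(f)
	if err != nil {
		fmt.Fprintln(os.Stderr, "not socket-activated:", err)
		os.Exit(1)
	}
	defer l.Close()
	conn, err := l.Accept() // serve the API on the inherited socket
	if err == nil {
		conn.Close()
	}
}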
|
92
contrib/mkimage-arch-pacman.conf
Normal file
92
contrib/mkimage-arch-pacman.conf
Normal file
|
@ -0,0 +1,92 @@
|
|||
#
|
||||
# /etc/pacman.conf
|
||||
#
|
||||
# See the pacman.conf(5) manpage for option and repository directives
|
||||
|
||||
#
|
||||
# GENERAL OPTIONS
|
||||
#
|
||||
[options]
|
||||
# The following paths are commented out with their default values listed.
|
||||
# If you wish to use different paths, uncomment and update the paths.
|
||||
#RootDir = /
|
||||
#DBPath = /var/lib/pacman/
|
||||
#CacheDir = /var/cache/pacman/pkg/
|
||||
#LogFile = /var/log/pacman.log
|
||||
#GPGDir = /etc/pacman.d/gnupg/
|
||||
HoldPkg = pacman glibc
|
||||
#XferCommand = /usr/bin/curl -C - -f %u > %o
|
||||
#XferCommand = /usr/bin/wget --passive-ftp -c -O %o %u
|
||||
#CleanMethod = KeepInstalled
|
||||
#UseDelta = 0.7
|
||||
Architecture = auto
|
||||
|
||||
# Pacman won't upgrade packages listed in IgnorePkg and members of IgnoreGroup
|
||||
#IgnorePkg =
|
||||
#IgnoreGroup =
|
||||
|
||||
#NoUpgrade =
|
||||
#NoExtract =
|
||||
|
||||
# Misc options
|
||||
#UseSyslog
|
||||
#Color
|
||||
#TotalDownload
|
||||
# We cannot check disk space from within a chroot environment
|
||||
#CheckSpace
|
||||
#VerbosePkgLists
|
||||
|
||||
# By default, pacman accepts packages signed by keys that its local keyring
|
||||
# trusts (see pacman-key and its man page), as well as unsigned packages.
|
||||
SigLevel = Required DatabaseOptional
|
||||
LocalFileSigLevel = Optional
|
||||
#RemoteFileSigLevel = Required
|
||||
|
||||
# NOTE: You must run `pacman-key --init` before first using pacman; the local
|
||||
# keyring can then be populated with the keys of all official Arch Linux
|
||||
# packagers with `pacman-key --populate archlinux`.
|
||||
|
||||
#
|
||||
# REPOSITORIES
|
||||
# - can be defined here or included from another file
|
||||
# - pacman will search repositories in the order defined here
|
||||
# - local/custom mirrors can be added here or in separate files
|
||||
# - repositories listed first will take precedence when packages
|
||||
# have identical names, regardless of version number
|
||||
# - URLs will have $repo replaced by the name of the current repo
|
||||
# - URLs will have $arch replaced by the name of the architecture
|
||||
#
|
||||
# Repository entries are of the format:
|
||||
# [repo-name]
|
||||
# Server = ServerName
|
||||
# Include = IncludePath
|
||||
#
|
||||
# The header [repo-name] is crucial - it must be present and
|
||||
# uncommented to enable the repo.
|
||||
#
|
||||
|
||||
# The testing repositories are disabled by default. To enable, uncomment the
|
||||
# repo name header and Include lines. You can add preferred servers immediately
|
||||
# after the header, and they will be used before the default mirrors.
|
||||
|
||||
#[testing]
|
||||
#Include = /etc/pacman.d/mirrorlist
|
||||
|
||||
[core]
|
||||
Include = /etc/pacman.d/mirrorlist
|
||||
|
||||
[extra]
|
||||
Include = /etc/pacman.d/mirrorlist
|
||||
|
||||
#[community-testing]
|
||||
#Include = /etc/pacman.d/mirrorlist
|
||||
|
||||
[community]
|
||||
Include = /etc/pacman.d/mirrorlist
|
||||
|
||||
# An example of a custom package repository. See the pacman manpage for
|
||||
# tips on creating your own repositories.
|
||||
#[custom]
|
||||
#SigLevel = Optional TrustAll
|
||||
#Server = file:///home/custompkgs
|
||||
|
|
@ -1,30 +1,29 @@
|
|||
#!/bin/bash
|
||||
#!/usr/bin/env bash
|
||||
# Generate a minimal filesystem for archlinux and load it into the local
|
||||
# docker as "archlinux"
|
||||
# requires root
|
||||
set -e
|
||||
|
||||
PACSTRAP=$(which pacstrap)
|
||||
[ "$PACSTRAP" ] || {
|
||||
hash pacstrap &>/dev/null || {
|
||||
echo "Could not find pacstrap. Run pacman -S arch-install-scripts"
|
||||
exit 1
|
||||
}
|
||||
EXPECT=$(which expect)
|
||||
[ "$EXPECT" ] || {
|
||||
|
||||
hash expect &>/dev/null || {
|
||||
echo "Could not find expect. Run pacman -S expect"
|
||||
exit 1
|
||||
}
|
||||
|
||||
ROOTFS=~/rootfs-arch-$$-$RANDOM
|
||||
mkdir $ROOTFS
|
||||
ROOTFS=$(mktemp -d /tmp/rootfs-archlinux-XXXXXXXXXX)
|
||||
chmod 755 $ROOTFS
|
||||
|
||||
#packages to ignore for space savings
|
||||
# packages to ignore for space savings
|
||||
PKGIGNORE=linux,jfsutils,lvm2,cryptsetup,groff,man-db,man-pages,mdadm,pciutils,pcmciautils,reiserfsprogs,s-nail,xfsprogs
|
||||
|
||||
|
||||
expect <<EOF
|
||||
set timeout 60
|
||||
set send_slow {1 1}
|
||||
spawn pacstrap -c -d -G -i $ROOTFS base haveged --ignore $PKGIGNORE
|
||||
spawn pacstrap -C ./mkimage-arch-pacman.conf -c -d -G -i $ROOTFS base haveged --ignore $PKGIGNORE
|
||||
expect {
|
||||
"Install anyway?" { send n\r; exp_continue }
|
||||
"(default=all)" { send \r; exp_continue }
|
||||
|
@ -38,29 +37,26 @@ EOF
|
|||
|
||||
arch-chroot $ROOTFS /bin/sh -c "haveged -w 1024; pacman-key --init; pkill haveged; pacman -Rs --noconfirm haveged; pacman-key --populate archlinux"
|
||||
arch-chroot $ROOTFS /bin/sh -c "ln -s /usr/share/zoneinfo/UTC /etc/localtime"
|
||||
cat > $ROOTFS/etc/locale.gen <<DELIM
|
||||
en_US.UTF-8 UTF-8
|
||||
en_US ISO-8859-1
|
||||
DELIM
|
||||
echo 'en_US.UTF-8 UTF-8' > $ROOTFS/etc/locale.gen
|
||||
arch-chroot $ROOTFS locale-gen
|
||||
arch-chroot $ROOTFS /bin/sh -c 'echo "Server = http://mirrors.kernel.org/archlinux/\$repo/os/\$arch" > /etc/pacman.d/mirrorlist'
|
||||
|
||||
# udev doesn't work in containers, rebuild /dev
|
||||
DEV=${ROOTFS}/dev
|
||||
mv ${DEV} ${DEV}.old
|
||||
mkdir -p ${DEV}
|
||||
mknod -m 666 ${DEV}/null c 1 3
|
||||
mknod -m 666 ${DEV}/zero c 1 5
|
||||
mknod -m 666 ${DEV}/random c 1 8
|
||||
mknod -m 666 ${DEV}/urandom c 1 9
|
||||
mkdir -m 755 ${DEV}/pts
|
||||
mkdir -m 1777 ${DEV}/shm
|
||||
mknod -m 666 ${DEV}/tty c 5 0
|
||||
mknod -m 600 ${DEV}/console c 5 1
|
||||
mknod -m 666 ${DEV}/tty0 c 4 0
|
||||
mknod -m 666 ${DEV}/full c 1 7
|
||||
mknod -m 600 ${DEV}/initctl p
|
||||
mknod -m 666 ${DEV}/ptmx c 5 2
|
||||
DEV=$ROOTFS/dev
|
||||
rm -rf $DEV
|
||||
mkdir -p $DEV
|
||||
mknod -m 666 $DEV/null c 1 3
|
||||
mknod -m 666 $DEV/zero c 1 5
|
||||
mknod -m 666 $DEV/random c 1 8
|
||||
mknod -m 666 $DEV/urandom c 1 9
|
||||
mkdir -m 755 $DEV/pts
|
||||
mkdir -m 1777 $DEV/shm
|
||||
mknod -m 666 $DEV/tty c 5 0
|
||||
mknod -m 600 $DEV/console c 5 1
|
||||
mknod -m 666 $DEV/tty0 c 4 0
|
||||
mknod -m 666 $DEV/full c 1 7
|
||||
mknod -m 600 $DEV/initctl p
|
||||
mknod -m 666 $DEV/ptmx c 5 2
|
||||
|
||||
tar --numeric-owner -C $ROOTFS -c . | docker import - archlinux
|
||||
docker run -i -t archlinux echo Success.
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
#!/bin/bash
|
||||
#!/usr/bin/env bash
|
||||
# Generate a very minimal filesystem based on busybox-static,
|
||||
# and load it into the local docker under the name "busybox".
|
||||
|
||||
|
|
75
contrib/mkimage-crux.sh
Executable file
75
contrib/mkimage-crux.sh
Executable file
|
@@ -0,0 +1,75 @@
#!/usr/bin/env bash
# Generate a minimal filesystem for CRUX/Linux and load it into the local
# docker as "cruxlinux"
# requires root and the crux iso (http://crux.nu)

set -e

die () {
    echo >&2 "$@"
    exit 1
}

[ "$#" -eq 1 ] || die "1 argument(s) required, $# provided. Usage: ./mkimage-crux.sh /path/to/iso"

ISO=${1}

ROOTFS=$(mktemp -d /tmp/rootfs-crux-XXXXXXXXXX)
CRUX=$(mktemp -d /tmp/crux-XXXXXXXXXX)
TMP=$(mktemp -d /tmp/XXXXXXXXXX)

VERSION=$(basename --suffix=.iso $ISO | sed 's/[^0-9.]*\([0-9.]*\).*/\1/')

# Mount the ISO
mount -o ro,loop $ISO $CRUX

# Extract pkgutils
tar -C $TMP -xf $CRUX/tools/pkgutils#*.pkg.tar.gz

# Put pkgadd in the $PATH
export PATH="$TMP/usr/bin:$PATH"

# Install core packages
mkdir -p $ROOTFS/var/lib/pkg
touch $ROOTFS/var/lib/pkg/db
for pkg in $CRUX/crux/core/*; do
    pkgadd -r $ROOTFS $pkg
done

# Remove agetty and inittab config
if (grep agetty ${ROOTFS}/etc/inittab 2>&1 > /dev/null); then
    echo "Removing agetty from /etc/inittab ..."
    chroot ${ROOTFS} sed -i -e "/agetty/d" /etc/inittab
    chroot ${ROOTFS} sed -i -e "/shutdown/d" /etc/inittab
    chroot ${ROOTFS} sed -i -e "/^$/N;/^\n$/d" /etc/inittab
fi

# Remove kernel source
rm -rf $ROOTFS/usr/src/*

# udev doesn't work in containers, rebuild /dev
DEV=$ROOTFS/dev
rm -rf $DEV
mkdir -p $DEV
mknod -m 666 $DEV/null c 1 3
mknod -m 666 $DEV/zero c 1 5
mknod -m 666 $DEV/random c 1 8
mknod -m 666 $DEV/urandom c 1 9
mkdir -m 755 $DEV/pts
mkdir -m 1777 $DEV/shm
mknod -m 666 $DEV/tty c 5 0
mknod -m 600 $DEV/console c 5 1
mknod -m 666 $DEV/tty0 c 4 0
mknod -m 666 $DEV/full c 1 7
mknod -m 600 $DEV/initctl p
mknod -m 666 $DEV/ptmx c 5 2

IMAGE_ID=$(tar --numeric-owner -C $ROOTFS -c . | docker import - crux:$VERSION)
docker tag $IMAGE_ID crux:latest
docker run -i -t crux echo Success.

# Cleanup
umount $CRUX
rm -rf $ROOTFS
rm -rf $CRUX
rm -rf $TMP

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
set -e

variant='minbase'

@@ -117,6 +117,11 @@ target="/tmp/docker-rootfs-debootstrap-$suite-$$-$RANDOM"
cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
returnTo="$(pwd -P)"

if [ "$suite" = 'lucid' ]; then
	# lucid fails and doesn't include gpgv in minbase; "apt-get update" fails
	include+=',gpgv'
fi

set -x

# bootstrap

@@ -138,18 +143,26 @@ if [ -z "$strictDebootstrap" ]; then
	# shrink the image, since apt makes us fat (wheezy: ~157.5MB vs ~120MB)
	sudo chroot . apt-get clean

	# while we're at it, apt is unnecessarily slow inside containers
	#  this forces dpkg not to call sync() after package extraction and speeds up install
	#  the benefit is huge on spinning disks, and the penalty is nonexistent on SSD or decent server virtualization
	echo 'force-unsafe-io' | sudo tee etc/dpkg/dpkg.cfg.d/02apt-speedup > /dev/null
	# we want to effectively run "apt-get clean" after every install to keep images small (see output of "apt-get clean -s" for context)
	if strings usr/bin/dpkg | grep -q unsafe-io; then
		# while we're at it, apt is unnecessarily slow inside containers
		#  this forces dpkg not to call sync() after package extraction and speeds up install
		#  the benefit is huge on spinning disks, and the penalty is nonexistent on SSD or decent server virtualization
		echo 'force-unsafe-io' | sudo tee etc/dpkg/dpkg.cfg.d/02apt-speedup > /dev/null
		# we have this wrapped up in an "if" because the "force-unsafe-io"
		# option was added in dpkg 1.15.8.6
		# (see http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=584254#82),
		# and ubuntu lucid/10.04 only has 1.15.5.6
	fi

	# we want to effectively run "apt-get clean" after every install to keep images small (see output of "apt-get clean -s" for context)
	{
		aptGetClean='"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true";'
		echo "DPkg::Post-Invoke { ${aptGetClean} };"
		echo "APT::Update::Post-Invoke { ${aptGetClean} };"
		echo 'Dir::Cache::pkgcache ""; Dir::Cache::srcpkgcache "";'
	} | sudo tee etc/apt/apt.conf.d/no-cache > /dev/null
	# and remove the translations, too

	# and remove the translations, too
	echo 'Acquire::Languages "none";' | sudo tee etc/apt/apt.conf.d/no-languages > /dev/null

	# helpful undo lines for each the above tweaks (for lack of a better home to keep track of them):

@@ -190,6 +203,9 @@ if [ -z "$strictDebootstrap" ]; then
			;;
	esac
fi

	# make sure our packages lists are as up to date as we can get them
	sudo chroot . apt-get update
fi

if [ "$justTar" ]; then

@@ -1,4 +1,11 @@
#!/bin/bash
#!/usr/bin/env bash
#
# Create a base CentOS Docker image.

# This script is useful on systems with rinse available (e.g.,
# building a CentOS image on Debian). See contrib/mkimage-yum.sh for
# a way to build CentOS images on systems with yum installed.

set -e

repo="$1"

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Generate a very minimal filesystem based on busybox-static,
# and load it into the local docker under the name "docker-ut".

contrib/mkimage-yum.sh (new executable file, 90 lines)
@@ -0,0 +1,90 @@
#!/usr/bin/env bash
#
# Create a base CentOS Docker image.
#
# This script is useful on systems with yum installed (e.g., building
# a CentOS image on CentOS).  See contrib/mkimage-rinse.sh for a way
# to build CentOS images on other systems.

usage() {
    cat <<EOOPTS
$(basename $0) [OPTIONS] <name>
OPTIONS:
  -y <yumconf>  The path to the yum config to install packages from. The
                default is /etc/yum.conf.
EOOPTS
    exit 1
}

# option defaults
yum_config=/etc/yum.conf
while getopts ":y:h" opt; do
    case $opt in
        y)
            yum_config=$OPTARG
            ;;
        h)
            usage
            ;;
        \?)
            echo "Invalid option: -$OPTARG"
            usage
            ;;
    esac
done
shift $((OPTIND - 1))
name=$1

if [[ -z $name ]]; then
    usage
fi

#--------------------

target=$(mktemp -d --tmpdir $(basename $0).XXXXXX)

set -x

for dev in console null zero urandom; do
    /sbin/MAKEDEV -d "$target"/dev -x $dev
done

yum -c "$yum_config" --installroot="$target" --setopt=tsflags=nodocs \
    --setopt=group_package_types=mandatory -y groupinstall Core
yum -c "$yum_config" --installroot="$target" -y clean all

cat > "$target"/etc/sysconfig/network <<EOF
NETWORKING=yes
HOSTNAME=localhost.localdomain
EOF

# effectively: febootstrap-minimize --keep-zoneinfo --keep-rpmdb
# --keep-services "$target".  Stolen from mkimage-rinse.sh
#  locales
rm -rf "$target"/usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive}
#  docs
rm -rf "$target"/usr/share/{man,doc,info,gnome/help}
#  cracklib
rm -rf "$target"/usr/share/cracklib
#  i18n
rm -rf "$target"/usr/share/i18n
#  sln
rm -rf "$target"/sbin/sln
#  ldconfig
rm -rf "$target"/etc/ld.so.cache
rm -rf "$target"/var/cache/ldconfig/*

version=
if [ -r "$target"/etc/redhat-release ]; then
    version="$(sed 's/^[^0-9\]*\([0-9.]\+\).*$/\1/' "$target"/etc/redhat-release)"
fi

if [ -z "$version" ]; then
    echo >&2 "warning: cannot autodetect OS version, using '$name' as tag"
    version=$name
fi

tar --numeric-owner -c -C "$target" . | docker import - $name:$version
docker run -i -t $name:$version echo success

rm -rf "$target"
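
The script can then be invoked like this (a sketch: it assumes a host with ``yum`` installed, and ``centos6`` is just an example image name)::

    # build a base image named "centos6" using the default /etc/yum.conf
    sudo ./contrib/mkimage-yum.sh centos6

    # or point -y at an alternate yum configuration
    sudo ./contrib/mkimage-yum.sh -y /path/to/yum.conf centos6
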
@@ -41,7 +41,7 @@ use warnings;

if( -t ) {
    print STDERR "Helper script to make seccomp filters for Docker/LXC.\n";
    print STDERR "Usage: mkseccomp.pl [files...]\n";
    print STDERR "Usage: mkseccomp.pl < [files...]\n";
    exit 1;
}

@@ -195,6 +195,7 @@ shutdown
socket // (*)
socketcall
socketpair
sethostname // (*)

// Signal related
pause

@@ -261,7 +262,7 @@ vmsplice

// Process control
capget
//capset
capset // (*)
clone // (*)
execve // (*)
exit // (*)

@@ -401,7 +402,6 @@ tkill
//quotactl
//reboot
//setdomainname
//sethostname
//setns
//settimeofday
//sgetmask // Obsolete

contrib/prepare-commit-msg.hook (new file, 7 lines)
@@ -0,0 +1,7 @@
#!/bin/sh
# Auto sign all commits to allow them to be used by the Docker project.
# see https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md#sign-your-work
#
GH_USER=$(git config --get github.user)
SOB=$(git var GIT_AUTHOR_IDENT | sed -n "s/^\(.*>\).*$/Docker-DCO-1.1-Signed-off-by: \1 \(github: $GH_USER\)/p")
grep -qs "^$SOB" "$1" || echo "\n$SOB" >> "$1"
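
To enable the hook in a clone of the repository, copy it into place (a sketch: the paths assume you are at the top of a docker checkout, and the GitHub username is a placeholder)::

    cp contrib/prepare-commit-msg.hook .git/hooks/prepare-commit-msg
    chmod +x .git/hooks/prepare-commit-msg
    git config github.user your-github-username
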
@@ -1,15 +1,17 @@
package main

import (
	"flag"
	"fmt"
	"github.com/dotcloud/docker"
	"github.com/dotcloud/docker/engine"
	"github.com/dotcloud/docker/sysinit"
	"github.com/dotcloud/docker/utils"
	"log"
	"os"
	"strings"

	"github.com/dotcloud/docker"
	"github.com/dotcloud/docker/api"
	"github.com/dotcloud/docker/engine"
	flag "github.com/dotcloud/docker/pkg/mflag"
	"github.com/dotcloud/docker/sysinit"
	"github.com/dotcloud/docker/utils"
)

var (

@@ -25,25 +27,26 @@ func main() {
	}

	var (
		flVersion = flag.Bool("v", false, "Print version information and quit")
		flDaemon = flag.Bool("d", false, "Enable daemon mode")
		flDebug = flag.Bool("D", false, "Enable debug mode")
		flAutoRestart = flag.Bool("r", true, "Restart previously running containers")
		bridgeName = flag.String("b", "", "Attach containers to a pre-existing network bridge; use 'none' to disable container networking")
		bridgeIp = flag.String("bip", "", "Use this CIDR notation address for the network bridge's IP, not compatible with -b")
		pidfile = flag.String("p", "/var/run/docker.pid", "Path to use for daemon PID file")
		flRoot = flag.String("g", "/var/lib/docker", "Path to use as the root of the docker runtime")
		flEnableCors = flag.Bool("api-enable-cors", false, "Enable CORS headers in the remote API")
		flVersion = flag.Bool([]string{"v", "-version"}, false, "Print version information and quit")
		flDaemon = flag.Bool([]string{"d", "-daemon"}, false, "Enable daemon mode")
		flDebug = flag.Bool([]string{"D", "-debug"}, false, "Enable debug mode")
		flAutoRestart = flag.Bool([]string{"r", "-restart"}, true, "Restart previously running containers")
		bridgeName = flag.String([]string{"b", "-bridge"}, "", "Attach containers to a pre-existing network bridge; use 'none' to disable container networking")
		bridgeIp = flag.String([]string{"#bip", "-bip"}, "", "Use this CIDR notation address for the network bridge's IP, not compatible with -b")
		pidfile = flag.String([]string{"p", "-pidfile"}, "/var/run/docker.pid", "Path to use for daemon PID file")
		flRoot = flag.String([]string{"g", "-graph"}, "/var/lib/docker", "Path to use as the root of the docker runtime")
		flEnableCors = flag.Bool([]string{"#api-enable-cors", "-api-enable-cors"}, false, "Enable CORS headers in the remote API")
		flDns = docker.NewListOpts(docker.ValidateIp4Address)
		flEnableIptables = flag.Bool("iptables", true, "Disable docker's addition of iptables rules")
		flDefaultIp = flag.String("ip", "0.0.0.0", "Default IP address to use when binding container ports")
		flInterContainerComm = flag.Bool("icc", true, "Enable inter-container communication")
		flGraphDriver = flag.String("s", "", "Force the docker runtime to use a specific storage driver")
		flEnableIptables = flag.Bool([]string{"#iptables", "-iptables"}, true, "Disable docker's addition of iptables rules")
		flEnableIpForward = flag.Bool([]string{"#ip-forward", "-ip-forward"}, true, "Disable enabling of net.ipv4.ip_forward")
		flDefaultIp = flag.String([]string{"#ip", "-ip"}, "0.0.0.0", "Default IP address to use when binding container ports")
		flInterContainerComm = flag.Bool([]string{"#icc", "-icc"}, true, "Enable inter-container communication")
		flGraphDriver = flag.String([]string{"s", "-storage-driver"}, "", "Force the docker runtime to use a specific storage driver")
		flHosts = docker.NewListOpts(docker.ValidateHost)
		flMtu = flag.Int("mtu", docker.DefaultNetworkMtu, "Set the containers network mtu")
		flMtu = flag.Int([]string{"#mtu", "-mtu"}, 0, "Set the containers network MTU; if no value is provided: default to the default route MTU or 1500 if not default route is available")
	)
	flag.Var(&flDns, "dns", "Force docker to use specific DNS servers")
	flag.Var(&flHosts, "H", "Multiple tcp://host:port or unix://path/to/socket to bind in daemon mode, single connection otherwise")
	flag.Var(&flDns, []string{"#dns", "-dns"}, "Force docker to use specific DNS servers")
	flag.Var(&flHosts, []string{"H", "-host"}, "tcp://host:port, unix://path/to/socket, fd://* or fd://socketfd to use in daemon mode. Multiple sockets can be specified")

	flag.Parse()

@@ -56,13 +59,13 @@ func main() {

	if defaultHost == "" || *flDaemon {
		// If we do not have a host, default to unix socket
		defaultHost = fmt.Sprintf("unix://%s", docker.DEFAULTUNIXSOCKET)
		defaultHost = fmt.Sprintf("unix://%s", api.DEFAULTUNIXSOCKET)
	}
	flHosts.Set(defaultHost)
}

	if *bridgeName != "" && *bridgeIp != "" {
		log.Fatal("You specified -b & -bip, mutually exclusive options. Please specify only one.")
		log.Fatal("You specified -b & --bip, mutually exclusive options. Please specify only one.")
	}

	if *flDebug {

@@ -81,15 +84,15 @@ func main() {
		log.Fatal(err)
	}
	// Load plugin: httpapi
	job := eng.Job("initapi")
	job := eng.Job("initserver")
	job.Setenv("Pidfile", *pidfile)
	job.Setenv("Root", *flRoot)
	job.SetenvBool("AutoRestart", *flAutoRestart)
	job.SetenvBool("EnableCors", *flEnableCors)
	job.SetenvList("Dns", flDns.GetAll())
	job.SetenvBool("EnableIptables", *flEnableIptables)
	job.SetenvBool("EnableIpForward", *flEnableIpForward)
	job.Setenv("BridgeIface", *bridgeName)
	job.Setenv("BridgeIp", *bridgeIp)
	job.Setenv("BridgeIP", *bridgeIp)
	job.Setenv("DefaultIp", *flDefaultIp)
	job.SetenvBool("InterContainerCommunication", *flInterContainerComm)
	job.Setenv("GraphDriver", *flGraphDriver)

@@ -100,6 +103,8 @@ func main() {
	// Serve api
	job = eng.Job("serveapi", flHosts.GetAll()...)
	job.SetenvBool("Logging", true)
	job.SetenvBool("EnableCors", *flEnableCors)
	job.Setenv("Version", VERSION)
	if err := job.Run(); err != nil {
		log.Fatal(err)
	}

@@ -1,20 +1,19 @@
from ubuntu:12.04
maintainer Nick Stinemates
FROM ubuntu:12.04
MAINTAINER Nick Stinemates
#
# docker build -t docker:docs . && docker run -p 8000:8000 docker:docs
#

run apt-get update
run apt-get install -y python-setuptools make
run easy_install pip
#from docs/requirements.txt, but here to increase cacheability
run pip install Sphinx==1.1.3
run pip install sphinxcontrib-httpdomain==1.1.9
add . /docs
run cd /docs; make docs
# TODO switch to http://packages.ubuntu.com/trusty/python-sphinxcontrib-httpdomain once trusty is released

expose 8000
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq make python-pip python-setuptools
# pip installs from docs/requirements.txt, but here to increase cacheability
RUN pip install Sphinx==1.2.1
RUN pip install sphinxcontrib-httpdomain==1.2.0
ADD . /docs
RUN make -C /docs clean docs

workdir /docs/_build/html

entrypoint ["python", "-m", "SimpleHTTPServer"]
WORKDIR /docs/_build/html
CMD ["python", "-m", "SimpleHTTPServer"]
# note, EXPOSE is only last because of https://github.com/dotcloud/docker/issues/3525
EXPOSE 8000

@@ -1,4 +1,3 @@
Andy Rothfusz <andy@dotcloud.com> (@metalivedev)
Ken Cochrane <ken@dotcloud.com> (@kencochrane)
James Turnbull <james@lovedthanlost.net> (@jamtur01)
Sven Dowideit <SvenDowideit@fosiki.com> (@SvenDowideit)

@@ -1,2 +1,2 @@
Sphinx==1.1.3
sphinxcontrib-httpdomain==1.1.9
Sphinx==1.2.1
sphinxcontrib-httpdomain==1.2.0

@@ -1 +0,0 @@
Solomon Hykes <solomon@dotcloud.com> (@shykes)

(File diff suppressed because it is too large.)

@@ -34,10 +34,13 @@ It can be as simple as this to create an Ubuntu base image::
    DISTRIB_DESCRIPTION="Ubuntu 13.04"

There are more example scripts for creating base images in the
Docker Github Repo:
Docker GitHub Repo:

* `BusyBox <https://github.com/dotcloud/docker/blob/master/contrib/mkimage-busybox.sh>`_
* `CentOS / Scientific Linux CERN (SLC)
* CentOS / Scientific Linux CERN (SLC) `on Debian/Ubuntu
  <https://github.com/dotcloud/docker/blob/master/contrib/mkimage-rinse.sh>`_
  or
  `on CentOS/RHEL/SLC/etc.
  <https://github.com/dotcloud/docker/blob/master/contrib/mkimage-yum.sh>`_
* `Debian / Ubuntu
  <https://github.com/dotcloud/docker/blob/master/contrib/mkimage-debootstrap.sh>`_
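
For instance, the Debian/Ubuntu script above can be invoked like this (a sketch: the image name ``mydebian`` and the suite are placeholders, and the script calls ``sudo`` internally for privileged steps)::

    # build a Debian wheezy base image named "mydebian"
    ./contrib/mkimage-debootstrap.sh mydebian wheezy
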
docs/sources/articles/index.rst (new file, 15 lines)
@@ -0,0 +1,15 @@
:title: Docker articles
:description: various articles related to Docker
:keywords: docker, articles

.. _articles_list:

Articles
========

.. toctree::
   :maxdepth: 1

   security
   baseimages
   runmetrics
docs/sources/articles/runmetrics.rst (new file, 463 lines)
@@ -0,0 +1,463 @@
:title: Runtime Metrics
:description: Measure the behavior of running containers
:keywords: docker, metrics, CPU, memory, disk, IO, run, runtime

.. _run_metrics:


Runtime Metrics
===============

Linux Containers rely on `control groups
<https://www.kernel.org/doc/Documentation/cgroups/cgroups.txt>`_ which
not only track groups of processes, but also expose metrics about CPU,
memory, and block I/O usage. You can access those metrics and obtain
network usage metrics as well. This is relevant for "pure" LXC
containers, as well as for Docker containers.

Control Groups
--------------

Control groups are exposed through a pseudo-filesystem. In recent
distros, you should find this filesystem under
``/sys/fs/cgroup``. Under that directory, you will see multiple
sub-directories, called devices, freezer, blkio, etc.; each
sub-directory actually corresponds to a different cgroup hierarchy.

On older systems, the control groups might be mounted on ``/cgroup``,
without distinct hierarchies. In that case, instead of seeing the
sub-directories, you will see a bunch of files in that directory, and
possibly some directories corresponding to existing containers.

To figure out where your control groups are mounted, you can run:

::

   grep cgroup /proc/mounts

.. _run_findpid:

Enumerating Cgroups
-------------------

You can look into ``/proc/cgroups`` to see the different control group
subsystems known to the system, the hierarchy they belong to, and how
many groups they contain.

You can also look at ``/proc/<pid>/cgroup`` to see which control
groups a process belongs to. The control group will be shown as a path
relative to the root of the hierarchy mountpoint; e.g. ``/`` means
“this process has not been assigned into a particular group”, while
``/lxc/pumpkin`` means that the process is likely to be a member of a
container named ``pumpkin``.
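
For example, you can check both pseudo-files for the current shell (the exact subsystems and paths will vary with your distro and kernel)::

   # list the cgroup subsystems known to this kernel
   cat /proc/cgroups

   # show which control groups the current shell belongs to
   cat /proc/$$/cgroup
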
Finding the Cgroup for a Given Container
----------------------------------------

For each container, one cgroup will be created in each hierarchy. On
older systems with older versions of the LXC userland tools, the name
of the cgroup will be the name of the container. With more recent
versions of the LXC tools, the cgroup will be ``lxc/<container_name>.``

For Docker containers using cgroups, the container name will be the
full ID or long ID of the container. If a container shows up as
ae836c95b4c3 in ``docker ps``, its long ID might be something like
``ae836c95b4c3c9e9179e0e91015512da89fdec91612f63cebae57df9a5444c79``. You
can look it up with ``docker inspect`` or ``docker ps -notrunc``.

Putting everything together to look at the memory metrics for a Docker
container, take a look at ``/sys/fs/cgroup/memory/lxc/<longid>/``.
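
For example, something along these lines will dump the memory statistics of a container (a sketch: it assumes the cgroup layout above, and the ``docker inspect`` template syntax of the Docker version current at the time of writing)::

   CONTAINER=ae836c95b4c3
   # resolve the short ID to the full ID
   LONGID=$(docker inspect -format '{{.ID}}' $CONTAINER)
   # dump the memory metrics for that container
   cat /sys/fs/cgroup/memory/lxc/$LONGID/memory.stat
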
Metrics from Cgroups: Memory, CPU, Block IO
-------------------------------------------

For each subsystem (memory, CPU, and block I/O), you will find one or
more pseudo-files containing statistics.

Memory Metrics: ``memory.stat``
...............................

Memory metrics are found in the "memory" cgroup. Note that the memory
control group adds a little overhead, because it does very
fine-grained accounting of the memory usage on your host. Therefore,
many distros chose to not enable it by default. Generally, to enable
it, all you have to do is to add some kernel command-line parameters:
``cgroup_enable=memory swapaccount=1``.

The metrics are in the pseudo-file ``memory.stat``. Here is what it
will look like:

::

    cache 11492564992
    rss 1930993664
    mapped_file 306728960
    pgpgin 406632648
    pgpgout 403355412
    swap 0
    pgfault 728281223
    pgmajfault 1724
    inactive_anon 46608384
    active_anon 1884520448
    inactive_file 7003344896
    active_file 4489052160
    unevictable 32768
    hierarchical_memory_limit 9223372036854775807
    hierarchical_memsw_limit 9223372036854775807
    total_cache 11492564992
    total_rss 1930993664
    total_mapped_file 306728960
    total_pgpgin 406632648
    total_pgpgout 403355412
    total_swap 0
    total_pgfault 728281223
    total_pgmajfault 1724
    total_inactive_anon 46608384
    total_active_anon 1884520448
    total_inactive_file 7003344896
    total_active_file 4489052160
    total_unevictable 32768

The first half (without the ``total_`` prefix) contains statistics
relevant to the processes within the cgroup, excluding
sub-cgroups. The second half (with the ``total_`` prefix) includes
sub-cgroups as well.

Some metrics are "gauges", i.e. values that can increase or decrease
(e.g. swap, the amount of swap space used by the members of the
cgroup). Some others are "counters", i.e. values that can only go up,
because they represent occurrences of a specific event (e.g. pgfault,
which indicates the number of page faults which happened since the
creation of the cgroup; this number can never decrease).

cache
  the amount of memory used by the processes of this control group
  that can be associated precisely with a block on a block
  device. When you read from and write to files on disk, this amount
  will increase. This will be the case if you use "conventional" I/O
  (``open``, ``read``, ``write`` syscalls) as well as mapped files
  (with ``mmap``). It also accounts for the memory used by ``tmpfs``
  mounts, though the reasons are unclear.

rss
  the amount of memory that *doesn't* correspond to anything on
  disk: stacks, heaps, and anonymous memory maps.

mapped_file
  indicates the amount of memory mapped by the processes in the
  control group. It doesn't give you information about *how much*
  memory is used; it rather tells you *how* it is used.

pgfault and pgmajfault
  indicate the number of times that a process of the cgroup triggered
  a "page fault" and a "major fault", respectively. A page fault
  happens when a process accesses a part of its virtual memory space
  which is nonexistent or protected. The former can happen if the
  process is buggy and tries to access an invalid address (it will
  then be sent a ``SIGSEGV`` signal, typically killing it with the
  famous ``Segmentation fault`` message). The latter can happen when
  the process reads from a memory zone which has been swapped out, or
  which corresponds to a mapped file: in that case, the kernel will
  load the page from disk, and let the CPU complete the memory
  access. It can also happen when the process writes to a
  copy-on-write memory zone: likewise, the kernel will preempt the
  process, duplicate the memory page, and resume the write operation
  on the process' own copy of the page. "Major" faults happen when the
  kernel actually has to read the data from disk. When it just has to
  duplicate an existing page, or allocate an empty page, it's a
  regular (or "minor") fault.

swap
  the amount of swap currently used by the processes in this cgroup.

active_anon and inactive_anon
  the amount of *anonymous* memory that has been identified as
  respectively *active* and *inactive* by the kernel. "Anonymous"
  memory is the memory that is *not* linked to disk pages. In other
  words, that's the equivalent of the rss counter described above. In
  fact, the very definition of the rss counter is **active_anon** +
  **inactive_anon** - **tmpfs** (where tmpfs is the amount of memory
  used up by ``tmpfs`` filesystems mounted by this control
  group). Now, what's the difference between "active" and "inactive"?
  Pages are initially "active"; and at regular intervals, the kernel
  sweeps over the memory, and tags some pages as "inactive". Whenever
  they are accessed again, they are immediately retagged
  "active". When the kernel is almost out of memory, and time comes to
  swap out to disk, the kernel will swap "inactive" pages.

active_file and inactive_file
  cache memory, with *active* and *inactive* similar to the *anon*
  memory above. The exact formula is cache = **active_file** +
  **inactive_file** + **tmpfs**. The exact rules used by the kernel to
  move memory pages between active and inactive sets are different
  from the ones used for anonymous memory, but the general principle
  is the same. Note that when the kernel needs to reclaim memory, it
  is cheaper to reclaim a clean (i.e. non-modified) page from this pool,
  since it can be reclaimed immediately (while anonymous pages and
  dirty/modified pages have to be written to disk first).

unevictable
  the amount of memory that cannot be reclaimed; generally, it will
  account for memory that has been "locked" with ``mlock``. It is
  often used by crypto frameworks to make sure that secret keys and
  other sensitive material never gets swapped out to disk.

memory and memsw limits
  These are not really metrics, but a reminder of the limits applied
  to this cgroup. The first one indicates the maximum amount of
  physical memory that can be used by the processes of this control
  group; the second one indicates the maximum amount of RAM+swap.

Accounting for memory in the page cache is very complex. If two
processes in different control groups both read the same file
(ultimately relying on the same blocks on disk), the corresponding
memory charge will be split between the control groups. It's nice, but
it also means that when a cgroup is terminated, it could increase the
memory usage of another cgroup, because they are not splitting the
cost anymore for those memory pages.
CPU metrics: ``cpuacct.stat``
.............................

Now that we've covered memory metrics, everything else will look very
simple in comparison. CPU metrics will be found in the ``cpuacct``
controller.

For each container, you will find a pseudo-file ``cpuacct.stat``,
containing the CPU usage accumulated by the processes of the
container, broken down between ``user`` and ``system`` time. If you're
not familiar with the distinction, ``user`` is the time during which
the processes were in direct control of the CPU (i.e. executing
process code), and ``system`` is the time during which the CPU was
executing system calls on behalf of those processes.

Those times are expressed in ticks of 1/100th of a second. Actually,
they are expressed in "user jiffies". There are ``USER_HZ``
*"jiffies"* per second, and on x86 systems, ``USER_HZ`` is 100. This
used to map exactly to the number of scheduler "ticks" per second; but
with the advent of higher frequency scheduling, as well as `tickless
kernels <http://lwn.net/Articles/549580/>`_, the number of kernel
ticks wasn't relevant anymore. It stuck around anyway, mainly for
legacy and compatibility reasons.
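
For example, to read those counters and convert the ticks into seconds (a sketch assuming the same ``$LONGID`` variable as above and ``USER_HZ`` = 100)::

   # user and system time, in USER_HZ ticks
   cat /sys/fs/cgroup/cpuacct/lxc/$LONGID/cpuacct.stat

   # the same, converted to seconds
   awk '{ printf "%s %.2f seconds\n", $1, $2 / 100 }' \
       /sys/fs/cgroup/cpuacct/lxc/$LONGID/cpuacct.stat
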
Block I/O metrics
.................

Block I/O is accounted in the ``blkio`` controller. Different metrics
are scattered across different files. While you can find in-depth
details in the `blkio-controller
<https://www.kernel.org/doc/Documentation/cgroups/blkio-controller.txt>`_
file in the kernel documentation, here is a short list of the most
relevant ones:

blkio.sectors
  contain the number of 512-bytes sectors read and written by the
  processes member of the cgroup, device by device. Reads and writes
  are merged in a single counter.

blkio.io_service_bytes
  indicates the number of bytes read and written by the cgroup. It has
  4 counters per device, because for each device, it differentiates
  between synchronous vs. asynchronous I/O, and reads vs. writes.

blkio.io_serviced
  the number of I/O operations performed, regardless of their size. It
  also has 4 counters per device.

blkio.io_queued
  indicates the number of I/O operations currently queued for this
  cgroup. In other words, if the cgroup isn't doing any I/O, this will
  be zero. Note that the opposite is not true. In other words, if
  there is no I/O queued, it does not mean that the cgroup is idle
  (I/O-wise). It could be doing purely synchronous reads on an
  otherwise quiescent device, which is therefore able to handle them
  immediately, without queuing. Also, while it is helpful to figure
  out which cgroup is putting stress on the I/O subsystem, keep in
  mind that it is a relative quantity. Even if a process group does
  not perform more I/O, its queue size can increase just because the
  device load increases because of other devices.
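
For example (again assuming ``$LONGID`` from above; on some kernels these files are only populated when the CFQ I/O scheduler is in use, so treat this as a sketch)::

   # bytes read and written, per device, split by operation type
   cat /sys/fs/cgroup/blkio/lxc/$LONGID/blkio.io_service_bytes

   # number of I/O operations, per device, regardless of size
   cat /sys/fs/cgroup/blkio/lxc/$LONGID/blkio.io_serviced
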
Network Metrics
---------------

Network metrics are not exposed directly by control groups. There is a
good explanation for that: network interfaces exist within the context
of *network namespaces*. The kernel could probably accumulate metrics
about packets and bytes sent and received by a group of processes, but
those metrics wouldn't be very useful. You want per-interface metrics
(because traffic happening on the local ``lo`` interface doesn't
really count). But since processes in a single cgroup can belong to
multiple network namespaces, those metrics would be harder to
interpret: multiple network namespaces means multiple ``lo``
interfaces, potentially multiple ``eth0`` interfaces, etc.; so this is
why there is no easy way to gather network metrics with control
groups.

Instead we can gather network metrics from other sources:

IPtables
........

IPtables (or rather, the netfilter framework for which iptables is
just an interface) can do some serious accounting.

For instance, you can set up a rule to account for the outbound HTTP
traffic on a web server:

::

   iptables -I OUTPUT -p tcp --sport 80

There is no ``-j`` or ``-g`` flag, so the rule will just count matched
packets and go to the following rule.

Later, you can check the values of the counters, with:

::

   iptables -nxvL OUTPUT

Technically, ``-n`` is not required, but it will prevent iptables from
doing DNS reverse lookups, which are probably useless in this
scenario.

Counters include packets and bytes. If you want to set up metrics for
container traffic like this, you could execute a ``for`` loop to add
two ``iptables`` rules per container IP address (one in each
direction), in the ``FORWARD`` chain. This will only meter traffic
going through the NAT layer; you will also have to add traffic going
through the userland proxy.
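
A sketch of that loop (the two addresses are placeholders; substitute the actual IP addresses of your containers)::

   for ip in 172.17.0.2 172.17.0.3; do
       # count packets/bytes from and to each container;
       # like the HTTP rule above, no -j target: these rules only count
       iptables -I FORWARD -s $ip
       iptables -I FORWARD -d $ip
   done

   # read the counters back
   iptables -nxvL FORWARD
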
Then, you will need to check those counters on a regular basis. If you
happen to use ``collectd``, there is a nice plugin to automate
iptables counters collection.

Interface-level counters
........................

Since each container has a virtual Ethernet interface, you might want
to check directly the TX and RX counters of this interface. You will
notice that each container is associated with a virtual Ethernet
interface in your host, with a name like ``vethKk8Zqi``. Figuring out
which interface corresponds to which container is, unfortunately,
difficult.

But for now, the best way is to check the metrics *from within the
containers*. To accomplish this, you can run an executable from the
host environment within the network namespace of a container using
**ip-netns magic**.

The ``ip-netns exec`` command will let you execute any program
(present in the host system) within any network namespace visible to
the current process. This means that your host will be able to enter
the network namespace of your containers, but your containers won't be
able to access the host, nor their sibling containers. Containers will
be able to “see” and affect their sub-containers, though.

The exact format of the command is::

   ip netns exec <nsname> <command...>

For example::

   ip netns exec mycontainer netstat -i

``ip netns`` finds the "mycontainer" container by using namespaces
pseudo-files. Each process belongs to one network namespace, one PID
namespace, one ``mnt`` namespace, etc., and those namespaces are
materialized under ``/proc/<pid>/ns/``. For example, the network
namespace of PID 42 is materialized by the pseudo-file
``/proc/42/ns/net``.

When you run ``ip netns exec mycontainer ...``, it expects
``/var/run/netns/mycontainer`` to be one of those
pseudo-files. (Symlinks are accepted.)

In other words, to execute a command within the network namespace of a
container, we need to:

* Find out the PID of any process within the container that we want to
  investigate;
* Create a symlink from ``/var/run/netns/<somename>`` to
  ``/proc/<thepid>/ns/net``
* Execute ``ip netns exec <somename> ....``

Please review :ref:`run_findpid` to learn how to find the cgroup of a
process running in the container of which you want to measure network
usage. From there, you can examine the pseudo-file named ``tasks``,
which contains the PIDs that are in the control group (i.e. in the
container). Pick any one of them.

Putting everything together, if the "short ID" of a container is held
in the environment variable ``$CID``, then you can do this::

   TASKS=/sys/fs/cgroup/devices/$CID*/tasks
   PID=$(head -n 1 $TASKS)
   mkdir -p /var/run/netns
   ln -sf /proc/$PID/ns/net /var/run/netns/$CID
   ip netns exec $CID netstat -i


Tips for high-performance metric collection
-------------------------------------------

Note that running a new process each time you want to update metrics
is (relatively) expensive. If you want to collect metrics at high
resolutions, and/or over a large number of containers (think 1000
containers on a single host), you do not want to fork a new process
each time.

Here is how to collect metrics from a single process. You will have to
write your metric collector in C (or any language that lets you do
low-level system calls). You need to use a special system call,
``setns()``, which lets the current process enter any arbitrary
namespace. It requires, however, an open file descriptor to the
namespace pseudo-file (remember: that’s the pseudo-file in
``/proc/<pid>/ns/net``).

However, there is a catch: you must not keep this file descriptor
open. If you do, when the last process of the control group exits, the
namespace will not be destroyed, and its network resources (like the
virtual interface of the container) will stay around forever (or
until you close that file descriptor).

The right approach would be to keep track of the first PID of each
container, and re-open the namespace pseudo-file each time.
|
||||
-----------------------------------------
|
||||
|
||||
Sometimes, you do not care about real time metric collection, but when
|
||||
a container exits, you want to know how much CPU, memory, etc. it has
|
||||
used.
|
||||
|
||||
Docker makes this difficult because it relies on ``lxc-start``, which
|
||||
carefully cleans up after itself, but it is still possible. It is
|
||||
usually easier to collect metrics at regular intervals (e.g. every
|
||||
minute, with the collectd LXC plugin) and rely on that instead.
|
||||
|
||||
But, if you'd still like to gather the stats when a container stops,
|
||||
here is how:
|
||||
|
||||
For each container, start a collection process, and move it to the
|
||||
control groups that you want to monitor by writing its PID to the
|
||||
tasks file of the cgroup. The collection process should periodically
|
||||
re-read the tasks file to check if it's the last process of the
|
||||
control group. (If you also want to collect network statistics as
|
||||
explained in the previous section, you should also move the process to
|
||||
the appropriate network namespace.)
|
||||
|
||||
When the container exits, ``lxc-start`` will try to delete the control
|
||||
groups. It will fail, since the control group is still in use; but
|
||||
that’s fine. You process should now detect that it is the only one
|
||||
remaining in the group. Now is the right time to collect all the
|
||||
metrics you need!
|
||||
|
||||
Finally, your process should move itself back to the root control
|
||||
group, and remove the container control group. To remove a control
|
||||
group, just ``rmdir`` its directory. It's counter-intuitive to
|
||||
``rmdir`` a directory as it still contains files; but remember that
|
||||
this is a pseudo-filesystem, so usual rules don't apply. After the
|
||||
cleanup is done, the collection process can exit safely.
|
||||
|
|
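
Here is a minimal shell sketch of that procedure, for the memory cgroup only (it assumes the ``$LONGID`` cgroup path used earlier; a real collector would snapshot every metric file it cares about before cleaning up)::

   CGROUP=/sys/fs/cgroup/memory/lxc/$LONGID

   # join the container's cgroup, so the group survives lxc-start's cleanup
   echo $$ > $CGROUP/tasks

   # wait until we are the last process in the group
   while [ "$(wc -l < $CGROUP/tasks)" -gt 1 ]; do
       sleep 1
   done

   # collect the final metrics, move back to the root cgroup, clean up
   cat $CGROUP/memory.stat
   echo $$ > /sys/fs/cgroup/memory/tasks
   rmdir $CGROUP
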
@@ -62,7 +62,7 @@ master_doc = 'toctree'

# General information about the project.
project = u'Docker'
copyright = u'2013, Team Docker'
copyright = u'2014 Docker, Inc.'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the

@@ -175,7 +175,7 @@ html_show_sourcelink = False
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the

@@ -235,8 +235,10 @@ latex_documents = [
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('commandline/cli', 'docker', u'Docker Documentation',
     [u'Team Docker'], 1)
    ('reference/commandline/cli', 'docker', u'Docker CLI Documentation',
     [u'Team Docker'], 1),
    ('reference/builder', 'Dockerfile', u'Dockerfile Documentation',
     [u'Team Docker'], 5),
]

# If true, show URL addresses after external links.

@@ -41,7 +41,7 @@ This time, we're requesting shared access to ``$COUCH1``'s volumes.

.. code-block:: bash

    COUCH2=$(sudo docker run -d -p 5984 -volumes-from $COUCH1 shykes/couchdb:2013-05-03)
    COUCH2=$(sudo docker run -d -p 5984 --volumes-from $COUCH1 shykes/couchdb:2013-05-03)

Browse data on the second database
----------------------------------

@@ -9,25 +9,23 @@ Hello World

.. _running_examples:

Running the Examples
====================
Check your Docker install
-------------------------

All the examples assume your machine is running the ``docker`` daemon. To
run the ``docker`` daemon in the background, simply type:
This guide assumes you have a working installation of Docker. To check
your Docker install, run the following command:

.. code-block:: bash

   sudo docker -d &
   # Check that you have a working install
   docker info

Now you can run Docker in client mode: by default all commands will be
forwarded to the ``docker`` daemon via a protected Unix socket, so you
must run as the ``root`` or via the ``sudo`` command.
If you get ``docker: command not found`` or something like
``/var/lib/docker/repositories: permission denied`` you may have an incomplete
Docker installation or insufficient privileges to access docker on your machine.

.. code-block:: bash
Please refer to :ref:`installation_list` for installation instructions.

   sudo docker help

----

.. _hello_world:


@@ -72,10 +70,12 @@ See the example in action

.. raw:: html

    <div style="margin-top:10px;">
    <iframe width="560" height="350" src="http://ascii.io/a/2603/raw" frameborder="0"></iframe>
    </div>

    <iframe width="560" height="400" frameborder="0"
            sandbox="allow-same-origin allow-scripts"
            srcdoc="<body><script type="text/javascript"
            src="https://asciinema.org/a/2603.js"
            id="asciicast-2603" async></script></body>">
    </iframe>

----


@@ -88,9 +88,7 @@ Hello World Daemon

And now for the most boring daemon ever written!

This example assumes you have Docker installed and the Ubuntu
image already imported with ``docker pull ubuntu``. We will use the Ubuntu
image to run a simple hello world daemon that will just print hello
We will use the Ubuntu image to run a simple hello world daemon that will just print hello
world to standard out every second. It will continue to do this until
we stop it.


@@ -167,9 +165,12 @@ See the example in action

.. raw:: html

    <div style="margin-top:10px;">
    <iframe width="560" height="350" src="http://ascii.io/a/2562/raw" frameborder="0"></iframe>
    </div>
    <iframe width="560" height="400" frameborder="0"
            sandbox="allow-same-origin allow-scripts"
            srcdoc="<body><script type="text/javascript"
            src="https://asciinema.org/a/2562.js"
            id="asciicast-2562" async></script></body>">
    </iframe>

The next example in the series is a :ref:`python_web_app` example, or
you could skip to any of the other examples:

@@ -43,7 +43,7 @@ container. The ``BUILD_JOB`` environment variable will be set with the new container
    [...]

While this container is running, we can attach to the new container to
see what is going on. The flag ``-sig-proxy`` set as ``false`` allows you to connect and
see what is going on. The flag ``--sig-proxy`` set as ``false`` allows you to connect and
disconnect (Ctrl-C) to it without stopping the container.

.. code-block:: bash

@@ -107,8 +107,11 @@ See the example in action

.. raw:: html

    <div style="margin-top:10px;">
    <iframe width="720" height="350" src="http://ascii.io/a/2573/raw" frameborder="0"></iframe>
    </div>
    <iframe width="720" height="400" frameborder="0"
            sandbox="allow-same-origin allow-scripts"
            srcdoc="<body><script type="text/javascript"
            src="https://asciinema.org/a/2573.js"
            id="asciicast-2573" async></script></body>">
    </iframe>

Continue to :ref:`running_ssh_service`.

@@ -44,7 +44,7 @@ use a container link to provide access to our Redis database.

.. code-block:: bash

    sudo docker run -name redis -d <your username>/redis
    sudo docker run --name redis -d <your username>/redis

Create your web application container
-------------------------------------

@@ -56,7 +56,7 @@ Redis instance running inside that container to only this container.

.. code-block:: bash

    sudo docker run -link redis:db -i -t ubuntu:12.10 /bin/bash
    sudo docker run --link redis:db -i -t ubuntu:12.10 /bin/bash

Once inside our freshly created container we need to install Redis to get the
``redis-cli`` binary to test our connection.

@@ -25,9 +25,12 @@ smooth, but it gives you a good idea.

.. raw:: html

    <div style="margin-top:10px;">
    <iframe width="800" height="400" src="http://ascii.io/a/2637/raw" frameborder="0"></iframe>
    </div>
    <iframe width="815" height="450" frameborder="0"
            sandbox="allow-same-origin allow-scripts"
            srcdoc="<body><script type="text/javascript"
            src="https://asciinema.org/a/2637.js"
            id="asciicast-2637" async></script></body>">
    </iframe>

You can also get this sshd container by using:

|
|||
command=/usr/sbin/sshd -D
|
||||
|
||||
[program:apache2]
|
||||
command=/bin/bash -c "source /etc/apache2/envvars && /usr/sbin/apache2 -DFOREGROUND"
|
||||
command=/bin/bash -c "source /etc/apache2/envvars && exec /usr/sbin/apache2 -DFOREGROUND"
|
||||
|
||||
The ``supervisord.conf`` configuration file contains directives that configure
|
||||
Supervisor and the processes it manages. The first block ``[supervisord]``
|
||||
|
|
|
@ -26,7 +26,7 @@ Does Docker run on Mac OS X or Windows?
|
|||
Not at this time, Docker currently only runs on Linux, but you can
|
||||
use VirtualBox to run Docker in a virtual machine on your box, and
|
||||
get the best of both worlds. Check out the
|
||||
:ref:`install_using_vagrant` and :ref:`windows` installation
|
||||
:ref:`macosx` and :ref:`windows` installation
|
||||
guides.
|
||||
|
||||
How do containers compare to virtual machines?
|
||||
|
@ -172,8 +172,9 @@ Linux:
|
|||
- Fedora 19/20+
|
||||
- RHEL 6.5+
|
||||
- Centos 6+
|
||||
- Gento
|
||||
- Gentoo
|
||||
- ArchLinux
|
||||
- openSUSE 12.3+
|
||||
|
||||
Cloud:
|
||||
|
||||
|
@ -195,7 +196,7 @@ Where can I find more answers?
|
|||
* `Docker user mailinglist`_
|
||||
* `Docker developer mailinglist`_
|
||||
* `IRC, docker on freenode`_
|
||||
* `Github`_
|
||||
* `GitHub`_
|
||||
* `Ask questions on Stackoverflow`_
|
||||
* `Join the conversation on Twitter`_
|
||||
|
||||
|
|
|
@@ -5,26 +5,21 @@
Introduction
------------

``docker``, the Linux Container Runtime, runs Unix processes with
strong guarantees of isolation across servers. Your software runs
repeatably everywhere because its :ref:`container_def` includes any
dependencies.
Docker is an open-source engine to easily create lightweight, portable,
self-sufficient containers from any application. The same container that a
developer builds and tests on a laptop can run at scale, in production, on
VMs, bare metal, OpenStack clusters, or any major infrastructure provider.

``docker`` runs three ways:
Common use cases for Docker include:

* as a daemon to manage LXC containers on your :ref:`Linux host
  <kernel>` (``sudo docker -d``)
* as a :ref:`CLI <cli>` which talks to the daemon's `REST API
  <api/docker_remote_api>`_ (``docker run ...``)
* as a client of :ref:`Repositories <working_with_the_repository>`
  that let you share what you've built (``docker pull, docker
  commit``).
- Automating the packaging and deployment of web applications.
- Automated testing and continuous integration/deployment.
- Deploying and scaling databases and backend services in a service-oriented environment.
- Building custom PaaS environments, either from scratch or as an extension of off-the-shelf platforms like OpenShift or Cloud Foundry.

Each use of ``docker`` is documented here. The features of Docker are
currently in active development, so this documentation will change
frequently.
Please note Docker is currently under heavy development. It should not be used in production (yet).

For an overview of Docker, please see the `Introduction
For a high-level overview of Docker, please see the `Introduction
<http://www.docker.io/learn_more/>`_. When you're ready to start working with
Docker, we have a `quick start <http://www.docker.io/gettingstarted>`_
and a more in-depth guide to :ref:`ubuntu_linux` and other

@@ -71,21 +71,3 @@ To start on system boot:
::

    sudo systemctl enable docker

Network Configuration
---------------------

IPv4 packet forwarding is disabled by default on Arch, so internet access from inside
the container may not work.

To enable the forwarding, run as root on the host system:

::

    sysctl net.ipv4.ip_forward=1

And, to make it persistent across reboots, enable it on the host's **/etc/sysctl.d/docker.conf**:

::

    net.ipv4.ip_forward=1

@@ -12,19 +12,37 @@ Binaries
**This instruction set is meant for hackers who want to try out Docker
on a variety of environments.**

Before following these directions, you should really check if a packaged version
of Docker is already available for your distribution. We have packages for many
distributions, and more keep showing up all the time!
Before following these directions, you should really check if a
packaged version of Docker is already available for your distribution.
We have packages for many distributions, and more keep showing up all
the time!

Check Your Kernel
-----------------

Your host's Linux kernel must meet the Docker :ref:`kernel`

Check for User Space Tools
Check runtime dependencies
--------------------------

You must have a working installation of the `lxc <http://linuxcontainers.org>`_ utilities and library.
.. DOC COMMENT: this should be kept in sync with
   https://github.com/dotcloud/docker/blob/master/hack/PACKAGERS.md#runtime-dependencies

To run properly, docker needs the following software to be installed at runtime:

- iproute2 version 3.5 or later (build after 2012-05-21), and
  specifically the "ip" utility
- iptables version 1.4 or later
- The LXC utility scripts (http://lxc.sourceforge.net) version 0.8 or later
- Git version 1.7 or later
- XZ Utils 4.9 or later


Check kernel dependencies
-------------------------

Docker in daemon mode has specific kernel requirements. For details,
check your distribution in :ref:`installation_list`.

Note that Docker also has a client mode, which can run on virtually
any linux kernel (it even builds on OSX!).


Get the docker binary:
----------------------

@@ -44,6 +62,40 @@ Run the docker daemon
   sudo ./docker -d &


.. _dockergroup:

Giving non-root access
----------------------

The ``docker`` daemon always runs as the root user, and since Docker
version 0.5.2, the ``docker`` daemon binds to a Unix socket instead of
a TCP port. By default that Unix socket is owned by the user *root*,
and so, by default, you can access it with ``sudo``.

Starting in version 0.5.3, if you (or your Docker installer) create a
Unix group called *docker* and add users to it, then the ``docker``
daemon will make the ownership of the Unix socket read/writable by the
*docker* group when the daemon starts. The ``docker`` daemon must
always run as the root user, but if you run the ``docker`` client as a
user in the *docker* group then you don't need to add ``sudo`` to all
the client commands.

.. warning:: The *docker* group is root-equivalent.
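
For example (a sketch; group-management commands and the daemon restart command differ between distributions):

.. code-block:: bash

   # create the docker group and add yourself to it
   sudo groupadd docker
   sudo gpasswd -a $USER docker

   # restart the daemon, then log out and back in
   # so the new group membership takes effect
   sudo service docker restart
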

Upgrades
--------

To upgrade your manual installation of Docker, first kill the docker
daemon:

.. code-block:: bash

   killall docker

Then follow the regular installation steps.


Run your first container!
-------------------------

@@ -60,21 +60,3 @@ To start on system boot:
::

    sudo systemctl enable lxc-docker

Network Configuration
---------------------

IPv4 packet forwarding is disabled by default on FrugalWare, so Internet access from inside
the container may not work.

To enable packet forwarding, run the following command as the ``root`` user on the host system:

::

    sysctl net.ipv4.ip_forward=1

And, to make it persistent across reboots, add the following to a file named **/etc/sysctl.d/docker.conf**:

::

    net.ipv4.ip_forward=1

@@ -82,19 +82,3 @@ To start on system boot:
.. code-block:: bash

    sudo systemctl enable docker.service

Network Configuration
^^^^^^^^^^^^^^^^^^^^^

IPv4 packet forwarding is disabled by default, so internet access from inside
the container will not work unless ``net.ipv4.ip_forward`` is enabled:

.. code-block:: bash

    sudo sysctl -w net.ipv4.ip_forward=1

Or, to enable it more permanently:

.. code-block:: bash

    echo net.ipv4.ip_forward = 1 | sudo tee /etc/sysctl.d/docker.conf

@ -43,21 +43,14 @@

   $ gcutil ssh docker-playground
   docker-playground:~$

5. Enable IP forwarding:

   .. code-block:: bash

      docker-playground:~$ echo net.ipv4.ip_forward=1 | sudo tee /etc/sysctl.d/99-docker.conf
      docker-playground:~$ sudo sysctl --system

6. Install the latest Docker release and configure it to start when the instance boots:
5. Install the latest Docker release and configure it to start when the instance boots:

   .. code-block:: bash

      docker-playground:~$ curl get.docker.io | bash
      docker-playground:~$ sudo update-rc.d docker defaults

7. If running in zones ``us-central1-a``, ``europe-west1-1``, or ``europe-west1-b``, the docker daemon must be started with the ``-mtu`` flag. Without the flag, you may experience intermittent network pauses.
6. If running in zones ``us-central1-a``, ``europe-west1-1``, or ``europe-west1-b``, the docker daemon must be started with the ``-mtu`` flag. Without the flag, you may experience intermittent network pauses.
   `See this issue <https://code.google.com/p/google-compute-engine/issues/detail?id=57>`_ for more details.

   .. code-block:: bash
@ -65,7 +58,7 @@

      docker-playground:~$ echo 'DOCKER_OPTS="$DOCKER_OPTS -mtu 1460"' | sudo tee -a /etc/default/docker
      docker-playground:~$ sudo service docker restart

8. Start a new container:
7. Start a new container:

   .. code-block:: bash
@ -22,13 +22,11 @@ Contents:

   fedora
   archlinux
   gentoolinux
   openSUSE
   frugalware
   vagrant
   mac
   windows
   amazon
   rackspace
   google
   kernel
   binaries
   security
   upgrading
@ -1,152 +0,0 @@

:title: Kernel Requirements
:description: Kernel support
:keywords: kernel requirements, kernel support, docker, installation, cgroups, namespaces

.. _kernel:

Kernel Requirements
===================

In short, Docker has the following kernel requirements:

- Linux version 3.8 or above.

- Cgroups and namespaces must be enabled.

*Note: as of 0.7 docker no longer requires AUFS. AUFS support is still available as an optional driver.*

The officially supported kernel is the one recommended by the
:ref:`ubuntu_linux` installation path. It is the one that most developers
will use, and the one that receives the most attention from the core
contributors. If you decide to go with a different kernel and hit a bug,
please try to reproduce it with the official kernels first.

If you cannot or do not want to use the "official" kernels,
here is some technical background about the features (both optional and
mandatory) that docker needs to run successfully.


Linux version 3.8 or above
--------------------------

Kernel versions 3.2 to 3.5 are not stable when used with docker.
In some circumstances, you will experience kernel "oopses", or even crashes.
The symptoms include:

- a container being killed in the middle of an operation (e.g. an ``apt-get``
  command doesn't complete);
- kernel messages mentioning calls to ``mntput`` or
  ``d_hash_and_lookup``;
- a kernel crash causing the machine to freeze for a few minutes, or even
  completely.

Additionally, kernels prior to 3.4 did not implement ``reboot_pid_ns``,
which means that the ``reboot()`` syscall could reboot the host machine,
instead of terminating the container. To work around that problem,
LXC userland tools (since version 0.8) automatically drop the ``SYS_BOOT``
capability when necessary. Still, if you run a pre-3.4 kernel with pre-0.8
LXC tools, be aware that containers can reboot the whole host! This is
not something that Docker wants to address in the short term, since you
shouldn't use kernels prior to 3.8 with Docker anyway.

While it is still possible to use older kernels for development, it is
really not advised to do so.

Docker checks the kernel version when it starts, and emits a warning if it
detects something older than 3.8.

See issue `#407 <https://github.com/dotcloud/docker/issues/407>`_ for details.


Cgroups and namespaces
----------------------

You need to enable namespaces and cgroups, to the extent of what is needed
to run LXC containers. Technically, while namespaces were introduced
in the early 2.6 kernels, we do not advise trying any kernel before 2.6.32
to run LXC containers. Note that 2.6.32 has some documented issues regarding
network namespace setup and teardown; those issues are not a risk if you
run containers in a private environment, but can lead to denial-of-service
attacks if you want to run untrusted code in your containers. For more details,
see `LP#720095 <https://bugs.launchpad.net/ubuntu/+source/linux/+bug/720095>`_.

Kernels 2.6.38, and every version since 3.2, have been deployed successfully
to run containerized production workloads. Feature-wise, there is no huge
improvement between 2.6.38 and 3.6 (as far as docker is concerned!).


Extra Cgroup Controllers
------------------------

Most control groups can be enabled or disabled individually. For instance,
you can decide that you do not want to compile support for the CPU or memory
controller. In some cases, the feature can be enabled or disabled at boot
time. It is worth mentioning that some distributions (like Debian) disable
"expensive" features, like the memory controller, because they can have
a significant performance impact.

In the specific case of the memory cgroup, docker will detect if the cgroup
is available or not. If it's not, it will print a warning, and it won't
use the feature. If you want to enable that feature -- read on!


Memory and Swap Accounting on Debian/Ubuntu
-------------------------------------------

If you use Debian or Ubuntu kernels, and want to enable memory and swap
accounting, you must add the following command-line parameters to your kernel::

    cgroup_enable=memory swapaccount=1

On Debian or Ubuntu systems, if you use the default GRUB bootloader, you can
add those parameters by editing ``/etc/default/grub`` and extending
``GRUB_CMDLINE_LINUX``. Look for the following line::

    GRUB_CMDLINE_LINUX=""

And replace it with the following one::

    GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"

Then run ``update-grub``, and reboot.

Details
-------

To automatically check some of the requirements below, you can run `lxc-checkconfig`.

Networking:

- CONFIG_BRIDGE
- CONFIG_NETFILTER_XT_MATCH_ADDRTYPE
- CONFIG_NF_NAT
- CONFIG_NF_NAT_IPV4
- CONFIG_NF_NAT_NEEDED

LVM:

- CONFIG_BLK_DEV_DM
- CONFIG_DM_THIN_PROVISIONING
- CONFIG_EXT4_FS

Namespaces:

- CONFIG_NAMESPACES
- CONFIG_UTS_NS
- CONFIG_IPC_NS
- CONFIG_UID_NS
- CONFIG_PID_NS
- CONFIG_NET_NS

Cgroups:

- CONFIG_CGROUPS

Cgroup controllers (optional but highly recommended):

- CONFIG_CGROUP_CPUACCT
- CONFIG_BLK_CGROUP
- CONFIG_MEMCG
- CONFIG_MEMCG_SWAP
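
As a quick sanity check, many of the options above can be grepped out of
the kernel configuration; a sketch, assuming the config is exposed at the
usual boot-time location (paths vary by distribution):

.. code-block:: bash

   # Check a few of the options above against the running kernel's config
   # (some kernels expose it at /proc/config.gz instead; use zgrep there).
   CONFIG=/boot/config-$(uname -r)
   for opt in CONFIG_NAMESPACES CONFIG_CGROUPS CONFIG_BRIDGE CONFIG_MEMCG; do
       grep "^${opt}=" "$CONFIG" || echo "${opt} is not set"
   done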
160 docs/sources/installation/mac.rst Normal file
@ -0,0 +1,160 @@

:title: Requirements and Installation on Mac OS X 10.6 Snow Leopard
:description: Please note this project is currently under heavy development. It should not be used in production.
:keywords: Docker, Docker documentation, requirements, virtualbox, ssh, linux, os x, osx, mac

.. _macosx:

========
Mac OS X
========

.. note::

   These instructions are available with the new release of Docker
   (version 0.8). However, they are subject to change.

.. include:: install_header.inc

Docker is supported on Mac OS X 10.6 "Snow Leopard" or newer.

How To Install Docker On Mac OS X
=================================

VirtualBox
----------

Docker on OS X needs VirtualBox to run. To begin with, head over to the
`VirtualBox Download Page`_ and get the tool for ``OS X hosts x86/amd64``.

.. _VirtualBox Download Page: https://www.virtualbox.org/wiki/Downloads

Once the download is complete, open the disk image, run the setup file
(i.e. ``VirtualBox.pkg``) and install VirtualBox. Do not simply copy the
package without running the installer.

boot2docker
-----------

`boot2docker`_ provides a handy script to easily manage the VM running the
``docker`` daemon. It also takes care of the installation of the OS image
that is used for the job.

.. _GitHub page: https://github.com/steeve/boot2docker

Open up a new terminal window, if you have not already.

Run the following commands to get boot2docker:

.. code-block:: bash

   # Enter the installation directory
   cd ~/bin

   # Get the file
   curl https://raw.github.com/steeve/boot2docker/master/boot2docker > boot2docker

   # Mark it executable
   chmod +x boot2docker

Docker OS X Client
------------------

The ``docker`` daemon is accessed using the ``docker`` client.

Run the following commands to get it downloaded and set up:

.. code-block:: bash

   # Get the file
   curl -o docker http://get.docker.io/builds/Darwin/x86_64/docker-latest

   # Mark it executable
   chmod +x docker

   # Set the environment variable for the docker daemon
   export DOCKER_HOST=tcp://

   # Copy the executable file
   sudo cp docker /usr/local/bin/

And that's it! Let's check out how to use it.

How To Use Docker On Mac OS X
=============================

The ``docker`` daemon (via boot2docker)
---------------------------------------

Inside the ``~/bin`` directory, run the following commands:

.. code-block:: bash

   # Initiate the VM
   ./boot2docker init

   # Run the VM (the docker daemon)
   ./boot2docker up

   # To see all available commands:
   ./boot2docker

   # Usage: ./boot2docker {init|start|up|pause|stop|restart|status|info|delete|ssh|download}

The ``docker`` client
---------------------

Once the VM with the ``docker`` daemon is up, you can use the ``docker``
client just like any other application.

.. code-block:: bash

   docker version
   # Client version: 0.7.6
   # Go version (client): go1.2
   # Git commit (client): bc3b2ec
   # Server version: 0.7.5
   # Git commit (server): c348c04
   # Go version (server): go1.2

SSH-ing The VM
--------------

If you feel the need to connect to the VM, you can simply run:

.. code-block:: bash

   ./boot2docker ssh

   # User: docker
   # Pwd:  tcuser

You can now continue with the :ref:`hello_world` example.

Learn More
==========

boot2docker:
------------

See the GitHub page for `boot2docker`_.

.. _boot2docker: https://github.com/steeve/boot2docker

If SSH complains about keys:
----------------------------

.. code-block:: bash

   ssh-keygen -R '[localhost]:2022'

About the way Docker works on Mac OS X:
---------------------------------------

Docker has two key components: the ``docker`` daemon and the ``docker``
client. The tool works by the client commanding the daemon. In order to
work and do its magic, the daemon makes use of some Linux kernel
features (e.g. LXC, namespaces etc.), which are not supported by OS X.
Therefore, the solution for getting Docker to run on OS X consists of
running it inside a lightweight virtual machine. In order to simplify
things, Docker comes with a bash script to make this whole process as
easy as possible (i.e. boot2docker).
73 docs/sources/installation/openSUSE.rst Normal file

@ -0,0 +1,73 @@

:title: Installation on openSUSE
:description: Docker installation on openSUSE.
:keywords: openSUSE, virtualbox, docker, documentation, installation

.. _openSUSE:

openSUSE
========

.. include:: install_header.inc

.. include:: install_unofficial.inc

Docker is available in **openSUSE 12.3 and later**. Please note that due to
current Docker limitations Docker is able to run only on the **64 bit**
architecture.

Installation
------------

The ``docker`` package from the `Virtualization project`_ on `OBS`_ provides
Docker on openSUSE.

To proceed with Docker installation please add the right Virtualization
repository.

.. code-block:: bash

   # openSUSE 12.3
   sudo zypper ar -f http://download.opensuse.org/repositories/Virtualization/openSUSE_12.3/ Virtualization

   # openSUSE 13.1
   sudo zypper ar -f http://download.opensuse.org/repositories/Virtualization/openSUSE_13.1/ Virtualization

Install the Docker package.

.. code-block:: bash

   sudo zypper in docker

It's also possible to install Docker using openSUSE's 1-click install. Just
visit `this`_ page, select your openSUSE version and click on the installation
link. This will add the right repository to your system and it will
also install the `docker` package.

Now that it's installed, let's start the Docker daemon.

.. code-block:: bash

   sudo systemctl start docker

If we want Docker to start at boot, we should also:

.. code-block:: bash

   sudo systemctl enable docker

The `docker` package creates a new group named `docker`. Users other than
the `root` user need to be part of this group in order to interact with the
Docker daemon.

.. code-block:: bash

   sudo usermod -G docker <username>

**Done!** Now continue with the :ref:`hello_world` example.

.. _Virtualization project: https://build.opensuse.org/project/show/Virtualization
.. _OBS: https://build.opensuse.org/
.. _this: http://software.opensuse.org/package/docker
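
One caveat on the ``usermod`` call above: plain ``-G`` replaces the user's
entire list of supplementary groups. If the account already belongs to other
groups, appending is safer (``<username>`` is a placeholder for the account
you actually use):

.. code-block:: bash

   # Append the docker group instead of replacing existing memberships
   sudo usermod -a -G docker <username>

   # Verify; the change takes effect after logging out and back in
   id <username>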
@ -35,7 +35,7 @@ Dependencies

**Linux kernel 3.8**

Due to a bug in LXC, docker works best on the 3.8 kernel. Precise
Due to a bug in LXC, Docker works best on the 3.8 kernel. Precise
comes with a 3.2 kernel, so we need to upgrade it. The kernel you'll
install when following these steps comes with AUFS built in. We also
include the generic headers to enable packages that depend on them,
@ -167,13 +167,73 @@ Type ``exit`` to exit

**Done!**, now continue with the :ref:`hello_world` example.


Giving non-root access
----------------------

The ``docker`` daemon always runs as the root user, and since Docker version
0.5.2, the ``docker`` daemon binds to a Unix socket instead of a TCP port. By
default that Unix socket is owned by the user *root*, and so, by default, you
can access it with ``sudo``.

Starting in version 0.5.3, if you (or your Docker installer) create a
Unix group called *docker* and add users to it, then the ``docker``
daemon will make the ownership of the Unix socket read/writable by the
*docker* group when the daemon starts. The ``docker`` daemon must
always run as the root user, but if you run the ``docker`` client as a user in
the *docker* group then you don't need to add ``sudo`` to all the
client commands.

.. warning:: The *docker* group is root-equivalent.

**Example:**

.. code-block:: bash

   # Add the docker group if it doesn't already exist.
   sudo groupadd docker

   # Add the connected user "${USER}" to the docker group.
   # Change the user name to match your preferred user.
   # You may have to logout and log back in again for
   # this to take effect.
   sudo gpasswd -a ${USER} docker

   # Restart the Docker daemon.
   sudo service docker restart


Upgrade
-------

To install the latest version of docker, use the standard ``apt-get`` method:

.. code-block:: bash

   # update your sources list
   sudo apt-get update

   # install the latest
   sudo apt-get install lxc-docker

Troubleshooting
^^^^^^^^^^^^^^^

On Linux Mint, the ``cgroups-lite`` package is not installed by default.
Before Docker will work correctly, you will need to install this via:

.. code-block:: bash

   sudo apt-get update && sudo apt-get install cgroups-lite

.. _ufw:

Docker and UFW
^^^^^^^^^^^^^^

Docker uses a bridge to manage container networking. By default, UFW drops all
`forwarding` traffic. As a result will you need to enable UFW forwarding:
`forwarding` traffic. As a result you will need to enable UFW forwarding:

.. code-block:: bash
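
A sketch of the usual UFW adjustment, assuming the stock
``/etc/default/ufw`` layout:

.. code-block:: bash

   # Switch UFW's default forward policy from DROP to ACCEPT
   sudo sed -i 's/DEFAULT_FORWARD_POLICY="DROP"/DEFAULT_FORWARD_POLICY="ACCEPT"/' /etc/default/ufw

   # Reload UFW so the new policy takes effect
   sudo ufw reload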
@ -1,73 +0,0 @@

:title: Upgrading
:description: These instructions are for upgrading Docker
:keywords: Docker, Docker documentation, upgrading docker, upgrade

.. _upgrading:

Upgrading
=========

The technique for upgrading ``docker`` to a newer version depends on
how you installed ``docker``.

.. versionadded:: 0.5.3
   You may wish to add a ``docker`` group to your system to avoid using sudo with ``docker``. (see :ref:`dockergroup`)


After ``apt-get``
-----------------

If you installed Docker using ``apt-get`` or Vagrant, then you should
use ``apt-get`` to upgrade.

.. versionadded:: 0.6
   Add Docker repository information to your system first.

.. code-block:: bash

   # Add the Docker repository key to your local keychain
   sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9

   # Add the Docker repository to your apt sources list.
   sudo sh -c "echo deb https://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list"

   # update your sources list
   sudo apt-get update

   # install the latest
   sudo apt-get install lxc-docker


After manual installation
-------------------------

If you installed the Docker :ref:`binaries` then follow these steps:

.. code-block:: bash

   # kill the running docker daemon
   killall docker

.. code-block:: bash

   # get the latest binary
   wget http://get.docker.io/builds/Linux/x86_64/docker-latest -O docker

   # make it executable
   chmod +x docker

Start docker in daemon mode (``-d``) and disconnect, running the
daemon in the background (``&``). Starting as ``./docker`` ensures you
run the version in your current directory rather than a version
which might reside in your path.

.. code-block:: bash

   # start the new version
   sudo ./docker -d &

Alternatively you can replace the docker binary in ``/usr/local/bin``.
@ -1,80 +0,0 @@

:title: Using Vagrant (Mac, Linux)
:description: This guide will set up a new virtualbox virtual machine with docker installed on your computer.
:keywords: Docker, Docker documentation, virtualbox, vagrant, git, ssh, putty, cygwin

.. _install_using_vagrant:

Using Vagrant (Mac, Linux)
==========================

This guide will set up a new VirtualBox virtual machine with Docker
installed on your computer. This works on most operating systems,
including Mac OS X, Windows, Linux, FreeBSD and others. If you can
install these and have at least 400 MB of RAM to spare, you should be good.

Install Vagrant and VirtualBox
------------------------------

.. include:: install_header.inc

.. include:: install_unofficial.inc

#. Install VirtualBox from https://www.virtualbox.org/ (or use your
   package manager)
#. Install Vagrant from http://www.vagrantup.com/ (or use your package
   manager)
#. Install Git if you have not installed it before; check whether it is
   installed by running ``git`` in a terminal window

Spin it up
----------

1. Fetch the Docker sources (this includes the ``Vagrantfile`` for
   machine setup).

   .. code-block:: bash

      git clone https://github.com/dotcloud/docker.git

2. Change directory to docker

   .. code-block:: bash

      cd docker

3. Run Vagrant from the sources directory

   .. code-block:: bash

      vagrant up

Vagrant will:

* Download the 'official' Precise64 base Ubuntu virtual machine image from vagrantup.com
* Boot this image in VirtualBox
* Follow the official :ref:`ubuntu_linux` installation path

You now have an Ubuntu virtual machine running with Docker pre-installed.

Connect
-------

To access the VM and use Docker, run ``vagrant ssh`` from the same directory as where you ran
``vagrant up``. Vagrant will connect you to the correct VM.

.. code-block:: bash

   vagrant ssh

Run
---

Now that you are in the VM, run docker:

.. code-block:: bash

   sudo docker

Continue with the :ref:`hello_world` example.
@ -4,8 +4,8 @@

.. _windows:

Using Vagrant (Windows)
=======================
Installing Docker on Windows
============================

Docker can run on Windows using a VM like VirtualBox. You then run
Linux within the VM.
|

2. Versions
===========

The current version of the API is 1.8
The current version of the API is 1.9

Calling /images/<name>/insert is the same as calling
/v1.8/images/<name>/insert
/v1.9/images/<name>/insert

You can still call an old version of the API using
/v1.0/images/<name>/insert


v1.9
****

Full Documentation
------------------

:doc:`docker_remote_api_v1.9`

What's new
----------

.. http:post:: /build

   **New!** This endpoint now takes a serialized ConfigFile which it uses to
   resolve the proper registry auth credentials for pulling the base image.
   Clients which previously implemented the version accepting an AuthConfig
   object must be updated.

v1.8
****
|

   [
     {
       "RepoTag": [
       "RepoTags": [
         "ubuntu:12.04",
         "ubuntu:precise",
         "ubuntu:latest"

@ -150,7 +168,7 @@ What's new

       "VirtualSize": 131506275
     },
     {
       "RepoTag": [
       "RepoTags": [
         "ubuntu:12.10",
         "ubuntu:quantal"
       ],

@ -643,7 +643,7 @@ List Images

   [
     {
       "RepoTag": [
       "RepoTags": [
         "ubuntu:12.04",
         "ubuntu:precise",
         "ubuntu:latest"

@ -654,7 +654,7 @@ List Images

       "VirtualSize": 131506275
     },
     {
       "RepoTag": [
       "RepoTags": [
         "ubuntu:12.10",
         "ubuntu:quantal"
       ],
1281 docs/sources/reference/api/docker_remote_api_v1.8.rst Normal file (diff suppressed because it is too large)
1281 docs/sources/reference/api/docker_remote_api_v1.9.rst Normal file (diff suppressed because it is too large)
@ -1,12 +1,12 @@

:title: Build Images (Dockerfile Reference)
:title: Dockerfile Reference
:description: Dockerfiles use a simple DSL which allows you to automate the steps you would normally manually take to create an image.
:keywords: builder, docker, Dockerfile, automation, image creation

.. _dockerbuilder:

===================================
Build Images (Dockerfile Reference)
===================================
====================
Dockerfile Reference
====================

**Docker can act as a builder** and read instructions from a text
``Dockerfile`` to automate the steps you would otherwise take manually
@ -40,9 +40,31 @@ build succeeds:

``sudo docker build -t shykes/myapp .``

The Docker daemon will run your steps one-by-one, committing the
result if necessary, before finally outputting the ID of your new
image. The Docker daemon will automatically clean up the context you
sent.
result to a new image if necessary, before finally outputting the
ID of your new image. The Docker daemon will automatically clean
up the context you sent.

Note that each instruction is run independently, and causes a new image
to be created - so ``RUN cd /tmp`` will not have any effect on the next
instructions.
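
To make that independence concrete, a tiny illustrative Dockerfile
(the file names here are hypothetical)::

    FROM ubuntu
    # The working directory change below does NOT persist:
    RUN cd /tmp
    # ...so this file is created in /, not in /tmp.
    RUN touch file.txt
    # To act inside /tmp, chain the commands in a single RUN:
    RUN cd /tmp && touch other.txt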

Whenever possible, Docker will re-use the intermediate images,
accelerating ``docker build`` significantly (indicated by ``Using cache``):

.. code-block:: bash

   $ docker build -t SvenDowideit/ambassador .
   Uploading context 10.24 kB
   Uploading context
   Step 1 : FROM docker-ut
    ---> cbba202fe96b
   Step 2 : MAINTAINER SvenDowideit@home.org.au
    ---> Using cache
    ---> 51182097be13
   Step 3 : CMD env | grep _TCP= | sed 's/.*_PORT_\([0-9]*\)_TCP=tcp:\/\/\(.*\):\(.*\)/socat TCP4-LISTEN:\1,fork,reuseaddr TCP4:\2:\3 \&/' | sh && top
    ---> Using cache
    ---> 1a5ffc17324d
   Successfully built 1a5ffc17324d

When you're done with your build, you're ready to look into
:ref:`image_push`.
@ -125,17 +147,23 @@ the generated images.

3.3 RUN
-------

``RUN <command>``
RUN has 2 forms:

The ``RUN`` instruction will execute any commands on the current image
and commit the results. The resulting committed image will be used for
the next step in the Dockerfile.
* ``RUN <command>`` (the command is run in a shell - ``/bin/sh -c``)
* ``RUN ["executable", "param1", "param2"]`` (*exec* form)

The ``RUN`` instruction will execute any commands in a new layer on top
of the current image and commit the results. The resulting committed image
will be used for the next step in the Dockerfile.

Layering ``RUN`` instructions and generating commits conforms to the
core concepts of Docker where commits are cheap and containers can be
created from any point in an image's history, much like source
control.

The *exec* form makes it possible to avoid shell string munging, and to ``RUN``
commands using a base image that does not contain ``/bin/sh``.

Known Issues (RUN)
..................
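
For instance, the two forms side by side (an illustrative pair; the *exec*
form must be valid JSON, so use double quotes)::

    # Shell form, run through /bin/sh -c:
    RUN apt-get update && apt-get install -y curl

    # Exec form, no shell involved:
    RUN ["/bin/echo", "hello", "world"]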
@ -374,6 +402,64 @@ the image.

The ``WORKDIR`` instruction sets the working directory in which
the command given by ``CMD`` is executed.

3.11 ONBUILD
------------

``ONBUILD [INSTRUCTION]``

The ``ONBUILD`` instruction adds to the image a "trigger" instruction to be
executed at a later time, when the image is used as the base for another build.
The trigger will be executed in the context of the downstream build, as if it
had been inserted immediately after the *FROM* instruction in the downstream
Dockerfile.

Any build instruction can be registered as a trigger.

This is useful if you are building an image which will be used as a base to build
other images, for example an application build environment or a daemon which may be
customized with user-specific configuration.

For example, if your image is a reusable python application builder, it will require
application source code to be added in a particular directory, and it might require
a build script to be called *after* that. You can't just call *ADD* and *RUN* now,
because you don't yet have access to the application source code, and it will be
different for each application build. You could simply provide application developers
with a boilerplate Dockerfile to copy-paste into their application, but that is
inefficient, error-prone and difficult to update because it mixes with
application-specific code.

The solution is to use *ONBUILD* to register in advance instructions to run later,
during the next build stage.

Here's how it works:

1. When it encounters an *ONBUILD* instruction, the builder adds a trigger to
   the metadata of the image being built.
   The instruction does not otherwise affect the current build.

2. At the end of the build, a list of all triggers is stored in the image manifest,
   under the key *OnBuild*. They can be inspected with *docker inspect*.

3. Later the image may be used as a base for a new build, using the *FROM* instruction.
   As part of processing the *FROM* instruction, the downstream builder looks for *ONBUILD*
   triggers, and executes them in the same order they were registered. If any of the
   triggers fail, the *FROM* instruction is aborted, which in turn causes the build
   to fail. If all triggers succeed, the *FROM* instruction completes and the build
   continues as usual.

4. Triggers are cleared from the final image after being executed. In other words
   they are not inherited by "grand-children" builds.

For example you might add something like this:

.. code-block:: bash

   [...]
   ONBUILD ADD . /app/src
   ONBUILD RUN /usr/local/bin/python-build --dir /app/src
   [...]
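
A downstream build that inherits those triggers might then be as simple as
this sketch (the base image name is hypothetical)::

    # The ONBUILD triggers registered above fire right after this FROM,
    # adding the source and running the build script before CMD is set.
    FROM example/python-builder
    CMD ["/app/src/run"]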

.. _dockerfile_examples:

4. Dockerfile Examples
@ -18,6 +18,45 @@ To list available commands, either run ``docker`` with no parameters or execute

   ...

.. _cli_options:

Types of Options
----------------

Boolean
~~~~~~~

Boolean options look like ``-d=false``. The value you see is the
default value which gets set if you do **not** use the boolean
flag. If you do call ``run -d``, that sets the opposite boolean value,
so in this case, ``true``, and so ``docker run -d`` **will** run in
"detached" mode, in the background. Other boolean options are similar
-- specifying them will set the value to the opposite of the default
value.

Multi
~~~~~

Options like ``-a=[]`` indicate they can be specified multiple times::

   docker run -a stdin -a stdout -a stderr -i -t ubuntu /bin/bash

Sometimes this can use a more complex value string, as for ``-v``::

   docker run -v /host:/container example/mysql

Strings and Integers
~~~~~~~~~~~~~~~~~~~~

Options like ``-name=""`` expect a string, and they can only be
specified once. Options like ``-c=0`` expect an integer, and they can
only be specified once.

----

Commands
--------

.. _cli_daemon:

``daemon``
@ -26,22 +65,22 @@ To list available commands, either run ``docker`` with no parameters or execute

::

   Usage of docker:
   -D=false: Enable debug mode
   -H=[unix:///var/run/docker.sock]: tcp://[host[:port]] to bind or unix://[/path/to/socket] to use. When host=[0.0.0.0], port=[4243] or path=[/var/run/docker.sock] is omitted, default values are used.
   -api-enable-cors=false: Enable CORS headers in the remote API
   -b="": Attach containers to a pre-existing network bridge; use 'none' to disable container networking
   -bip="": Use the provided CIDR notation address for the dynamically created bridge (docker0); Mutually exclusive of -b
   -d=false: Enable daemon mode
   -dns="": Force docker to use specific DNS servers
   -g="/var/lib/docker": Path to use as the root of the docker runtime
   -icc=true: Enable inter-container communication
   -ip="0.0.0.0": Default IP address to use when binding container ports
   -iptables=true: Disable docker's addition of iptables rules
   -mtu=1500: Set the containers network mtu
   -p="/var/run/docker.pid": Path to use for daemon PID file
   -r=true: Restart previously running containers
   -s="": Force the docker runtime to use a specific storage driver
   -v=false: Print version information and quit
   -D, --debug=false: Enable debug mode
   -H, --host=[]: Multiple tcp://host:port or unix://path/to/socket to bind in daemon mode, single connection otherwise. systemd socket activation can be used with fd://[socketfd].
   --api-enable-cors=false: Enable CORS headers in the remote API
   -b, --bridge="": Attach containers to a pre-existing network bridge; use 'none' to disable container networking
   --bip="": Use this CIDR notation address for the network bridge's IP, not compatible with -b
   -d, --daemon=false: Enable daemon mode
   --dns=[]: Force docker to use specific DNS servers
   -g, --graph="/var/lib/docker": Path to use as the root of the docker runtime
   --icc=true: Enable inter-container communication
   --ip="0.0.0.0": Default IP address to use when binding container ports
   --iptables=true: Disable docker's addition of iptables rules
   -p, --pidfile="/var/run/docker.pid": Path to use for daemon PID file
   -r, --restart=true: Restart previously running containers
   -s, --storage-driver="": Force the docker runtime to use a specific storage driver
   -v, --version=false: Print version information and quit
   -mtu, --mtu=0: Set the containers network MTU; if no value is provided, default to the default route MTU, or 1500 if no default route is available

The Docker daemon is the persistent process that manages containers. Docker uses the same binary for both the
daemon and client. To run the daemon you provide the ``-d`` flag.
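
A brief sketch of the two bindings side by side (the TCP address is an
example; 4243 is the historical default port mentioned above):

.. code-block:: bash

   # Daemon listening on both the Unix socket and a TCP port
   sudo docker -d -H unix:///var/run/docker.sock -H tcp://127.0.0.1:4243 &

   # Client talking to the TCP port explicitly
   docker -H tcp://127.0.0.1:4243 version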
@ -64,6 +103,11 @@ the ``-H`` flag for the client.
|||
# both are equal
|
||||
|
||||
|
||||
To run the daemon with `systemd socket activation <http://0pointer.de/blog/projects/socket-activation.html>`_, use ``docker -d -H fd://``.
|
||||
Using ``fd://`` will work perfectly for most setups but you can also specify individual sockets too ``docker -d -H fd://3``.
|
||||
If the specified socket activated files aren't found then docker will exit.
|
||||
You can find examples of using systemd socket activation with docker and systemd in the `docker source tree <https://github.com/dotcloud/docker/blob/master/contrib/init/systemd/socket-activation/>`_.
|
||||
|
||||
.. _cli_attach:
|
||||
|
||||
``attach``
|
||||
|
@ -75,8 +119,8 @@ the ``-H`` flag for the client.
|
|||
|
||||
Attach to a running container.
|
||||
|
||||
-nostdin=false: Do not attach stdin
|
||||
-sig-proxy=true: Proxify all received signal to the process (even in non-tty mode)
|
||||
--no-stdin=false: Do not attach stdin
|
||||
--sig-proxy=true: Proxify all received signal to the process (even in non-tty mode)
|
||||
|
||||
You can detach from the container again (and leave it running) with
|
||||
``CTRL-c`` (for a quiet exit) or ``CTRL-\`` to get a stacktrace of
|
||||
|
@ -135,11 +179,11 @@ Examples:
|
|||
|
||||
Usage: docker build [OPTIONS] PATH | URL | -
|
||||
Build a new container image from the source code at PATH
|
||||
-t="": Repository name (and optionally a tag) to be applied
|
||||
-t, --time="": Repository name (and optionally a tag) to be applied
|
||||
to the resulting image in case of success.
|
||||
-q=false: Suppress verbose build output.
|
||||
-no-cache: Do not use the cache when building the image.
|
||||
-rm: Remove intermediate containers after a successful build
|
||||
-q, --quiet=false: Suppress verbose build output.
|
||||
--no-cache: Do not use the cache when building the image.
|
||||
--rm: Remove intermediate containers after a successful build
|
||||
|
||||
The files at ``PATH`` or ``URL`` are called the "context" of the build. The
|
||||
build process may refer to any of the files in the context, for example when
|
||||
|
@ -233,9 +277,9 @@ by using the ``git://`` schema.
|
|||
|
||||
Create a new image from a container's changes
|
||||
|
||||
-m="": Commit message
|
||||
-author="": Author (eg. "John Hannibal Smith <hannibal@a-team.com>"
|
||||
-run="": Configuration to be applied when the image is launched with `docker run`.
|
||||
-m, --message="": Commit message
|
||||
-a, --author="": Author (eg. "John Hannibal Smith <hannibal@a-team.com>"
|
||||
--run="": Configuration to be applied when the image is launched with `docker run`.
|
||||
(ex: -run='{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')
|
||||
|
||||
.. _cli_commit_examples:
|
||||
|
@ -279,7 +323,7 @@ run ``ls /etc``.
|
|||
Full -run example
|
||||
.................
|
||||
|
||||
The ``-run`` JSON hash changes the ``Config`` section when running ``docker inspect CONTAINERID``
|
||||
The ``--run`` JSON hash changes the ``Config`` section when running ``docker inspect CONTAINERID``
|
||||
or ``config`` when running ``docker inspect IMAGEID``.
|
||||
|
||||
(Multiline is okay within a single quote ``'``)
|
||||
|
@ -379,7 +423,7 @@ For example:
|
|||
|
||||
Get real time events from the server
|
||||
|
||||
-since="": Show previously created events and then stream.
|
||||
--since="": Show previously created events and then stream.
|
||||
(either seconds since epoch, or date string as below)
|
||||
|
||||
.. _cli_events_example:
|
||||
|
@ -459,8 +503,8 @@ For example:
|
|||
|
||||
Show the history of an image
|
||||
|
||||
-notrunc=false: Don't truncate output
|
||||
-q=false: only show numeric IDs
|
||||
--no-trunc=false: Don't truncate output
|
||||
-q, --quiet=false: only show numeric IDs
|
||||
|
||||
To see how the ``docker:latest`` image was built:
|
||||
|
||||
|
@ -507,11 +551,11 @@ To see how the ``docker:latest`` image was built:
|
|||
|
||||
List images
|
||||
|
||||
-a=false: show all images (by default filter out the intermediate images used to build)
|
||||
-notrunc=false: Don't truncate output
|
||||
-q=false: only show numeric IDs
|
||||
-tree=false: output graph in tree format
|
||||
-viz=false: output graph in graphviz format
|
||||
-a, --all=false: show all images (by default filter out the intermediate images used to build)
|
||||
--no-trunc=false: Don't truncate output
|
||||
-q, --quiet=false: only show numeric IDs
|
||||
--tree=false: output graph in tree format
|
||||
--viz=false: output graph in graphviz format
|
||||
|
||||
Listing the most recently created images
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
@ -535,7 +579,7 @@ Listing the full length image IDs
|
|||
|
||||
.. code-block:: bash
|
||||
|
||||
$ sudo docker images -notrunc | head
|
||||
$ sudo docker images --no-trunc | head
|
||||
REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE
|
||||
<none> <none> 77af4d6b9913e693e8d0b4b294fa62ade6054e6b2f1ffb617ac955dd63fb0182 19 hours ago 1.089 GB
|
||||
committest latest b6fa739cedf5ea12a620a439402b6004d057da800f91c7524b5086a5e4749c9f 19 hours ago 1.089 GB
|
||||
|
@ -552,7 +596,7 @@ Displaying images visually
|
|||
|
||||
.. code-block:: bash
|
||||
|
||||
$ sudo docker images -viz | dot -Tpng -o docker.png
|
||||
$ sudo docker images --viz | dot -Tpng -o docker.png
|
||||
|
||||
.. image:: docker_images.gif
|
||||
:alt: Example inheritance graph of Docker images.
|
||||
|
@ -563,7 +607,7 @@ Displaying image hierarchy
|
|||
|
||||
.. code-block:: bash
|
||||
|
||||
$ sudo docker images -tree
|
||||
$ sudo docker images --tree
|
||||
|
||||
├─8dbd9e392a96 Size: 131.5 MB (virtual 131.5 MB) Tags: ubuntu:12.04,ubuntu:latest,ubuntu:precise
|
||||
└─27cf78414709 Size: 180.1 MB (virtual 180.1 MB)
|
||||
|
@ -702,7 +746,7 @@ Insert file from GitHub
|
|||
|
||||
Return low-level information on a container/image
|
||||
|
||||
-format="": Format the output using the given go template.
|
||||
-f, --format="": Format the output using the given go template.
|
||||
|
||||
By default, this will render all results in a JSON array. If a format
|
||||
is specified, the given template will be executed for each result.
|
||||
|
@ -721,7 +765,7 @@ fairly straightforward manner.
|
|||
|
||||
.. code-block:: bash
|
||||
|
||||
$ sudo docker inspect -format='{{.NetworkSettings.IPAddress}}' $INSTANCE_ID
|
||||
$ sudo docker inspect --format='{{.NetworkSettings.IPAddress}}' $INSTANCE_ID
|
||||
|
||||
List All Port Bindings
|
||||
......................
|
||||
|
@ -755,17 +799,21 @@ we ask for the ``HostPort`` field to get the public address.
|
|||
|
||||
::
|
||||
|
||||
Usage: docker kill CONTAINER [CONTAINER...]
|
||||
Usage: docker kill [OPTIONS] CONTAINER [CONTAINER...]
|
||||
|
||||
Kill a running container (Send SIGKILL)
|
||||
Kill a running container (send SIGKILL, or specified signal)
|
||||
|
||||
The main process inside the container will be sent SIGKILL.
|
||||
-s, --signal="KILL": Signal to send to the container
|
||||
|
||||
The main process inside the container will be sent SIGKILL, or any signal specified with option ``--signal``.
|
||||
|
||||
Known Issues (kill)
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
* :issue:`197` indicates that ``docker kill`` may leave directories
|
||||
behind and make it difficult to remove the container.
|
||||
* :issue:`3844` lxc 1.0.0 beta3 removed ``lcx-kill`` which is used by Docker versions before 0.8.0;
|
||||
see the issue for a workaround.
|
||||
|
||||
.. _cli_load:
|
||||
|
||||
|
@ -790,9 +838,9 @@ Known Issues (kill)
|
|||
|
||||
Register or Login to the docker registry server
|
||||
|
||||
-e="": email
|
||||
-p="": password
|
||||
-u="": username
|
||||
-e, --email="": email
|
||||
-p, --password="": password
|
||||
-u, --username="": username
|
||||
|
||||
If you want to login to a private registry you can
|
||||
specify this by adding the server name.
|
||||
|
@ -812,12 +860,14 @@ Known Issues (kill)
|
|||
|
||||
Fetch the logs of a container
|
||||
|
||||
-f, --follow=false: Follow log output
|
||||
|
||||
The ``docker logs`` command is a convenience which batch-retrieves whatever
|
||||
logs are present at the time of execution. This does not guarantee execution
|
||||
order when combined with a ``docker run`` (i.e. your run may not have generated
|
||||
any logs at the time you execute ``docker logs``).
|
||||
|
||||
The ``docker logs -f`` command combines ``docker logs`` and ``docker attach``:
|
||||
The ``docker logs --follow`` command combines ``docker logs`` and ``docker attach``:
|
||||
it will first return all logs from the beginning and then continue streaming
|
||||
new output from the container's stdout and stderr.
|
||||
|
||||
|
@ -845,9 +895,9 @@ new output from the container's stdout and stderr.
|
|||
|
||||
List containers
|
||||
|
||||
-a=false: Show all containers. Only running containers are shown by default.
|
||||
-notrunc=false: Don't truncate output
|
||||
-q=false: Only display numeric IDs
|
||||
-a, --all=false: Show all containers. Only running containers are shown by default.
|
||||
--no-trunc=false: Don't truncate output
|
||||
-q, --quiet=false: Only display numeric IDs
|
||||
|
||||
Running ``docker ps`` showing 2 linked containers.
|
||||
|
||||
|
@ -856,7 +906,10 @@ Running ``docker ps`` showing 2 linked containers.
|
|||
$ docker ps
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
4c01db0b339c ubuntu:12.04 bash 17 seconds ago Up 16 seconds webapp
|
||||
d7886598dbe2 crosbymichael/redis:latest /redis-server --dir 33 minutes ago Up 33 minutes 6379/tcp redis,webapp/db
|
||||
d7886598dbe2 crosbymichael/redis:latest /redis-server --dir 33 minutes ago Up 33 minutes 6379/tcp redis,webapp/db
|
||||
fd2645e2e2b5 busybox:latest top 10 days ago Ghost insane_ptolemy
|
||||
|
||||
The last container is marked as a ``Ghost`` container. It is a container that was running when the docker daemon was restarted (upgraded, or ``-H`` settings changed). The container is still running, but as this docker daemon process is not able to manage it, you can't attach to it. To bring them out of ``Ghost`` Status, you need to use ``docker kill`` or ``docker restart``.
|
||||
|
||||
.. _cli_pull:
|
||||
|
||||
|
@ -903,7 +956,7 @@ Running ``docker ps`` showing 2 linked containers.
|
|||
Usage: docker rm [OPTIONS] CONTAINER
|
||||
|
||||
Remove one or more containers
|
||||
-link="": Remove the link instead of the actual container
|
||||
--link="": Remove the link instead of the actual container
|
||||
|
||||
Known Issues (rm)
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
@ -926,7 +979,7 @@ This will remove the container referenced under the link ``/redis``.
|
|||
|
||||
.. code-block:: bash
|
||||
|
||||
$ sudo docker rm -link /webapp/redis
|
||||
$ sudo docker rm --link /webapp/redis
|
||||
/webapp/redis
|
||||
|
||||
|
||||
|
@ -996,31 +1049,31 @@ image is removed.
|
|||
|
||||
Run a command in a new container
|
||||
|
||||
-a=map[]: Attach to stdin, stdout or stderr
|
||||
-c=0: CPU shares (relative weight)
|
||||
-cidfile="": Write the container ID to the file
|
||||
-d=false: Detached mode: Run container in the background, print new container id
|
||||
-e=[]: Set environment variables
|
||||
-h="": Container host name
|
||||
-i=false: Keep stdin open even if not attached
|
||||
-privileged=false: Give extended privileges to this container
|
||||
-m="": Memory limit (format: <number><optional unit>, where unit = b, k, m or g)
|
||||
-n=true: Enable networking for this container
|
||||
-p=[]: Map a network port to the container
|
||||
-rm=false: Automatically remove the container when it exits (incompatible with -d)
|
||||
-t=false: Allocate a pseudo-tty
|
||||
-u="": Username or UID
|
||||
-dns=[]: Set custom dns servers for the container
|
||||
-v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro]. If "container-dir" is missing, then docker creates a new volume.
|
||||
-volumes-from="": Mount all volumes from the given container(s)
|
||||
-entrypoint="": Overwrite the default entrypoint set by the image
|
||||
-w="": Working directory inside the container
|
||||
-lxc-conf=[]: Add custom lxc options -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
|
||||
-sig-proxy=true: Proxify all received signal to the process (even in non-tty mode)
|
||||
-expose=[]: Expose a port from the container without publishing it to your host
|
||||
-link="": Add link to another container (name:alias)
|
||||
-name="": Assign the specified name to the container. If no name is specific docker will generate a random name
|
||||
-P=false: Publish all exposed ports to the host interfaces
|
||||
-a, --attach=map[]: Attach to stdin, stdout or stderr
|
||||
-c, --cpu-shares=0: CPU shares (relative weight)
|
||||
--cidfile="": Write the container ID to the file
|
||||
-d, --detach=false: Detached mode: Run container in the background, print new container id
|
||||
-e, --env=[]: Set environment variables
|
||||
-h, --host="": Container host name
|
||||
-i, --interactive=false: Keep stdin open even if not attached
|
||||
--privileged=false: Give extended privileges to this container
|
||||
-m, --memory="": Memory limit (format: <number><optional unit>, where unit = b, k, m or g)
|
||||
-n, --networking=true: Enable networking for this container
|
||||
-p, --publish=[]: Map a network port to the container
|
||||
--rm=false: Automatically remove the container when it exits (incompatible with -d)
|
||||
-t, --tty=false: Allocate a pseudo-tty
|
||||
-u, --user="": Username or UID
|
||||
--dns=[]: Set custom dns servers for the container
|
||||
-v, --volume=[]: Create a bind mount to a directory or file with: [host-path]:[container-path]:[rw|ro]. If a directory "container-path" is missing, then docker creates a new volume.
|
||||
--volumes-from="": Mount all volumes from the given container(s)
|
||||
--entrypoint="": Overwrite the default entrypoint set by the image
|
||||
-w, --workdir="": Working directory inside the container
|
||||
--lxc-conf=[]: Add custom lxc options -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
|
||||
--sig-proxy=true: Proxify all received signal to the process (even in non-tty mode)
|
||||
--expose=[]: Expose a port from the container without publishing it to your host
|
||||
--link="": Add link to another container (name:alias)
|
||||
--name="": Assign the specified name to the container. If no name is specific docker will generate a random name
|
||||
-P, --publish-all=false: Publish all exposed ports to the host interfaces
|
||||
|
||||
The ``docker run`` command first ``creates`` a writeable container layer over
|
||||
the specified image, and then ``starts`` it using the specified command. That
|
||||
|
@ -1042,7 +1095,7 @@ Examples:
|
|||
|
||||
.. code-block:: bash
|
||||
|
||||
$ sudo docker run -cidfile /tmp/docker_test.cid ubuntu echo "test"
|
||||
$ sudo docker run --cidfile /tmp/docker_test.cid ubuntu echo "test"
|
||||
|
||||
This will create a container and print ``test`` to the console. The
|
||||
``cidfile`` flag makes Docker attempt to create a new file and write the
|
||||
|
@ -1051,7 +1104,7 @@ error. Docker will close this file when ``docker run`` exits.
|
|||
|
||||
.. code-block:: bash
|
||||
|
||||
$ sudo docker run -t -i -rm ubuntu bash
|
||||
$ sudo docker run -t -i --rm ubuntu bash
|
||||
root@bc338942ef20:/# mount -t tmpfs none /mnt
|
||||
mount: permission denied
|
||||
|
||||
|
@ -1063,7 +1116,7 @@ allow it to run:
|
|||
|
||||
.. code-block:: bash
|
||||
|
||||
$ sudo docker run -privileged ubuntu bash
|
||||
$ sudo docker run --privileged ubuntu bash
|
||||
root@50e3f57e16e6:/# mount -t tmpfs none /mnt
|
||||
root@50e3f57e16e6:/# df -h
|
||||
Filesystem Size Used Avail Use% Mounted on
|
||||
|
@ -1096,7 +1149,24 @@ using the container, but inside the current working directory.
|
|||
|
||||
.. code-block:: bash
|
||||
|
||||
$ sudo docker run -p 127.0.0.1:80:8080 ubuntu bash
|
||||
$ sudo docker run -v /doesnt/exist:/foo -w /foo -i -t ubuntu bash
|
||||
|
||||
When the host directory of a bind-mounted volume doesn't exist, Docker
|
||||
will automatically create this directory on the host for you. In the
|
||||
example above, Docker will create the ``/doesnt/exist`` folder before
|
||||
starting your container.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ sudo docker run -t -i -v /var/run/docker.sock:/var/run/docker.sock -v ./static-docker:/usr/bin/docker busybox sh
|
||||
|
||||
By bind-mounting the docker unix socket and statically linked docker binary
|
||||
(such as that provided by https://get.docker.io), you give the container
|
||||
the full access to create and manipulate the host's docker daemon.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ sudo docker run -p 127.0.0.1:80:8080 ubuntu bash
|
||||
|
||||
This binds port ``8080`` of the container to port ``80`` on ``127.0.0.1`` of the
|
||||
host machine. :ref:`port_redirection` explains in detail how to manipulate ports
|
||||
|
@ -1104,7 +1174,7 @@ in Docker.
|
|||
|
||||
.. code-block:: bash
|
||||
|
||||
$ sudo docker run -expose 80 ubuntu bash
|
||||
$ sudo docker run --expose 80 ubuntu bash
|
||||
|
||||
This exposes port ``80`` of the container for use within a link without
|
||||
publishing the port to the host system's interfaces. :ref:`port_redirection`
|
||||
|
@ -1112,28 +1182,28 @@ explains in detail how to manipulate ports in Docker.
|
|||
|
||||
.. code-block:: bash
|
||||
|
||||
$ sudo docker run -name console -t -i ubuntu bash
|
||||
$ sudo docker run --name console -t -i ubuntu bash
|
||||
|
||||
This will create and run a new container with the container name
|
||||
being ``console``.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ sudo docker run -link /redis:redis -name console ubuntu bash
|
||||
$ sudo docker run --link /redis:redis --name console ubuntu bash
|
||||
|
||||
The ``-link`` flag will link the container named ``/redis`` into the
|
||||
The ``--link`` flag will link the container named ``/redis`` into the
|
||||
newly created container with the alias ``redis``. The new container
|
||||
can access the network and environment of the redis container via
|
||||
environment variables. The ``-name`` flag will assign the name ``console``
|
||||
environment variables. The ``--name`` flag will assign the name ``console``
|
||||
to the newly created container.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ sudo docker run -volumes-from 777f7dc92da7,ba8c0c54f0f2:ro -i -t ubuntu pwd
|
||||
$ sudo docker run --volumes-from 777f7dc92da7,ba8c0c54f0f2:ro -i -t ubuntu pwd
|
||||
|
||||
The ``-volumes-from`` flag mounts all the defined volumes from the
|
||||
The ``--volumes-from`` flag mounts all the defined volumes from the
|
||||
referenced containers. Containers can be specified by a comma seperated
|
||||
list or by repetitions of the ``-volumes-from`` argument. The container
|
||||
list or by repetitions of the ``--volumes-from`` argument. The container
|
||||
ID may be optionally suffixed with ``:ro`` or ``:rw`` to mount the volumes in
|
||||
read-only or read-write mode, respectively. By default, the volumes are mounted
|
||||
in the same mode (read write or read only) as the reference container.
|
||||
|
@ -1143,11 +1213,11 @@ A complete example
|
|||
|
||||
.. code-block:: bash
|
||||
|
||||
$ sudo docker run -d -name static static-web-files sh
|
||||
$ sudo docker run -d -expose=8098 -name riak riakserver
|
||||
$ sudo docker run -d -m 100m -e DEVELOPMENT=1 -e BRANCH=example-code -v $(pwd):/app/bin:ro -name app appserver
|
||||
$ sudo docker run -d -p 1443:443 -dns=dns.dev.org -v /var/log/httpd -volumes-from static -link riak -link app -h www.sven.dev.org -name web webserver
|
||||
$ sudo docker run -t -i -rm -volumes-from web -w /var/log/httpd busybox tail -f access.log
|
||||
$ sudo docker run -d --name static static-web-files sh
|
||||
$ sudo docker run -d --expose=8098 --name riak riakserver
|
||||
$ sudo docker run -d -m 100m -e DEVELOPMENT=1 -e BRANCH=example-code -v $(pwd):/app/bin:ro --name app appserver
|
||||
$ sudo docker run -d -p 1443:443 --dns=dns.dev.org -v /var/log/httpd --volumes-from static --link riak --link app -h www.sven.dev.org --name web webserver
|
||||
$ sudo docker run -t -i --rm --volumes-from web -w /var/log/httpd busybox tail -f access.log
|
||||
|
||||
This example shows 5 containers that might be set up to test a web application change:
|
||||
|
||||
|
@ -1181,9 +1251,9 @@ This example shows 5 containers that might be set up to test a web application c
|
|||
|
||||

Search the docker index for images

    -notrunc=false: Don't truncate output
    -stars=0: Only displays with at least xxx stars
    -trusted=false: Only show trusted builds
    --no-trunc=false: Don't truncate output
    -s, --stars=0: Only displays with at least xxx stars
    -t, --trusted=false: Only show trusted builds

.. _cli_start:

@@ -1196,8 +1266,8 @@ This example shows 5 containers that might be set up to test a web application c

Start a stopped container

    -a=false: Attach container's stdout/stderr and forward all signals to the process
    -i=false: Attach container's stdin
    -a, --attach=false: Attach container's stdout/stderr and forward all signals to the process
    -i, --interactive=false: Attach container's stdin

.. _cli_stop:

@@ -1210,7 +1280,7 @@ This example shows 5 containers that might be set up to test a web application c

Stop a running container (Send SIGTERM, and then SIGKILL after grace period)

    -t=10: Number of seconds to wait for the container to stop before killing it.
    -t, --time=10: Number of seconds to wait for the container to stop before killing it.

The main process inside the container will receive SIGTERM, and after a grace period, SIGKILL
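
For example, a brief sketch (the container ID is a placeholder)::

    # Give the container 5 seconds to exit on SIGTERM before SIGKILL is sent
    $ sudo docker stop -t 5 <container-id>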

@@ -1225,7 +1295,7 @@ The main process inside the container will receive SIGTERM, and after a grace pe

Tag an image into a repository

    -f=false: Force
    -f, --force=false: Force

.. _cli_top:

(binary image changed: before 35 KiB, after 35 KiB)

18 docs/sources/reference/index.rst Normal file

@@ -0,0 +1,18 @@

:title: Docker Reference Manual
:description: References
:keywords: docker, references, api, command line, commands

.. _references:

Reference Manual
================

Contents:

.. toctree::
   :maxdepth: 1

   commandline/index
   builder
   run
   api/index

419 docs/sources/reference/run.rst Normal file

@@ -0,0 +1,419 @@
:title: Docker Run Reference
:description: Configure containers at runtime
:keywords: docker, run, configure, runtime

.. _run_docker:

====================
Docker Run Reference
====================

**Docker runs processes in isolated containers**. When an operator
executes ``docker run``, she starts a process with its own file
system, its own networking, and its own isolated process tree. The
:ref:`image_def` which starts the process may define defaults related
to the binary to run, the networking to expose, and more, but ``docker
run`` gives final control to the operator who starts the container
from the image. That's the main reason :ref:`cli_run` has more options
than any other ``docker`` command.

Every one of the :ref:`example_list` shows running containers, and so
here we try to give more in-depth guidance.

.. contents:: Table of Contents
   :depth: 2

.. _run_running:

General Form
============

As you've seen in the :ref:`example_list`, the basic `run` command
takes this form::

    docker run [OPTIONS] IMAGE[:TAG] [COMMAND] [ARG...]

To learn how to interpret the types of ``[OPTIONS]``, see
:ref:`cli_options`.

The list of ``[OPTIONS]`` breaks down into two groups:

1. Settings exclusive to operators, including:

   * Detached or Foreground running,
   * Container Identification,
   * Network settings,
   * Runtime Constraints on CPU and Memory, and
   * Privileges and LXC Configuration

2. Settings shared between operators and developers, where operators
   can override defaults developers set in images at build time.

Together, the ``docker run [OPTIONS]`` give complete control over
runtime behavior to the operator, allowing them to override all
defaults set by the developer during ``docker build`` and nearly all
the defaults set by the Docker runtime itself.

Operator Exclusive Options
==========================

Only the operator (the person executing ``docker run``) can set the
following options.

.. contents::
   :local:

Detached vs Foreground
----------------------

When starting a Docker container, you must first decide if you want to
run the container in the background in a "detached" mode or in the
default foreground mode::

    -d=false: Detached mode: Run container in the background, print new container id

Detached (-d)
.............

In detached mode (``-d=true`` or just ``-d``), all I/O should be done
through network connections or shared volumes because the container is
no longer listening to the commandline where you executed ``docker
run``. You can reattach to a detached container with ``docker``
:ref:`cli_attach`. If you choose to run a container in the detached
mode, then you cannot use the ``-rm`` option.
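
For example, a minimal sketch of detached mode (the image and command
are illustrative)::

    # Start a long-running process in the background; Docker prints the new container ID
    $ sudo docker run -d ubuntu /bin/sh -c "while true; do echo hello; sleep 1; done"
    # Reattach later using that ID
    $ sudo docker attach <container-id>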

Foreground
..........

In foreground mode (the default when ``-d`` is not specified),
``docker run`` can start the process in the container and attach the
console to the process's standard input, output, and standard
error. It can even pretend to be a TTY (this is what most commandline
executables expect) and pass along signals. All of that is
configurable::

    -a=[] : Attach to ``stdin``, ``stdout`` and/or ``stderr``
    -t=false : Allocate a pseudo-tty
    -sig-proxy=true: Proxy all received signals to the process (even in non-tty mode)
    -i=false : Keep STDIN open even if not attached

If you do not specify ``-a`` then Docker will `attach everything
(stdin,stdout,stderr)
<https://github.com/dotcloud/docker/blob/75a7f4d90cde0295bcfb7213004abce8d4779b75/commands.go#L1797>`_. You
can specify to which of the three standard streams (``stdin``, ``stdout``,
``stderr``) you'd like to connect instead, as in::

    docker run -a stdin -a stdout -i -t ubuntu /bin/bash

For interactive processes (like a shell) you will typically want a tty
as well as persistent standard input (``stdin``), so you'll use ``-i
-t`` together in most interactive cases.

Container Identification
------------------------

Name (-name)
............

The operator can identify a container in three ways:

* UUID long identifier ("f78375b1c487e03c9438c729345e54db9d20cfa2ac1fc3494b6eb60872e74778")
* UUID short identifier ("f78375b1c487")
* Name ("evil_ptolemy")

The UUID identifiers come from the Docker daemon, and if you do not
assign a name to the container with ``-name`` then the daemon will
generate a random string name. The name can become a handy
way to add meaning to a container since you can use this name when
defining :ref:`links <working_with_links_names>` (or any other place
you need to identify a container). This works for both background and
foreground Docker containers.
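
A short sketch of naming (the name ``web1`` is arbitrary)::

    # Start a named container, then refer to it by name instead of by UUID
    $ sudo docker run -d -name web1 ubuntu /bin/sh -c "sleep 600"
    $ sudo docker ps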

PID Equivalent
..............

And finally, to help with automation, you can have Docker write the
container ID out to a file of your choosing. This is similar to how
some programs might write out their process ID to a file (you've seen
them as PID files)::

    -cidfile="": Write the container ID to the file

Network Settings
----------------

::

    -n=true : Enable networking for this container
    -dns=[] : Set custom dns servers for the container

By default, all containers have networking enabled and they can make
any outgoing connections. The operator can completely disable
networking with ``docker run -n=false``, which disables all incoming
and outgoing networking. In cases like this, you would perform I/O
through files or STDIN/STDOUT only.

Your container will use the same DNS servers as the host by default,
but you can override this with ``-dns``.
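
A one-line sketch (the resolver address is just an example)::

    # Use a specific DNS server inside the container
    $ sudo docker run -dns=8.8.8.8 ubuntu cat /etc/resolv.conf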

Clean Up (-rm)
--------------

By default a container's file system persists even after the container
exits. This makes debugging a lot easier (since you can inspect the
final state) and you retain all your data by default. But if you are
running short-term **foreground** processes, these container file
systems can really pile up. If instead you'd like Docker to
**automatically clean up the container and remove the file system when
the container exits**, you can add the ``-rm`` flag::

    -rm=false: Automatically remove the container when it exits (incompatible with -d)
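
For example, a throwaway interactive shell whose file system is removed
as soon as you exit::

    $ sudo docker run -rm -i -t ubuntu /bin/bash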

Runtime Constraints on CPU and Memory
-------------------------------------

The operator can also adjust the performance parameters of the container::

    -m="": Memory limit (format: <number><optional unit>, where unit = b, k, m or g)
    -c=0 : CPU shares (relative weight)

The operator can constrain the memory available to a container easily
with ``docker run -m``. If the host supports swap memory, then the
``-m`` memory setting can be larger than physical RAM.

Similarly the operator can increase the priority of this container
with the ``-c`` option. By default, all containers run at the same
priority and get the same proportion of CPU cycles, but you can tell
the kernel to give more shares of CPU time to one or more containers
when you start them via Docker.
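
As a sketch, capping memory and raising the CPU weight (the values are
illustrative; the kernel's default cpu.shares weight is 1024)::

    # Limit the container to 512 MB and give it twice the default CPU shares
    $ sudo docker run -m 512m -c 2048 -i -t ubuntu /bin/bash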

Runtime Privilege and LXC Configuration
---------------------------------------

::

    -privileged=false: Give extended privileges to this container
    -lxc-conf=[]: Add custom lxc options -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"

By default, Docker containers are "unprivileged" and cannot, for
example, run a Docker daemon inside a Docker container. This is
because by default a container is not allowed to access any devices,
but a "privileged" container is given access to all devices (see
lxc-template.go_ and documentation on `cgroups devices
<https://www.kernel.org/doc/Documentation/cgroups/devices.txt>`_).

When the operator executes ``docker run -privileged``, Docker will
enable access to all devices on the host as well as set some
configuration in AppArmor to allow the container nearly all the same
access to the host as processes running outside containers on the
host. Additional information about running with ``-privileged`` is
available on the `Docker Blog
<http://blog.docker.io/2013/09/docker-can-now-run-within-docker/>`_.

An operator can also specify LXC options using one or more
``-lxc-conf`` parameters. These can be new parameters or override
existing parameters from the lxc-template.go_. Note that in the
future, a given host's Docker daemon may not use LXC, so this is an
implementation-specific configuration meant for operators already
familiar with using LXC directly.
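
A sketch combining both flags (the cpuset value is taken from the
option's own help text above)::

    # Run privileged, pinned to CPUs 0 and 1 via a raw LXC option
    $ sudo docker run -privileged -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" -i -t ubuntu /bin/bash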

.. _lxc-template.go: https://github.com/dotcloud/docker/blob/master/execdriver/lxc/lxc_template.go


Overriding ``Dockerfile`` Image Defaults
========================================

When a developer builds an image from a :ref:`Dockerfile
<dockerbuilder>` or when she commits it, the developer can set a
number of default parameters that take effect when the image starts up
as a container.

Four of the ``Dockerfile`` commands cannot be overridden at runtime:
``FROM, MAINTAINER, RUN``, and ``ADD``. Everything else has a
corresponding override in ``docker run``. We'll go through what the
developer might have set in each ``Dockerfile`` instruction and how the
operator can override that setting.

.. contents::
   :local:

CMD (Default Command or Options)
--------------------------------

Recall the optional ``COMMAND`` in the Docker commandline::

    docker run [OPTIONS] IMAGE[:TAG] [COMMAND] [ARG...]

This command is optional because the person who created the ``IMAGE``
may have already provided a default ``COMMAND`` using the ``Dockerfile``
``CMD``. As the operator (the person running a container from the
image), you can override that ``CMD`` just by specifying a new
``COMMAND``.

If the image also specifies an ``ENTRYPOINT`` then the ``CMD`` or
``COMMAND`` get appended as arguments to the ``ENTRYPOINT``.
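
For example (sketch; whatever default ``CMD`` the image declares is
simply replaced by the new ``COMMAND``)::

    # Ignore the image's default command and run `ls -l /` instead
    $ sudo docker run ubuntu /bin/ls -l /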


ENTRYPOINT (Default Command to Execute at Runtime)
--------------------------------------------------

::

    -entrypoint="": Overwrite the default entrypoint set by the image

The ENTRYPOINT of an image is similar to a ``COMMAND`` because it
specifies what executable to run when the container starts, but it is
(purposely) more difficult to override. The ``ENTRYPOINT`` gives a
container its default nature or behavior, so that when you set an
``ENTRYPOINT`` you can run the container *as if it were that binary*,
complete with default options, and you can pass in more options via
the ``COMMAND``. But, sometimes an operator may want to run something else
inside the container, so you can override the default ``ENTRYPOINT`` at
runtime by using a string to specify the new ``ENTRYPOINT``. Here is an
example of how to run a shell in a container that has been set up to
automatically run something else (like ``/usr/bin/redis-server``)::

    docker run -i -t -entrypoint /bin/bash example/redis

or two examples of how to pass more parameters to that ENTRYPOINT::

    docker run -i -t -entrypoint /bin/bash example/redis -c ls -l
    docker run -i -t -entrypoint /usr/bin/redis-cli example/redis --help


EXPOSE (Incoming Ports)
-----------------------

The ``Dockerfile`` doesn't give much control over networking, only
providing the ``EXPOSE`` instruction to give a hint to the operator
about what incoming ports might provide services. The following
options work with or override the ``Dockerfile``'s exposed defaults::

    -expose=[]: Expose a port from the container
                without publishing it to your host
    -P=false  : Publish all exposed ports to the host interfaces
    -p=[]     : Publish a container's port to the host (format:
                ip:hostPort:containerPort | ip::containerPort |
                hostPort:containerPort)
                (use 'docker port' to see the actual mapping)
    -link=""  : Add link to another container (name:alias)

As mentioned previously, ``EXPOSE`` (and ``-expose``) make a port
available **in** a container for incoming connections. The port number
on the inside of the container (where the service listens) does not
need to be the same number as the port exposed on the outside of the
container (where clients connect), so inside the container you might
have an HTTP service listening on port 80 (and so you ``EXPOSE 80`` in
the ``Dockerfile``), but outside the container the port might be 42800.

To help a new client container reach the server container's internal
port ``-expose``'d by the operator or ``EXPOSE``'d by the
developer, the operator has three choices: start the server container
with ``-P`` or ``-p``, or start the client container with ``-link``.

If the operator uses ``-P`` or ``-p`` then Docker will make the
exposed port accessible on the host and the ports will be available to
any client that can reach the host. To find the map between the host
ports and the exposed ports, use ``docker port``.
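
For instance (sketch; the image name, host port, and container ID are
placeholders)::

    # Publish the container's port 80 on the host's port 8080
    $ sudo docker run -d -p 8080:80 webserver-image
    # Ask Docker which host address and port map to the container's port 80
    $ sudo docker port <container-id> 80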

If the operator uses ``-link`` when starting the new client container,
then the client container can access the exposed port via a private
networking interface. Docker will set some environment variables in
the client container to help indicate which interface and port to use.

ENV (Environment Variables)
---------------------------

The operator can **set any environment variable** in the container by
using one or more ``-e`` flags, even overriding those already defined by the
developer with a Dockerfile ``ENV``::

    $ docker run -e "deep=purple" -rm ubuntu /bin/bash -c export
    declare -x HOME="/"
    declare -x HOSTNAME="85bc26a0e200"
    declare -x OLDPWD
    declare -x PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
    declare -x PWD="/"
    declare -x SHLVL="1"
    declare -x container="lxc"
    declare -x deep="purple"

Similarly the operator can set the **hostname** with ``-h``.

``-link name:alias`` also sets environment variables, using the
*alias* string to define environment variables within the container
that give the IP and PORT information for connecting to the service
container. Let's imagine we have a container running Redis::

    # Start the service container, named redis-name
    $ docker run -d -name redis-name dockerfiles/redis
    4241164edf6f5aca5b0e9e4c9eccd899b0b8080c64c0cd26efe02166c73208f3

    # The redis-name container exposed port 6379
    $ docker ps
    CONTAINER ID        IMAGE                      COMMAND                CREATED             STATUS              PORTS               NAMES
    4241164edf6f        dockerfiles/redis:latest   /redis-stable/src/re   5 seconds ago       Up 4 seconds        6379/tcp            redis-name

    # Note that there are no public ports exposed since we didn't use -p or -P
    $ docker port 4241164edf6f 6379
    2014/01/25 00:55:38 Error: No public port '6379' published for 4241164edf6f

Yet we can get information about the Redis container's exposed ports
with ``-link``. Choose an alias that will form a valid environment
variable!

::

    $ docker run -rm -link redis-name:redis_alias -entrypoint /bin/bash dockerfiles/redis -c export
    declare -x HOME="/"
    declare -x HOSTNAME="acda7f7b1cdc"
    declare -x OLDPWD
    declare -x PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
    declare -x PWD="/"
    declare -x REDIS_ALIAS_NAME="/distracted_wright/redis"
    declare -x REDIS_ALIAS_PORT="tcp://172.17.0.32:6379"
    declare -x REDIS_ALIAS_PORT_6379_TCP="tcp://172.17.0.32:6379"
    declare -x REDIS_ALIAS_PORT_6379_TCP_ADDR="172.17.0.32"
    declare -x REDIS_ALIAS_PORT_6379_TCP_PORT="6379"
    declare -x REDIS_ALIAS_PORT_6379_TCP_PROTO="tcp"
    declare -x SHLVL="1"
    declare -x container="lxc"

And we can use that information to connect from another container as a client::

    $ docker run -i -t -rm -link redis-name:redis_alias -entrypoint /bin/bash dockerfiles/redis -c '/redis-stable/src/redis-cli -h $REDIS_ALIAS_PORT_6379_TCP_ADDR -p $REDIS_ALIAS_PORT_6379_TCP_PORT'
    172.17.0.32:6379>

VOLUME (Shared Filesystems)
---------------------------

::

    -v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro].
           If "container-dir" is missing, then docker creates a new volume.
    -volumes-from="": Mount all volumes from the given container(s)

The volumes commands are complex enough to have their own
documentation in section :ref:`volume_def`. A developer can define one
or more ``VOLUME``\s associated with an image, but only the operator can
give access from one container to another (or from a container to a
volume mounted on the host).
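
A brief sketch of a read-only bind mount (the paths are illustrative)::

    # Mount the host's /var/logs/web into the container at /logs, read-only
    $ sudo docker run -v /var/logs/web:/logs:ro -i -t ubuntu ls /logs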

USER
----

The default user within a container is ``root`` (id = 0), but if the
developer created additional users, those are accessible too. The
developer can set a default user to run the first process with the
``Dockerfile USER`` command, but the operator can override it ::

    -u="": Username or UID

WORKDIR
-------

The default working directory for running binaries within a container
is the root directory (``/``), but the developer can set a different
default with the ``Dockerfile WORKDIR`` command. The operator can
override this with::

    -w="": Working directory inside the container

@@ -14,8 +14,9 @@ This documentation has the following resources:

installation/index
use/index
examples/index
commandline/index
reference/index
contributing/index
api/index
terms/index
articles/index
faq

@@ -1,26 +1,27 @@

:title: Learn Basic Commands
:title: First steps with Docker
:description: Common usage and commands
:keywords: Examples, Usage, basic commands, docker, documentation, examples


Learn Basic Commands
====================
First steps with Docker
=======================

Starting Docker
---------------
Check your Docker install
-------------------------

If you have used one of the quick install paths, Docker may have been
installed with upstart, Ubuntu's system for starting processes at boot
time. You should be able to run ``sudo docker help`` and get output.

If you get ``docker: command not found`` or something like
``/var/lib/docker/repositories: permission denied`` you will need to
specify the path to it and manually start it.
This guide assumes you have a working installation of Docker. To check
your Docker install, run the following command:

.. code-block:: bash

    # Run docker in daemon mode
    sudo <path to>/docker -d &
    # Check that you have a working install
    docker info

If you get ``docker: command not found`` or something like
``/var/lib/docker/repositories: permission denied`` you may have an incomplete
docker installation or insufficient privileges to access Docker on your machine.

Please refer to :ref:`installation_list` for installation instructions.

Download a pre-built image
--------------------------

@@ -51,42 +52,6 @@ Running an interactive shell

    # use the escape sequence Ctrl-p + Ctrl-q
    sudo docker run -i -t ubuntu /bin/bash

.. _dockergroup:

The sudo command and the docker Group
-------------------------------------

The ``docker`` daemon always runs as the root user, and since Docker version
0.5.2, the ``docker`` daemon binds to a Unix socket instead of a TCP port. By
default that Unix socket is owned by the user *root*, and so, by default, you
can access it with ``sudo``.

Starting in version 0.5.3, if you (or your Docker installer) create a
Unix group called *docker* and add users to it, then the ``docker``
daemon will make the ownership of the Unix socket read/writable by the
*docker* group when the daemon starts. The ``docker`` daemon must
always run as the root user, but if you run the ``docker`` client as a user in
the *docker* group then you don't need to add ``sudo`` to all the
client commands.

.. warning:: The *docker* group is root-equivalent.

**Example:**

.. code-block:: bash

    # Add the docker group if it doesn't already exist.
    sudo groupadd docker

    # Add the connected user "${USER}" to the docker group.
    # Change the user name to match your preferred user.
    # You may have to logout and log back in again for
    # this to take effect.
    sudo gpasswd -a ${USER} docker

    # Restart the docker daemon.
    sudo service docker restart

.. _bind_docker:

Bind Docker to another host/port or a Unix socket

Some files were not shown because too many files have changed in this diff.