Compare commits


7 Commits

Author                SHA1        Message                                                                  Date
Guillaume J. Charmes  38b8373434  Implement the COPY operator within the builder                           2013-04-24 14:28:51 -07:00
Guillaume J. Charmes  03b5f8a585  Make sure the destination directory exists when using docker insert     2013-04-24 13:51:28 -07:00
Guillaume J. Charmes  bc260f0225  Add insert command in order to insert external files within an image    2013-04-24 13:37:00 -07:00
Guillaume J. Charmes  45dcd1125b  Add a Builder.Commit method                                              2013-04-24 13:35:57 -07:00
Guillaume J. Charmes  d2e063d9e1  make builder.Run public it now runs only given arguments without sh -c  2013-04-24 12:31:20 -07:00
Guillaume J. Charmes  567a484b66  Clear the containers/images upon failure                                 2013-04-24 12:02:00 -07:00
Guillaume J. Charmes  5d4b886ad6  Add build command                                                        2013-04-24 11:03:01 -07:00
1004 changed files with 18781 additions and 167571 deletions

.gitignore (vendored)

@@ -1,6 +1,3 @@
# Docker project generated files to ignore
# if you want to ignore files created by your editor/tools,
# please consider a global .gitignore https://help.github.com/articles/ignoring-files
.vagrant*
bin
docker/docker
@@ -8,18 +5,13 @@ docker/docker
a.out
*.orig
build_src
command-line-arguments.test
.flymake*
docker.test
auth/auth.test
.idea
.DS_Store
docs/_build
docs/_static
docs/_templates
.gopath/
.dotcloud
*.test
bundles/
.hg/
.git/
vendor/pkg/
pyenv
Vagrantfile

.mailmap

@@ -1,11 +1,9 @@
# Generate AUTHORS: git log --format='%aN <%aE>' | sort -uf
<charles.hooper@dotcloud.com> <chooper@plumata.com>
# Generate AUTHORS: git log --all --format='%aN <%aE>' | sort -uf | grep -v vagrant-ubuntu-12
<charles.hooper@dotcloud.com> <chooper@plumata.com>
<daniel.mizyrycki@dotcloud.com> <daniel@dotcloud.com>
<daniel.mizyrycki@dotcloud.com> <mzdaniel@glidelink.net>
Guillaume J. Charmes <guillaume.charmes@docker.com> <charmes.guillaume@gmail.com>
<guillaume.charmes@docker.com> <guillaume@dotcloud.com>
<guillaume.charmes@docker.com> <guillaume@docker.com>
<guillaume.charmes@docker.com> <guillaume.charmes@dotcloud.com>
Guillaume J. Charmes <guillaume.charmes@dotcloud.com> creack <charmes.guillaume@gmail.com>
<guillaume.charmes@dotcloud.com> <guillaume@dotcloud.com>
<kencochrane@gmail.com> <KenCochrane@gmail.com>
<sridharr@activestate.com> <github@srid.name>
Thatcher Peskens <thatcher@dotcloud.com> dhrp <thatcher@dotcloud.com>
@@ -17,44 +15,5 @@ Joffrey F <joffrey@dotcloud.com>
Tim Terhorst <mynamewastaken+git@gmail.com>
Andy Smith <github@anarkystic.com>
<kalessin@kalessin.fr> <louis@dotcloud.com>
<victor.vieux@docker.com> <victor.vieux@dotcloud.com>
<victor.vieux@docker.com> <victor@dotcloud.com>
<victor.vieux@docker.com> <dev@vvieux.com>
<victor.vieux@docker.com> <victor@docker.com>
<victor.vieux@docker.com> <vieux@docker.com>
<victor.vieux@dotcloud.com> <victor@dotcloud.com>
<dominik@honnef.co> <dominikh@fork-bomb.org>
Thatcher Peskens <thatcher@dotcloud.com>
<ehanchrow@ine.com> <eric.hanchrow@gmail.com>
Walter Stanish <walter@pratyeka.org>
<daniel@gasienica.ch> <dgasienica@zynga.com>
Roberto Hashioka <roberto_hashioka@hotmail.com>
Konstantin Pelykh <kpelykh@zettaset.com>
David Sissitka <me@dsissitka.com>
Nolan Darilek <nolan@thewordnerd.info>
<mastahyeti@gmail.com> <mastahyeti@users.noreply.github.com>
Benoit Chesneau <bchesneau@gmail.com>
Jordan Arentsen <blissdev@gmail.com>
Daniel Garcia <daniel@danielgarcia.info>
Miguel Angel Fernández <elmendalerenda@gmail.com>
Bhiraj Butala <abhiraj.butala@gmail.com>
Faiz Khan <faizkhan00@gmail.com>
Victor Lyuboslavsky <victor@victoreda.com>
Jean-Baptiste Barth <jeanbaptiste.barth@gmail.com>
Matthew Mueller <mattmuelle@gmail.com>
<mosoni@ebay.com> <mohitsoni1989@gmail.com>
Shih-Yuan Lee <fourdollars@gmail.com>
Daniel Mizyrycki <daniel.mizyrycki@dotcloud.com> root <root@vagrant-ubuntu-12.10.vagrantup.com>
Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
<proppy@google.com> <proppy@aminche.com>
<michael@crosbymichael.com> <crosby.michael@gmail.com>
<github@metaliveblog.com> <github@developersupport.net>
<brandon@ifup.org> <brandon@ifup.co>
<dano@spotify.com> <daniel.norberg@gmail.com>
<danny@codeaholics.org> <Danny.Yates@mailonline.co.uk>
<gurjeet@singh.im> <singh.gurjeet@gmail.com>
<shawn@churchofgit.com> <shawnlandden@gmail.com>
<sjoerd-github@linuxonly.nl> <sjoerd@byte.nl>
<solomon@dotcloud.com> <solomon.hykes@dotcloud.com>
<SvenDowideit@home.org.au> <SvenDowideit@fosiki.com>
Sven Dowideit <SvenDowideit@home.org.au> ¨Sven <¨SvenDowideit@home.org.au¨>
unclejack <unclejacksons@gmail.com> <unclejack@users.noreply.github.com>
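
The file above is git's .mailmap: each line maps the raw author strings found in commits to one canonical name and address, and the AUTHORS file is regenerated from the result using the command in the comment at the top. As a quick sanity check, `git check-mailmap` (a standard git command) resolves a raw contact through these mappings — a sketch, assuming a checkout of this tree:

```bash
# Regenerate the AUTHORS list, canonicalized through .mailmap
# (the command from the comment at the top of the file):
git log --all --format='%aN <%aE>' | sort -uf | grep -v vagrant-ubuntu-12

# Spot-check a single mapping: both of these raw author strings should
# resolve to the canonical Guillaume J. Charmes entry defined above.
git check-mailmap "creack <charmes.guillaume@gmail.com>"
git check-mailmap "Guillaume J. Charmes <guillaume@dotcloud.com>"
```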

.travis.yml (deleted)

@@ -1,30 +0,0 @@
# Note: right now we don't use go-specific features of travis.
# Later we might automate "go test" etc. (or do it inside a docker container...?)
language: go
go: 1.2
# Disable the normal go build.
install: true
before_script:
- env | sort
- sudo apt-get update -qq
- sudo apt-get install -qq python-yaml
- git remote add upstream git://github.com/dotcloud/docker.git
- upstream=master;
if [ "$TRAVIS_PULL_REQUEST" != false ]; then
upstream=$TRAVIS_BRANCH;
fi;
git fetch --append --no-tags upstream refs/heads/$upstream:refs/remotes/upstream/$upstream
# sometimes we have upstream master already as origin/master (PRs), but other times we don't, so let's just make sure we have a completely unambiguous way to specify "upstream master" from here out
# but if it's a PR against non-master, we need that upstream branch instead :)
- sudo pip install -r docs/requirements.txt
script:
- hack/travis/dco.py
- hack/travis/gofmt.py
- make -sC docs SPHINXOPTS=-qW docs man
# vim:set sw=2 ts=2:
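
For reference, the checks Travis ran can be reproduced locally — a sketch assuming a checkout with the hack/ and docs/ directories, and the upstream remote configured as in before_script above:

```bash
# Install the docs toolchain, as before_script does:
sudo pip install -r docs/requirements.txt

# The DCO check diffs against upstream, so fetch it first
# (mirroring the git remote add / git fetch steps above):
git remote add upstream git://github.com/dotcloud/docker.git
git fetch --append --no-tags upstream refs/heads/master:refs/remotes/upstream/master

hack/travis/dco.py    # every commit must carry a DCO sign-off
hack/travis/gofmt.py  # the tree must be gofmt-clean

# Build docs and man pages, treating Sphinx warnings as errors:
make -sC docs SPHINXOPTS=-qW docs man
```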

AUTHORS

@@ -1,345 +1,45 @@
# This file lists all individuals having contributed content to the repository.
# If you're submitting a patch, please add your name here in alphabetical order as part of the patch.
#
# For a list of active project maintainers, see the MAINTAINERS file.
#
Aanand Prasad <aanand.prasad@gmail.com>
Aaron Feng <aaron.feng@gmail.com>
Abel Muiño <amuino@gmail.com>
Alexander Larsson <alexl@redhat.com>
Alexey Shamrin <shamrin@gmail.com>
Alex Gaynor <alex.gaynor@gmail.com>
Alexis THOMAS <fr.alexisthomas@gmail.com>
Al Tobey <al@ooyala.com>
Andrea Luzzardi <aluzzardi@gmail.com>
Andreas Savvides <andreas@editd.com>
Andreas Tiefenthaler <at@an-ti.eu>
Andrew Duckworth <grillopress@gmail.com>
Andrew Macgregor <andrew.macgregor@agworld.com.au>
Andrew Munsell <andrew@wizardapps.net>
Andrews Medina <andrewsmedina@gmail.com>
Andy Chambers <anchambers@paypal.com>
andy diller <dillera@gmail.com>
Andy Rothfusz <github@metaliveblog.com>
Andy Smith <github@anarkystic.com>
Anthony Bishopric <git@anthonybishopric.com>
Anton Nikitin <anton.k.nikitin@gmail.com>
Antony Messerli <amesserl@rackspace.com>
apocas <petermdias@gmail.com>
Asbjørn Enge <asbjorn@hanafjedle.net>
Barry Allard <barry.allard@gmail.com>
Bartłomiej Piotrowski <b@bpiotrowski.pl>
Benoit Chesneau <bchesneau@gmail.com>
Ben Sargent <ben@brokendigits.com>
Ben Toews <mastahyeti@gmail.com>
Ben Wiklund <ben@daisyowl.com>
Bhiraj Butala <abhiraj.butala@gmail.com>
Bouke Haarsma <bouke@webatoom.nl>
Brandon Liu <bdon@bdon.org>
Brandon Philips <brandon@ifup.org>
Brian Dorsey <brian@dorseys.org>
Brian Goff <cpuguy83@gmail.com>
Brian McCallister <brianm@skife.org>
Brian Olsen <brian@maven-group.org>
Brian Shumate <brian@couchbase.com>
Briehan Lombaard <briehan.lombaard@gmail.com>
Bruno Bigras <bigras.bruno@gmail.com>
Caleb Spare <cespare@gmail.com>
Calen Pennington <cale@edx.org>
Carl X. Su <bcbcarl@gmail.com>
Charles Hooper <charles.hooper@dotcloud.com>
Charles Lindsay <chaz@chazomatic.us>
Chia-liang Kao <clkao@clkao.org>
Chris St. Pierre <chris.a.st.pierre@gmail.com>
Christopher Currie <codemonkey+github@gmail.com>
Christopher Rigor <crigor@gmail.com>
Christophe Troestler <christophe.Troestler@umons.ac.be>
Clayton Coleman <ccoleman@redhat.com>
Colin Dunklau <colin.dunklau@gmail.com>
Colin Rice <colin@daedrum.net>
Cory Forsyth <cory.forsyth@gmail.com>
cressie176 <github@stephen-cresswell.net>
Dan Buch <d.buch@modcloth.com>
Dan Hirsch <thequux@upstandinghackers.com>
Daniel Exner <dex@dragonslave.de>
Daniel Garcia <daniel@danielgarcia.info>
Daniel Gasienica <daniel@gasienica.ch>
Daniel Mizyrycki <daniel.mizyrycki@dotcloud.com>
Daniel Norberg <dano@spotify.com>
Daniel Nordberg <dnordberg@gmail.com>
Daniel Robinson <gottagetmac@gmail.com>
Daniel Von Fange <daniel@leancoder.com>
Daniel YC Lin <dlin.tw@gmail.com>
Danny Yates <danny@codeaholics.org>
Darren Coxall <darren@darrencoxall.com>
David Anderson <dave@natulte.net>
David Calavera <david.calavera@gmail.com>
David Mcanulty <github@hellspark.com>
David Sissitka <me@dsissitka.com>
Deni Bertovic <deni@kset.org>
Dinesh Subhraveti <dineshs@altiscale.com>
dkumor <daniel@dkumor.com>
Dmitry Demeshchuk <demeshchuk@gmail.com>
Dominik Honnef <dominik@honnef.co>
Don Spaulding <donspauldingii@gmail.com>
Dražen Lučanin <kermit666@gmail.com>
Dr Nic Williams <drnicwilliams@gmail.com>
Dustin Sallings <dustin@spy.net>
Edmund Wagner <edmund-wagner@web.de>
Elias Probst <mail@eliasprobst.eu>
Emil Hernvall <emil@quench.at>
Emily Rose <emily@contactvibe.com>
Eric Hanchrow <ehanchrow@ine.com>
Eric Lee <thenorthsecedes@gmail.com>
Eric Myhre <hash@exultant.us>
Erno Hopearuoho <erno.hopearuoho@gmail.com>
eugenkrizo <eugen.krizo@gmail.com>
Evan Krall <krall@yelp.com>
Evan Phoenix <evan@fallingsnow.net>
Evan Wies <evan@neomantra.net>
Eystein Måløy Stenberg <eystein.maloy.stenberg@cfengine.com>
ezbercih <cem.ezberci@gmail.com>
Fabio Falci <fabiofalci@gmail.com>
Fabio Rehm <fgrehm@gmail.com>
Fabrizio Regini <freegenie@gmail.com>
Faiz Khan <faizkhan00@gmail.com>
Fareed Dudhia <fareeddudhia@googlemail.com>
Fernando <fermayo@gmail.com>
Flavio Castelli <fcastelli@suse.com>
Francisco Souza <f@souza.cc>
Frank Macreery <frank@macreery.com>
Frederick F. Kautz IV <fkautz@alumni.cmu.edu>
Frederik Loeffert <frederik@zitrusmedia.de>
Freek Kalter <freek@kalteronline.org>
Gabe Rosenhouse <gabe@missionst.com>
Gabriel Monroy <gabriel@opdemand.com>
Galen Sampson <galen.sampson@gmail.com>
Gareth Rushgrove <gareth@morethanseven.net>
Gereon Frey <gereon.frey@dynport.de>
Gert van Valkenhoef <g.h.m.van.valkenhoef@rug.nl>
Graydon Hoare <graydon@pobox.com>
Greg Thornton <xdissent@me.com>
grunny <mwgrunny@gmail.com>
Guillaume J. Charmes <guillaume.charmes@docker.com>
Gurjeet Singh <gurjeet@singh.im>
Guruprasad <lgp171188@gmail.com>
Harley Laue <losinggeneration@gmail.com>
Hector Castro <hectcastro@gmail.com>
Guillaume J. Charmes <guillaume.charmes@dotcloud.com>
Hunter Blanks <hunter@twilio.com>
inglesp <peter.inglesby@gmail.com>
Isaac Dupree <antispam@idupree.com>
Isao Jonas <isao.jonas@gmail.com>
Jake Moshenko <jake@devtable.com>
James Allen <jamesallen0108@gmail.com>
James Carr <james.r.carr@gmail.com>
James Mills <prologic@shortcircuit.net.au>
James Turnbull <james@lovedthanlost.net>
jaseg <jaseg@jaseg.net>
Jason McVetta <jason.mcvetta@gmail.com>
Jean-Baptiste Barth <jeanbaptiste.barth@gmail.com>
Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
Jeff Lindsay <progrium@gmail.com>
Jeremy Grosser <jeremy@synack.me>
Jérôme Petazzoni <jerome.petazzoni@dotcloud.com>
Jesse Dubay <jesse@thefortytwo.net>
Jim Alateras <jima@comware.com.au>
Jimmy Cuadra <jimmy@jimmycuadra.com>
Joe Beda <joe.github@bedafamily.com>
Joe Van Dyk <joe@tanga.com>
Joffrey F <joffrey@dotcloud.com>
Johan Euphrosine <proppy@google.com>
Johannes 'fish' Ziemke <github@freigeist.org>
Johan Rydberg <johan.rydberg@gmail.com>
John Costa <john.costa@gmail.com>
John Feminella <jxf@jxf.me>
John Gardiner Myers <jgmyers@proofpoint.com>
John Warwick <jwarwick@gmail.com>
Jonas Pfenniger <jonas@pfenniger.name>
Jonathan Mueller <j.mueller@apoveda.ch>
Jonathan Rudenberg <jonathan@titanous.com>
Jon Wedaman <jweede@gmail.com>
Joost Cassee <joost@cassee.net>
Jordan Arentsen <blissdev@gmail.com>
Jordan Sissel <jls@semicomplete.com>
Joseph Anthony Pasquale Holsten <joseph@josephholsten.com>
Joseph Hager <ajhager@gmail.com>
Josh Hawn <josh.hawn@docker.com>
Josh Poimboeuf <jpoimboe@redhat.com>
JP <jpellerin@leapfrogonline.com>
Julien Barbier <write0@gmail.com>
Julien Dubois <julien.dubois@gmail.com>
Justin Force <justin.force@gmail.com>
Justin Plock <jplock@users.noreply.github.com>
Karan Lyons <karan@karanlyons.com>
Karl Grzeszczak <karlgrz@gmail.com>
Kawsar Saiyeed <kawsar.saiyeed@projiris.com>
Keli Hu <dev@keli.hu>
Jérôme Petazzoni <jerome.petazzoni@dotcloud.com>
Ken Cochrane <kencochrane@gmail.com>
Kevin Clark <kevin.clark@gmail.com>
Kevin J. Lynagh <kevin@keminglabs.com>
Kevin Wallace <kevin@pentabarf.net>
Keyvan Fatehi <keyvanfatehi@gmail.com>
kim0 <email.ahmedkamal@googlemail.com>
Kim BKC Carlbacker <kim.carlbacker@gmail.com>
Kimbro Staken <kstaken@kstaken.com>
Kiran Gangadharan <kiran.daredevil@gmail.com>
Konstantin Pelykh <kpelykh@zettaset.com>
Kyle Conroy <kyle.j.conroy@gmail.com>
Laurie Voss <github@seldo.com>
Liang-Chi Hsieh <viirya@gmail.com>
Lokesh Mandvekar <lsm5@redhat.com>
Louis Opter <kalessin@kalessin.fr>
lukaspustina <lukas.pustina@centerdevice.com>
Mahesh Tiyyagura <tmahesh@gmail.com>
Manuel Meurer <manuel@krautcomputing.com>
Manuel Woelker <github@manuel.woelker.org>
Marc Kuo <kuomarc2@gmail.com>
Marco Hennings <marco.hennings@freiheit.com>
Marcus Farkas <toothlessgear@finitebox.com>
Marcus Ramberg <marcus@nordaaker.com>
Marek Goldmann <marek.goldmann@gmail.com>
Mark Allen <mrallen1@yahoo.com>
Mark McGranaghan <mmcgrana@gmail.com>
Marko Mikulicic <mmikulicic@gmail.com>
Markus Fix <lispmeister@gmail.com>
Martijn van Oosterhout <kleptog@svana.org>
Martin Redmond <martin@tinychat.com>
Mathieu Le Marec - Pasquet <kiorky@cryptelium.net>
Matt Apperson <me@mattapperson.com>
Matt Bachmann <bachmann.matt@gmail.com>
Matt Haggard <haggardii@gmail.com>
Matthew Mueller <mattmuelle@gmail.com>
mattymo <raytrac3r@gmail.com>
Maxime Petazzoni <max@signalfuse.com>
Maxim Treskin <zerthurd@gmail.com>
meejah <meejah@meejah.ca>
Michael Crosby <michael@crosbymichael.com>
Michael Gorsuch <gorsuch@github.com>
Michael Stapelberg <michael+gh@stapelberg.de>
Miguel Angel Fernández <elmendalerenda@gmail.com>
Mike Gaffney <mike@uberu.com>
Mike Naberezny <mike@naberezny.com>
Mikhail Sobolev <mss@mawhrin.net>
Mohit Soni <mosoni@ebay.com>
Morten Siebuhr <sbhr@sbhr.dk>
Nan Monnand Deng <monnand@gmail.com>
Nate Jones <nate@endot.org>
Nathan Kleyn <nathan@nathankleyn.com>
Nelson Chen <crazysim@gmail.com>
Niall O'Higgins <niallo@unworkable.org>
Nick Payne <nick@kurai.co.uk>
Nick Stenning <nick.stenning@digital.cabinet-office.gov.uk>
Nick Stinemates <nick@stinemates.org>
Nicolas Dudebout <nicolas.dudebout@gatech.edu>
Nicolas Kaiser <nikai@nikai.net>
Nolan Darilek <nolan@thewordnerd.info>
odk- <github@odkurzacz.org>
Oguz Bilgic <fisyonet@gmail.com>
Ole Reifschneider <mail@ole-reifschneider.de>
O.S.Tezer <ostezer@gmail.com>
pandrew <letters@paulnotcom.se>
Pascal Borreli <pascal@borreli.com>
pattichen <craftsbear@gmail.com>
Paul Bowsher <pbowsher@globalpersonals.co.uk>
Paul Hammond <paul@paulhammond.org>
Paul Lietar <paul@lietar.net>
Paul Morie <pmorie@gmail.com>
Paul Nasrat <pnasrat@gmail.com>
Paul <paul9869@gmail.com>
Peter Braden <peterbraden@peterbraden.co.uk>
Peter Waller <peter@scraperwiki.com>
Phil Spitler <pspitler@gmail.com>
Piergiuliano Bossi <pgbossi@gmail.com>
Pierre-Alain RIVIERE <pariviere@ippon.fr>
Piotr Bogdan <ppbogdan@gmail.com>
pysqz <randomq@126.com>
Quentin Brossard <qbrossard@gmail.com>
Rafal Jeczalik <rjeczalik@gmail.com>
Ramkumar Ramachandra <artagnon@gmail.com>
Ramon van Alteren <ramon@vanalteren.nl>
Renato Riccieri Santos Zannon <renato.riccieri@gmail.com>
rgstephens <greg@udon.org>
Rhys Hiltner <rhys@twitch.tv>
Richo Healey <richo@psych0tik.net>
Rick Bradley <rick@users.noreply.github.com>
Robert Obryk <robryk@gmail.com>
Roberto G. Hashioka <roberto.hashioka@docker.com>
Roberto Hashioka <roberto_hashioka@hotmail.com>
Rodrigo Vaz <rodrigo.vaz@gmail.com>
Roel Van Nyen <roel.vannyen@gmail.com>
Roger Peppe <rogpeppe@gmail.com>
Ryan Fowler <rwfowler@gmail.com>
Ryan O'Donnell <odonnellryanc@gmail.com>
Ryan Seto <ryanseto@yak.net>
Sam Alba <sam.alba@gmail.com>
Sam J Sharpe <sam.sharpe@digital.cabinet-office.gov.uk>
Samuel Andaya <samuel@andaya.net>
Scott Bessler <scottbessler@gmail.com>
Sean Cronin <seancron@gmail.com>
Sean P. Kane <skane@newrelic.com>
Shawn Landden <shawn@churchofgit.com>
Shawn Siefkas <shawn.siefkas@meredith.com>
Shih-Yuan Lee <fourdollars@gmail.com>
shin- <joffrey@docker.com>
Silas Sewell <silas@sewell.org>
Simon Taranto <simon.taranto@gmail.com>
Sjoerd Langkemper <sjoerd-github@linuxonly.nl>
Solomon Hykes <solomon@dotcloud.com>
Song Gao <song@gao.io>
Sridatta Thatipamala <sthatipamala@gmail.com>
Sridhar Ratnakumar <sridharr@activestate.com>
Steeve Morin <steeve.morin@gmail.com>
Stefan Praszalowicz <stefan@greplin.com>
sudosurootdev <sudosurootdev@gmail.com>
Sven Dowideit <svendowideit@home.org.au>
Sylvain Bellemare <sylvain.bellemare@ezeep.com>
tang0th <tang0th@gmx.com>
Tatsuki Sugiura <sugi@nemui.org>
Tehmasp Chaudhri <tehmasp@gmail.com>
Thatcher Peskens <thatcher@dotcloud.com>
Thermionix <bond711@gmail.com>
Thijs Terlouw <thijsterlouw@gmail.com>
Thomas Bikeev <thomas.bikeev@mac.com>
Thomas Frössman <thomasf@jossystem.se>
Thomas Hansen <thomas.hansen@gmail.com>
Thomas LEVEIL <thomasleveil@gmail.com>
Tianon Gravi <admwiggin@gmail.com>
Tim Bosse <maztaim@users.noreply.github.com>
Tim Terhorst <mynamewastaken+git@gmail.com>
Tobias Bieniek <Tobias.Bieniek@gmx.de>
Tobias Schmidt <ts@soundcloud.com>
Tobias Schwab <tobias.schwab@dynport.de>
Todd Lunter <tlunter@gmail.com>
Tom Hulihan <hulihan.tom159@gmail.com>
Tommaso Visconti <tommaso.visconti@gmail.com>
Travis Cline <travis.cline@gmail.com>
Tyler Brock <tyler.brock@gmail.com>
Tzu-Jung Lee <roylee17@gmail.com>
Ulysse Carion <ulyssecarion@gmail.com>
Troy Howard <thoward37@gmail.com>
unclejack <unclejacksons@gmail.com>
vgeta <gopikannan.venugopalsamy@gmail.com>
Victor Coisne <victor.coisne@dotcloud.com>
Victor Lyuboslavsky <victor@victoreda.com>
Victor Vieux <victor.vieux@docker.com>
Vincent Batts <vbatts@redhat.com>
Vincent Bernat <bernat@luffy.cx>
Vincent Woo <me@vincentwoo.com>
Vinod Kulkarni <vinod.kulkarni@gmail.com>
Vitor Monteiro <vmrmonteiro@gmail.com>
Victor Vieux <victor.vieux@dotcloud.com>
Vivek Agarwal <me@vivek.im>
Vladimir Kirillov <proger@wilab.org.ua>
Vladimir Rutsky <iamironbob@gmail.com>
Walter Stanish <walter@pratyeka.org>
WarheadsSE <max@warheads.net>
Wes Morgan <cap10morgan@gmail.com>
Will Dietz <w@wdtz.org>
William Delanoue <william.delanoue@gmail.com>
Will Rouesnel <w.rouesnel@gmail.com>
Will Weaver <monkey@buildingbananas.com>
Xiuming Chen <cc@cxm.cc>
Yang Bai <hamo.by@gmail.com>
Yurii Rashkovskii <yrashk@gmail.com>
Zain Memon <zain@inzain.net>
Zaiste! <oh@zaiste.net>
Zilin Du <zilin.du@gmail.com>
zimbatm <zimbatm@zimbatm.com>

(File diff suppressed because it is too large.)

CONTRIBUTING.md

@@ -1,23 +1,10 @@
# Contributing to Docker
Want to hack on Docker? Awesome! Here are instructions to get you
started. They are probably not perfect, please let us know if anything
feels wrong or incomplete.
Want to hack on Docker? Awesome! There are instructions to get you
started on the website: http://docker.io/gettingstarted.html
## Reporting Issues
When reporting [issues](https://github.com/dotcloud/docker/issues)
on GitHub please include your host OS (Ubuntu 12.04, Fedora 19, etc),
the output of `uname -a` and the output of `docker version` along with
the output of `docker info`. Please include the steps required to reproduce
the problem if possible and applicable.
This information will help us review and fix your issue faster.
## Build Environment
For instructions on setting up your development environment, please
see our dedicated [dev environment setup
docs](http://docs.docker.io/en/latest/contributing/devenvironment/).
They are probably not perfect, please let us know if anything feels
wrong or incomplete.
## Contribution guidelines
@@ -39,7 +26,7 @@ that feature *on top of* docker.
### Discuss your design on the mailing list
We recommend discussing your plans [on the mailing
list](https://groups.google.com/forum/?fromgroups#!forum/docker-dev)
list](https://groups.google.com/forum/?fromgroups#!forum/docker-club)
before starting to code - especially for more ambitious contributions.
This gives other contributors a chance to point you in the right
direction, give feedback on your design, and maybe point out if someone
@@ -47,7 +34,7 @@ else is working on the same thing.
### Create issues...
Any significant improvement should be documented as [a GitHub
Any significant improvement should be documented as [a github
issue](https://github.com/dotcloud/docker/issues) before anybody
starts working on it.
@@ -71,10 +58,8 @@ Submit unit tests for your changes. Go has a great test framework built in; use
it! Take a look at existing tests for inspiration. Run the full test suite on
your branch before submitting a pull request.
Update the documentation when creating or modifying features. Test
your documentation changes for clarity, concision, and correctness, as
well as a clean documentation build. See ``docs/README.md`` for more
information on building the docs and how docs get released.
Make sure you include relevant updates or additions to documentation when
creating or modifying features.
Write clean code. Universally formatted code promotes ease of writing, reading,
and maintenance. Always run `go fmt` before committing your changes. Most
@@ -88,8 +73,6 @@ curl -o .git/hooks/pre-commit https://raw.github.com/edsrzf/gofmt-git-hook/maste
Pull requests descriptions should be as clear as possible and include a
reference to all the issues that they address.
Pull requests must not contain commits from other users or branches.
Code review comments may be added to your pull request. Discuss, then make the
suggested modifications and push additional commits to your feature branch. Be
sure to post a comment after pushing. The new commits will show up in the pull
@@ -108,97 +91,3 @@ Add your name to the AUTHORS file, but make sure the list is sorted and your
name and email address match your git configuration. The AUTHORS file is
regenerated occasionally from the git commit history, so a mismatch may result
in your changes being overwritten.
### Merge approval
Docker maintainers use LGTM (looks good to me) in comments on the code review
to indicate acceptance.
A change requires LGTMs from an absolute majority of the maintainers of each
component affected. For example, if a change affects docs/ and registry/, it
needs an absolute majority from the maintainers of docs/ AND, separately, an
absolute majority of the maintainers of registry.
For more details see [MAINTAINERS.md](hack/MAINTAINERS.md)
### Sign your work
The sign-off is a simple line at the end of the explanation for the
patch, which certifies that you wrote it or otherwise have the right to
pass it on as an open-source patch. The rules are pretty simple: if you
can certify the below (from
[developercertificate.org](http://developercertificate.org/)):
```
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
```
then you just add a line to every git commit message:
Docker-DCO-1.1-Signed-off-by: Joe Smith <joe.smith@email.com> (github: github_handle)
using your real name (sorry, no pseudonyms or anonymous contributions.)
One way to automate this is to customise your git ``commit.template`` by adding
a ``prepare-commit-msg`` hook to your docker checkout:
```
curl -o .git/hooks/prepare-commit-msg https://raw.github.com/dotcloud/docker/master/contrib/prepare-commit-msg.hook && chmod +x .git/hooks/prepare-commit-msg
```
* Note: the above script expects to find your GitHub user name in ``git config --get github.user``
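
For illustration, a minimal hand-rolled hook in the same spirit might look like the sketch below. The real contrib/prepare-commit-msg.hook installed by the curl command above is authoritative; the guard and variable handling here are assumptions.

```bash
#!/bin/sh
# Hypothetical minimal prepare-commit-msg hook (sketch only): append the
# Docker DCO sign-off line using the committer's git configuration.
# git passes the path of the commit message file as $1.
NAME="$(git config user.name)"
EMAIL="$(git config user.email)"
GITHUB_USER="$(git config --get github.user)"

# Only append if a sign-off line is not already present:
grep -qs '^Docker-DCO-1.1-Signed-off-by:' "$1" || \
    printf '\nDocker-DCO-1.1-Signed-off-by: %s <%s> (github: %s)\n' \
        "$NAME" "$EMAIL" "$GITHUB_USER" >> "$1"
```
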
#### Small patch exception
There are several exceptions to the signing requirement. Currently these are:
* Your patch fixes spelling or grammar errors.
* Your patch is a single line change to documentation.
If you have any questions, please refer to the FAQ in the [docs](http://docs.docker.io)
### How can I become a maintainer?
* Step 1: learn the component inside out
* Step 2: make yourself useful by contributing code, bugfixes, support etc.
* Step 3: volunteer on the irc channel (#docker@freenode)
* Step 4: propose yourself at a scheduled docker meeting in #docker-dev
Don't forget: being a maintainer is a time investment. Make sure you will have time to make yourself available.
You don't have to be a maintainer to make a difference on the project!

Dockerfile (deleted)

@@ -1,99 +0,0 @@
# This file describes the standard way to build Docker, using docker
#
# Usage:
#
# # Assemble the full dev environment. This is slow the first time.
# docker build -t docker .
#
# # Mount your source in an interactive container for quick testing:
# docker run -v `pwd`:/go/src/github.com/dotcloud/docker --privileged -i -t docker bash
#
# # Run the test suite:
# docker run --privileged docker hack/make.sh test
#
# # Publish a release:
# docker run --privileged \
# -e AWS_S3_BUCKET=baz \
# -e AWS_ACCESS_KEY=foo \
# -e AWS_SECRET_KEY=bar \
# -e GPG_PASSPHRASE=gloubiboulga \
# docker hack/release.sh
#
# Note: Apparmor used to mess with privileged mode, but this is no longer
# the case. Therefore, you don't have to disable it anymore.
#
docker-version 0.6.1
FROM ubuntu:13.10
MAINTAINER Tianon Gravi <admwiggin@gmail.com> (@tianon)
# Packaged dependencies
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \
apt-utils \
aufs-tools \
automake \
btrfs-tools \
build-essential \
curl \
dpkg-sig \
git \
iptables \
libapparmor-dev \
libcap-dev \
libsqlite3-dev \
mercurial \
reprepro \
ruby1.9.1 \
ruby1.9.1-dev \
s3cmd=1.1.0* \
--no-install-recommends
# Get and compile LXC 0.8 (since it is the most stable)
RUN git clone --no-checkout https://github.com/lxc/lxc.git /usr/local/lxc && cd /usr/local/lxc && git checkout -q lxc-0.8.0
RUN cd /usr/local/lxc && ./autogen.sh && ./configure --disable-docs && make && make install
# Get lvm2 source for compiling statically
RUN git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103
# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags
# note: we don't use "git clone -b" above because it then spews big nasty warnings about 'detached HEAD' state that we can't silence as easily as we can silence them using "git checkout" directly
# Compile and install lvm2
RUN cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper
# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL
# Install Go
RUN curl -s https://go.googlecode.com/files/go1.2.1.src.tar.gz | tar -v -C /usr/local -xz
ENV PATH /usr/local/go/bin:$PATH
ENV GOPATH /go:/go/src/github.com/dotcloud/docker/vendor
RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1
# Compile Go for cross compilation
ENV DOCKER_CROSSPLATFORMS \
linux/386 linux/arm \
darwin/amd64 darwin/386 \
freebsd/amd64 freebsd/386 freebsd/arm
# (set an explicit GOARM of 5 for maximum compatibility)
ENV GOARM 5
RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'
# Grab Go's cover tool for dead-simple code coverage testing
RUN go get code.google.com/p/go.tools/cmd/cover
# TODO replace FPM with some very minimal debhelper stuff
RUN gem install --no-rdoc --no-ri fpm --version 1.0.2
# Setup s3cmd config
RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg
# Set user.email so crosbymichael's in-container merge commits go smoothly
RUN git config --global user.email 'docker-dummy@example.com'
VOLUME /var/lib/docker
WORKDIR /go/src/github.com/dotcloud/docker
ENV DOCKER_BUILDTAGS apparmor selinux
# Wrap all commands in the "docker-in-docker" script to allow nested containers
ENTRYPOINT ["hack/dind"]
# Upload docker source
ADD . /go/src/github.com/dotcloud/docker
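
Putting the usage comments from the top of this Dockerfile together, the intended edit-compile-test loop looks roughly like this (a sketch; the hack/make.sh targets are the ones named in the file's own header and in the Makefile later in this comparison):

```bash
# Assemble the dev image (slow the first time, cached afterwards):
docker build -t docker .

# Compile a docker binary inside the dev container, with the working
# tree bind-mounted so results land back on the host:
docker run --privileged -v `pwd`:/go/src/github.com/dotcloud/docker \
    docker hack/make.sh binary

# Run the test suite:
docker run --privileged docker hack/make.sh test
```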

FIXME (deleted)

@@ -1,24 +0,0 @@
## FIXME
This file is a loose collection of things to improve in the codebase, for the internal
use of the maintainers.
They are not big enough to be in the roadmap, not user-facing enough to be github issues,
and not important enough to be discussed in the mailing list.
They are just like FIXME comments in the source code, except we're not sure where in the source
to put them - so we put them here :)
* Run linter on codebase
* Unify build commands and regular commands
* Move source code into src/ subdir for clarity
* docker build: on non-existent local path for ADD, don't show full absolute path on the host
* use size header for progress bar in pull
* Clean up context upload in build!!!
* Parallel pull
* Upgrade dockerd without stopping containers
* Simple command to remove all untagged images (`docker rmi $(docker images | awk '/^<none>/ { print $3 }')`)
* Simple command to clean up containers for disk space
* Clean up the ProgressReader api, it's a PITA to use

LICENSE

@@ -176,7 +176,18 @@
END OF TERMS AND CONDITIONS
Copyright 2014 Docker, Inc.
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

MAINTAINERS (deleted)

@@ -1,7 +0,0 @@
Solomon Hykes <solomon@dotcloud.com> (@shykes)
Guillaume J. Charmes <guillaume@docker.com> (@creack)
Victor Vieux <vieux@docker.com> (@vieux)
Michael Crosby <michael@crosbymichael.com> (@crosbymichael)
.travis.yml: Tianon Gravi <admwiggin@gmail.com> (@tianon)
Dockerfile: Tianon Gravi <admwiggin@gmail.com> (@tianon)
Makefile: Tianon Gravi <admwiggin@gmail.com> (@tianon)

Makefile

@@ -1,52 +1,78 @@
.PHONY: all binary build cross default docs docs-build docs-shell shell test test-integration test-integration-cli
DOCKER_PACKAGE := github.com/dotcloud/docker
RELEASE_VERSION := $(shell git tag | grep -E "v[0-9\.]+$$" | sort -nr | head -n 1)
SRCRELEASE := docker-$(RELEASE_VERSION)
BINRELEASE := docker-$(RELEASE_VERSION).tgz
# to allow `make BINDDIR=. shell` or `make BINDDIR= test`
BINDDIR := bundles
# to allow `make DOCSPORT=9000 docs`
DOCSPORT := 8000
GIT_ROOT := $(shell git rev-parse --show-toplevel)
BUILD_DIR := $(CURDIR)/.gopath
GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH))
DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH),:$(GIT_BRANCH))
DOCKER_MOUNT := $(if $(BINDDIR),-v "$(CURDIR)/$(BINDDIR):/go/src/github.com/dotcloud/docker/$(BINDDIR)")
GOPATH ?= $(BUILD_DIR)
export GOPATH
DOCKER_RUN_DOCKER := docker run --rm -it --privileged -e TESTFLAGS -e DOCKER_GRAPHDRIVER -e DOCKER_EXECDRIVER $(DOCKER_MOUNT) "$(DOCKER_IMAGE)"
DOCKER_RUN_DOCS := docker run --rm -it -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)"
GO_OPTIONS ?=
ifeq ($(VERBOSE), 1)
GO_OPTIONS += -v
endif
default: binary
GIT_COMMIT = $(shell git rev-parse --short HEAD)
GIT_STATUS = $(shell test -n "`git status --porcelain`" && echo "+CHANGES")
all: build
$(DOCKER_RUN_DOCKER) hack/make.sh
BUILD_OPTIONS = -ldflags "-X main.GIT_COMMIT $(GIT_COMMIT)$(GIT_STATUS)"
binary: build
$(DOCKER_RUN_DOCKER) hack/make.sh binary
SRC_DIR := $(GOPATH)/src
cross: build
$(DOCKER_RUN_DOCKER) hack/make.sh binary cross
DOCKER_DIR := $(SRC_DIR)/$(DOCKER_PACKAGE)
DOCKER_MAIN := $(DOCKER_DIR)/docker
docs: docs-build
$(DOCKER_RUN_DOCS)
DOCKER_BIN_RELATIVE := bin/docker
DOCKER_BIN := $(CURDIR)/$(DOCKER_BIN_RELATIVE)
docs-shell: docs-build
$(DOCKER_RUN_DOCS) bash
.PHONY: all clean test hack release srcrelease $(BINRELEASE) $(SRCRELEASE) $(DOCKER_BIN) $(DOCKER_DIR)
test: build
$(DOCKER_RUN_DOCKER) hack/make.sh binary test test-integration test-integration-cli
all: $(DOCKER_BIN)
test-integration: build
$(DOCKER_RUN_DOCKER) hack/make.sh test-integration
$(DOCKER_BIN): $(DOCKER_DIR)
@mkdir -p $(dir $@)
@(cd $(DOCKER_MAIN); go build $(GO_OPTIONS) $(BUILD_OPTIONS) -o $@)
@echo $(DOCKER_BIN_RELATIVE) is created.
test-integration-cli: build
$(DOCKER_RUN_DOCKER) hack/make.sh binary test-integration-cli
$(DOCKER_DIR):
@mkdir -p $(dir $@)
@rm -f $@
@ln -sf $(CURDIR)/ $@
@(cd $(DOCKER_MAIN); go get $(GO_OPTIONS))
shell: build
$(DOCKER_RUN_DOCKER) bash
whichrelease:
echo $(RELEASE_VERSION)
build: bundles
docker build -t "$(DOCKER_IMAGE)" .
release: $(BINRELEASE)
srcrelease: $(SRCRELEASE)
deps: $(DOCKER_DIR)
docs-build:
docker build -t "$(DOCKER_DOCS_IMAGE)" docs
# A clean checkout of $RELEASE_VERSION, with vendored dependencies
$(SRCRELEASE):
rm -fr $(SRCRELEASE)
git clone $(GIT_ROOT) $(SRCRELEASE)
cd $(SRCRELEASE); git checkout -q $(RELEASE_VERSION)
bundles:
mkdir bundles
# A binary release ready to be uploaded to a mirror
$(BINRELEASE): $(SRCRELEASE)
rm -f $(BINRELEASE)
cd $(SRCRELEASE); make; cp -R bin docker-$(RELEASE_VERSION); tar -f ../$(BINRELEASE) -zv -c docker-$(RELEASE_VERSION)
clean:
@rm -rf $(dir $(DOCKER_BIN))
ifeq ($(GOPATH), $(BUILD_DIR))
@rm -rf $(BUILD_DIR)
else ifneq ($(DOCKER_DIR), $(realpath $(DOCKER_DIR)))
@rm -f $(DOCKER_DIR)
endif
test: all
@(cd $(DOCKER_DIR); sudo -E go test $(GO_OPTIONS))
fmt:
@gofmt -s -l -w .
hack:
cd $(CURDIR)/buildbot && vagrant up
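
The variables at the top of the new Makefile are designed to be overridden on the command line; its own comments call out BINDDIR and DOCSPORT. A few example invocations based on those comments:

```bash
make build               # build the dev image (docker build -t docker .)
make binary              # compile the docker binary via hack/make.sh
make BINDDIR=. shell     # dev shell with the current directory bind-mounted
make DOCSPORT=9000 docs  # serve the Sphinx docs on host port 9000
make test                # binary + unit + integration + cli integration tests
```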

NOTICE

@@ -1,19 +1,6 @@
Docker
Copyright 2012-2014 Docker, Inc.
Copyright 2012-2013 dotCloud, inc.
This product includes software developed at Docker, Inc. (http://www.docker.com).
This product includes software developed at dotCloud, inc. (http://www.dotcloud.com).
This product contains software (https://github.com/kr/pty) developed
by Keith Rarick, licensed under the MIT License.
The following is courtesy of our legal counsel:
Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.
For more information, please see http://www.bis.doc.gov
See also http://www.apache.org/dev/crypto.html and/or seek legal counsel.
This product contains software (https://github.com/kr/pty) developed by Keith Rarick, licensed under the MIT License.

README.md

@@ -1,192 +1,317 @@
Docker: the Linux container engine
==================================
Docker: the Linux container runtime
===================================
Docker is an open source project to pack, ship and run any application
as a lightweight container
Docker complements LXC with a high-level API which operates at the process level. It runs unix processes with strong guarantees of isolation and repeatability across servers.
Docker containers are both *hardware-agnostic* and *platform-agnostic*.
This means that they can run anywhere, from your laptop to the largest
EC2 compute instance and everything in between - and they don't require
that you use a particular language, framework or packaging system. That
makes them great building blocks for deploying and scaling web apps,
databases and backend services without depending on a particular stack
or provider.
Docker is a great building block for automating distributed systems: large-scale web deployments, database clusters, continuous deployment systems, private PaaS, service-oriented architectures, etc.
Docker is an open-source implementation of the deployment engine which
powers [dotCloud](http://dotcloud.com), a popular Platform-as-a-Service.
It benefits directly from the experience accumulated over several years
of large-scale operation and support of hundreds of thousands of
applications and databases.
![Docker L](docs/sources/static_files/lego_docker.jpg "Docker")
![Docker L](docs/theme/docker/static/img/dockerlogo-h.png "Docker")
* *Heterogeneous payloads*: any combination of binaries, libraries, configuration files, scripts, virtualenvs, jars, gems, tarballs, you name it. No more juggling between domain-specific tools. Docker can deploy and run them all.
## Better than VMs
* *Any server*: docker can run on any x64 machine with a modern linux kernel - whether it's a laptop, a bare metal server or a VM. This makes it perfect for multi-cloud deployments.
A common method for distributing applications and sandboxing their
execution is to use virtual machines, or VMs. Typical VM formats are
VMWare's vmdk, Oracle Virtualbox's vdi, and Amazon EC2's ami. In theory
these formats should allow every developer to automatically package
their application into a "machine" for easy distribution and deployment.
In practice, that almost never happens, for a few reasons:
* *Isolation*: docker isolates processes from each other and from the underlying host, using lightweight containers.
* *Size*: VMs are very large which makes them impractical to store
and transfer.
* *Performance*: running VMs consumes significant CPU and memory,
which makes them impractical in many scenarios, for example local
development of multi-tier applications, and large-scale deployment
of cpu and memory-intensive applications on large numbers of
machines.
* *Portability*: competing VM environments don't play well with each
other. Although conversion tools do exist, they are limited and
add even more overhead.
* *Hardware-centric*: VMs were designed with machine operators in
mind, not software developers. As a result, they offer very
limited tooling for what developers need most: building, testing
and running their software. For example, VMs offer no facilities
for application versioning, monitoring, configuration, logging or
service discovery.
By contrast, Docker relies on a different sandboxing method known as
*containerization*. Unlike traditional virtualization, containerization
takes place at the kernel level. Most modern operating system kernels
now support the primitives necessary for containerization, including
Linux with [openvz](http://openvz.org),
[vserver](http://linux-vserver.org) and more recently
[lxc](http://lxc.sourceforge.net), Solaris with
[zones](http://docs.oracle.com/cd/E26502_01/html/E29024/preface-1.html#scrolltoc)
and FreeBSD with
[Jails](http://www.freebsd.org/doc/handbook/jails.html).
Docker builds on top of these low-level primitives to offer developers a
portable format and runtime environment that solves all 4 problems.
Docker containers are small (and their transfer can be optimized with
layers), they have basically zero memory and cpu overhead, they are
completely portable and are designed from the ground up with an
application-centric design.
The best part: because Docker operates at the OS level, it can still be
run inside a VM!
## Plays well with others
Docker does not require that you buy into a particular programming
language, framework, packaging system or configuration language.
Is your application a Unix process? Does it use files, tcp connections,
environment variables, standard Unix streams and command-line arguments
as inputs and outputs? Then Docker can run it.
Can your application's build be expressed as a sequence of such
commands? Then Docker can build it.
## Escape dependency hell
A common problem for developers is the difficulty of managing all
their application's dependencies in a simple and automated way.
This is usually difficult for several reasons:
* *Cross-platform dependencies*. Modern applications often depend on
a combination of system libraries and binaries, language-specific
packages, framework-specific modules, internal components
developed for another project, etc. These dependencies live in
different "worlds" and require different tools - these tools
typically don't work well with each other, requiring awkward
custom integrations.
* Conflicting dependencies. Different applications may depend on
different versions of the same dependency. Packaging tools handle
these situations with various degrees of ease - but they all
handle them in different and incompatible ways, which again forces
the developer to do extra work.
* Custom dependencies. A developer may need to prepare a custom
version of their application's dependency. Some packaging systems
can handle custom versions of a dependency, others can't - and all
of them handle it differently.
* *Repeatability*: because containers are isolated in their own filesystem, they behave the same regardless of where, when, and alongside what they run.
Docker solves dependency hell by giving the developer a simple way to
express *all* their application's dependencies in one place, and
streamline the process of assembling them. If this makes you think of
[XKCD 927](http://xkcd.com/927/), don't worry. Docker doesn't
*replace* your favorite packaging systems. It simply orchestrates
their use in a simple and repeatable way. How does it do that? With
layers.
Notable features
-----------------
Docker defines a build as running a sequence of Unix commands, one
after the other, in the same container. Build commands modify the
contents of the container (usually by installing new files on the
filesystem), the next command modifies it some more, etc. Since each
build command inherits the result of the previous commands, the
*order* in which the commands are executed expresses *dependencies*.
* Filesystem isolation: each process container runs in a completely separate root filesystem.
Here's a typical Docker build process:
* Resource isolation: system resources like cpu and memory can be allocated differently to each process container, using cgroups.
* Network isolation: each process container runs in its own network namespace, with a virtual interface and IP address of its own.
* Copy-on-write: root filesystems are created using copy-on-write, which makes deployment extremely fast, memory-cheap and disk-cheap.
* Logging: the standard streams (stdout/stderr/stdin) of each process container are collected and logged for real-time or batch retrieval.
* Change management: changes to a container's filesystem can be committed into a new image and re-used to create more containers. No templating or manual configuration required.
* Interactive shell: docker can allocate a pseudo-tty and attach to the standard input of any container, for example to run a throwaway interactive shell.
Install instructions
==================
Quick install on Ubuntu 12.04 and 12.10
---------------------------------------
```bash
FROM ubuntu:12.04
RUN apt-get update
RUN apt-get install -q -y python python-pip curl
RUN curl -L https://github.com/shykes/helloflask/archive/master.tar.gz | tar -xzv
RUN cd helloflask-master && pip install -r requirements.txt
curl get.docker.io | sh -x
```
Note that Docker doesn't care *how* dependencies are built - as long
as they can be built by running a Unix command in a container.
Binary installs
----------------
Docker supports the following binary installation methods.
Note that some methods are community contributions and not yet officially supported.
Getting started
===============
* [Ubuntu 12.04 and 12.10 (officially supported)](http://docs.docker.io/en/latest/installation/ubuntulinux/)
* [Arch Linux](http://docs.docker.io/en/latest/installation/archlinux/)
* [MacOS X (with Vagrant)](http://docs.docker.io/en/latest/installation/macos/)
* [Windows (with Vagrant)](http://docs.docker.io/en/latest/installation/windows/)
* [Amazon EC2 (with Vagrant)](http://docs.docker.io/en/latest/installation/amazon/)
Docker can be installed on your local machine as well as servers - both
bare metal and virtualized. It is available as a binary on most modern
Linux systems, or as a VM on Windows, Mac and other systems.
Installing from source
----------------------
We also offer an interactive tutorial for quickly learning the basics of
using Docker.
1. Make sure you have a [Go language](http://golang.org/doc/install) compiler and [git](http://git-scm.com) installed.
For up-to-date install instructions and online tutorials, see the
[Getting Started page](http://www.docker.io/gettingstarted/).
2. Checkout the source code
```bash
git clone http://github.com/dotcloud/docker
```
3. Build the docker binary
```bash
cd docker
make VERBOSE=1
sudo cp ./bin/docker /usr/local/bin/docker
```
Usage examples
==============
Docker can be used to run short-lived commands, long-running daemons
(app servers, databases etc.), interactive shell sessions, etc.
First run the docker daemon
---------------------------
You can find a [list of real-world
examples](http://docs.docker.io/en/latest/examples/) in the
documentation.
All the examples assume your machine is running the docker daemon. To run the docker daemon in the background, simply type:
```bash
# On a production system you want this running in an init script
sudo docker -d &
```
Now you can run docker in client mode: all commands will be forwarded to the docker daemon, so the client can run from any account.
```bash
# Now you can run docker commands from any account.
docker help
```
Throwaway shell in a base ubuntu image
--------------------------------------
```bash
docker pull ubuntu:12.10
# Run an interactive shell, allocate a tty, attach stdin and stdout
# To detach the tty without exiting the shell, use the escape sequence Ctrl-p + Ctrl-q
docker run -i -t ubuntu:12.10 /bin/bash
```
Starting a long-running worker process
--------------------------------------
```bash
# Start a very useful long-running process
JOB=$(docker run -d ubuntu /bin/sh -c "while true; do echo Hello world; sleep 1; done")
# Collect the output of the job so far
docker logs $JOB
# Kill the job
docker kill $JOB
```
Running an irc bouncer
----------------------
```bash
BOUNCER_ID=$(docker run -d -p 6667 -u irc shykes/znc $USER $PASSWORD)
echo "Configure your irc client to connect to port $(docker port $BOUNCER_ID 6667) of this machine"
```
Running Redis
-------------
```bash
REDIS_ID=$(docker run -d -p 6379 shykes/redis redis-server)
echo "Configure your redis client to connect to port $(docker port $REDIS_ID 6379) of this machine"
```
Share your own image!
---------------------
```bash
CONTAINER=$(docker run -d ubuntu:12.10 apt-get install -y curl)
docker commit -m "Installed curl" $CONTAINER $USER/betterbase
docker push $USER/betterbase
```
A list of publicly available images is [available here](https://github.com/dotcloud/docker/wiki/Public-docker-images).
Expose a service on a TCP port
------------------------------
```bash
# Expose port 4444 of this container, and tell netcat to listen on it
JOB=$(docker run -d -p 4444 base /bin/nc -l -p 4444)
# Which public port is NATed to my container?
PORT=$(docker port $JOB 4444)
# Connect to the public port via the host's public address
# Please note that, because of how routing works, connecting to localhost or 127.0.0.1 on $PORT will not work.
IP=$(ifconfig eth0 | perl -n -e 'if (m/inet addr:([\d\.]+)/g) { print $1 }')
echo hello world | nc $IP $PORT
# Verify that the network connection worked
echo "Daemon received: $(docker logs $JOB)"
```
Under the hood
--------------
Under the hood, Docker is built on the following components:
* The
[cgroup](http://blog.dotcloud.com/kernel-secrets-from-the-paas-garage-part-24-c)
and
[namespacing](http://blog.dotcloud.com/under-the-hood-linux-kernels-on-dotcloud-part)
capabilities of the Linux kernel;
* The [Go](http://golang.org) programming language.
* The [cgroup](http://blog.dotcloud.com/kernel-secrets-from-the-paas-garage-part-24-c) and [namespacing](http://blog.dotcloud.com/under-the-hood-linux-kernels-on-dotcloud-part) capabilities of the Linux kernel;
* [AUFS](http://aufs.sourceforge.net/aufs.html), a powerful union filesystem with copy-on-write capabilities;
* The [Go](http://golang.org) programming language;
* [lxc](http://lxc.sourceforge.net/), a set of convenience scripts to simplify the creation of linux containers.
Contributing to Docker
======================
Want to hack on Docker? Awesome! There are instructions to get you
started [here](CONTRIBUTING.md).
Want to hack on Docker? Awesome! There are instructions to get you started on the website: http://docs.docker.io/en/latest/contributing/contributing/
They are probably not perfect, please let us know if anything feels
wrong or incomplete.
They are probably not perfect, please let us know if anything feels wrong or incomplete.
### Legal
*Brought to you courtesy of our legal counsel. For more context,
please see the Notice document.*
Note
----
We also keep the documentation in this repository. The website documentation is generated with Sphinx from these sources.
Please find it under docs/sources/ and read more about it at https://github.com/dotcloud/docker/master/docs/README.md
Please feel free to fix / update the documentation and send us pull requests. More tutorials are also welcome.
Setting up a dev environment
----------------------------
Instructions that have been verified to work on Ubuntu 12.10:
```bash
sudo apt-get -y install lxc wget bsdtar curl golang git
export GOPATH=~/go/
export PATH=$GOPATH/bin:$PATH
mkdir -p $GOPATH/src/github.com/dotcloud
cd $GOPATH/src/github.com/dotcloud
git clone git@github.com:dotcloud/docker.git
cd docker
go get -v github.com/dotcloud/docker/...
go install -v github.com/dotcloud/docker/...
```
Then run the docker daemon:
```bash
sudo $GOPATH/bin/docker -d
```
Run the `go install` command (above) to recompile docker.
What is a Standard Container?
=============================
Docker defines a unit of software delivery called a Standard Container. The goal of a Standard Container is to encapsulate a software component and all its dependencies in
a format that is self-describing and portable, so that any compliant runtime can run it without extra dependencies, regardless of the underlying machine and the contents of the container.
The spec for Standard Containers is currently a work in progress, but it is very straightforward. It mostly defines 1) an image format, 2) a set of standard operations, and 3) an execution environment.
A great analogy for this is the shipping container. Just like Standard Containers are a fundamental unit of software delivery, shipping containers (http://bricks.argz.com/ins/7823-1/12) are a fundamental unit of physical delivery.
### 1. STANDARD OPERATIONS
Just like shipping containers, Standard Containers define a set of STANDARD OPERATIONS. Shipping containers can be lifted, stacked, locked, loaded, unloaded and labelled. Similarly, standard containers can be started, stopped, copied, snapshotted, downloaded, uploaded and tagged.
### 2. CONTENT-AGNOSTIC
Just like shipping containers, Standard Containers are CONTENT-AGNOSTIC: all standard operations have the same effect regardless of the contents. A shipping container will be stacked in exactly the same way whether it contains Vietnamese powder coffee or spare Maserati parts. Similarly, Standard Containers are started or uploaded in the same way whether they contain a postgres database, a php application with its dependencies and application server, or Java build artifacts.
### 3. INFRASTRUCTURE-AGNOSTIC
Both types of containers are INFRASTRUCTURE-AGNOSTIC: they can be transported to thousands of facilities around the world, and manipulated by a wide variety of equipment. A shipping container can be packed in a factory in Ukraine, transported by truck to the nearest routing center, stacked onto a train, loaded into a German boat by an Australian-built crane, stored in a warehouse at a US facility, etc. Similarly, a standard container can be bundled on my laptop, uploaded to S3, downloaded, run and snapshotted by a build server at Equinix in Virginia, uploaded to 10 staging servers in a home-made Openstack cluster, then sent to 30 production instances across 3 EC2 regions.
### 4. DESIGNED FOR AUTOMATION
Because they offer the same standard operations regardless of content and infrastructure, Standard Containers, just like their physical counterpart, are extremely well-suited for automation. In fact, you could say automation is their secret weapon.
Many things that once required time-consuming and error-prone human effort can now be programmed. Before shipping containers, a bag of powder coffee was hauled, dragged, dropped, rolled and stacked by 10 different people in 10 different locations by the time it reached its destination. 1 out of 50 disappeared. 1 out of 20 was damaged. The process was slow, inefficient and cost a fortune - and was entirely different depending on the facility and the type of goods.
Similarly, before Standard Containers, by the time a software component ran in production, it had been individually built, configured, bundled, documented, patched, vendored, templated, tweaked and instrumented by 10 different people on 10 different computers. Builds failed, libraries conflicted, mirrors crashed, post-it notes were lost, logs were misplaced, cluster updates were half-broken. The process was slow, inefficient and cost a fortune - and was entirely different depending on the language and infrastructure provider.
### 5. INDUSTRIAL-GRADE DELIVERY
There are 17 million shipping containers in existence, packed with every physical good imaginable. Every single one of them can be loaded on the same boats, by the same cranes, in the same facilities, and sent anywhere in the World with incredible efficiency. It is embarrassing to think that a 30 ton shipment of coffee can safely travel half-way across the World in *less time* than it takes a software team to deliver its code from one datacenter to another sitting 10 miles away.
With Standard Containers we can put an end to that embarrassment, by making INDUSTRIAL-GRADE DELIVERY of software a reality.
Standard Container Specification
--------------------------------
(TODO)
### Image format
### Standard operations
* Copy
* Run
* Stop
* Wait
* Commit
* Attach standard streams
* List filesystem changes
* ...
### Execution environment
#### Root filesystem
#### Environment variables
#### Process arguments
#### Networking
#### Process namespacing
#### Resource limits
#### Process monitoring
#### Logging
#### Signals
#### Pseudo-terminal allocation
#### Security
Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.
For more information, please see http://www.bis.doc.gov

SPECS/data-volumes.md (new file)

@@ -0,0 +1,71 @@
## Spec for data volumes
Spec owner: Solomon Hykes <solomon@dotcloud.com>
Data volumes (issue #111) are a much-requested feature which trigger much discussion and debate. Below is the current authoritative spec for implementing data volumes.
This spec will be deprecated once the feature is fully implemented.
Discussion, requests, trolls, demands, offerings, threats and other forms of supplications concerning this spec should be addressed to Solomon here: https://github.com/dotcloud/docker/issues/111
### 1. Creating data volumes
At container creation, parts of a container's filesystem can be mounted as separate data volumes. Volumes are defined with the -v flag.
For example:
```bash
$ docker run -v /var/lib/postgres -v /var/log postgres /usr/bin/postgres
```
In this example, a new container is created from the 'postgres' image. At the same time, docker creates 2 new data volumes: one will be mapped to the container at /var/lib/postgres, the other at /var/log.
2 important notes:
1) Volumes don't have top-level names. At no point does the user provide a name, nor is one given to them. Volumes are identified by the path at which they are mounted inside their container.
2) The user doesn't choose the source of the volume. Docker only mounts volumes it created itself, in the same way that it only runs containers that it created itself. That is by design.
### 2. Sharing data volumes
Instead of creating its own volumes, a container can share another container's volumes. For example:
```bash
$ docker run --volumes-from $OTHER_CONTAINER_ID postgres /usr/local/bin/postgres-backup
```
In this example, a new container is created from the 'postgres' image. At the same time, docker will *re-use* the 2 data volumes created in the previous example. One volume will be mounted on the /var/lib/postgres of *both* containers, and the other will be mounted on the /var/log of both containers.
### 3. Under the hood
Docker stores volumes in /var/lib/docker/volumes. Each volume receives a globally unique ID at creation, and is stored at /var/lib/docker/volumes/ID.
At creation, volumes are attached to a single container - the source of truth for this mapping will be the container's configuration.
Mounting a volume consists of calling "mount --bind" from the volume's directory to the appropriate sub-directory of the container mountpoint. This may be done by Docker itself, or farmed out to lxc (which supports mount-binding) if possible.
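A minimal sketch of that bind mount, assuming the /var/lib/docker/volumes layout above and a hypothetical container rootfs path (both are illustrations, not a stable interface):

```bash
# Hypothetical paths; VOLUME_ID and CONTAINER_ID stand in for real generated IDs
ROOTFS=/var/lib/docker/containers/$CONTAINER_ID/rootfs
mount --bind /var/lib/docker/volumes/$VOLUME_ID $ROOTFS/var/lib/postgres
```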
### 4. Backups, transfers and other volume operations
Volumes sometimes need to be backed up, transferred between hosts, synchronized, etc. These operations typically are application-specific or site-specific, e.g. rsync vs. S3 upload vs. replication vs...
Rather than attempting to implement all these scenarios directly, Docker will allow for custom implementations using an extension mechanism.
### 5. Custom volume handlers
Docker allows for arbitrary code to be executed against a container's volumes, to implement any custom action: backup, transfer, synchronization across hosts, etc.
Here's an example:
```bash
$ DB=$(docker run -d -v /var/lib/postgres -v /var/log postgres /usr/bin/postgres)
$ BACKUP_JOB=$(docker run -d --volumes-from $DB shykes/backuper /usr/local/bin/backup-postgres --s3creds=$S3CREDS)
$ docker wait $BACKUP_JOB
```
Congratulations, you just implemented a custom volume handler, using Docker's built-in ability to 1) execute arbitrary code and 2) share volumes between containers.
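The same building blocks compose for the reverse operation. A hypothetical restore handler (the image and script names are invented for illustration) looks identical from Docker's point of view:

```bash
# Hypothetical restore job reusing the volumes of a fresh database container
DB=$(docker run -d -v /var/lib/postgres -v /var/log postgres /usr/bin/postgres)
RESTORE_JOB=$(docker run -d --volumes-from $DB shykes/backuper /usr/local/bin/restore-postgres --s3creds=$S3CREDS)
docker wait $RESTORE_JOB
```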

View File

@@ -1 +0,0 @@
0.10.0

82
Vagrantfile vendored Normal file
View File

@@ -0,0 +1,82 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
def v10(config)
config.vm.box = 'precise64'
config.vm.box_url = 'http://files.vagrantup.com/precise64.box'
# Add the dotcloud/lxc-docker PPA and install the lxc-docker package
config.vm.provision :shell, :inline => "echo 'deb http://ppa.launchpad.net/dotcloud/lxc-docker/ubuntu precise main' >>/etc/apt/sources.list"
config.vm.provision :shell, :inline => 'export DEBIAN_FRONTEND=noninteractive; apt-get -qq update; apt-get install -qq -y --force-yes lxc-docker'
end
Vagrant::VERSION < "1.1.0" and Vagrant::Config.run do |config|
v10(config)
end
Vagrant::VERSION >= "1.1.0" and Vagrant.configure("1") do |config|
v10(config)
end
Vagrant::VERSION >= "1.1.0" and Vagrant.configure("2") do |config|
config.vm.provider :aws do |aws|
config.vm.box = "dummy"
config.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
aws.access_key_id = ENV["AWS_ACCESS_KEY_ID"]
aws.secret_access_key = ENV["AWS_SECRET_ACCESS_KEY"]
aws.keypair_name = ENV["AWS_KEYPAIR_NAME"]
aws.ssh_private_key_path = ENV["AWS_SSH_PRIVKEY"]
aws.region = "us-east-1"
aws.ami = "ami-d0f89fb9"
aws.ssh_username = "ubuntu"
aws.instance_type = "t1.micro"
end
config.vm.provider :rackspace do |rs|
config.vm.box = "dummy"
config.vm.box_url = "https://github.com/mitchellh/vagrant-rackspace/raw/master/dummy.box"
config.ssh.private_key_path = ENV["RS_PRIVATE_KEY"]
rs.username = ENV["RS_USERNAME"]
rs.api_key = ENV["RS_API_KEY"]
rs.public_key_path = ENV["RS_PUBLIC_KEY"]
rs.flavor = /512MB/
rs.image = /Ubuntu/
end
config.vm.provider :virtualbox do |vb|
config.vm.box = 'precise64'
config.vm.box_url = 'http://files.vagrantup.com/precise64.box'
end
end
Vagrant::VERSION >= "1.2.0" and Vagrant.configure("2") do |config|
config.vm.provider :aws do |aws, override|
config.vm.box = "dummy"
config.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
aws.access_key_id = ENV["AWS_ACCESS_KEY_ID"]
aws.secret_access_key = ENV["AWS_SECRET_ACCESS_KEY"]
aws.keypair_name = ENV["AWS_KEYPAIR_NAME"]
override.ssh.private_key_path = ENV["AWS_SSH_PRIVKEY"]
override.ssh.username = "ubuntu"
aws.region = "us-east-1"
aws.ami = "ami-d0f89fb9"
aws.instance_type = "t1.micro"
end
config.vm.provider :rackspace do |rs|
config.vm.box = "dummy"
config.vm.box_url = "https://github.com/mitchellh/vagrant-rackspace/raw/master/dummy.box"
config.ssh.private_key_path = ENV["RS_PRIVATE_KEY"]
rs.username = ENV["RS_USERNAME"]
rs.api_key = ENV["RS_API_KEY"]
rs.public_key_path = ENV["RS_PUBLIC_KEY"]
rs.flavor = /512MB/
rs.image = /Ubuntu/
end
config.vm.provider :virtualbox do |vb|
config.vm.box = 'precise64'
config.vm.box_url = 'http://files.vagrantup.com/precise64.box'
end
end
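# Usage sketch, assuming the vagrant-aws / vagrant-rackspace plugins are
# installed and the environment variables referenced above are exported:
#   vagrant up --provider=aws
#   vagrant up --provider=rackspace
#   vagrant up                        # VirtualBox is the default provider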

View File

@@ -1 +0,0 @@
Victor Vieux <vieux@docker.com> (@vieux)

View File

@@ -1,19 +0,0 @@
package api
import (
"testing"
)
func TestJsonContentType(t *testing.T) {
if !MatchesContentType("application/json", "application/json") {
t.Fail()
}
if !MatchesContentType("application/json; charset=utf-8", "application/json") {
t.Fail()
}
if MatchesContentType("dockerapplication/json", "application/json") {
t.Fail()
}
}

View File

@@ -1,102 +0,0 @@
package client
import (
"crypto/tls"
"encoding/json"
"fmt"
"io"
"os"
"reflect"
"strings"
"text/template"
flag "github.com/dotcloud/docker/pkg/mflag"
"github.com/dotcloud/docker/pkg/term"
"github.com/dotcloud/docker/registry"
)
var funcMap = template.FuncMap{
"json": func(v interface{}) string {
a, _ := json.Marshal(v)
return string(a)
},
}
func (cli *DockerCli) getMethod(name string) (func(...string) error, bool) {
methodName := "Cmd" + strings.ToUpper(name[:1]) + strings.ToLower(name[1:])
method := reflect.ValueOf(cli).MethodByName(methodName)
if !method.IsValid() {
return nil, false
}
return method.Interface().(func(...string) error), true
}
func (cli *DockerCli) ParseCommands(args ...string) error {
if len(args) > 0 {
method, exists := cli.getMethod(args[0])
if !exists {
fmt.Println("Error: Command not found:", args[0])
return cli.CmdHelp(args[1:]...)
}
return method(args[1:]...)
}
return cli.CmdHelp(args...)
}
func (cli *DockerCli) Subcmd(name, signature, description string) *flag.FlagSet {
flags := flag.NewFlagSet(name, flag.ContinueOnError)
flags.Usage = func() {
fmt.Fprintf(cli.err, "\nUsage: docker %s %s\n\n%s\n\n", name, signature, description)
flags.PrintDefaults()
os.Exit(2)
}
return flags
}
func (cli *DockerCli) LoadConfigFile() (err error) {
cli.configFile, err = registry.LoadConfig(os.Getenv("HOME"))
if err != nil {
fmt.Fprintf(cli.err, "WARNING: %s\n", err)
}
return err
}
func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string, tlsConfig *tls.Config) *DockerCli {
var (
isTerminal = false
terminalFd uintptr
)
if in != nil {
if file, ok := in.(*os.File); ok {
terminalFd = file.Fd()
isTerminal = term.IsTerminal(terminalFd)
}
}
if err == nil {
err = out
}
return &DockerCli{
proto: proto,
addr: addr,
in: in,
out: out,
err: err,
isTerminal: isTerminal,
terminalFd: terminalFd,
tlsConfig: tlsConfig,
}
}
type DockerCli struct {
proto string
addr string
configFile *registry.ConfigFile
in io.ReadCloser
out io.Writer
err io.Writer
isTerminal bool
terminalFd uintptr
tlsConfig *tls.Config
}

File diff suppressed because it is too large

View File

@@ -1,390 +0,0 @@
package client
import (
"bytes"
"crypto/tls"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/http/httputil"
"net/url"
"os"
gosignal "os/signal"
"regexp"
goruntime "runtime"
"strconv"
"strings"
"syscall"
"github.com/dotcloud/docker/api"
"github.com/dotcloud/docker/dockerversion"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/pkg/term"
"github.com/dotcloud/docker/registry"
"github.com/dotcloud/docker/utils"
)
var (
ErrConnectionRefused = errors.New("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
)
func (cli *DockerCli) dial() (net.Conn, error) {
if cli.tlsConfig != nil && cli.proto != "unix" {
return tls.Dial(cli.proto, cli.addr, cli.tlsConfig)
}
return net.Dial(cli.proto, cli.addr)
}
func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo bool) (io.ReadCloser, int, error) {
params := bytes.NewBuffer(nil)
if data != nil {
if env, ok := data.(engine.Env); ok {
if err := env.Encode(params); err != nil {
return nil, -1, err
}
} else {
buf, err := json.Marshal(data)
if err != nil {
return nil, -1, err
}
if _, err := params.Write(buf); err != nil {
return nil, -1, err
}
}
}
// fixme: refactor client to support redirect
re := regexp.MustCompile("/+")
path = re.ReplaceAllString(path, "/")
req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), params)
if err != nil {
return nil, -1, err
}
if passAuthInfo {
cli.LoadConfigFile()
// Resolve the Auth config relevant for this server
authConfig := cli.configFile.ResolveAuthConfig(registry.IndexServerAddress())
getHeaders := func(authConfig registry.AuthConfig) (map[string][]string, error) {
buf, err := json.Marshal(authConfig)
if err != nil {
return nil, err
}
registryAuthHeader := []string{
base64.URLEncoding.EncodeToString(buf),
}
return map[string][]string{"X-Registry-Auth": registryAuthHeader}, nil
}
if headers, err := getHeaders(authConfig); err == nil && headers != nil {
for k, v := range headers {
req.Header[k] = v
}
}
}
req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION)
req.Host = cli.addr
if data != nil {
req.Header.Set("Content-Type", "application/json")
} else if method == "POST" {
req.Header.Set("Content-Type", "plain/text")
}
dial, err := cli.dial()
if err != nil {
if strings.Contains(err.Error(), "connection refused") {
return nil, -1, ErrConnectionRefused
}
return nil, -1, err
}
clientconn := httputil.NewClientConn(dial, nil)
resp, err := clientconn.Do(req)
if err != nil {
clientconn.Close()
if strings.Contains(err.Error(), "connection refused") {
return nil, -1, ErrConnectionRefused
}
return nil, -1, err
}
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, -1, err
}
if len(body) == 0 {
return nil, resp.StatusCode, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(resp.StatusCode), req.URL)
}
return nil, resp.StatusCode, fmt.Errorf("Error: %s", bytes.TrimSpace(body))
}
wrapper := utils.NewReadCloserWrapper(resp.Body, func() error {
if resp != nil && resp.Body != nil {
resp.Body.Close()
}
return clientconn.Close()
})
return wrapper, resp.StatusCode, nil
}
func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, headers map[string][]string) error {
if (method == "POST" || method == "PUT") && in == nil {
in = bytes.NewReader([]byte{})
}
// fixme: refactor client to support redirect
re := regexp.MustCompile("/+")
path = re.ReplaceAllString(path, "/")
req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), in)
if err != nil {
return err
}
req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION)
req.Host = cli.addr
if method == "POST" {
req.Header.Set("Content-Type", "plain/text")
}
if headers != nil {
for k, v := range headers {
req.Header[k] = v
}
}
dial, err := cli.dial()
if err != nil {
if strings.Contains(err.Error(), "connection refused") {
return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
}
return err
}
clientconn := httputil.NewClientConn(dial, nil)
resp, err := clientconn.Do(req)
defer clientconn.Close()
if err != nil {
if strings.Contains(err.Error(), "connection refused") {
return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
}
return err
}
defer resp.Body.Close()
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
if len(body) == 0 {
return fmt.Errorf("Error :%s", http.StatusText(resp.StatusCode))
}
return fmt.Errorf("Error: %s", bytes.TrimSpace(body))
}
if api.MatchesContentType(resp.Header.Get("Content-Type"), "application/json") {
return utils.DisplayJSONMessagesStream(resp.Body, out, cli.terminalFd, cli.isTerminal)
}
if _, err := io.Copy(out, resp.Body); err != nil {
return err
}
return nil
}
func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.ReadCloser, stdout, stderr io.Writer, started chan io.Closer) error {
defer func() {
if started != nil {
close(started)
}
}()
// fixme: refactor client to support redirect
re := regexp.MustCompile("/+")
path = re.ReplaceAllString(path, "/")
req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), nil)
if err != nil {
return err
}
req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION)
req.Header.Set("Content-Type", "plain/text")
req.Host = cli.addr
dial, err := cli.dial()
if err != nil {
if strings.Contains(err.Error(), "connection refused") {
return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
}
return err
}
clientconn := httputil.NewClientConn(dial, nil)
defer clientconn.Close()
// Server hijacks the connection, error 'connection closed' expected
clientconn.Do(req)
rwc, br := clientconn.Hijack()
defer rwc.Close()
if started != nil {
started <- rwc
}
var receiveStdout chan error
var oldState *term.State
if in != nil && setRawTerminal && cli.isTerminal && os.Getenv("NORAW") == "" {
oldState, err = term.SetRawTerminal(cli.terminalFd)
if err != nil {
return err
}
defer term.RestoreTerminal(cli.terminalFd, oldState)
}
if stdout != nil || stderr != nil {
receiveStdout = utils.Go(func() (err error) {
defer func() {
if in != nil {
if setRawTerminal && cli.isTerminal {
term.RestoreTerminal(cli.terminalFd, oldState)
}
// For some reason this Close call blocks on darwin..
// As the client exits right after, simply discard the close
// until we find a better solution.
if goruntime.GOOS != "darwin" {
in.Close()
}
}
}()
// When TTY is ON, use regular copy
if setRawTerminal {
_, err = io.Copy(stdout, br)
} else {
_, err = utils.StdCopy(stdout, stderr, br)
}
utils.Debugf("[hijack] End of stdout")
return err
})
}
sendStdin := utils.Go(func() error {
if in != nil {
io.Copy(rwc, in)
utils.Debugf("[hijack] End of stdin")
}
if tcpc, ok := rwc.(*net.TCPConn); ok {
if err := tcpc.CloseWrite(); err != nil {
utils.Debugf("Couldn't send EOF: %s\n", err)
}
} else if unixc, ok := rwc.(*net.UnixConn); ok {
if err := unixc.CloseWrite(); err != nil {
utils.Debugf("Couldn't send EOF: %s\n", err)
}
}
// Discard errors due to pipe interruption
return nil
})
if stdout != nil || stderr != nil {
if err := <-receiveStdout; err != nil {
utils.Debugf("Error receiveStdout: %s", err)
return err
}
}
if !cli.isTerminal {
if err := <-sendStdin; err != nil {
utils.Debugf("Error sendStdin: %s", err)
return err
}
}
return nil
}
func (cli *DockerCli) resizeTty(id string) {
height, width := cli.getTtySize()
if height == 0 && width == 0 {
return
}
v := url.Values{}
v.Set("h", strconv.Itoa(height))
v.Set("w", strconv.Itoa(width))
if _, _, err := readBody(cli.call("POST", "/containers/"+id+"/resize?"+v.Encode(), nil, false)); err != nil {
utils.Debugf("Error resize: %s", err)
}
}
func waitForExit(cli *DockerCli, containerId string) (int, error) {
stream, _, err := cli.call("POST", "/containers/"+containerId+"/wait", nil, false)
if err != nil {
return -1, err
}
var out engine.Env
if err := out.Decode(stream); err != nil {
return -1, err
}
return out.GetInt("StatusCode"), nil
}
// getExitCode performs an inspect on the container. It returns
// the running state and the exit code.
func getExitCode(cli *DockerCli, containerId string) (bool, int, error) {
body, _, err := readBody(cli.call("GET", "/containers/"+containerId+"/json", nil, false))
if err != nil {
// If we can't connect, then the daemon probably died.
if err != ErrConnectionRefused {
return false, -1, err
}
return false, -1, nil
}
c := &api.Container{}
if err := json.Unmarshal(body, c); err != nil {
return false, -1, err
}
return c.State.Running, c.State.ExitCode, nil
}
func (cli *DockerCli) monitorTtySize(id string) error {
cli.resizeTty(id)
sigchan := make(chan os.Signal, 1)
gosignal.Notify(sigchan, syscall.SIGWINCH)
go func() {
for _ = range sigchan {
cli.resizeTty(id)
}
}()
return nil
}
func (cli *DockerCli) getTtySize() (int, int) {
if !cli.isTerminal {
return 0, 0
}
ws, err := term.GetWinsize(cli.terminalFd)
if err != nil {
utils.Debugf("Error getting size: %s", err)
if ws == nil {
return 0, 0
}
}
return int(ws.Height), int(ws.Width)
}
func readBody(stream io.ReadCloser, statusCode int, err error) ([]byte, int, error) {
if stream != nil {
defer stream.Close()
}
if err != nil {
return nil, statusCode, err
}
body, err := ioutil.ReadAll(stream)
if err != nil {
return nil, -1, err
}
return body, statusCode, nil
}

View File

@@ -1,47 +0,0 @@
package api
import (
"fmt"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/pkg/version"
"github.com/dotcloud/docker/utils"
"mime"
"strings"
)
const (
APIVERSION version.Version = "1.10"
DEFAULTHTTPHOST = "127.0.0.1"
DEFAULTUNIXSOCKET = "/var/run/docker.sock"
)
func ValidateHost(val string) (string, error) {
host, err := utils.ParseHost(DEFAULTHTTPHOST, DEFAULTUNIXSOCKET, val)
if err != nil {
return val, err
}
return host, nil
}
//TODO remove, used on < 1.5 in getContainersJSON
func DisplayablePorts(ports *engine.Table) string {
result := []string{}
ports.SetKey("PublicPort")
ports.Sort()
for _, port := range ports.Data {
if port.Get("IP") == "" {
result = append(result, fmt.Sprintf("%d/%s", port.GetInt("PublicPort"), port.Get("Type")))
} else {
result = append(result, fmt.Sprintf("%s:%d->%d/%s", port.Get("IP"), port.GetInt("PublicPort"), port.GetInt("PrivatePort"), port.Get("Type")))
}
}
return strings.Join(result, ", ")
}
func MatchesContentType(contentType, expectedType string) bool {
mimetype, _, err := mime.ParseMediaType(contentType)
if err != nil {
utils.Errorf("Error parsing media type: %s error: %s", contentType, err.Error())
}
return err == nil && mimetype == expectedType
}

View File

@@ -1,18 +0,0 @@
package api
import (
"github.com/dotcloud/docker/nat"
"github.com/dotcloud/docker/runconfig"
)
type Container struct {
Config runconfig.Config
HostConfig runconfig.HostConfig
State struct {
Running bool
ExitCode int
}
NetworkSettings struct {
Ports nat.PortMap
}
}

File diff suppressed because it is too large

View File

@@ -1,180 +0,0 @@
package server
import (
"fmt"
"github.com/dotcloud/docker/api"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/utils"
"io"
"net/http"
"net/http/httptest"
"os"
"testing"
)
func TestGetBoolParam(t *testing.T) {
if ret, err := getBoolParam("true"); err != nil || !ret {
t.Fatalf("true -> true, nil | got %t %s", ret, err)
}
if ret, err := getBoolParam("True"); err != nil || !ret {
t.Fatalf("True -> true, nil | got %t %s", ret, err)
}
if ret, err := getBoolParam("1"); err != nil || !ret {
t.Fatalf("1 -> true, nil | got %t %s", ret, err)
}
if ret, err := getBoolParam(""); err != nil || ret {
t.Fatalf("\"\" -> false, nil | got %t %s", ret, err)
}
if ret, err := getBoolParam("false"); err != nil || ret {
t.Fatalf("false -> false, nil | got %t %s", ret, err)
}
if ret, err := getBoolParam("0"); err != nil || ret {
t.Fatalf("0 -> false, nil | got %t %s", ret, err)
}
if ret, err := getBoolParam("faux"); err == nil || ret {
t.Fatalf("faux -> false, err | got %t %s", ret, err)
}
}
func TestHttpError(t *testing.T) {
r := httptest.NewRecorder()
httpError(r, fmt.Errorf("No such method"))
if r.Code != http.StatusNotFound {
t.Fatalf("Expected %d, got %d", http.StatusNotFound, r.Code)
}
httpError(r, fmt.Errorf("This accound hasn't been activated"))
if r.Code != http.StatusForbidden {
t.Fatalf("Expected %d, got %d", http.StatusForbidden, r.Code)
}
httpError(r, fmt.Errorf("Some error"))
if r.Code != http.StatusInternalServerError {
t.Fatalf("Expected %d, got %d", http.StatusInternalServerError, r.Code)
}
}
func TestGetVersion(t *testing.T) {
tmp, err := utils.TestDirectory("")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmp)
eng, err := engine.New(tmp)
if err != nil {
t.Fatal(err)
}
var called bool
eng.Register("version", func(job *engine.Job) engine.Status {
called = true
v := &engine.Env{}
v.SetJson("Version", "42.1")
v.Set("ApiVersion", "1.1.1.1.1")
v.Set("GoVersion", "2.42")
v.Set("Os", "Linux")
v.Set("Arch", "x86_64")
if _, err := v.WriteTo(job.Stdout); err != nil {
return job.Error(err)
}
return engine.StatusOK
})
r := httptest.NewRecorder()
req, err := http.NewRequest("GET", "/version", nil)
if err != nil {
t.Fatal(err)
}
// FIXME getting the version should require an actual running Server
if err := ServeRequest(eng, api.APIVERSION, r, req); err != nil {
t.Fatal(err)
}
if !called {
t.Fatalf("handler was not called")
}
out := engine.NewOutput()
v, err := out.AddEnv()
if err != nil {
t.Fatal(err)
}
if _, err := io.Copy(out, r.Body); err != nil {
t.Fatal(err)
}
out.Close()
expected := "42.1"
if result := v.Get("Version"); result != expected {
t.Errorf("Expected version %s, %s found", expected, result)
}
expected = "application/json"
if result := r.HeaderMap.Get("Content-Type"); result != expected {
t.Errorf("Expected Content-Type %s, %s found", expected, result)
}
}
func TestGetInfo(t *testing.T) {
tmp, err := utils.TestDirectory("")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmp)
eng, err := engine.New(tmp)
if err != nil {
t.Fatal(err)
}
var called bool
eng.Register("info", func(job *engine.Job) engine.Status {
called = true
v := &engine.Env{}
v.SetInt("Containers", 1)
v.SetInt("Images", 42000)
if _, err := v.WriteTo(job.Stdout); err != nil {
return job.Error(err)
}
return engine.StatusOK
})
r := httptest.NewRecorder()
req, err := http.NewRequest("GET", "/info", nil)
if err != nil {
t.Fatal(err)
}
// FIXME getting the version should require an actual running Server
if err := ServeRequest(eng, api.APIVERSION, r, req); err != nil {
t.Fatal(err)
}
if !called {
t.Fatalf("handler was not called")
}
out := engine.NewOutput()
i, err := out.AddEnv()
if err != nil {
t.Fatal(err)
}
if _, err := io.Copy(out, r.Body); err != nil {
t.Fatal(err)
}
out.Close()
{
expected := 42000
result := i.GetInt("Images")
if expected != result {
t.Fatalf("%#v\n", result)
}
}
{
expected := 1
result := i.GetInt("Containers")
if expected != result {
t.Fatalf("%#v\n", result)
}
}
{
expected := "application/json"
if result := r.HeaderMap.Get("Content-Type"); result != expected {
t.Fatalf("%#v\n", result)
}
}
}

124
archive.go Normal file
View File

@@ -0,0 +1,124 @@
package docker
import (
"errors"
"io"
"io/ioutil"
"os"
"os/exec"
)
type Archive io.Reader
type Compression uint32
const (
Uncompressed Compression = iota
Bzip2
Gzip
Xz
)
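// Flag returns the bsdtar flag that selects this compression algorithm
// (empty string for uncompressed archives).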
func (compression *Compression) Flag() string {
switch *compression {
case Bzip2:
return "j"
case Gzip:
return "z"
case Xz:
return "J"
}
return ""
}
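// Tar creates an archive of the directory at `path`, compressed with the
// given algorithm, by shelling out to bsdtar and streaming its stdout.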
func Tar(path string, compression Compression) (io.Reader, error) {
cmd := exec.Command("bsdtar", "-f", "-", "-C", path, "-c"+compression.Flag(), ".")
return CmdStream(cmd)
}
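// Untar reads a tar stream from `archive` and unpacks it into the directory
// at `path` by shelling out to bsdtar; on failure, the command's combined
// output is included in the returned error.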
func Untar(archive io.Reader, path string) error {
cmd := exec.Command("bsdtar", "-f", "-", "-C", path, "-x")
cmd.Stdin = archive
output, err := cmd.CombinedOutput()
if err != nil {
return errors.New(err.Error() + ": " + string(output))
}
return nil
}
// CmdStream executes a command, and returns its stdout as a stream.
// If the command fails to run or doesn't complete successfully, an error
// will be returned, including anything written on stderr.
func CmdStream(cmd *exec.Cmd) (io.Reader, error) {
stdout, err := cmd.StdoutPipe()
if err != nil {
return nil, err
}
stderr, err := cmd.StderrPipe()
if err != nil {
return nil, err
}
pipeR, pipeW := io.Pipe()
errChan := make(chan []byte)
// Collect stderr, we will use it in case of an error
go func() {
errText, e := ioutil.ReadAll(stderr)
if e != nil {
errText = []byte("(...couldn't fetch stderr: " + e.Error() + ")")
}
errChan <- errText
}()
// Copy stdout to the returned pipe
go func() {
_, err := io.Copy(pipeW, stdout)
if err != nil {
pipeW.CloseWithError(err)
}
errText := <-errChan
if err := cmd.Wait(); err != nil {
pipeW.CloseWithError(errors.New(err.Error() + ": " + string(errText)))
} else {
pipeW.Close()
}
}()
// Run the command and return the pipe
if err := cmd.Start(); err != nil {
return nil, err
}
return pipeR, nil
}
// NewTempArchive reads the content of src into a temporary file, and returns the contents
// of that file as an archive. The archive can only be read once - as soon as reading completes,
// the file will be deleted.
func NewTempArchive(src Archive, dir string) (*TempArchive, error) {
f, err := ioutil.TempFile(dir, "")
if err != nil {
return nil, err
}
if _, err := io.Copy(f, src); err != nil {
return nil, err
}
if _, err := f.Seek(0, 0); err != nil {
return nil, err
}
st, err := f.Stat()
if err != nil {
return nil, err
}
size := st.Size()
return &TempArchive{f, size}, nil
}
type TempArchive struct {
*os.File
Size int64 // Pre-computed from Stat().Size() as a convenience
}
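// Read proxies to the underlying file, removing it as soon as a read
// returns an error (including io.EOF), which enforces the read-once
// behavior described above.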
func (archive *TempArchive) Read(data []byte) (int, error) {
n, err := archive.File.Read(data)
if err != nil {
os.Remove(archive.File.Name())
}
return n, err
}

View File

@@ -1 +0,0 @@
Michael Crosby <michael@crosbymichael.com> (@crosbymichael)

View File

@@ -1,645 +0,0 @@
package archive
import (
"bytes"
"compress/bzip2"
"compress/gzip"
"errors"
"fmt"
"github.com/dotcloud/docker/pkg/system"
"github.com/dotcloud/docker/utils"
"github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
"io"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"syscall"
)
type (
Archive io.ReadCloser
ArchiveReader io.Reader
Compression int
TarOptions struct {
Includes []string
Compression Compression
}
)
var (
ErrNotImplemented = errors.New("Function not implemented")
)
const (
Uncompressed Compression = iota
Bzip2
Gzip
Xz
)
func DetectCompression(source []byte) Compression {
sourceLen := len(source)
for compression, m := range map[Compression][]byte{
Bzip2: {0x42, 0x5A, 0x68},
Gzip: {0x1F, 0x8B, 0x08},
Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
} {
fail := false
if len(m) > sourceLen {
utils.Debugf("Len too short")
continue
}
i := 0
for _, b := range m {
if b != source[i] {
fail = true
break
}
i++
}
if !fail {
return compression
}
}
return Uncompressed
}
func xzDecompress(archive io.Reader) (io.ReadCloser, error) {
args := []string{"xz", "-d", "-c", "-q"}
return CmdStream(exec.Command(args[0], args[1:]...), archive)
}
func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
buf := make([]byte, 10)
totalN := 0
for totalN < 10 {
n, err := archive.Read(buf[totalN:])
if err != nil {
if err == io.EOF {
return nil, fmt.Errorf("Tarball too short")
}
return nil, err
}
totalN += n
utils.Debugf("[tar autodetect] n: %d", n)
}
compression := DetectCompression(buf)
wrap := io.MultiReader(bytes.NewReader(buf), archive)
switch compression {
case Uncompressed:
return ioutil.NopCloser(wrap), nil
case Gzip:
return gzip.NewReader(wrap)
case Bzip2:
return ioutil.NopCloser(bzip2.NewReader(wrap)), nil
case Xz:
return xzDecompress(wrap)
default:
return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
}
}
func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) {
switch compression {
case Uncompressed:
return utils.NopWriteCloser(dest), nil
case Gzip:
return gzip.NewWriter(dest), nil
case Bzip2, Xz:
// archive/bzip2 does not support writing, and there is no xz support at all
// However, this is not a problem as docker only currently generates gzipped tars
return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
default:
return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
}
}
func (compression *Compression) Extension() string {
switch *compression {
case Uncompressed:
return "tar"
case Bzip2:
return "tar.bz2"
case Gzip:
return "tar.gz"
case Xz:
return "tar.xz"
}
return ""
}
func addTarFile(path, name string, tw *tar.Writer) error {
fi, err := os.Lstat(path)
if err != nil {
return err
}
link := ""
if fi.Mode()&os.ModeSymlink != 0 {
if link, err = os.Readlink(path); err != nil {
return err
}
}
hdr, err := tar.FileInfoHeader(fi, link)
if err != nil {
return err
}
if fi.IsDir() && !strings.HasSuffix(name, "/") {
name = name + "/"
}
hdr.Name = name
stat, ok := fi.Sys().(*syscall.Stat_t)
if ok {
// Currently go does not fill in the major/minors
if stat.Mode&syscall.S_IFBLK == syscall.S_IFBLK ||
stat.Mode&syscall.S_IFCHR == syscall.S_IFCHR {
hdr.Devmajor = int64(major(uint64(stat.Rdev)))
hdr.Devminor = int64(minor(uint64(stat.Rdev)))
}
}
capability, _ := system.Lgetxattr(path, "security.capability")
if capability != nil {
hdr.Xattrs = make(map[string]string)
hdr.Xattrs["security.capability"] = string(capability)
}
if err := tw.WriteHeader(hdr); err != nil {
return err
}
if hdr.Typeflag == tar.TypeReg {
if file, err := os.Open(path); err != nil {
return err
} else {
_, err := io.Copy(tw, file)
if err != nil {
return err
}
file.Close()
}
}
return nil
}
func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader) error {
// hdr.Mode is in linux format, which we can use for syscalls,
// but for os.Foo() calls we need the mode converted to os.FileMode,
// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
hdrInfo := hdr.FileInfo()
switch hdr.Typeflag {
case tar.TypeDir:
// Create directory unless it exists as a directory already.
// In that case we just want to merge the two
if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
if err := os.Mkdir(path, hdrInfo.Mode()); err != nil {
return err
}
}
case tar.TypeReg, tar.TypeRegA:
// Source is regular file
file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
if err != nil {
return err
}
if _, err := io.Copy(file, reader); err != nil {
file.Close()
return err
}
file.Close()
case tar.TypeBlock, tar.TypeChar, tar.TypeFifo:
mode := uint32(hdr.Mode & 07777)
switch hdr.Typeflag {
case tar.TypeBlock:
mode |= syscall.S_IFBLK
case tar.TypeChar:
mode |= syscall.S_IFCHR
case tar.TypeFifo:
mode |= syscall.S_IFIFO
}
if err := syscall.Mknod(path, mode, int(mkdev(hdr.Devmajor, hdr.Devminor))); err != nil {
return err
}
case tar.TypeLink:
if err := os.Link(filepath.Join(extractDir, hdr.Linkname), path); err != nil {
return err
}
case tar.TypeSymlink:
if err := os.Symlink(hdr.Linkname, path); err != nil {
return err
}
case tar.TypeXGlobalHeader:
utils.Debugf("PAX Global Extended Headers found and ignored")
return nil
default:
return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag)
}
if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil {
return err
}
for key, value := range hdr.Xattrs {
if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
return err
}
}
// There is no LChmod, so ignore mode for symlink. Also, this
// must happen after chown, as that can modify the file mode
if hdr.Typeflag != tar.TypeSymlink {
if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
return err
}
}
ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
// syscall.UtimesNano doesn't support a NOFOLLOW flag atm, so
// symlinks get their times set with system.LUtimesNano instead
if hdr.Typeflag != tar.TypeSymlink {
if err := system.UtimesNano(path, ts); err != nil {
return err
}
} else {
if err := system.LUtimesNano(path, ts); err != nil {
return err
}
}
return nil
}
// Tar creates an archive from the directory at `path`, and returns it as a
// stream of bytes.
func Tar(path string, compression Compression) (io.ReadCloser, error) {
return TarFilter(path, &TarOptions{Compression: compression})
}
func escapeName(name string) string {
escaped := make([]byte, 0)
for i, c := range []byte(name) {
if i == 0 && c == '/' {
continue
}
// all printable chars except "-" which is 0x2d
if (0x20 <= c && c <= 0x7E) && c != 0x2d {
escaped = append(escaped, c)
} else {
escaped = append(escaped, fmt.Sprintf("\\%03o", c)...)
}
}
return string(escaped)
}
// TarFilter creates an archive from the directory at `srcPath`, only including files whose relative
// paths are listed in `options.Includes`. If `options.Includes` is nil, then all files are included.
func TarFilter(srcPath string, options *TarOptions) (io.ReadCloser, error) {
pipeReader, pipeWriter := io.Pipe()
compressWriter, err := CompressStream(pipeWriter, options.Compression)
if err != nil {
return nil, err
}
tw := tar.NewWriter(compressWriter)
go func() {
// In general we log errors here but ignore them because
// during e.g. a diff operation the container can continue
// mutating the filesystem and we can see transient errors
// from this
if options.Includes == nil {
options.Includes = []string{"."}
}
for _, include := range options.Includes {
filepath.Walk(filepath.Join(srcPath, include), func(filePath string, f os.FileInfo, err error) error {
if err != nil {
utils.Debugf("Tar: Can't stat file %s to tar: %s\n", srcPath, err)
return nil
}
relFilePath, err := filepath.Rel(srcPath, filePath)
if err != nil {
return nil
}
if err := addTarFile(filePath, relFilePath, tw); err != nil {
utils.Debugf("Can't add file %s to tar: %s\n", srcPath, err)
}
return nil
})
}
// Make sure to check the error on Close.
if err := tw.Close(); err != nil {
utils.Debugf("Can't close tar writer: %s\n", err)
}
if err := compressWriter.Close(); err != nil {
utils.Debugf("Can't close compress writer: %s\n", err)
}
if err := pipeWriter.Close(); err != nil {
utils.Debugf("Can't close pipe writer: %s\n", err)
}
}()
return pipeReader, nil
}
// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `path`.
// The archive may be compressed with one of the following algorithms:
// identity (uncompressed), gzip, bzip2, xz.
// FIXME: specify behavior when target path exists vs. doesn't exist.
func Untar(archive io.Reader, dest string, options *TarOptions) error {
if archive == nil {
return fmt.Errorf("Empty archive")
}
decompressedArchive, err := DecompressStream(archive)
if err != nil {
return err
}
defer decompressedArchive.Close()
tr := tar.NewReader(decompressedArchive)
var dirs []*tar.Header
// Iterate through the files in the archive.
for {
hdr, err := tr.Next()
if err == io.EOF {
// end of tar archive
break
}
if err != nil {
return err
}
// Normalize name, for safety and for a simple is-root check
hdr.Name = filepath.Clean(hdr.Name)
if !strings.HasSuffix(hdr.Name, "/") {
// Not the root directory, ensure that the parent directory exists
parent := filepath.Dir(hdr.Name)
parentPath := filepath.Join(dest, parent)
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
err = os.MkdirAll(parentPath, 0777)
if err != nil {
return err
}
}
}
path := filepath.Join(dest, hdr.Name)
// If path exists we almost always just want to remove and replace it
// The only exception is when it is a directory *and* the file from
// the layer is also a directory. Then we want to merge them (i.e.
// just apply the metadata from the layer).
if fi, err := os.Lstat(path); err == nil {
if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
if err := os.RemoveAll(path); err != nil {
return err
}
}
}
if err := createTarFile(path, dest, hdr, tr); err != nil {
return err
}
// Directory mtimes must be handled at the end to avoid further
// file creation in them from modifying the directory mtime
if hdr.Typeflag == tar.TypeDir {
dirs = append(dirs, hdr)
}
}
for _, hdr := range dirs {
path := filepath.Join(dest, hdr.Name)
ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
if err := syscall.UtimesNano(path, ts); err != nil {
return err
}
}
return nil
}
// TarUntar is a convenience function which calls Tar and Untar, with
// the output of one piped into the other. If either Tar or Untar fails,
// TarUntar aborts and returns the error.
func TarUntar(src string, dst string) error {
utils.Debugf("TarUntar(%s %s)", src, dst)
archive, err := TarFilter(src, &TarOptions{Compression: Uncompressed})
if err != nil {
return err
}
defer archive.Close()
return Untar(archive, dst, nil)
}
// UntarPath is a convenience function which looks for an archive
// at filesystem path `src`, and unpacks it at `dst`.
func UntarPath(src, dst string) error {
archive, err := os.Open(src)
if err != nil {
return err
}
defer archive.Close()
if err := Untar(archive, dst, nil); err != nil {
return err
}
return nil
}
// CopyWithTar creates a tar archive of filesystem path `src`, and
// unpacks it at filesystem path `dst`.
// The archive is streamed directly with fixed buffering and no
// intermediary disk IO.
//
func CopyWithTar(src, dst string) error {
srcSt, err := os.Stat(src)
if err != nil {
return err
}
if !srcSt.IsDir() {
return CopyFileWithTar(src, dst)
}
// Create dst, copy src's content into it
utils.Debugf("Creating dest directory: %s", dst)
if err := os.MkdirAll(dst, 0755); err != nil && !os.IsExist(err) {
return err
}
utils.Debugf("Calling TarUntar(%s, %s)", src, dst)
return TarUntar(src, dst)
}
// CopyFileWithTar emulates the behavior of the 'cp' command-line
// for a single file. It copies a regular file from path `src` to
// path `dst`, and preserves all its metadata.
//
// If `dst` ends with a trailing slash '/', the final destination path
// will be `dst/base(src)`.
func CopyFileWithTar(src, dst string) (err error) {
utils.Debugf("CopyFileWithTar(%s, %s)", src, dst)
srcSt, err := os.Stat(src)
if err != nil {
return err
}
if srcSt.IsDir() {
return fmt.Errorf("Can't copy a directory")
}
// Clean up the trailing /
if dst[len(dst)-1] == '/' {
dst = path.Join(dst, filepath.Base(src))
}
// Create the holding directory if necessary
if err := os.MkdirAll(filepath.Dir(dst), 0700); err != nil && !os.IsExist(err) {
return err
}
r, w := io.Pipe()
errC := utils.Go(func() error {
defer w.Close()
srcF, err := os.Open(src)
if err != nil {
return err
}
defer srcF.Close()
tw := tar.NewWriter(w)
hdr, err := tar.FileInfoHeader(srcSt, "")
if err != nil {
return err
}
hdr.Name = filepath.Base(dst)
if err := tw.WriteHeader(hdr); err != nil {
return err
}
if _, err := io.Copy(tw, srcF); err != nil {
return err
}
tw.Close()
return nil
})
defer func() {
if er := <-errC; err == nil && er != nil {
err = er
}
}()
return Untar(r, filepath.Dir(dst), nil)
}
// CmdStream executes a command, and returns its stdout as a stream.
// If the command fails to run or doesn't complete successfully, an error
// will be returned, including anything written on stderr.
func CmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) {
if input != nil {
stdin, err := cmd.StdinPipe()
if err != nil {
return nil, err
}
// Write stdin if any
go func() {
io.Copy(stdin, input)
stdin.Close()
}()
}
stdout, err := cmd.StdoutPipe()
if err != nil {
return nil, err
}
stderr, err := cmd.StderrPipe()
if err != nil {
return nil, err
}
pipeR, pipeW := io.Pipe()
errChan := make(chan []byte)
// Collect stderr, we will use it in case of an error
go func() {
errText, e := ioutil.ReadAll(stderr)
if e != nil {
errText = []byte("(...couldn't fetch stderr: " + e.Error() + ")")
}
errChan <- errText
}()
// Copy stdout to the returned pipe
go func() {
_, err := io.Copy(pipeW, stdout)
if err != nil {
pipeW.CloseWithError(err)
}
errText := <-errChan
if err := cmd.Wait(); err != nil {
pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errText))
} else {
pipeW.Close()
}
}()
// Run the command and return the pipe
if err := cmd.Start(); err != nil {
return nil, err
}
return pipeR, nil
}
// NewTempArchive reads the content of src into a temporary file, and returns the contents
// of that file as an archive. The archive can only be read once - as soon as reading completes,
// the file will be deleted.
func NewTempArchive(src Archive, dir string) (*TempArchive, error) {
f, err := ioutil.TempFile(dir, "")
if err != nil {
return nil, err
}
if _, err := io.Copy(f, src); err != nil {
return nil, err
}
if err = f.Sync(); err != nil {
return nil, err
}
if _, err := f.Seek(0, 0); err != nil {
return nil, err
}
st, err := f.Stat()
if err != nil {
return nil, err
}
size := st.Size()
return &TempArchive{f, size}, nil
}
type TempArchive struct {
*os.File
Size int64 // Pre-computed from Stat().Size() as a convenience
}
func (archive *TempArchive) Read(data []byte) (int, error) {
n, err := archive.File.Read(data)
if err != nil {
os.Remove(archive.File.Name())
}
return n, err
}

View File

@@ -1,139 +0,0 @@
package archive
import (
"bytes"
"fmt"
"github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
"io"
"io/ioutil"
"os"
"os/exec"
"path"
"testing"
"time"
)
func TestCmdStreamLargeStderr(t *testing.T) {
cmd := exec.Command("/bin/sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello")
out, err := CmdStream(cmd, nil)
if err != nil {
t.Fatalf("Failed to start command: %s", err)
}
errCh := make(chan error)
go func() {
_, err := io.Copy(ioutil.Discard, out)
errCh <- err
}()
select {
case err := <-errCh:
if err != nil {
t.Fatalf("Command should not have failed (err=%.100s...)", err)
}
case <-time.After(5 * time.Second):
t.Fatalf("Command did not complete in 5 seconds; probable deadlock")
}
}
func TestCmdStreamBad(t *testing.T) {
badCmd := exec.Command("/bin/sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1")
out, err := CmdStream(badCmd, nil)
if err != nil {
t.Fatalf("Failed to start command: %s", err)
}
if output, err := ioutil.ReadAll(out); err == nil {
t.Fatalf("Command should have failed")
} else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" {
t.Fatalf("Wrong error value (%s)", err)
} else if s := string(output); s != "hello\n" {
t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output)
}
}
func TestCmdStreamGood(t *testing.T) {
cmd := exec.Command("/bin/sh", "-c", "echo hello; exit 0")
out, err := CmdStream(cmd, nil)
if err != nil {
t.Fatal(err)
}
if output, err := ioutil.ReadAll(out); err != nil {
t.Fatalf("Command should not have failed (err=%s)", err)
} else if s := string(output); s != "hello\n" {
t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output)
}
}
func tarUntar(t *testing.T, origin string, compression Compression) error {
archive, err := Tar(origin, compression)
if err != nil {
t.Fatal(err)
}
defer archive.Close()
buf := make([]byte, 10)
if _, err := archive.Read(buf); err != nil {
return err
}
wrap := io.MultiReader(bytes.NewReader(buf), archive)
detectedCompression := DetectCompression(buf)
if detectedCompression.Extension() != compression.Extension() {
return fmt.Errorf("Wrong compression detected. Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension())
}
tmp, err := ioutil.TempDir("", "docker-test-untar")
if err != nil {
return err
}
defer os.RemoveAll(tmp)
if err := Untar(wrap, tmp, nil); err != nil {
return err
}
if _, err := os.Stat(tmp); err != nil {
return err
}
changes, err := ChangesDirs(origin, tmp)
if err != nil {
return err
}
if len(changes) != 0 {
t.Fatalf("Unexpected differences after tarUntar: %v", changes)
}
return nil
}
func TestTarUntar(t *testing.T) {
origin, err := ioutil.TempDir("", "docker-test-untar-origin")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(origin)
if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
t.Fatal(err)
}
if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil {
t.Fatal(err)
}
for _, c := range []Compression{
Uncompressed,
Gzip,
} {
if err := tarUntar(t, origin, c); err != nil {
t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err)
}
}
}
// Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz
// use PAX Global Extended Headers.
// Failing to handle them prevents the archives from being unpacked during ADD
func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) {
hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader}
err := createTarFile("pax_global_header", "some_dir", &hdr, nil)
if err != nil {
t.Fatal(err)
}
}

View File

@@ -1,369 +0,0 @@
package archive
import (
"bytes"
"fmt"
"github.com/dotcloud/docker/pkg/system"
"github.com/dotcloud/docker/utils"
"github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
"io"
"os"
"path/filepath"
"strings"
"syscall"
"time"
)
type ChangeType int
const (
ChangeModify = iota
ChangeAdd
ChangeDelete
)
type Change struct {
Path string
Kind ChangeType
}
func (change *Change) String() string {
var kind string
switch change.Kind {
case ChangeModify:
kind = "C"
case ChangeAdd:
kind = "A"
case ChangeDelete:
kind = "D"
}
return fmt.Sprintf("%s %s", kind, change.Path)
}
// Gnu tar and the go tar writer don't have sub-second mtime
// precision, which is problematic when we apply changes via tar
// files. We handle this by comparing for exact times, *or* same
// second count and either a or b having exactly 0 nanoseconds
func sameFsTime(a, b time.Time) bool {
return a == b ||
(a.Unix() == b.Unix() &&
(a.Nanosecond() == 0 || b.Nanosecond() == 0))
}
func sameFsTimeSpec(a, b syscall.Timespec) bool {
return a.Sec == b.Sec &&
(a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
}
func Changes(layers []string, rw string) ([]Change, error) {
var changes []Change
err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
if err != nil {
return err
}
// Rebase path
path, err = filepath.Rel(rw, path)
if err != nil {
return err
}
path = filepath.Join("/", path)
// Skip root
if path == "/" {
return nil
}
// Skip AUFS metadata
if matched, err := filepath.Match("/.wh..wh.*", path); err != nil || matched {
return err
}
change := Change{
Path: path,
}
// Find out what kind of modification happened
file := filepath.Base(path)
// If there is a whiteout, then the file was removed
if strings.HasPrefix(file, ".wh.") {
originalFile := file[len(".wh."):]
change.Path = filepath.Join(filepath.Dir(path), originalFile)
change.Kind = ChangeDelete
} else {
// Otherwise, the file was added
change.Kind = ChangeAdd
// ...Unless it already existed in a top layer, in which case, it's a modification
for _, layer := range layers {
stat, err := os.Stat(filepath.Join(layer, path))
if err != nil && !os.IsNotExist(err) {
return err
}
if err == nil {
// The file existed in the top layer, so that's a modification
// However, if it's a directory, maybe it wasn't actually modified.
// If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
if stat.IsDir() && f.IsDir() {
if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
// Both directories are the same, don't record the change
return nil
}
}
change.Kind = ChangeModify
break
}
}
}
// Record change
changes = append(changes, change)
return nil
})
if err != nil && !os.IsNotExist(err) {
return nil, err
}
return changes, nil
}
type FileInfo struct {
parent *FileInfo
name string
stat syscall.Stat_t
children map[string]*FileInfo
capability []byte
}
func (root *FileInfo) LookUp(path string) *FileInfo {
parent := root
if path == "/" {
return root
}
pathElements := strings.Split(path, "/")
for _, elem := range pathElements {
if elem != "" {
child := parent.children[elem]
if child == nil {
return nil
}
parent = child
}
}
return parent
}
func (info *FileInfo) path() string {
if info.parent == nil {
return "/"
}
return filepath.Join(info.parent.path(), info.name)
}
func (info *FileInfo) isDir() bool {
return info.parent == nil || info.stat.Mode&syscall.S_IFDIR == syscall.S_IFDIR
}
func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
if oldInfo == nil {
// add
change := Change{
Path: info.path(),
Kind: ChangeAdd,
}
*changes = append(*changes, change)
}
// We make a copy so we can modify it to detect additions
// also, we only recurse on the old dir if the new info is a directory
// otherwise any previous delete/change is considered recursive
oldChildren := make(map[string]*FileInfo)
if oldInfo != nil && info.isDir() {
for k, v := range oldInfo.children {
oldChildren[k] = v
}
}
for name, newChild := range info.children {
oldChild, _ := oldChildren[name]
if oldChild != nil {
// change?
oldStat := &oldChild.stat
newStat := &newChild.stat
// Note: We can't compare inode or ctime or blocksize here, because these change
// when copying a file into a container. However, that is not generally a problem
// because any content change will change mtime, and any status change should
// be visible when actually comparing the stat fields. The only time this
// breaks down is if some code intentionally hides a change by setting
// back mtime
if oldStat.Mode != newStat.Mode ||
oldStat.Uid != newStat.Uid ||
oldStat.Gid != newStat.Gid ||
oldStat.Rdev != newStat.Rdev ||
// Don't look at size for dirs, it's not a good measure of change
(oldStat.Size != newStat.Size && oldStat.Mode&syscall.S_IFDIR != syscall.S_IFDIR) ||
!sameFsTimeSpec(system.GetLastModification(oldStat), system.GetLastModification(newStat)) ||
bytes.Compare(oldChild.capability, newChild.capability) != 0 {
change := Change{
Path: newChild.path(),
Kind: ChangeModify,
}
*changes = append(*changes, change)
}
// Remove from copy so we can detect deletions
delete(oldChildren, name)
}
newChild.addChanges(oldChild, changes)
}
for _, oldChild := range oldChildren {
// delete
change := Change{
Path: oldChild.path(),
Kind: ChangeDelete,
}
*changes = append(*changes, change)
}
}
func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
var changes []Change
info.addChanges(oldInfo, &changes)
return changes
}
func newRootFileInfo() *FileInfo {
root := &FileInfo{
name: "/",
children: make(map[string]*FileInfo),
}
return root
}
func collectFileInfo(sourceDir string) (*FileInfo, error) {
root := newRootFileInfo()
err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
if err != nil {
return err
}
// Rebase path
relPath, err := filepath.Rel(sourceDir, path)
if err != nil {
return err
}
relPath = filepath.Join("/", relPath)
if relPath == "/" {
return nil
}
parent := root.LookUp(filepath.Dir(relPath))
if parent == nil {
return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
}
info := &FileInfo{
name: filepath.Base(relPath),
children: make(map[string]*FileInfo),
parent: parent,
}
if err := syscall.Lstat(path, &info.stat); err != nil {
return err
}
info.capability, _ = system.Lgetxattr(path, "security.capability")
parent.children[info.name] = info
return nil
})
if err != nil {
return nil, err
}
return root, nil
}
// Compare two directories and generate an array of Change objects describing the changes
func ChangesDirs(newDir, oldDir string) ([]Change, error) {
oldRoot, err := collectFileInfo(oldDir)
if err != nil {
return nil, err
}
newRoot, err := collectFileInfo(newDir)
if err != nil {
return nil, err
}
return newRoot.Changes(oldRoot), nil
}
func ChangesSize(newDir string, changes []Change) int64 {
var size int64
for _, change := range changes {
if change.Kind == ChangeModify || change.Kind == ChangeAdd {
file := filepath.Join(newDir, change.Path)
fileInfo, _ := os.Lstat(file)
if fileInfo != nil && !fileInfo.IsDir() {
size += fileInfo.Size()
}
}
}
return size
}
func major(device uint64) uint64 {
return (device >> 8) & 0xfff
}
func minor(device uint64) uint64 {
return (device & 0xff) | ((device >> 12) & 0xfff00)
}
func ExportChanges(dir string, changes []Change) (Archive, error) {
reader, writer := io.Pipe()
tw := tar.NewWriter(writer)
go func() {
// In general we log errors here but ignore them because
// during e.g. a diff operation the container can continue
// mutating the filesystem and we can see transient errors
// from this
for _, change := range changes {
if change.Kind == ChangeDelete {
whiteOutDir := filepath.Dir(change.Path)
whiteOutBase := filepath.Base(change.Path)
whiteOut := filepath.Join(whiteOutDir, ".wh."+whiteOutBase)
hdr := &tar.Header{
Name: whiteOut[1:],
Size: 0,
ModTime: time.Now(),
AccessTime: time.Now(),
ChangeTime: time.Now(),
}
if err := tw.WriteHeader(hdr); err != nil {
utils.Debugf("Can't write whiteout header: %s\n", err)
}
} else {
path := filepath.Join(dir, change.Path)
if err := addTarFile(path, change.Path[1:], tw); err != nil {
utils.Debugf("Can't add file %s to tar: %s\n", path, err)
}
}
}
// Make sure to check the error on Close.
if err := tw.Close(); err != nil {
utils.Debugf("Can't close layer: %s\n", err)
}
writer.Close()
}()
return reader, nil
}

View File

@@ -1,301 +0,0 @@
package archive
import (
"io/ioutil"
"os"
"os/exec"
"path"
"sort"
"testing"
"time"
)
func max(x, y int) int {
if x >= y {
return x
}
return y
}
func copyDir(src, dst string) error {
cmd := exec.Command("cp", "-a", src, dst)
if err := cmd.Run(); err != nil {
return err
}
return nil
}
// Helper to sort []Change by path
type byPath struct{ changes []Change }
func (b byPath) Less(i, j int) bool { return b.changes[i].Path < b.changes[j].Path }
func (b byPath) Len() int { return len(b.changes) }
func (b byPath) Swap(i, j int) { b.changes[i], b.changes[j] = b.changes[j], b.changes[i] }
type FileType uint32
const (
Regular FileType = iota
Dir
Symlink
)
type FileData struct {
filetype FileType
path string
contents string
permissions os.FileMode
}
func createSampleDir(t *testing.T, root string) {
files := []FileData{
{Regular, "file1", "file1\n", 0600},
{Regular, "file2", "file2\n", 0666},
{Regular, "file3", "file3\n", 0404},
{Regular, "file4", "file4\n", 0600},
{Regular, "file5", "file5\n", 0600},
{Regular, "file6", "file6\n", 0600},
{Regular, "file7", "file7\n", 0600},
{Dir, "dir1", "", 0740},
{Regular, "dir1/file1-1", "file1-1\n", 01444},
{Regular, "dir1/file1-2", "file1-2\n", 0666},
{Dir, "dir2", "", 0700},
{Regular, "dir2/file2-1", "file2-1\n", 0666},
{Regular, "dir2/file2-2", "file2-2\n", 0666},
{Dir, "dir3", "", 0700},
{Regular, "dir3/file3-1", "file3-1\n", 0666},
{Regular, "dir3/file3-2", "file3-2\n", 0666},
{Dir, "dir4", "", 0700},
{Regular, "dir4/file3-1", "file4-1\n", 0666},
{Regular, "dir4/file3-2", "file4-2\n", 0666},
{Symlink, "symlink1", "target1", 0666},
{Symlink, "symlink2", "target2", 0666},
}
now := time.Now()
for _, info := range files {
p := path.Join(root, info.path)
if info.filetype == Dir {
if err := os.MkdirAll(p, info.permissions); err != nil {
t.Fatal(err)
}
} else if info.filetype == Regular {
if err := ioutil.WriteFile(p, []byte(info.contents), info.permissions); err != nil {
t.Fatal(err)
}
} else if info.filetype == Symlink {
if err := os.Symlink(info.contents, p); err != nil {
t.Fatal(err)
}
}
if info.filetype != Symlink {
// Set a consistent ctime, atime for all files and dirs
if err := os.Chtimes(p, now, now); err != nil {
t.Fatal(err)
}
}
}
}
// Create a directory, copy it, make sure we report no changes between the two
func TestChangesDirsEmpty(t *testing.T) {
src, err := ioutil.TempDir("", "docker-changes-test")
if err != nil {
t.Fatal(err)
}
createSampleDir(t, src)
dst := src + "-copy"
if err := copyDir(src, dst); err != nil {
t.Fatal(err)
}
changes, err := ChangesDirs(dst, src)
if err != nil {
t.Fatal(err)
}
if len(changes) != 0 {
t.Fatalf("Reported changes for identical dirs: %v", changes)
}
os.RemoveAll(src)
os.RemoveAll(dst)
}
func mutateSampleDir(t *testing.T, root string) {
// Remove a regular file
if err := os.RemoveAll(path.Join(root, "file1")); err != nil {
t.Fatal(err)
}
// Remove a directory
if err := os.RemoveAll(path.Join(root, "dir1")); err != nil {
t.Fatal(err)
}
// Remove a symlink
if err := os.RemoveAll(path.Join(root, "symlink1")); err != nil {
t.Fatal(err)
}
// Rewrite a file
if err := ioutil.WriteFile(path.Join(root, "file2"), []byte("fileNN\n"), 0777); err != nil {
t.Fatal(err)
}
// Replace a file
if err := os.RemoveAll(path.Join(root, "file3")); err != nil {
t.Fatal(err)
}
if err := ioutil.WriteFile(path.Join(root, "file3"), []byte("fileMM\n"), 0404); err != nil {
t.Fatal(err)
}
// Touch file
if err := os.Chtimes(path.Join(root, "file4"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil {
t.Fatal(err)
}
// Replace file with dir
if err := os.RemoveAll(path.Join(root, "file5")); err != nil {
t.Fatal(err)
}
if err := os.MkdirAll(path.Join(root, "file5"), 0666); err != nil {
t.Fatal(err)
}
// Create new file
if err := ioutil.WriteFile(path.Join(root, "filenew"), []byte("filenew\n"), 0777); err != nil {
t.Fatal(err)
}
// Create new dir
if err := os.MkdirAll(path.Join(root, "dirnew"), 0766); err != nil {
t.Fatal(err)
}
// Create a new symlink
if err := os.Symlink("targetnew", path.Join(root, "symlinknew")); err != nil {
t.Fatal(err)
}
// Change a symlink
if err := os.RemoveAll(path.Join(root, "symlink2")); err != nil {
t.Fatal(err)
}
if err := os.Symlink("target2change", path.Join(root, "symlink2")); err != nil {
t.Fatal(err)
}
// Replace dir with file
if err := os.RemoveAll(path.Join(root, "dir2")); err != nil {
t.Fatal(err)
}
if err := ioutil.WriteFile(path.Join(root, "dir2"), []byte("dir2\n"), 0777); err != nil {
t.Fatal(err)
}
// Touch dir
if err := os.Chtimes(path.Join(root, "dir3"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil {
t.Fatal(err)
}
}
func TestChangesDirsMutated(t *testing.T) {
src, err := ioutil.TempDir("", "docker-changes-test")
if err != nil {
t.Fatal(err)
}
createSampleDir(t, src)
dst := src + "-copy"
if err := copyDir(src, dst); err != nil {
t.Fatal(err)
}
defer os.RemoveAll(src)
defer os.RemoveAll(dst)
mutateSampleDir(t, dst)
changes, err := ChangesDirs(dst, src)
if err != nil {
t.Fatal(err)
}
sort.Sort(byPath{changes})
expectedChanges := []Change{
{"/dir1", ChangeDelete},
{"/dir2", ChangeModify},
{"/dir3", ChangeModify},
{"/dirnew", ChangeAdd},
{"/file1", ChangeDelete},
{"/file2", ChangeModify},
{"/file3", ChangeModify},
{"/file4", ChangeModify},
{"/file5", ChangeModify},
{"/filenew", ChangeAdd},
{"/symlink1", ChangeDelete},
{"/symlink2", ChangeModify},
{"/symlinknew", ChangeAdd},
}
for i := 0; i < max(len(changes), len(expectedChanges)); i++ {
if i >= len(expectedChanges) {
t.Fatalf("unexpected change %s\n", changes[i].String())
}
if i >= len(changes) {
t.Fatalf("no change for expected change %s\n", expectedChanges[i].String())
}
if changes[i].Path == expectedChanges[i].Path {
if changes[i] != expectedChanges[i] {
t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String())
}
} else if changes[i].Path < expectedChanges[i].Path {
t.Fatalf("unexpected change %s\n", changes[i].String())
} else {
t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String())
}
}
}
func TestApplyLayer(t *testing.T) {
src, err := ioutil.TempDir("", "docker-changes-test")
if err != nil {
t.Fatal(err)
}
createSampleDir(t, src)
defer os.RemoveAll(src)
dst := src + "-copy"
if err := copyDir(src, dst); err != nil {
t.Fatal(err)
}
mutateSampleDir(t, dst)
defer os.RemoveAll(dst)
changes, err := ChangesDirs(dst, src)
if err != nil {
t.Fatal(err)
}
layer, err := ExportChanges(dst, changes)
if err != nil {
t.Fatal(err)
}
layerCopy, err := NewTempArchive(layer, "")
if err != nil {
t.Fatal(err)
}
if err := ApplyLayer(src, layerCopy); err != nil {
t.Fatal(err)
}
changes2, err := ChangesDirs(src, dst)
if err != nil {
t.Fatal(err)
}
if len(changes2) != 0 {
t.Fatalf("Unexpected differences after reapplying mutation: %v", changes2)
}
}


@@ -1,160 +0,0 @@
package archive
import (
"fmt"
"github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"syscall"
"time"
)
// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes.
// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major,
// then the top 12 bits of the minor
func mkdev(major int64, minor int64) uint32 {
return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
}
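As a sanity check on this encoding, here is a standalone sketch (not part of the diff) using two well-known device numbers: /dev/null is major 1, minor 3, and /dev/sda is major 8, minor 0.

package main

import "fmt"

func mkdev(major int64, minor int64) uint32 {
    return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
}

func main() {
    // low 8 bits: minor low byte; bits 8-19: major; bits 20-31: minor high bits
    fmt.Printf("%#x\n", mkdev(1, 3)) // 0x103 (/dev/null)
    fmt.Printf("%#x\n", mkdev(8, 0)) // 0x800 (/dev/sda)
}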
func timeToTimespec(time time.Time) (ts syscall.Timespec) {
if time.IsZero() {
// Return the UTIME_OMIT sentinel for utimensat(2): this nsec value
// tells the kernel to leave the corresponding timestamp unchanged
ts.Sec = 0
ts.Nsec = ((1 << 30) - 2)
return
}
return syscall.NsecToTimespec(time.UnixNano())
}
// ApplyLayer parses a diff in the standard layer format from `layer`, and
// applies it to the directory `dest`.
func ApplyLayer(dest string, layer ArchiveReader) error {
// We need to be able to set any perms
oldmask := syscall.Umask(0)
defer syscall.Umask(oldmask)
layer, err := DecompressStream(layer)
if err != nil {
return err
}
tr := tar.NewReader(layer)
var dirs []*tar.Header
aufsTempdir := ""
aufsHardlinks := make(map[string]*tar.Header)
// Iterate through the files in the archive.
for {
hdr, err := tr.Next()
if err == io.EOF {
// end of tar archive
break
}
if err != nil {
return err
}
// Normalize name, for safety and for a simple is-root check
hdr.Name = filepath.Clean(hdr.Name)
if !strings.HasSuffix(hdr.Name, "/") {
// Not the root directory, ensure that the parent directory exists.
// This happened in some tests where an image had a tarfile without any
// parent directories.
parent := filepath.Dir(hdr.Name)
parentPath := filepath.Join(dest, parent)
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
err = os.MkdirAll(parentPath, 0755)
if err != nil {
return err
}
}
}
// Skip AUFS metadata dirs
if strings.HasPrefix(hdr.Name, ".wh..wh.") {
// Regular files inside /.wh..wh.plnk can be used as hardlink targets
// We don't want this directory, but we need the files in it so that
// such hardlinks can be resolved.
if strings.HasPrefix(hdr.Name, ".wh..wh.plnk") && hdr.Typeflag == tar.TypeReg {
basename := filepath.Base(hdr.Name)
aufsHardlinks[basename] = hdr
if aufsTempdir == "" {
if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
return err
}
defer os.RemoveAll(aufsTempdir)
}
if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr); err != nil {
return err
}
}
continue
}
path := filepath.Join(dest, hdr.Name)
base := filepath.Base(path)
if strings.HasPrefix(base, ".wh.") {
originalBase := base[len(".wh."):]
originalPath := filepath.Join(filepath.Dir(path), originalBase)
if err := os.RemoveAll(originalPath); err != nil {
return err
}
} else {
// If path exists we almost always just want to remove and replace it.
// The only exception is when it is a directory *and* the file from
// the layer is also a directory. Then we want to merge them (i.e.
// just apply the metadata from the layer).
if fi, err := os.Lstat(path); err == nil {
if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
if err := os.RemoveAll(path); err != nil {
return err
}
}
}
srcData := io.Reader(tr)
srcHdr := hdr
// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
// we manually retarget these into the temporary files we extracted them into
if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), ".wh..wh.plnk") {
linkBasename := filepath.Base(hdr.Linkname)
srcHdr = aufsHardlinks[linkBasename]
if srcHdr == nil {
return fmt.Errorf("Invalid aufs hardlink")
}
tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
if err != nil {
return err
}
defer tmpFile.Close()
srcData = tmpFile
}
if err := createTarFile(path, dest, srcHdr, srcData); err != nil {
return err
}
// Directory mtimes must be handled at the end to avoid further
// file creation in them to modify the directory mtime
if hdr.Typeflag == tar.TypeDir {
dirs = append(dirs, hdr)
}
}
}
for _, hdr := range dirs {
path := filepath.Join(dest, hdr.Name)
ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
if err := syscall.UtimesNano(path, ts); err != nil {
return err
}
}
return nil
}
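To make the whiteout convention concrete, here is a sketch that feeds ApplyLayer a hand-built layer, assuming the Generate helper from the next hunk (its Archive result is a plain reader, so it should satisfy ArchiveReader, and DecompressStream is assumed to pass uncompressed input through; paths are hypothetical):

// adds or replaces dest/etc/hosts, and deletes dest/etc/hostname via a whiteout
layer, err := Generate(
    "etc/hosts", "127.0.0.1 localhost\n",
    "etc/.wh.hostname", "",
)
if err != nil {
    log.Fatal(err)
}
if err := ApplyLayer("/tmp/rootfs", layer); err != nil {
    log.Fatal(err)
}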


@@ -1,59 +0,0 @@
package archive
import (
"bytes"
"github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
"io/ioutil"
)
// Generate generates a new archive from the content provided
// as input.
//
// `files` is a sequence of path/content pairs. A new file is
// added to the archive for each pair.
// If the last pair is incomplete, the file is created with an
// empty content. For example:
//
// Generate("foo.txt", "hello world", "emptyfile")
//
// The above call will return an archive with 2 files:
// * ./foo.txt with content "hello world"
// * ./emptyfile with empty content
//
// FIXME: stream content instead of buffering
// FIXME: specify permissions and other archive metadata
func Generate(input ...string) (Archive, error) {
files := parseStringPairs(input...)
buf := new(bytes.Buffer)
tw := tar.NewWriter(buf)
for _, file := range files {
name, content := file[0], file[1]
hdr := &tar.Header{
Name: name,
Size: int64(len(content)),
}
if err := tw.WriteHeader(hdr); err != nil {
return nil, err
}
if _, err := tw.Write([]byte(content)); err != nil {
return nil, err
}
}
if err := tw.Close(); err != nil {
return nil, err
}
return ioutil.NopCloser(buf), nil
}
func parseStringPairs(input ...string) (output [][2]string) {
output = make([][2]string, 0, len(input)/2+1)
for i := 0; i < len(input); i += 2 {
var pair [2]string
pair[0] = input[i]
if i+1 < len(input) {
pair[1] = input[i+1]
}
output = append(output, pair)
}
return
}
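A minimal usage sketch, reading the generated archive back with the standard tar reader (inside the archive package; assumes fmt, io, and log imports in addition to those above):

a, err := Generate("foo.txt", "hello world", "emptyfile")
if err != nil {
    log.Fatal(err)
}
tr := tar.NewReader(a)
for {
    hdr, err := tr.Next()
    if err == io.EOF {
        break // end of archive
    }
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(hdr.Name, hdr.Size) // "foo.txt 11", then "emptyfile 0"
}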

77
archive_test.go Normal file

@@ -0,0 +1,77 @@
package docker
import (
"io"
"io/ioutil"
"os"
"os/exec"
"testing"
"time"
)
func TestCmdStreamLargeStderr(t *testing.T) {
cmd := exec.Command("/bin/sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello")
out, err := CmdStream(cmd)
if err != nil {
t.Fatalf("Failed to start command: " + err.Error())
}
errCh := make(chan error)
go func() {
_, err := io.Copy(ioutil.Discard, out)
errCh <- err
}()
select {
case err := <-errCh:
if err != nil {
t.Fatalf("Command should not have failed (err=%s...)", err.Error()[:100])
}
case <-time.After(5 * time.Second):
t.Fatalf("Command did not complete in 5 seconds; probable deadlock")
}
}
func TestCmdStreamBad(t *testing.T) {
badCmd := exec.Command("/bin/sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1")
out, err := CmdStream(badCmd)
if err != nil {
t.Fatalf("Failed to start command: " + err.Error())
}
if output, err := ioutil.ReadAll(out); err == nil {
t.Fatalf("Command should have failed")
} else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" {
t.Fatalf("Wrong error value (%s)", err.Error())
} else if s := string(output); s != "hello\n" {
t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output)
}
}
func TestCmdStreamGood(t *testing.T) {
cmd := exec.Command("/bin/sh", "-c", "echo hello; exit 0")
out, err := CmdStream(cmd)
if err != nil {
t.Fatal(err)
}
if output, err := ioutil.ReadAll(out); err != nil {
t.Fatalf("Command should not have failed (err=%s)", err)
} else if s := string(output); s != "hello\n" {
t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output)
}
}
func TestTarUntar(t *testing.T) {
archive, err := Tar(".", Uncompressed)
if err != nil {
t.Fatal(err)
}
tmp, err := ioutil.TempDir("", "docker-test-untar")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmp)
if err := Untar(archive, tmp); err != nil {
t.Fatal(err)
}
if _, err := os.Stat(tmp); err != nil {
t.Fatalf("Error stating %s: %s", tmp, err.Error())
}
}

168
auth/auth.go Normal file

@@ -0,0 +1,168 @@
package auth
import (
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"path"
"strings"
)
// Where we store the config file
const CONFIGFILE = ".dockercfg"
// the registry server we want to login against
const REGISTRY_SERVER = "https://registry.docker.io"
type AuthConfig struct {
Username string `json:"username"`
Password string `json:"password"`
Email string `json:"email"`
rootPath string `json:"-"`
}
func NewAuthConfig(username, password, email, rootPath string) *AuthConfig {
return &AuthConfig{
Username: username,
Password: password,
Email: email,
rootPath: rootPath,
}
}
// create a base64 encoded auth string to store in config
func EncodeAuth(authConfig *AuthConfig) string {
authStr := authConfig.Username + ":" + authConfig.Password
msg := []byte(authStr)
encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg)))
base64.StdEncoding.Encode(encoded, msg)
return string(encoded)
}
// decode the auth string
func DecodeAuth(authStr string) (*AuthConfig, error) {
decLen := base64.StdEncoding.DecodedLen(len(authStr))
decoded := make([]byte, decLen)
authByte := []byte(authStr)
n, err := base64.StdEncoding.Decode(decoded, authByte)
if err != nil {
return nil, err
}
if n > decLen {
return nil, fmt.Errorf("Something went wrong decoding auth config")
}
arr := strings.Split(string(decoded), ":")
if len(arr) != 2 {
return nil, fmt.Errorf("Invalid auth configuration file")
}
password := strings.Trim(arr[1], "\x00")
return &AuthConfig{Username: arr[0], Password: password}, nil
}
// load up the auth config information and return values
// FIXME: use the internal golang config parser
func LoadConfig(rootPath string) (*AuthConfig, error) {
confFile := path.Join(rootPath, CONFIGFILE)
if _, err := os.Stat(confFile); err != nil {
return &AuthConfig{}, fmt.Errorf("The Auth config file is missing")
}
b, err := ioutil.ReadFile(confFile)
if err != nil {
return nil, err
}
arr := strings.Split(string(b), "\n")
if len(arr) < 2 {
return nil, fmt.Errorf("The Auth config file is empty")
}
origAuth := strings.Split(arr[0], " = ")
origEmail := strings.Split(arr[1], " = ")
authConfig, err := DecodeAuth(origAuth[1])
if err != nil {
return nil, err
}
authConfig.Email = origEmail[1]
authConfig.rootPath = rootPath
return authConfig, nil
}
// save the auth config
func saveConfig(rootPath, authStr string, email string) error {
lines := "auth = " + authStr + "\n" + "email = " + email + "\n"
b := []byte(lines)
err := ioutil.WriteFile(path.Join(rootPath, CONFIGFILE), b, 0600)
if err != nil {
return err
}
return nil
}
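The resulting config file is just the two key/value lines written above. With the credentials used in auth_test.go further down (ken/test, test@example.com), ~/.dockercfg would read:

auth = a2VuOnRlc3Q=
email = test@example.com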
// try to register/login to the registry server
func Login(authConfig *AuthConfig) (string, error) {
storeConfig := false
reqStatusCode := 0
var status string
var errMsg string
var reqBody []byte
jsonBody, err := json.Marshal(authConfig)
if err != nil {
errMsg = fmt.Sprintf("Config Error: %s", err)
return "", errors.New(errMsg)
}
// using `bytes.NewReader(jsonBody)` here causes the server to respond with a 411 status.
b := strings.NewReader(string(jsonBody))
req1, err := http.Post(REGISTRY_SERVER+"/v1/users", "application/json; charset=utf-8", b)
if err != nil {
errMsg = fmt.Sprintf("Server Error: %s", err)
return "", errors.New(errMsg)
}
reqStatusCode = req1.StatusCode
defer req1.Body.Close()
reqBody, err = ioutil.ReadAll(req1.Body)
if err != nil {
errMsg = fmt.Sprintf("Server Error: [%#v] %s", reqStatusCode, err)
return "", errors.New(errMsg)
}
if reqStatusCode == 201 {
status = "Account Created\n"
storeConfig = true
} else if reqStatusCode == 400 {
// FIXME: This should be 'exists', not 'exist'. Need to change on the server first.
if string(reqBody) == "Username or email already exist" {
client := &http.Client{}
req, err := http.NewRequest("GET", REGISTRY_SERVER+"/v1/users", nil)
if err != nil {
return "", err
}
req.SetBasicAuth(authConfig.Username, authConfig.Password)
resp, err := client.Do(req)
if err != nil {
return "", err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", err
}
if resp.StatusCode == 200 {
status = "Login Succeeded\n"
storeConfig = true
} else {
status = fmt.Sprintf("Login: %s", body)
return "", errors.New(status)
}
} else {
status = fmt.Sprintf("Registration: %s", reqBody)
return "", errors.New(status)
}
} else {
status = fmt.Sprintf("[%s] : %s", reqStatusCode, reqBody)
return "", errors.New(status)
}
if storeConfig {
authStr := EncodeAuth(authConfig)
saveConfig(authConfig.rootPath, authStr, authConfig.Email)
}
return status, nil
}

23
auth/auth_test.go Normal file

@@ -0,0 +1,23 @@
package auth
import (
"testing"
)
func TestEncodeAuth(t *testing.T) {
newAuthConfig := &AuthConfig{Username: "ken", Password: "test", Email: "test@example.com"}
authStr := EncodeAuth(newAuthConfig)
decAuthConfig, err := DecodeAuth(authStr)
if err != nil {
t.Fatal(err)
}
if newAuthConfig.Username != decAuthConfig.Username {
t.Fatal("Encode Username doesn't match decoded Username")
}
if newAuthConfig.Password != decAuthConfig.Password {
t.Fatal("Encode Password doesn't match decoded Password")
}
if authStr != "a2VuOnRlc3Q=" {
t.Fatal("AuthString encoding isn't correct.")
}
}

20
buildbot/README.rst Normal file

@@ -0,0 +1,20 @@
Buildbot
========
Buildbot is a continuous integration system designed to automate the
build/test cycle. By automatically rebuilding and testing the tree each time
something has changed, build problems are pinpointed quickly, before other
developers are inconvenienced by the failure.
Running 'make hack' at the docker root directory spawns a virtual
machine in the background running a buildbot instance and adds a git
post-commit hook that automatically runs the docker tests for you.
You can check your buildbot instance at http://192.168.33.21:8010/waterfall
Buildbot dependencies
---------------------
vagrant, virtualbox packages and python package requests

28
buildbot/Vagrantfile vendored Normal file

@@ -0,0 +1,28 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
$BUILDBOT_IP = '192.168.33.21'
def v10(config)
config.vm.box = "quantal64_3.5.0-25"
config.vm.box_url = "http://get.docker.io/vbox/ubuntu/12.10/quantal64_3.5.0-25.box"
config.vm.share_folder 'v-data', '/data/docker', File.dirname(__FILE__) + '/..'
config.vm.network :hostonly, $BUILDBOT_IP
# Ensure puppet is installed on the instance
config.vm.provision :shell, :inline => 'apt-get -qq update; apt-get install -y puppet'
config.vm.provision :puppet do |puppet|
puppet.manifests_path = '.'
puppet.manifest_file = 'buildbot.pp'
puppet.options = ['--templatedir','.']
end
end
Vagrant::VERSION < '1.1.0' and Vagrant::Config.run do |config|
v10(config)
end
Vagrant::VERSION >= '1.1.0' and Vagrant.configure('1') do |config|
v10(config)
end


@@ -0,0 +1,43 @@
#!/bin/bash
# Auto setup of buildbot configuration. Package installation is handled
# by buildbot.pp
# Dependencies: buildbot, buildbot-slave, supervisor
SLAVE_NAME='buildworker'
SLAVE_SOCKET='localhost:9989'
BUILDBOT_PWD='pass-docker'
USER='vagrant'
ROOT_PATH='/data/buildbot'
DOCKER_PATH='/data/docker'
BUILDBOT_CFG="$DOCKER_PATH/buildbot/buildbot-cfg"
IP=$(grep BUILDBOT_IP /data/docker/buildbot/Vagrantfile | awk -F "'" '{ print $2; }')
function run { su $USER -c "$1"; }
export PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin
# Exit if buildbot has already been installed
[ -d "$ROOT_PATH" ] && exit 0
# Setup buildbot
run "mkdir -p ${ROOT_PATH}"
cd ${ROOT_PATH}
run "buildbot create-master master"
run "cp $BUILDBOT_CFG/master.cfg master"
run "sed -i 's/localhost/$IP/' master/master.cfg"
run "buildslave create-slave slave $SLAVE_SOCKET $SLAVE_NAME $BUILDBOT_PWD"
# Allow buildbot subprocesses (docker tests) to properly run in containers,
# in particular with docker -u
run "sed -i 's/^umask = None/umask = 000/' ${ROOT_PATH}/slave/buildbot.tac"
# Setup supervisor
cp $BUILDBOT_CFG/buildbot.conf /etc/supervisor/conf.d/buildbot.conf
sed -i "s/^chmod=0700.*0700./chmod=0770\nchown=root:$USER/" /etc/supervisor/supervisord.conf
kill -HUP `pgrep -f "/usr/bin/python /usr/bin/supervisord"`
# Add git hook
cp $BUILDBOT_CFG/post-commit $DOCKER_PATH/.git/hooks
sed -i "s/localhost/$IP/" $DOCKER_PATH/.git/hooks/post-commit


@@ -0,0 +1,18 @@
[program:buildmaster]
command=su vagrant -c "buildbot start master"
directory=/data/buildbot
chown=root:root
redirect_stderr=true
stdout_logfile=/var/log/supervisor/buildbot-master.log
stderr_logfile=/var/log/supervisor/buildbot-master.log
[program:buildworker]
command=buildslave start slave
directory=/data/buildbot
chown=root:root
redirect_stderr=true
stdout_logfile=/var/log/supervisor/buildbot-slave.log
stderr_logfile=/var/log/supervisor/buildbot-slave.log
[group:buildbot]
programs=buildmaster,buildworker


@@ -0,0 +1,46 @@
import os
from buildbot.buildslave import BuildSlave
from buildbot.schedulers.forcesched import ForceScheduler
from buildbot.config import BuilderConfig
from buildbot.process.factory import BuildFactory
from buildbot.steps.shell import ShellCommand
from buildbot.status import html
from buildbot.status.web import authz, auth
PORT_WEB = 8010 # Buildbot webserver port
PORT_MASTER = 9989 # Port where buildbot master listen buildworkers
TEST_USER = 'buildbot' # Credential to authenticate build triggers
TEST_PWD = 'docker' # Credential to authenticate build triggers
BUILDER_NAME = 'docker'
BUILDPASSWORD = 'pass-docker' # Credential to authenticate buildworkers
DOCKER_PATH = '/data/docker'
c = BuildmasterConfig = {}
c['title'] = "Docker"
c['titleURL'] = "waterfall"
c['buildbotURL'] = "http://localhost:{0}/".format(PORT_WEB)
c['db'] = {'db_url':"sqlite:///state.sqlite"}
c['slaves'] = [BuildSlave('buildworker', BUILDPASSWORD)]
c['slavePortnum'] = PORT_MASTER
c['schedulers'] = [ForceScheduler(name='trigger',builderNames=[BUILDER_NAME])]
# Docker test command
test_cmd = """(
cd {0}/..; rm -rf docker-tmp; git clone docker docker-tmp;
cd docker-tmp; make test; exit_status=$?;
cd ..; rm -rf docker-tmp; exit $exit_status)""".format(DOCKER_PATH)
# Builder
factory = BuildFactory()
factory.addStep(ShellCommand(description='Docker',logEnviron=False,
usePTY=True,command=test_cmd))
c['builders'] = [BuilderConfig(name=BUILDER_NAME,slavenames=['buildworker'],
factory=factory)]
# Status
authz_cfg=authz.Authz(auth=auth.BasicAuth([(TEST_USER,TEST_PWD)]),
forceBuild='auth')
c['status'] = [html.WebStatus(http_port=PORT_WEB, authz=authz_cfg)]


@@ -0,0 +1,21 @@
#!/usr/bin/env python
'''Trigger buildbot docker test build
post-commit git hook designed to automatically trigger buildbot on
the provided vagrant docker VM.'''
import requests
USERNAME = 'buildbot'
PASSWORD = 'docker'
BASE_URL = 'http://localhost:8010'
path = lambda s: BASE_URL + '/' + s
try:
session = requests.session()
session.post(path('login'),data={'username':USERNAME,'passwd':PASSWORD})
session.post(path('builders/docker/force'),
data={'forcescheduler':'trigger','reason':'Test commit'})
except:
pass

32
buildbot/buildbot.pp Normal file

@@ -0,0 +1,32 @@
node default {
$USER = 'vagrant'
$ROOT_PATH = '/data/buildbot'
$DOCKER_PATH = '/data/docker'
exec {'apt_update': command => '/usr/bin/apt-get update' }
Package { require => Exec['apt_update'] }
group {'puppet': ensure => 'present'}
# Install dependencies
Package { ensure => 'installed' }
package { ['python-dev','python-pip','supervisor','lxc','bsdtar','git','golang']: }
file{[ '/data' ]:
owner => $USER, group => $USER, ensure => 'directory' }
file {'/var/tmp/requirements.txt':
content => template('requirements.txt') }
exec {'requirements':
require => [ Package['python-dev'], Package['python-pip'],
File['/var/tmp/requirements.txt'] ],
cwd => '/var/tmp',
command => "/bin/sh -c '(/usr/bin/pip install -r requirements.txt;
rm /var/tmp/requirements.txt)'" }
exec {'buildbot-cfg-sh':
require => [ Package['supervisor'], Exec['requirements']],
path => '/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin',
cwd => '/data',
command => "$DOCKER_PATH/buildbot/buildbot-cfg/buildbot-cfg.sh" }
}


@@ -0,0 +1,6 @@
sqlalchemy<=0.7.9
sqlalchemy-migrate>=0.7.2
buildbot==0.8.7p1
buildbot_slave==0.8.7p1
nose==1.2.1
requests==1.1.0

169
builder.go Normal file

@@ -0,0 +1,169 @@
package docker
import (
"bufio"
"fmt"
"io"
"strings"
)
type Builder struct {
runtime *Runtime
}
func NewBuilder(runtime *Runtime) *Builder {
return &Builder{
runtime: runtime,
}
}
func (builder *Builder) Run(image *Image, cmd ...string) (*Container, error) {
// FIXME: pass a NopWriter instead of nil
config, err := ParseRun(append([]string{"-d", image.Id}, cmd...), nil, builder.runtime.capabilities)
if err != nil {
return nil, err
}
if config.Image == "" {
return nil, fmt.Errorf("Image not specified")
}
if len(config.Cmd) == 0 {
return nil, fmt.Errorf("Command not specified")
}
if config.Tty {
return nil, fmt.Errorf("The tty mode is not supported within the builder")
}
// Create new container
container, err := builder.runtime.Create(config)
if err != nil {
return nil, err
}
if err := container.Start(); err != nil {
return nil, err
}
return container, nil
}
func (builder *Builder) Commit(container *Container, repository, tag, comment, author string) (*Image, error) {
return builder.runtime.Commit(container.Id, repository, tag, comment, author)
}
func (builder *Builder) clearTmp(containers, images map[string]struct{}) {
for c := range containers {
tmp := builder.runtime.Get(c)
builder.runtime.Destroy(tmp)
Debugf("Removing container %s", c)
}
for i := range images {
builder.runtime.graph.Delete(i)
Debugf("Removing image %s", i)
}
}
func (builder *Builder) Build(dockerfile io.Reader, stdout io.Writer) error {
var (
image, base *Image
tmpContainers map[string]struct{} = make(map[string]struct{})
tmpImages map[string]struct{} = make(map[string]struct{})
)
defer builder.clearTmp(tmpContainers, tmpImages)
file := bufio.NewReader(dockerfile)
for {
line, err := file.ReadString('\n')
if err != nil {
if err == io.EOF {
break
}
return err
}
line = strings.TrimSpace(line)
// Skip comments and empty line
if len(line) == 0 || line[0] == '#' {
continue
}
tmp := strings.SplitN(line, " ", 2)
if len(tmp) != 2 {
return fmt.Errorf("Invalid Dockerfile format")
}
switch tmp[0] {
case "from":
fmt.Fprintf(stdout, "FROM %s\n", tmp[1])
image, err = builder.runtime.repositories.LookupImage(tmp[1])
if err != nil {
return err
}
case "run":
fmt.Fprintf(stdout, "RUN %s\n", tmp[1])
if image == nil {
return fmt.Errorf("Please provide a source image with `from` prior to run")
}
// Create the container and start it
c, err := builder.Run(image, "/bin/sh", "-c", tmp[1])
if err != nil {
return err
}
tmpContainers[c.Id] = struct{}{}
// Wait for it to finish
if result := c.Wait(); result != 0 {
return fmt.Errorf("!!! '%s' return non-zero exit code '%d'. Aborting.", tmp[1], result)
}
// Commit the container
base, err = builder.Commit(c, "", "", "", "")
if err != nil {
return err
}
tmpImages[base.Id] = struct{}{}
fmt.Fprintf(stdout, "===> %s\n", base.ShortId())
case "copy":
if image == nil {
return fmt.Errorf("Please provide a source image with `from` prior to copy")
}
if base == nil {
base = image
}
tmp2 := strings.SplitN(tmp[1], " ", 2)
if len(tmp2) != 2 {
return fmt.Errorf("Invalid COPY format")
}
fmt.Fprintf(stdout, "COPY %s to %s in %s\n", tmp2[0], tmp2[1], base.ShortId())
file, err := Download(tmp2[0], stdout)
if err != nil {
return err
}
defer file.Body.Close()
c, err := builder.Run(base, "echo", "insert", tmp2[0], tmp2[1])
if err != nil {
return err
}
if err := c.Inject(file.Body, tmp2[1]); err != nil {
return err
}
base, err = builder.Commit(c, "", "", "", "")
if err != nil {
return err
}
fmt.Fprintf(stdout, "===> %s\n", base.ShortId())
default:
fmt.Fprintf(stdout, "Skipping unknown op %s\n", tmp[0])
}
}
if base != nil {
// The build is successful, keep the temporary containers and images
for i := range tmpImages {
delete(tmpImages, i)
}
for i := range tmpContainers {
delete(tmpContainers, i)
}
fmt.Fprintf(stdout, "Build finished. image id: %s\n", base.ShortId())
} else {
fmt.Fprintf(stdout, "An error occured during the build\n")
}
return nil
}
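Putting the three recognized keywords together, a Dockerfile this parser accepts would look like the sketch below (keywords are matched in lowercase; the base image name and URL are hypothetical, and the copy source is fetched with the Download helper, so it is a URL rather than a local path):

# comments and empty lines are skipped
from base
run apt-get install -y curl
copy http://example.com/app.tar.gz /tmp/app.tar.gz
run tar -C /usr/local -xzf /tmp/app.tar.gz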


@@ -1,38 +0,0 @@
package builtins
import (
api "github.com/dotcloud/docker/api/server"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/runtime/networkdriver/bridge"
"github.com/dotcloud/docker/server"
)
func Register(eng *engine.Engine) {
daemon(eng)
remote(eng)
}
// remote: a RESTful api for cross-docker communication
func remote(eng *engine.Engine) {
eng.Register("serveapi", api.ServeApi)
}
// daemon: a default execution and storage backend for Docker on Linux,
// with the following underlying components:
//
// * Pluggable storage drivers including aufs, vfs, lvm and btrfs.
// * Pluggable execution drivers including lxc and chroot.
//
// In practice `daemon` still includes most core Docker components, including:
//
// * The reference registry client implementation
// * Image management
// * The build facility
// * Logging
//
// These components should be broken off into plugins of their own.
//
func daemon(eng *engine.Engine) {
eng.Register("initserver", server.InitServer)
eng.Register("init_networkdriver", bridge.InitDriver)
}

106
changes.go Normal file

@@ -0,0 +1,106 @@
package docker
import (
"fmt"
"os"
"path/filepath"
"strings"
)
type ChangeType int
const (
ChangeModify = iota
ChangeAdd
ChangeDelete
)
type Change struct {
Path string
Kind ChangeType
}
func (change *Change) String() string {
var kind string
switch change.Kind {
case ChangeModify:
kind = "C"
case ChangeAdd:
kind = "A"
case ChangeDelete:
kind = "D"
}
return fmt.Sprintf("%s %s", kind, change.Path)
}
func Changes(layers []string, rw string) ([]Change, error) {
var changes []Change
err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
if err != nil {
return err
}
// Rebase path
path, err = filepath.Rel(rw, path)
if err != nil {
return err
}
path = filepath.Join("/", path)
// Skip root
if path == "/" {
return nil
}
// Skip AUFS metadata
if matched, err := filepath.Match("/.wh..wh.*", path); err != nil || matched {
return err
}
change := Change{
Path: path,
}
// Find out what kind of modification happened
file := filepath.Base(path)
// If there is a whiteout, then the file was removed
if strings.HasPrefix(file, ".wh.") {
originalFile := strings.TrimPrefix(file, ".wh.")
change.Path = filepath.Join(filepath.Dir(path), originalFile)
change.Kind = ChangeDelete
} else {
// Otherwise, the file was added
change.Kind = ChangeAdd
// ...Unless it already existed in a top layer, in which case, it's a modification
for _, layer := range layers {
stat, err := os.Stat(filepath.Join(layer, path))
if err != nil && !os.IsNotExist(err) {
return err
}
if err == nil {
// The file existed in the top layer, so that's a modification
// However, if it's a directory, maybe it wasn't actually modified.
// If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
if stat.IsDir() && f.IsDir() {
if f.Size() == stat.Size() && f.Mode() == stat.Mode() && f.ModTime() == stat.ModTime() {
// Both directories are the same, don't record the change
return nil
}
}
change.Kind = ChangeModify
break
}
}
}
// Record change
changes = append(changes, change)
return nil
})
if err != nil {
return nil, err
}
return changes, nil
}
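A usage sketch of Changes, assuming layers holds the read-only layer paths and rw is a container's read-write branch; each entry prints in the "K path" form produced by Change.String above (the reported paths here are hypothetical):

changes, err := Changes(layers, rw)
if err != nil {
    log.Fatal(err)
}
for _, c := range changes {
    fmt.Println(c.String())
}
// C /etc/hosts   (existed in a layer, rewritten in rw)
// A /app         (only present in rw)
// D /old         (rw contains the whiteout file .wh.old)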

1083
commands.go Normal file

File diff suppressed because it is too large

397
commands_test.go Normal file

@@ -0,0 +1,397 @@
package docker
import (
"bufio"
"fmt"
"github.com/dotcloud/docker/rcli"
"io"
"io/ioutil"
"strings"
"testing"
"time"
)
func closeWrap(args ...io.Closer) error {
e := false
ret := fmt.Errorf("Error closing elements")
for _, c := range args {
if err := c.Close(); err != nil {
e = true
ret = fmt.Errorf("%s\n%s", ret, err)
}
}
if e {
return ret
}
return nil
}
func setTimeout(t *testing.T, msg string, d time.Duration, f func()) {
c := make(chan bool)
// Make sure we are not too long
go func() {
time.Sleep(d)
c <- true
}()
go func() {
f()
c <- false
}()
if <-c {
t.Fatal(msg)
}
}
func assertPipe(input, output string, r io.Reader, w io.Writer, count int) error {
for i := 0; i < count; i++ {
if _, err := w.Write([]byte(input)); err != nil {
return err
}
o, err := bufio.NewReader(r).ReadString('\n')
if err != nil {
return err
}
if strings.Trim(o, " \r\n") != output {
return fmt.Errorf("Unexpected output. Expected [%s], received [%s]", output, o)
}
}
return nil
}
func cmdWait(srv *Server, container *Container) error {
stdout, stdoutPipe := io.Pipe()
go func() {
srv.CmdWait(nil, stdoutPipe, container.Id)
}()
if _, err := bufio.NewReader(stdout).ReadString('\n'); err != nil {
return err
}
// Cleanup pipes
return closeWrap(stdout, stdoutPipe)
}
// TestRunHostname checks that 'docker run -h' correctly sets a custom hostname
func TestRunHostname(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
srv := &Server{runtime: runtime}
stdin, _ := io.Pipe()
stdout, stdoutPipe := io.Pipe()
c := make(chan struct{})
go func() {
if err := srv.CmdRun(stdin, rcli.NewDockerLocalConn(stdoutPipe), "-h", "foobar", GetTestImage(runtime).Id, "hostname"); err != nil {
t.Fatal(err)
}
close(c)
}()
cmdOutput, err := bufio.NewReader(stdout).ReadString('\n')
if err != nil {
t.Fatal(err)
}
if cmdOutput != "foobar\n" {
t.Fatalf("'hostname' should display '%s', not '%s'", "foobar\n", cmdOutput)
}
setTimeout(t, "CmdRun timed out", 2*time.Second, func() {
<-c
cmdWait(srv, srv.runtime.List()[0])
})
}
func TestRunExit(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
srv := &Server{runtime: runtime}
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
c1 := make(chan struct{})
go func() {
srv.CmdRun(stdin, rcli.NewDockerLocalConn(stdoutPipe), "-i", GetTestImage(runtime).Id, "/bin/cat")
close(c1)
}()
setTimeout(t, "Read/Write assertion timed out", 2*time.Second, func() {
if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 15); err != nil {
t.Fatal(err)
}
})
container := runtime.List()[0]
// Closing /bin/cat stdin, expect it to exit
p, err := container.StdinPipe()
if err != nil {
t.Fatal(err)
}
if err := p.Close(); err != nil {
t.Fatal(err)
}
// as the process exited, CmdRun must finish and unblock. Wait for it
setTimeout(t, "Waiting for CmdRun timed out", 2*time.Second, func() {
<-c1
cmdWait(srv, container)
})
// Make sure that the client has been disconnected
setTimeout(t, "The client should have been disconnected once the remote process exited.", 2*time.Second, func() {
// Expecting pipe i/o error, just check that read does not block
stdin.Read([]byte{})
})
// Cleanup pipes
if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil {
t.Fatal(err)
}
}
// Expected behaviour: the process dies when the client disconnects
func TestRunDisconnect(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
srv := &Server{runtime: runtime}
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
c1 := make(chan struct{})
go func() {
// We're simulating a disconnect so the return value doesn't matter. What matters is the
// fact that CmdRun returns.
srv.CmdRun(stdin, rcli.NewDockerLocalConn(stdoutPipe), "-i", GetTestImage(runtime).Id, "/bin/cat")
close(c1)
}()
setTimeout(t, "Read/Write assertion timed out", 2*time.Second, func() {
if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 15); err != nil {
t.Fatal(err)
}
})
// Close pipes (simulate disconnect)
if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil {
t.Fatal(err)
}
// As the pipes are closed, we expect the process to die,
// therefore CmdRun to unblock. Wait for CmdRun
setTimeout(t, "Waiting for CmdRun timed out", 2*time.Second, func() {
<-c1
})
// Client disconnect after run -i should cause stdin to be closed, which should
// cause /bin/cat to exit.
setTimeout(t, "Waiting for /bin/cat to exit timed out", 2*time.Second, func() {
container := runtime.List()[0]
container.Wait()
if container.State.Running {
t.Fatalf("/bin/cat is still running after closing stdin")
}
})
}
// Expected behaviour: the process dies when the client disconnects
func TestRunDisconnectTty(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
srv := &Server{runtime: runtime}
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
c1 := make(chan struct{})
go func() {
// We're simulating a disconnect so the return value doesn't matter. What matters is the
// fact that CmdRun returns.
srv.CmdRun(stdin, rcli.NewDockerLocalConn(stdoutPipe), "-i", "-t", GetTestImage(runtime).Id, "/bin/cat")
close(c1)
}()
setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() {
for {
// Client disconnect after run -i should keep stdin out in TTY mode
l := runtime.List()
if len(l) == 1 && l[0].State.Running {
break
}
time.Sleep(10 * time.Millisecond)
}
})
// Client disconnect after run -i should keep stdin out in TTY mode
container := runtime.List()[0]
setTimeout(t, "Read/Write assertion timed out", 2*time.Second, func() {
if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 15); err != nil {
t.Fatal(err)
}
})
// Close pipes (simulate disconnect)
if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil {
t.Fatal(err)
}
// In tty mode, we expect the process to stay alive even after client's stdin closes.
// Do not wait for run to finish
// Give the monitor some time to do its thing
container.WaitTimeout(500 * time.Millisecond)
if !container.State.Running {
t.Fatalf("/bin/cat should still be running after closing stdin (tty mode)")
}
}
// TestAttachStdin checks attaching to stdin without stdout and stderr.
// 'docker run -i -a stdin' should send the client's stdin to the command,
// then detach from it and print the container id.
func TestRunAttachStdin(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
srv := &Server{runtime: runtime}
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
ch := make(chan struct{})
go func() {
srv.CmdRun(stdin, rcli.NewDockerLocalConn(stdoutPipe), "-i", "-a", "stdin", GetTestImage(runtime).Id, "sh", "-c", "echo hello; cat")
close(ch)
}()
// Send input to the command, close stdin
setTimeout(t, "Write timed out", 2*time.Second, func() {
if _, err := stdinPipe.Write([]byte("hi there\n")); err != nil {
t.Fatal(err)
}
if err := stdinPipe.Close(); err != nil {
t.Fatal(err)
}
})
container := runtime.List()[0]
// Check output
cmdOutput, err := bufio.NewReader(stdout).ReadString('\n')
if err != nil {
t.Fatal(err)
}
if cmdOutput != container.ShortId()+"\n" {
t.Fatalf("Wrong output: should be '%s', not '%s'\n", container.ShortId()+"\n", cmdOutput)
}
// wait for CmdRun to return
setTimeout(t, "Waiting for CmdRun timed out", 2*time.Second, func() {
<-ch
})
setTimeout(t, "Waiting for command to exit timed out", 2*time.Second, func() {
container.Wait()
})
// Check logs
if cmdLogs, err := container.ReadLog("stdout"); err != nil {
t.Fatal(err)
} else {
if output, err := ioutil.ReadAll(cmdLogs); err != nil {
t.Fatal(err)
} else {
expectedLog := "hello\nhi there\n"
if string(output) != expectedLog {
t.Fatalf("Unexpected logs: should be '%s', not '%s'\n", expectedLog, output)
}
}
}
}
// Expected behaviour, the process stays alive when the client disconnects
func TestAttachDisconnect(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
srv := &Server{runtime: runtime}
container, err := runtime.Create(
&Config{
Image: GetTestImage(runtime).Id,
Memory: 33554432,
Cmd: []string{"/bin/cat"},
OpenStdin: true,
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
// Start the process
if err := container.Start(); err != nil {
t.Fatal(err)
}
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
// Attach to it
c1 := make(chan struct{})
go func() {
// We're simulating a disconnect so the return value doesn't matter. What matters is the
// fact that CmdAttach returns.
srv.CmdAttach(stdin, rcli.NewDockerLocalConn(stdoutPipe), container.Id)
close(c1)
}()
setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() {
if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 15); err != nil {
t.Fatal(err)
}
})
// Close pipes (client disconnects)
if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil {
t.Fatal(err)
}
// Wait for attach to finish; the client disconnected, therefore Attach finished its job
setTimeout(t, "Waiting for CmdAttach timed out", 2*time.Second, func() {
<-c1
})
// We closed stdin, expect /bin/cat to still be running
// Wait a little bit to make sure container.monitor() did its thing
err = container.WaitTimeout(500 * time.Millisecond)
if err == nil || !container.State.Running {
t.Fatalf("/bin/cat is not running after closing stdin")
}
// Try to avoid the timeout in destroy. Best effort, don't check error
cStdin, _ := container.StdinPipe()
cStdin.Close()
}

816
container.go Normal file

@@ -0,0 +1,816 @@
package docker
import (
"encoding/json"
"fmt"
"github.com/dotcloud/docker/rcli"
"github.com/kr/pty"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"path"
"sort"
"strconv"
"strings"
"syscall"
"time"
)
type Container struct {
root string
Id string
Created time.Time
Path string
Args []string
Config *Config
State State
Image string
network *NetworkInterface
NetworkSettings *NetworkSettings
SysInitPath string
ResolvConfPath string
cmd *exec.Cmd
stdout *writeBroadcaster
stderr *writeBroadcaster
stdin io.ReadCloser
stdinPipe io.WriteCloser
ptyMaster io.Closer
runtime *Runtime
waitLock chan struct{}
}
type Config struct {
Hostname string
User string
Memory int64 // Memory limit (in bytes)
MemorySwap int64 // Total memory usage (memory + swap); set `-1' to disable swap
AttachStdin bool
AttachStdout bool
AttachStderr bool
PortSpecs []string
Tty bool // Attach standard streams to a tty, including stdin if it is not closed.
OpenStdin bool // Open stdin
StdinOnce bool // If true, close stdin after the 1 attached client disconnects.
Env []string
Cmd []string
Dns []string
Image string // Name of the image as it was passed by the operator (eg. could be symbolic)
}
func ParseRun(args []string, stdout io.Writer, capabilities *Capabilities) (*Config, error) {
cmd := rcli.Subcmd(stdout, "run", "[OPTIONS] IMAGE COMMAND [ARG...]", "Run a command in a new container")
if len(args) > 0 && args[0] != "--help" {
cmd.SetOutput(ioutil.Discard)
}
flHostname := cmd.String("h", "", "Container host name")
flUser := cmd.String("u", "", "Username or UID")
flDetach := cmd.Bool("d", false, "Detached mode: leave the container running in the background")
flAttach := NewAttachOpts()
cmd.Var(flAttach, "a", "Attach to stdin, stdout or stderr.")
flStdin := cmd.Bool("i", false, "Keep stdin open even if not attached")
flTty := cmd.Bool("t", false, "Allocate a pseudo-tty")
flMemory := cmd.Int64("m", 0, "Memory limit (in bytes)")
var flPorts ListOpts
cmd.Var(&flPorts, "p", "Expose a container's port to the host (use 'docker port' to see the actual mapping)")
var flEnv ListOpts
cmd.Var(&flEnv, "e", "Set environment variables")
var flDns ListOpts
cmd.Var(&flDns, "dns", "Set custom dns servers")
if err := cmd.Parse(args); err != nil {
return nil, err
}
if *flMemory > 0 && !capabilities.MemoryLimit {
fmt.Fprintf(stdout, "WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
*flMemory = 0
}
if *flDetach && len(flAttach) > 0 {
return nil, fmt.Errorf("Conflicting options: -a and -d")
}
// If neither -d nor -a are set, attach to everything by default
if len(flAttach) == 0 && !*flDetach {
flAttach.Set("stdout")
flAttach.Set("stderr")
if *flStdin {
flAttach.Set("stdin")
}
}
parsedArgs := cmd.Args()
runCmd := []string{}
image := ""
if len(parsedArgs) >= 1 {
image = cmd.Arg(0)
}
if len(parsedArgs) > 1 {
runCmd = parsedArgs[1:]
}
config := &Config{
Hostname: *flHostname,
PortSpecs: flPorts,
User: *flUser,
Tty: *flTty,
OpenStdin: *flStdin,
Memory: *flMemory,
AttachStdin: flAttach.Get("stdin"),
AttachStdout: flAttach.Get("stdout"),
AttachStderr: flAttach.Get("stderr"),
Env: flEnv,
Cmd: runCmd,
Dns: flDns,
Image: image,
}
if *flMemory > 0 && !capabilities.SwapLimit {
fmt.Fprintf(stdout, "WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
config.MemorySwap = -1
}
// When allocating stdin in attached mode, close stdin at client disconnect
if config.OpenStdin && config.AttachStdin {
config.StdinOnce = true
}
return config, nil
}
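For reference, a sketch of how a typical command line flows through ParseRun into a Config (capabilities comes from the runtime; the flag values are hypothetical):

// equivalent of: docker run -i -t -h box -m 33554432 base /bin/sh
config, err := ParseRun(
    []string{"-i", "-t", "-h", "box", "-m", "33554432", "base", "/bin/sh"},
    os.Stdout, capabilities)
// config.Hostname == "box", config.Tty == true, config.OpenStdin == true,
// config.Memory == 33554432, config.Image == "base", config.Cmd == {"/bin/sh"}.
// With neither -d nor -a given, stdout and stderr (and stdin, because of -i)
// are attached by default, so StdinOnce ends up set as well.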
type NetworkSettings struct {
IpAddress string
IpPrefixLen int
Gateway string
Bridge string
PortMapping map[string]string
}
// String returns a human-readable description of the port mapping defined in the settings
func (settings *NetworkSettings) PortMappingHuman() string {
var mapping []string
for private, public := range settings.PortMapping {
mapping = append(mapping, fmt.Sprintf("%s->%s", public, private))
}
sort.Strings(mapping)
return strings.Join(mapping, ", ")
}
// Inject the io.Reader at the given path. Note: do not close the reader
func (container *Container) Inject(file io.Reader, pth string) error {
// Make sure the directory exists
if err := os.MkdirAll(path.Join(container.rwPath(), path.Dir(pth)), 0755); err != nil {
return err
}
// FIXME: Handle permissions/already existing dest
dest, err := os.Create(path.Join(container.rwPath(), pth))
if err != nil {
return err
}
if _, err := io.Copy(dest, file); err != nil {
return err
}
return nil
}
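A minimal usage sketch of Inject, mirroring what the builder's copy step does (the path and content are hypothetical; strings.NewReader supplies the io.Reader):

if err := container.Inject(strings.NewReader("welcome\n"), "/etc/motd"); err != nil {
    log.Fatal(err)
}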
func (container *Container) Cmd() *exec.Cmd {
return container.cmd
}
func (container *Container) When() time.Time {
return container.Created
}
func (container *Container) FromDisk() error {
data, err := ioutil.ReadFile(container.jsonPath())
if err != nil {
return err
}
// Load container settings
if err := json.Unmarshal(data, container); err != nil {
return err
}
return nil
}
func (container *Container) ToDisk() (err error) {
data, err := json.Marshal(container)
if err != nil {
return
}
return ioutil.WriteFile(container.jsonPath(), data, 0666)
}
func (container *Container) generateLXCConfig() error {
fo, err := os.Create(container.lxcConfigPath())
if err != nil {
return err
}
defer fo.Close()
if err := LxcTemplateCompiled.Execute(fo, container); err != nil {
return err
}
return nil
}
func (container *Container) startPty() error {
ptyMaster, ptySlave, err := pty.Open()
if err != nil {
return err
}
container.ptyMaster = ptyMaster
container.cmd.Stdout = ptySlave
container.cmd.Stderr = ptySlave
// Copy the PTYs to our broadcasters
go func() {
defer container.stdout.CloseWriters()
Debugf("[startPty] Begin of stdout pipe")
io.Copy(container.stdout, ptyMaster)
Debugf("[startPty] End of stdout pipe")
}()
// stdin
if container.Config.OpenStdin {
container.cmd.Stdin = ptySlave
container.cmd.SysProcAttr = &syscall.SysProcAttr{Setctty: true, Setsid: true}
go func() {
defer container.stdin.Close()
Debugf("[startPty] Begin of stdin pipe")
io.Copy(ptyMaster, container.stdin)
Debugf("[startPty] End of stdin pipe")
}()
}
if err := container.cmd.Start(); err != nil {
return err
}
ptySlave.Close()
return nil
}
func (container *Container) start() error {
container.cmd.Stdout = container.stdout
container.cmd.Stderr = container.stderr
if container.Config.OpenStdin {
stdin, err := container.cmd.StdinPipe()
if err != nil {
return err
}
go func() {
defer stdin.Close()
Debugf("Begin of stdin pipe [start]")
io.Copy(stdin, container.stdin)
Debugf("End of stdin pipe [start]")
}()
}
return container.cmd.Start()
}
func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, stdout io.Writer, stderr io.Writer) chan error {
var cStdout, cStderr io.ReadCloser
var nJobs int
errors := make(chan error, 3)
if stdin != nil && container.Config.OpenStdin {
nJobs += 1
if cStdin, err := container.StdinPipe(); err != nil {
errors <- err
} else {
go func() {
Debugf("[start] attach stdin\n")
defer Debugf("[end] attach stdin\n")
// No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr
if cStdout != nil {
defer cStdout.Close()
}
if cStderr != nil {
defer cStderr.Close()
}
if container.Config.StdinOnce && !container.Config.Tty {
defer cStdin.Close()
}
if container.Config.Tty {
_, err = CopyEscapable(cStdin, stdin)
} else {
_, err = io.Copy(cStdin, stdin)
}
if err != nil {
Debugf("[error] attach stdin: %s\n", err)
}
// Discard error, expecting pipe error
errors <- nil
}()
}
}
if stdout != nil {
nJobs += 1
if p, err := container.StdoutPipe(); err != nil {
errors <- err
} else {
cStdout = p
go func() {
Debugf("[start] attach stdout\n")
defer Debugf("[end] attach stdout\n")
// If we are in StdinOnce mode, then close stdin
if container.Config.StdinOnce {
if stdin != nil {
defer stdin.Close()
}
if stdinCloser != nil {
defer stdinCloser.Close()
}
}
_, err := io.Copy(stdout, cStdout)
if err != nil {
Debugf("[error] attach stdout: %s\n", err)
}
errors <- err
}()
}
}
if stderr != nil {
nJobs += 1
if p, err := container.StderrPipe(); err != nil {
errors <- err
} else {
cStderr = p
go func() {
Debugf("[start] attach stderr\n")
defer Debugf("[end] attach stderr\n")
// If we are in StdinOnce mode, then close stdin
if container.Config.StdinOnce {
if stdin != nil {
defer stdin.Close()
}
if stdinCloser != nil {
defer stdinCloser.Close()
}
}
_, err := io.Copy(stderr, cStderr)
if err != nil {
Debugf("[error] attach stderr: %s\n", err)
}
errors <- err
}()
}
}
return Go(func() error {
if cStdout != nil {
defer cStdout.Close()
}
if cStderr != nil {
defer cStderr.Close()
}
// FIXME: how do we clean up the stdin goroutine without the unwanted side effect
// of closing the passed stdin? Add an intermediary io.Pipe?
for i := 0; i < nJobs; i += 1 {
Debugf("Waiting for job %d/%d\n", i+1, nJobs)
if err := <-errors; err != nil {
Debugf("Job %d returned error %s. Aborting all jobs\n", i+1, err)
return err
}
Debugf("Job %d completed successfully\n", i+1)
}
Debugf("All jobs completed successfully\n")
return nil
})
}
func (container *Container) Start() error {
container.State.lock()
defer container.State.unlock()
if container.State.Running {
return fmt.Errorf("The container %s is already running.", container.Id)
}
if err := container.EnsureMounted(); err != nil {
return err
}
if err := container.allocateNetwork(); err != nil {
return err
}
// Make sure the config is compatible with the current kernel
if container.Config.Memory > 0 && !container.runtime.capabilities.MemoryLimit {
log.Printf("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
container.Config.Memory = 0
}
if container.Config.Memory > 0 && !container.runtime.capabilities.SwapLimit {
log.Printf("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
container.Config.MemorySwap = -1
}
if err := container.generateLXCConfig(); err != nil {
return err
}
params := []string{
"-n", container.Id,
"-f", container.lxcConfigPath(),
"--",
"/sbin/init",
}
// Networking
params = append(params, "-g", container.network.Gateway.String())
// User
if container.Config.User != "" {
params = append(params, "-u", container.Config.User)
}
if container.Config.Tty {
params = append(params, "-e", "TERM=xterm")
}
// Setup environment
params = append(params,
"-e", "HOME=/",
"-e", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
)
for _, elem := range container.Config.Env {
params = append(params, "-e", elem)
}
// Program
params = append(params, "--", container.Path)
params = append(params, container.Args...)
container.cmd = exec.Command("lxc-start", params...)
// Setup logging of stdout and stderr to disk
if err := container.runtime.LogToDisk(container.stdout, container.logPath("stdout")); err != nil {
return err
}
if err := container.runtime.LogToDisk(container.stderr, container.logPath("stderr")); err != nil {
return err
}
var err error
if container.Config.Tty {
err = container.startPty()
} else {
err = container.start()
}
if err != nil {
return err
}
// FIXME: save state on disk *first*, then converge
// this way disk state is used as a journal, eg. we can restore after crash etc.
container.State.setRunning(container.cmd.Process.Pid)
// Init the lock
container.waitLock = make(chan struct{})
container.ToDisk()
go container.monitor()
return nil
}
func (container *Container) Run() error {
if err := container.Start(); err != nil {
return err
}
container.Wait()
return nil
}
func (container *Container) Output() (output []byte, err error) {
pipe, err := container.StdoutPipe()
if err != nil {
return nil, err
}
defer pipe.Close()
if err := container.Start(); err != nil {
return nil, err
}
output, err = ioutil.ReadAll(pipe)
container.Wait()
return output, err
}
// StdinPipe() returns a pipe connected to the standard input of the container's
// active process.
//
func (container *Container) StdinPipe() (io.WriteCloser, error) {
return container.stdinPipe, nil
}
func (container *Container) StdoutPipe() (io.ReadCloser, error) {
reader, writer := io.Pipe()
container.stdout.AddWriter(writer)
return newBufReader(reader), nil
}
func (container *Container) StderrPipe() (io.ReadCloser, error) {
reader, writer := io.Pipe()
container.stderr.AddWriter(writer)
return newBufReader(reader), nil
}
func (container *Container) allocateNetwork() error {
iface, err := container.runtime.networkManager.Allocate()
if err != nil {
return err
}
container.NetworkSettings.PortMapping = make(map[string]string)
for _, spec := range container.Config.PortSpecs {
if nat, err := iface.AllocatePort(spec); err != nil {
iface.Release()
return err
} else {
container.NetworkSettings.PortMapping[strconv.Itoa(nat.Backend)] = strconv.Itoa(nat.Frontend)
}
}
container.network = iface
container.NetworkSettings.Bridge = container.runtime.networkManager.bridgeIface
container.NetworkSettings.IpAddress = iface.IPNet.IP.String()
container.NetworkSettings.IpPrefixLen, _ = iface.IPNet.Mask.Size()
container.NetworkSettings.Gateway = iface.Gateway.String()
return nil
}
func (container *Container) releaseNetwork() {
container.network.Release()
container.network = nil
container.NetworkSettings = &NetworkSettings{}
}
// FIXME: replace this with a control socket within docker-init
func (container *Container) waitLxc() error {
for {
if output, err := exec.Command("lxc-info", "-n", container.Id).CombinedOutput(); err != nil {
return err
} else {
if !strings.Contains(string(output), "RUNNING") {
return nil
}
}
time.Sleep(500 * time.Millisecond)
}
return nil
}
func (container *Container) monitor() {
// Wait for the program to exit
Debugf("Waiting for process")
// If the command does not exists, try to wait via lxc
if container.cmd == nil {
if err := container.waitLxc(); err != nil {
Debugf("%s: Process: %s", container.Id, err)
}
} else {
if err := container.cmd.Wait(); err != nil {
// Discard the error as any signals or non 0 returns will generate an error
Debugf("%s: Process: %s", container.Id, err)
}
}
Debugf("Process finished")
var exitCode int = -1
if container.cmd != nil {
exitCode = container.cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
}
// Cleanup
container.releaseNetwork()
if container.Config.OpenStdin {
if err := container.stdin.Close(); err != nil {
Debugf("%s: Error close stdin: %s", container.Id, err)
}
}
if err := container.stdout.CloseWriters(); err != nil {
Debugf("%s: Error close stdout: %s", container.Id, err)
}
if err := container.stderr.CloseWriters(); err != nil {
Debugf("%s: Error close stderr: %s", container.Id, err)
}
if container.ptyMaster != nil {
if err := container.ptyMaster.Close(); err != nil {
Debugf("%s: Error closing Pty master: %s", container.Id, err)
}
}
if err := container.Unmount(); err != nil {
log.Printf("%v: Failed to umount filesystem: %v", container.Id, err)
}
// Re-create a brand new stdin pipe once the container exited
if container.Config.OpenStdin {
container.stdin, container.stdinPipe = io.Pipe()
}
// Report status back
container.State.setStopped(exitCode)
// Release the lock
close(container.waitLock)
if err := container.ToDisk(); err != nil {
// FIXME: there is a race condition here which causes this to fail during the unit tests.
// If another goroutine was waiting for Wait() to return before removing the container's root
// from the filesystem... At this point it may already have done so.
// This is because State.setStopped() has already been called, and has caused Wait()
// to return.
// FIXME: why are we serializing running state to disk in the first place?
//log.Printf("%s: Failed to dump configuration to the disk: %s", container.Id, err)
}
}
func (container *Container) kill() error {
if !container.State.Running {
return nil
}
// Sending SIGKILL to the process via lxc
output, err := exec.Command("lxc-kill", "-n", container.Id, "9").CombinedOutput()
if err != nil {
log.Printf("error killing container %s (%s, %s)", container.Id, output, err)
}
// 2. Wait for the process to die; as a last resort, try to kill the process directly
if err := container.WaitTimeout(10 * time.Second); err != nil {
if container.cmd == nil {
return fmt.Errorf("lxc-kill failed, impossible to kill the container %s", container.Id)
}
log.Printf("Container %s failed to exit within 10 seconds of lxc SIGKILL - trying direct SIGKILL", container.Id)
if err := container.cmd.Process.Kill(); err != nil {
return err
}
}
// Wait for the container to be actually stopped
container.Wait()
return nil
}
func (container *Container) Kill() error {
container.State.lock()
defer container.State.unlock()
if !container.State.Running {
return nil
}
return container.kill()
}
func (container *Container) Stop(seconds int) error {
container.State.lock()
defer container.State.unlock()
if !container.State.Running {
return nil
}
// 1. Send a SIGTERM
if output, err := exec.Command("lxc-kill", "-n", container.Id, "15").CombinedOutput(); err != nil {
log.Print(string(output))
log.Print("Failed to send SIGTERM to the process, force killing")
if err := container.kill(); err != nil {
return err
}
}
// 2. Wait for the process to exit on its own
if err := container.WaitTimeout(time.Duration(seconds) * time.Second); err != nil {
log.Printf("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.Id, seconds)
if err := container.kill(); err != nil {
return err
}
}
return nil
}
func (container *Container) Restart(seconds int) error {
if err := container.Stop(seconds); err != nil {
return err
}
if err := container.Start(); err != nil {
return err
}
return nil
}
// Wait blocks until the container stops running, then returns its exit code.
func (container *Container) Wait() int {
<-container.waitLock
return container.State.ExitCode
}
func (container *Container) ExportRw() (Archive, error) {
return Tar(container.rwPath(), Uncompressed)
}
func (container *Container) Export() (Archive, error) {
if err := container.EnsureMounted(); err != nil {
return nil, err
}
return Tar(container.RootfsPath(), Uncompressed)
}
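// WaitTimeout blocks until the container stops, or returns an error if the
// timeout elapses first. Note that when the timeout fires, the helper
// goroutine below stays blocked on its channel send until the container
// eventually exits.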
func (container *Container) WaitTimeout(timeout time.Duration) error {
done := make(chan bool)
go func() {
container.Wait()
done <- true
}()
select {
case <-time.After(timeout):
return fmt.Errorf("Timed Out")
case <-done:
return nil
}
panic("unreachable")
}
func (container *Container) EnsureMounted() error {
if mounted, err := container.Mounted(); err != nil {
return err
} else if mounted {
return nil
}
return container.Mount()
}
func (container *Container) Mount() error {
image, err := container.GetImage()
if err != nil {
return err
}
return image.Mount(container.RootfsPath(), container.rwPath())
}
func (container *Container) Changes() ([]Change, error) {
image, err := container.GetImage()
if err != nil {
return nil, err
}
return image.Changes(container.rwPath())
}
func (container *Container) GetImage() (*Image, error) {
if container.runtime == nil {
return nil, fmt.Errorf("Can't get image of unregistered container")
}
return container.runtime.graph.Get(container.Image)
}
func (container *Container) Mounted() (bool, error) {
return Mounted(container.RootfsPath())
}
func (container *Container) Unmount() error {
return Unmount(container.RootfsPath())
}
// ShortId returns a shorthand version of the container's id for convenience.
// A collision with other container shorthands is very unlikely, but possible.
// In case of a collision a lookup with Runtime.Get() will fail, and the caller
// will need to use a longer prefix, or the full-length container Id.
func (container *Container) ShortId() string {
return TruncateId(container.Id)
}
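// Illustrative sketch (not part of the original source): a caller that
// anticipates a shorthand collision can retry with the full-length Id:
//
//	if c := runtime.Get(container.ShortId()); c == nil {
//		c = runtime.Get(container.Id)
//	}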
func (container *Container) logPath(name string) string {
return path.Join(container.root, fmt.Sprintf("%s-%s.log", container.Id, name))
}
func (container *Container) ReadLog(name string) (io.Reader, error) {
return os.Open(container.logPath(name))
}
func (container *Container) jsonPath() string {
return path.Join(container.root, "config.json")
}
func (container *Container) lxcConfigPath() string {
return path.Join(container.root, "config.lxc")
}
// This method must be exported to be used from the lxc template
func (container *Container) RootfsPath() string {
return path.Join(container.root, "rootfs")
}
func (container *Container) rwPath() string {
return path.Join(container.root, "rw")
}
func validateId(id string) error {
if id == "" {
return fmt.Errorf("Invalid empty id")
}
return nil
}

container_test.go (1084 lines, new file)
File diff suppressed because it is too large

@@ -1 +0,0 @@
Tianon Gravi <admwiggin@gmail.com> (@tianon)


@@ -1,146 +0,0 @@
#!/usr/bin/env bash
set -e
# bits of this were adapted from lxc-checkconfig
# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in
: ${CONFIG:=/proc/config.gz}
if ! command -v zgrep &> /dev/null; then
zgrep() {
zcat "$2" | grep "$1"
}
fi
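# Check whether a kernel config option is built in (=y) or a module (=m)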
is_set() {
zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null
}
# see http://en.wikipedia.org/wiki/ANSI_escape_code#Colors
declare -A colors=(
[black]=30
[red]=31
[green]=32
[yellow]=33
[blue]=34
[magenta]=35
[cyan]=36
[white]=37
)
color() {
color=()
if [ "$1" = 'bold' ]; then
color+=( '1' )
shift
fi
if [ $# -gt 0 ] && [ "${colors[$1]}" ]; then
color+=( "${colors[$1]}" )
fi
local IFS=';'
echo -en '\033['"${color[*]}"m
}
wrap_color() {
text="$1"
shift
color "$@"
echo -n "$text"
color reset
echo
}
wrap_good() {
echo "$(wrap_color "$1" white): $(wrap_color "$2" green)"
}
wrap_bad() {
echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)"
}
wrap_warning() {
wrap_color >&2 "$*" red
}
check_flag() {
if is_set "$1"; then
wrap_good "CONFIG_$1" 'enabled'
else
wrap_bad "CONFIG_$1" 'missing'
fi
}
check_flags() {
for flag in "$@"; do
echo "- $(check_flag "$flag")"
done
}
if [ ! -e "$CONFIG" ]; then
wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config..."
for tryConfig in \
'/proc/config.gz' \
"/boot/config-$(uname -r)" \
'/usr/src/linux/.config' \
; do
if [ -e "$tryConfig" ]; then
CONFIG="$tryConfig"
break
fi
done
if [ ! -e "$CONFIG" ]; then
wrap_warning "error: cannot find kernel config"
wrap_warning " try running this script again, specifying the kernel config:"
wrap_warning " CONFIG=/path/to/kernel/.config $0"
exit 1
fi
fi
wrap_color "info: reading kernel config from $CONFIG ..." white
echo
echo 'Generally Necessary:'
echo -n '- '
cgroupCpuDir="$(awk '/[, ]cpu([, ]|$)/ && $8 == "cgroup" { print $5 }' /proc/$$/mountinfo | head -n1)"
cgroupDir="$(dirname "$cgroupCpuDir")"
if [ -d "$cgroupDir/cpu" ]; then
echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]"
else
echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupCpuDir]"
echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)"
fi
flags=(
NAMESPACES {NET,PID,IPC,UTS}_NS
DEVPTS_MULTIPLE_INSTANCES
CGROUPS CGROUP_DEVICE
MACVLAN VETH BRIDGE
IP_NF_TARGET_MASQUERADE NETFILTER_XT_MATCH_{ADDRTYPE,CONNTRACK}
NF_NAT NF_NAT_NEEDED
)
check_flags "${flags[@]}"
echo
echo 'Optional Features:'
flags=(
MEMCG_SWAP
RESOURCE_COUNTERS
)
check_flags "${flags[@]}"
echo '- Storage Drivers:'
{
echo '- "'$(wrap_color 'aufs' blue)'":'
check_flags AUFS_FS | sed 's/^/ /'
if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then
echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)"
fi
echo '- "'$(wrap_color 'btrfs' blue)'":'
check_flags BTRFS_FS | sed 's/^/ /'
echo '- "'$(wrap_color 'devicemapper' blue)'":'
check_flags BLK_DEV_DM DM_THIN_PROVISIONING EXT4_FS | sed 's/^/ /'
} | sed 's/^/ /'
echo
#echo 'Potential Future Features:'
#check_flags USER_NS
#echo


@@ -1,689 +0,0 @@
#!bash
#
# bash completion file for core docker commands
#
# This script provides supports completion of:
# - commands and their options
# - container ids and names
# - image repos and tags
# - filepaths
#
# To enable the completions either:
# - place this file in /etc/bash_completion.d
# or
# - copy this file and add the line below to your .bashrc after
# bash completion features are loaded
# . docker.bash
#
# Note:
# Currently, the completions will not work if the docker daemon is not
# bound to the default communication port/socket.
# If the docker daemon is using a unix socket for communication, your user
# must have access to the socket for the completions to function correctly.
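# Run docker while discarding stderr, so daemon connection errors do not
# corrupt the generated completions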
__docker_q() {
docker 2>/dev/null "$@"
}
__docker_containers_all()
{
local containers="$( __docker_q ps -a -q )"
local names="$( __docker_q inspect --format '{{.Name}}' $containers | sed 's,^/,,' )"
COMPREPLY=( $( compgen -W "$names $containers" -- "$cur" ) )
}
__docker_containers_running()
{
local containers="$( __docker_q ps -q )"
local names="$( __docker_q inspect --format '{{.Name}}' $containers | sed 's,^/,,' )"
COMPREPLY=( $( compgen -W "$names $containers" -- "$cur" ) )
}
__docker_containers_stopped()
{
local containers="$( { __docker_q ps -a -q; __docker_q ps -q; } | sort | uniq -u )"
local names="$( __docker_q inspect --format '{{.Name}}' $containers | sed 's,^/,,' )"
COMPREPLY=( $( compgen -W "$names $containers" -- "$cur" ) )
}
__docker_image_repos()
{
local repos="$( __docker_q images | awk 'NR>1{print $1}' | grep -v '^<none>$' )"
COMPREPLY=( $( compgen -W "$repos" -- "$cur" ) )
}
__docker_image_repos_and_tags()
{
local repos="$( __docker_q images | awk 'NR>1{print $1}' | grep -v '^<none>$' )"
local images="$( __docker_q images | awk 'NR>1{print $1":"$2}' | grep -v '^<none>:' )"
COMPREPLY=( $( compgen -W "$repos $images" -- "$cur" ) )
__ltrim_colon_completions "$cur"
}
__docker_image_repos_and_tags_and_ids()
{
local repos="$( __docker_q images | awk 'NR>1{print $1}' | grep -v '^<none>$' )"
local images="$( __docker_q images | awk 'NR>1{print $1":"$2}' | grep -v '^<none>:' )"
local ids="$( __docker_q images -a -q )"
COMPREPLY=( $( compgen -W "$repos $images $ids" -- "$cur" ) )
__ltrim_colon_completions "$cur"
}
__docker_containers_and_images()
{
local containers="$( __docker_q ps -a -q )"
local names="$( __docker_q inspect --format '{{.Name}}' $containers | sed 's,^/,,' )"
local repos="$( __docker_q images | awk 'NR>1{print $1}' | grep -v '^<none>$' )"
local images="$( __docker_q images | awk 'NR>1{print $1":"$2}' | grep -v '^<none>:' )"
local ids="$( __docker_q images -a -q )"
COMPREPLY=( $( compgen -W "$containers $names $repos $images $ids" -- "$cur" ) )
__ltrim_colon_completions "$cur"
}
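# Return the index of the first non-flag word following the subcommand.
# $1 is an optional '|'-separated case pattern of flags that take an
# argument; a matching flag consumes the following word as well.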
__docker_pos_first_nonflag()
{
local argument_flags=$1
local counter=$cpos
while [ $counter -le $cword ]; do
if [ -n "$argument_flags" ] && eval "case '${words[$counter]}' in $argument_flags) true ;; *) false ;; esac"; then
(( counter++ ))
else
case "${words[$counter]}" in
-*)
;;
*)
break
;;
esac
fi
(( counter++ ))
done
echo $counter
}
_docker_docker()
{
case "$prev" in
-H)
return
;;
*)
;;
esac
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "-H" -- "$cur" ) )
;;
*)
COMPREPLY=( $( compgen -W "$commands help" -- "$cur" ) )
;;
esac
}
_docker_attach()
{
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "--no-stdin --sig-proxy" -- "$cur" ) )
;;
*)
local counter="$(__docker_pos_first_nonflag)"
if [ $cword -eq $counter ]; then
__docker_containers_running
fi
;;
esac
}
_docker_build()
{
case "$prev" in
-t|--tag)
__docker_image_repos_and_tags
return
;;
*)
;;
esac
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "-t --tag -q --quiet --no-cache --rm" -- "$cur" ) )
;;
*)
local counter="$(__docker_pos_first_nonflag '-t|--tag')"
if [ $cword -eq $counter ]; then
_filedir
fi
;;
esac
}
_docker_commit()
{
case "$prev" in
-m|--message|-a|--author|--run)
return
;;
*)
;;
esac
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "-m --message -a --author --run" -- "$cur" ) )
;;
*)
local counter=$(__docker_pos_first_nonflag '-m|--message|-a|--author|--run')
if [ $cword -eq $counter ]; then
__docker_containers_all
return
fi
(( counter++ ))
if [ $cword -eq $counter ]; then
__docker_image_repos_and_tags
return
fi
;;
esac
}
_docker_cp()
{
local counter=$(__docker_pos_first_nonflag)
if [ $cword -eq $counter ]; then
case "$cur" in
*:)
return
;;
*)
__docker_containers_all
COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) )
compopt -o nospace
return
;;
esac
fi
(( counter++ ))
if [ $cword -eq $counter ]; then
_filedir
return
fi
}
_docker_diff()
{
local counter=$(__docker_pos_first_nonflag)
if [ $cword -eq $counter ]; then
__docker_containers_all
fi
}
_docker_events()
{
case "$prev" in
--since)
return
;;
*)
;;
esac
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "--since" -- "$cur" ) )
;;
*)
;;
esac
}
_docker_export()
{
local counter=$(__docker_pos_first_nonflag)
if [ $cword -eq $counter ]; then
__docker_containers_all
fi
}
_docker_help()
{
local counter=$(__docker_pos_first_nonflag)
if [ $cword -eq $counter ]; then
COMPREPLY=( $( compgen -W "$commands" -- "$cur" ) )
fi
}
_docker_history()
{
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "-q --quiet --no-trunc" -- "$cur" ) )
;;
*)
local counter=$(__docker_pos_first_nonflag)
if [ $cword -eq $counter ]; then
__docker_image_repos_and_tags_and_ids
fi
;;
esac
}
_docker_images()
{
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "-q --quiet -a --all --no-trunc -v --viz -t --tree" -- "$cur" ) )
;;
*)
local counter=$(__docker_pos_first_nonflag)
if [ $cword -eq $counter ]; then
__docker_image_repos
fi
;;
esac
}
_docker_import()
{
local counter=$(__docker_pos_first_nonflag)
if [ $cword -eq $counter ]; then
return
fi
(( counter++ ))
if [ $cword -eq $counter ]; then
__docker_image_repos_and_tags
return
fi
}
_docker_info()
{
return
}
_docker_insert()
{
local counter=$(__docker_pos_first_nonflag)
if [ $cword -eq $counter ]; then
__docker_image_repos_and_tags_and_ids
fi
}
_docker_inspect()
{
case "$prev" in
-f|--format)
return
;;
*)
;;
esac
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "-f --format" -- "$cur" ) )
;;
*)
__docker_containers_and_images
;;
esac
}
_docker_kill()
{
__docker_containers_running
}
_docker_load()
{
return
}
_docker_login()
{
case "$prev" in
-u|--username|-p|--password|-e|--email)
return
;;
*)
;;
esac
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "-u --username -p --password -e --email" -- "$cur" ) )
;;
*)
;;
esac
}
_docker_logs()
{
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "-f --follow" -- "$cur" ) )
;;
*)
local counter=$(__docker_pos_first_nonflag)
if [ $cword -eq $counter ]; then
__docker_containers_all
fi
;;
esac
}
_docker_port()
{
local counter=$(__docker_pos_first_nonflag)
if [ $cword -eq $counter ]; then
__docker_containers_all
fi
}
_docker_ps()
{
case "$prev" in
--since|--before)
__docker_containers_all
;;
-n)
return
;;
*)
;;
esac
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "-q --quiet -s --size -a --all --no-trunc -l --latest --since --before -n" -- "$cur" ) )
;;
*)
;;
esac
}
_docker_pull()
{
case "$prev" in
-t|--tag)
return
;;
*)
;;
esac
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "-t --tag" -- "$cur" ) )
;;
*)
local counter=$(__docker_pos_first_nonflag '-t|--tag')
if [ $cword -eq $counter ]; then
__docker_image_repos_and_tags
fi
;;
esac
}
_docker_push()
{
local counter=$(__docker_pos_first_nonflag)
if [ $cword -eq $counter ]; then
__docker_image_repos
# TODO replace this with __docker_image_repos_and_tags
# see https://github.com/dotcloud/docker/issues/3411
fi
}
_docker_restart()
{
case "$prev" in
-t|--time)
return
;;
*)
;;
esac
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "-t --time" -- "$cur" ) )
;;
*)
__docker_containers_all
;;
esac
}
_docker_rm()
{
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "-v --volumes -l --link" -- "$cur" ) )
;;
*)
__docker_containers_stopped
;;
esac
}
_docker_rmi()
{
__docker_image_repos_and_tags_and_ids
}
_docker_run()
{
case "$prev" in
--cidfile)
_filedir
;;
--volumes-from)
__docker_containers_all
;;
-v|--volume)
# TODO something magical with colons and _filedir ?
return
;;
-e|--env)
COMPREPLY=( $( compgen -e -- "$cur" ) )
return
;;
--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf)
return
;;
*)
;;
esac
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "--rm -d --detach -n --networking --privileged -P --publish-all -i --interactive -t --tty --cidfile --entrypoint -h --hostname -m --memory -u --user -w --workdir -c --cpu-shares --sig-proxy --name -a --attach -v --volume --link -e --env -p --publish --expose --dns --volumes-from --lxc-conf" -- "$cur" ) )
;;
*)
local counter=$(__docker_pos_first_nonflag '--cidfile|--volumes-from|-v|--volume|-e|--env|--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf')
if [ $cword -eq $counter ]; then
__docker_image_repos_and_tags_and_ids
fi
;;
esac
}
_docker_save()
{
local counter=$(__docker_pos_first_nonflag)
if [ $cword -eq $counter ]; then
__docker_image_repos_and_tags_and_ids
fi
}
_docker_search()
{
case "$prev" in
-s|--stars)
return
;;
*)
;;
esac
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "--no-trunc -t --trusted -s --stars" -- "$cur" ) )
;;
*)
;;
esac
}
_docker_start()
{
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "-a --attach -i --interactive" -- "$cur" ) )
;;
*)
__docker_containers_stopped
;;
esac
}
_docker_stop()
{
case "$prev" in
-t|--time)
return
;;
*)
;;
esac
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "-t --time" -- "$cur" ) )
;;
*)
__docker_containers_running
;;
esac
}
_docker_tag()
{
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "-f --force" -- "$cur" ) )
;;
*)
local counter=$(__docker_pos_first_nonflag)
if [ $cword -eq $counter ]; then
__docker_image_repos_and_tags
return
fi
(( counter++ ))
if [ $cword -eq $counter ]; then
__docker_image_repos_and_tags
return
fi
;;
esac
}
_docker_top()
{
local counter=$(__docker_pos_first_nonflag)
if [ $cword -eq $counter ]; then
__docker_containers_running
fi
}
_docker_version()
{
return
}
_docker_wait()
{
__docker_containers_all
}
_docker()
{
local commands="
attach
build
commit
cp
diff
events
export
history
images
import
info
insert
inspect
kill
load
login
logs
port
ps
pull
push
restart
rm
rmi
run
save
search
start
stop
tag
top
version
wait
"
COMPREPLY=()
local cur prev words cword
_get_comp_words_by_ref -n : cur prev words cword
local command='docker'
local counter=1
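# Locate the subcommand by walking the words, skipping the global -H flag
# together with its argument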
while [ $counter -lt $cword ]; do
case "${words[$counter]}" in
-H)
(( counter++ ))
;;
-*)
;;
*)
command="${words[$counter]}"
cpos=$counter
(( cpos++ ))
break
;;
esac
(( counter++ ))
done
local completions_func=_docker_${command}
declare -F $completions_func >/dev/null && $completions_func
return 0
}
complete -F _docker docker


@@ -1,261 +0,0 @@
# docker.fish - docker completions for fish shell
#
# This file is generated by gen_docker_fish_completions.py from:
# https://github.com/barnybug/docker-fish-completion
#
# To install the completions:
# mkdir -p ~/.config/fish/completions
# cp docker.fish ~/.config/fish/completions
#
# Completion supported:
# - parameters
# - commands
# - containers
# - images
# - repositories
function __fish_docker_no_subcommand --description 'Test if docker has yet to be given the subcommand'
for i in (commandline -opc)
if contains -- $i attach build commit cp diff events export history images import info insert inspect kill load login logs port ps pull push restart rm rmi run save search start stop tag top version wait
return 1
end
end
return 0
end
function __fish_print_docker_containers --description 'Print a list of docker containers' -a select
switch $select
case running
docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS=" +"}; $5 ~ "^Up" {print $1 "\n" $(NF-1)}' | tr ',' '\n'
case stopped
docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS=" +"}; $5 ~ "^Exit" {print $1 "\n" $(NF-1)}' | tr ',' '\n'
case all
docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS=" +"}; {print $1 "\n" $(NF-1)}' | tr ',' '\n'
end
end
function __fish_print_docker_images --description 'Print a list of docker images'
docker images | command awk 'NR>1' | command grep -v '<none>' | command awk '{print $1":"$2}'
end
function __fish_print_docker_repositories --description 'Print a list of docker repositories'
docker images | command awk 'NR>1' | command grep -v '<none>' | command awk '{print $1}' | command sort | command uniq
end
# common options
complete -c docker -f -n '__fish_docker_no_subcommand' -s D -l debug -d 'Enable debug mode'
complete -c docker -f -n '__fish_docker_no_subcommand' -s G -l group -d "Group to assign the unix socket specified by -H when running in daemon mode; use '' (the empty string) to disable setting of a group"
complete -c docker -f -n '__fish_docker_no_subcommand' -s H -l host -d 'tcp://host:port, unix://path/to/socket, fd://* or fd://socketfd to use in daemon mode. Multiple sockets can be specified'
complete -c docker -f -n '__fish_docker_no_subcommand' -l api-enable-cors -d 'Enable CORS headers in the remote API'
complete -c docker -f -n '__fish_docker_no_subcommand' -s b -l bridge -d "Attach containers to a pre-existing network bridge; use 'none' to disable container networking"
complete -c docker -f -n '__fish_docker_no_subcommand' -l bip -d "Use this CIDR notation address for the network bridge's IP, not compatible with -b"
complete -c docker -f -n '__fish_docker_no_subcommand' -s d -l daemon -d 'Enable daemon mode'
complete -c docker -f -n '__fish_docker_no_subcommand' -l dns -d 'Force docker to use specific DNS servers'
complete -c docker -f -n '__fish_docker_no_subcommand' -s e -l exec-driver -d 'Force the docker runtime to use a specific exec driver'
complete -c docker -f -n '__fish_docker_no_subcommand' -s g -l graph -d 'Path to use as the root of the docker runtime'
complete -c docker -f -n '__fish_docker_no_subcommand' -l icc -d 'Enable inter-container communication'
complete -c docker -f -n '__fish_docker_no_subcommand' -l ip -d 'Default IP address to use when binding container ports'
complete -c docker -f -n '__fish_docker_no_subcommand' -l ip-forward -d 'Disable enabling of net.ipv4.ip_forward'
complete -c docker -f -n '__fish_docker_no_subcommand' -l iptables -d "Disable docker's addition of iptables rules"
complete -c docker -f -n '__fish_docker_no_subcommand' -l mtu -d 'Set the containers network MTU; if no value is provided: default to the default route MTU or 1500 if no default route is available'
complete -c docker -f -n '__fish_docker_no_subcommand' -s p -l pidfile -d 'Path to use for daemon PID file'
complete -c docker -f -n '__fish_docker_no_subcommand' -s r -l restart -d 'Restart previously running containers'
complete -c docker -f -n '__fish_docker_no_subcommand' -s s -l storage-driver -d 'Force the docker runtime to use a specific storage driver'
complete -c docker -f -n '__fish_docker_no_subcommand' -s v -l version -d 'Print version information and quit'
# subcommands
# attach
complete -c docker -f -n '__fish_docker_no_subcommand' -a attach -d 'Attach to a running container'
complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l no-stdin -d 'Do not attach stdin'
complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l sig-proxy -d 'Proxify all received signal to the process (even in non-tty mode)'
complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -a '(__fish_print_docker_containers running)' -d "Container"
# build
complete -c docker -f -n '__fish_docker_no_subcommand' -a build -d 'Build a container from a Dockerfile'
complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l no-cache -d 'Do not use cache when building the image'
complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s q -l quiet -d 'Suppress the verbose output generated by the containers'
complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l rm -d 'Remove intermediate containers after a successful build'
complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s t -l tag -d 'Repository name (and optionally a tag) to be applied to the resulting image in case of success'
# commit
complete -c docker -f -n '__fish_docker_no_subcommand' -a commit -d "Create a new image from a container's changes"
complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s a -l author -d 'Author (eg. "John Hannibal Smith <hannibal@a-team.com>"'
complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s m -l message -d 'Commit message'
complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -l run -d 'Config automatically applied when the image is run. (ex: -run=\'{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}\')'
complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -a '(__fish_print_docker_containers all)' -d "Container"
# cp
complete -c docker -f -n '__fish_docker_no_subcommand' -a cp -d 'Copy files/folders from the containers filesystem to the host path'
# diff
complete -c docker -f -n '__fish_docker_no_subcommand' -a diff -d "Inspect changes on a container's filesystem"
complete -c docker -A -f -n '__fish_seen_subcommand_from diff' -a '(__fish_print_docker_containers all)' -d "Container"
# events
complete -c docker -f -n '__fish_docker_no_subcommand' -a events -d 'Get real time events from the server'
complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l since -d 'Show previously created events and then stream.'
# export
complete -c docker -f -n '__fish_docker_no_subcommand' -a export -d 'Stream the contents of a container as a tar archive'
complete -c docker -A -f -n '__fish_seen_subcommand_from export' -a '(__fish_print_docker_containers all)' -d "Container"
# history
complete -c docker -f -n '__fish_docker_no_subcommand' -a history -d 'Show the history of an image'
complete -c docker -A -f -n '__fish_seen_subcommand_from history' -l no-trunc -d "Don't truncate output"
complete -c docker -A -f -n '__fish_seen_subcommand_from history' -s q -l quiet -d 'Only show numeric IDs'
complete -c docker -A -f -n '__fish_seen_subcommand_from history' -a '(__fish_print_docker_images)' -d "Image"
# images
complete -c docker -f -n '__fish_docker_no_subcommand' -a images -d 'List images'
complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s a -l all -d 'Show all images (by default filter out the intermediate image layers)'
complete -c docker -A -f -n '__fish_seen_subcommand_from images' -l no-trunc -d "Don't truncate output"
complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s q -l quiet -d 'Only show numeric IDs'
complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s t -l tree -d 'Output graph in tree format'
complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s v -l viz -d 'Output graph in graphviz format'
complete -c docker -A -f -n '__fish_seen_subcommand_from images' -a '(__fish_print_docker_repositories)' -d "Repository"
# import
complete -c docker -f -n '__fish_docker_no_subcommand' -a import -d 'Create a new filesystem image from the contents of a tarball'
# info
complete -c docker -f -n '__fish_docker_no_subcommand' -a info -d 'Display system-wide information'
# insert
complete -c docker -f -n '__fish_docker_no_subcommand' -a insert -d 'Insert a file in an image'
complete -c docker -A -f -n '__fish_seen_subcommand_from insert' -a '(__fish_print_docker_images)' -d "Image"
# inspect
complete -c docker -f -n '__fish_docker_no_subcommand' -a inspect -d 'Return low-level information on a container'
complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -s f -l format -d 'Format the output using the given go template.'
complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_images)' -d "Image"
complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_containers all)' -d "Container"
# kill
complete -c docker -f -n '__fish_docker_no_subcommand' -a kill -d 'Kill a running container'
complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -s s -l signal -d 'Signal to send to the container'
complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -a '(__fish_print_docker_containers running)' -d "Container"
# load
complete -c docker -f -n '__fish_docker_no_subcommand' -a load -d 'Load an image from a tar archive'
# login
complete -c docker -f -n '__fish_docker_no_subcommand' -a login -d 'Register or Login to the docker registry server'
complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s e -l email -d 'Email'
complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s p -l password -d 'Password'
complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s u -l username -d 'Username'
# logs
complete -c docker -f -n '__fish_docker_no_subcommand' -a logs -d 'Fetch the logs of a container'
complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -s f -l follow -d 'Follow log output'
complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -a '(__fish_print_docker_containers running)' -d "Container"
# port
complete -c docker -f -n '__fish_docker_no_subcommand' -a port -d 'Lookup the public-facing port which is NAT-ed to PRIVATE_PORT'
complete -c docker -A -f -n '__fish_seen_subcommand_from port' -a '(__fish_print_docker_containers running)' -d "Container"
# ps
complete -c docker -f -n '__fish_docker_no_subcommand' -a ps -d 'List containers'
complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s a -l all -d 'Show all containers. Only running containers are shown by default.'
complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l before -d 'Show only container created before Id or Name, include non-running ones.'
complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s l -l latest -d 'Show only the latest created container, include non-running ones.'
complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s n -d 'Show n last created containers, include non-running ones.'
complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l no-trunc -d "Don't truncate output"
complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s q -l quiet -d 'Only display numeric IDs'
complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s s -l size -d 'Display sizes'
complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l since -d 'Show only containers created since Id or Name, include non-running ones.'
# pull
complete -c docker -f -n '__fish_docker_no_subcommand' -a pull -d 'Pull an image or a repository from the docker registry server'
complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -s t -l tag -d 'Download tagged image in repository'
complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -a '(__fish_print_docker_images)' -d "Image"
complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -a '(__fish_print_docker_repositories)' -d "Repository"
# push
complete -c docker -f -n '__fish_docker_no_subcommand' -a push -d 'Push an image or a repository to the docker registry server'
complete -c docker -A -f -n '__fish_seen_subcommand_from push' -a '(__fish_print_docker_images)' -d "Image"
complete -c docker -A -f -n '__fish_seen_subcommand_from push' -a '(__fish_print_docker_repositories)' -d "Repository"
# restart
complete -c docker -f -n '__fish_docker_no_subcommand' -a restart -d 'Restart a running container'
complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -s t -l time -d 'Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default=10'
complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -a '(__fish_print_docker_containers running)' -d "Container"
# rm
complete -c docker -f -n '__fish_docker_no_subcommand' -a rm -d 'Remove one or more containers'
complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s f -l force -d 'Force removal of running container'
complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s l -l link -d 'Remove the specified link and not the underlying container'
complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s v -l volumes -d 'Remove the volumes associated to the container'
complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -a '(__fish_print_docker_containers stopped)' -d "Container"
# rmi
complete -c docker -f -n '__fish_docker_no_subcommand' -a rmi -d 'Remove one or more images'
complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -s f -l force -d 'Force'
complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -a '(__fish_print_docker_images)' -d "Image"
# run
complete -c docker -f -n '__fish_docker_no_subcommand' -a run -d 'Run a command in a new container'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s P -l publish-all -d 'Publish all exposed ports to the host interfaces'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s a -l attach -d 'Attach to stdin, stdout or stderr.'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s c -l cpu-shares -d 'CPU shares (relative weight)'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cidfile -d 'Write the container ID to the file'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s d -l detach -d 'Detached mode: Run container in the background, print new container id'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns -d 'Set custom dns servers'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s e -l env -d 'Set environment variables'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l entrypoint -d 'Overwrite the default entrypoint of the image'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l expose -d 'Expose a port from the container without publishing it to your host'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s h -l hostname -d 'Container host name'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s i -l interactive -d 'Keep stdin open even if not attached'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l link -d 'Add link to another container (name:alias)'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l lxc-conf -d 'Add custom lxc options -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s m -l memory -d 'Memory limit (format: <number><optional unit>, where unit = b, k, m or g)'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s n -l networking -d 'Enable networking for this container'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l name -d 'Assign a name to the container'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s p -l publish -d "Publish a container's port to the host (format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort) (use 'docker port' to see the actual mapping)"
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l privileged -d 'Give extended privileges to this container'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l rm -d 'Automatically remove the container when it exits (incompatible with -d)'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l sig-proxy -d 'Proxify all received signal to the process (even in non-tty mode)'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s t -l tty -d 'Allocate a pseudo-tty'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s u -l user -d 'Username or UID'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s v -l volume -d 'Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l volumes-from -d 'Mount volumes from the specified container(s)'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s w -l workdir -d 'Working directory inside the container'
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -a '(__fish_print_docker_images)' -d "Image"
# save
complete -c docker -f -n '__fish_docker_no_subcommand' -a save -d 'Save an image to a tar archive'
complete -c docker -A -f -n '__fish_seen_subcommand_from save' -a '(__fish_print_docker_images)' -d "Image"
# search
complete -c docker -f -n '__fish_docker_no_subcommand' -a search -d 'Search for an image in the docker index'
complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l no-trunc -d "Don't truncate output"
complete -c docker -A -f -n '__fish_seen_subcommand_from search' -s s -l stars -d 'Only displays with at least xxx stars'
complete -c docker -A -f -n '__fish_seen_subcommand_from search' -s t -l trusted -d 'Only show trusted builds'
# start
complete -c docker -f -n '__fish_docker_no_subcommand' -a start -d 'Start a stopped container'
complete -c docker -A -f -n '__fish_seen_subcommand_from start' -s a -l attach -d "Attach container's stdout/stderr and forward all signals to the process"
complete -c docker -A -f -n '__fish_seen_subcommand_from start' -s i -l interactive -d "Attach container's stdin"
complete -c docker -A -f -n '__fish_seen_subcommand_from start' -a '(__fish_print_docker_containers stopped)' -d "Container"
# stop
complete -c docker -f -n '__fish_docker_no_subcommand' -a stop -d 'Stop a running container'
complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -s t -l time -d 'Number of seconds to wait for the container to stop before killing it.'
complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -a '(__fish_print_docker_containers running)' -d "Container"
# tag
complete -c docker -f -n '__fish_docker_no_subcommand' -a tag -d 'Tag an image into a repository'
complete -c docker -A -f -n '__fish_seen_subcommand_from tag' -s f -l force -d 'Force'
complete -c docker -A -f -n '__fish_seen_subcommand_from tag' -a '(__fish_print_docker_images)' -d "Image"
# top
complete -c docker -f -n '__fish_docker_no_subcommand' -a top -d 'Lookup the running processes of a container'
complete -c docker -A -f -n '__fish_seen_subcommand_from top' -a '(__fish_print_docker_containers running)' -d "Container"
# version
complete -c docker -f -n '__fish_docker_no_subcommand' -a version -d 'Show the docker version information'
# wait
complete -c docker -f -n '__fish_docker_no_subcommand' -a wait -d 'Block until a container stops, then print its exit code'
complete -c docker -A -f -n '__fish_seen_subcommand_from wait' -a '(__fish_print_docker_containers running)' -d "Container"


@@ -1,242 +0,0 @@
#compdef docker
#
# zsh completion for docker (http://docker.io)
#
# version: 0.2.2
# author: Felix Riedel
# license: BSD License
# github: https://github.com/felixr/docker-zsh-completion
#
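# Normalize `docker ps` output into completion entries of the form
# "ID: age, image": drop the header line, collapse runs of spaces into '|',
# and abbreviate the time units before reassembling the fields with awk.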
__parse_docker_list() {
sed -e '/^ID/d' -e 's/[ ]\{2,\}/|/g' -e 's/ \([hdwm]\)\(inutes\|ays\|ours\|eeks\)/\1/' | awk ' BEGIN {FS="|"} { printf("%s:%7s, %s\n", $1, $4, $2)}'
}
__docker_stoppedcontainers() {
local expl
declare -a stoppedcontainers
stoppedcontainers=(${(f)"$(docker ps -a | grep --color=never 'Exit' | __parse_docker_list )"})
_describe -t containers-stopped "Stopped Containers" stoppedcontainers
}
__docker_runningcontainers() {
local expl
declare -a containers
containers=(${(f)"$(docker ps | __parse_docker_list)"})
_describe -t containers-active "Running Containers" containers
}
__docker_containers () {
__docker_stoppedcontainers
__docker_runningcontainers
}
__docker_images () {
local expl
declare -a images
images=(${(f)"$(docker images | awk '(NR > 1){printf("%s\\:%s\n", $1,$2)}')"})
images=($images ${(f)"$(docker images | awk '(NR > 1){printf("%s:%-15s in %s\n", $3,$2,$1)}')"})
_describe -t docker-images "Images" images
}
__docker_tags() {
local expl
declare -a tags
tags=(${(f)"$(docker images | awk '(NR>1){print $2}'| sort | uniq)"})
_describe -t docker-tags "tags" tags
}
__docker_search() {
# declare -a dockersearch
local cache_policy
zstyle -s ":completion:${curcontext}:" cache-policy cache_policy
if [[ -z "$cache_policy" ]]; then
zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy
fi
local searchterm cachename
searchterm="${words[$CURRENT]%/}"
cachename=_docker-search-$searchterm
local expl
local -a result
if ( [[ ${(P)+cachename} -eq 0 ]] || _cache_invalid ${cachename#_} ) \
&& ! _retrieve_cache ${cachename#_}; then
_message "Searching for ${searchterm}..."
result=(${(f)"$(docker search ${searchterm} | awk '(NR>2){print $1}')"})
_store_cache ${cachename#_} result
fi
_wanted dockersearch expl 'Available images' compadd -a result
}
__docker_caching_policy()
{
# oldp=( "$1"(Nmh+24) ) # 24 hours
oldp=( "$1"(Nmh+1) ) # 1 hour
(( $#oldp ))
}
__docker_repositories () {
local expl
declare -a repos
repos=(${(f)"$(docker images | sed -e '1d' -e 's/[ ].*//' | sort | uniq)"})
_describe -t docker-repos "Repositories" repos
}
__docker_commands () {
# local -a _docker_subcommands
local cache_policy
zstyle -s ":completion:${curcontext}:" cache-policy cache_policy
if [[ -z "$cache_policy" ]]; then
zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy
fi
if ( [[ ${+_docker_subcommands} -eq 0 ]] || _cache_invalid docker_subcommands) \
&& ! _retrieve_cache docker_subcommands;
then
_docker_subcommands=(${${(f)"$(_call_program commands
docker 2>&1 | sed -e '1,6d' -e '/^[ ]*$/d' -e 's/[ ]*\([^ ]\+\)\s*\([^ ].*\)/\1:\2/' )"}})
_docker_subcommands=($_docker_subcommands 'help:Show help for a command')
_store_cache docker_subcommands _docker_subcommands
fi
_describe -t docker-commands "docker command" _docker_subcommands
}
__docker_subcommand () {
local -a _command_args
case "$words[1]" in
(attach|wait)
_arguments ':containers:__docker_runningcontainers'
;;
(build)
_arguments \
'-t=-:repository:__docker_repositories' \
':path or URL:_directories'
;;
(commit)
_arguments \
':container:__docker_containers' \
':repository:__docker_repositories' \
':tag: '
;;
(diff|export|logs)
_arguments '*:containers:__docker_containers'
;;
(history)
_arguments '*:images:__docker_images'
;;
(images)
_arguments \
'-a[Show all images]' \
':repository:__docker_repositories'
;;
(inspect)
_arguments '*:containers:__docker_containers'
;;
(insert)
_arguments '1:containers:__docker_containers' \
'2:URL:(http:// file://)' \
'3:file:_files'
;;
(kill)
_arguments '*:containers:__docker_runningcontainers'
;;
(port)
_arguments '1:containers:__docker_runningcontainers'
;;
(start)
_arguments '*:containers:__docker_stoppedcontainers'
;;
(rm)
_arguments '-v[Remove the volumes associated to the container]' \
'*:containers:__docker_stoppedcontainers'
;;
(rmi)
_arguments '*:images:__docker_images'
;;
(top)
_arguments '1:containers:__docker_runningcontainers'
;;
(restart|stop)
_arguments '-t=-[Number of seconds to try to stop for before killing the container]:seconds to wait before killing:(1 5 10 30 60)' \
'*:containers:__docker_runningcontainers'
;;
(ps)
_arguments '-a[Show all containers. Only running containers are shown by default]' \
'-h[Show help]' \
'--before-id=-[Show only container created before Id, include non-running one]:containers:__docker_containers' \
'-n=-[Show n last created containers, include non-running one]:n:(1 5 10 25 50)'
;;
(tag)
_arguments \
'-f[force]'\
':image:__docker_images'\
':repository:__docker_repositories' \
':tag:__docker_tags'
;;
(run)
_arguments \
'-a=-[Attach to stdin, stdout or stderr]:toggle:(true false)' \
'-c=-[CPU shares (relative weight)]:CPU shares: ' \
'-d[Detached mode: leave the container running in the background]' \
'*--dns=[Set custom dns servers]:dns server: ' \
'*-e=[Set environment variables]:environment variable: ' \
'--entrypoint=-[Overwrite the default entrypoint of the image]:entry point: ' \
'-h=-[Container host name]:hostname:_hosts' \
'-i[Keep stdin open even if not attached]' \
'-m=-[Memory limit (in bytes)]:limit: ' \
'*-p=-[Expose a container''s port to the host]:port:_ports' \
'-t=-[Allocate a pseudo-tty]:toggle:(true false)' \
'-u=-[Username or UID]:user:_users' \
'*-v=-[Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)]:volume: '\
'--volumes-from=-[Mount volumes from the specified container]:volume: ' \
'(-):images:__docker_images' \
'(-):command: _command_names -e' \
'*::arguments: _normal'
;;
(pull|search)
_arguments ':name:__docker_search'
;;
(help)
_arguments ':subcommand:__docker_commands'
;;
(*)
_message 'Unknown sub command'
esac
}
_docker () {
local curcontext="$curcontext" state line
typeset -A opt_args
_arguments -C \
'-H=-[tcp://host:port to bind/connect to]:socket: ' \
'(-): :->command' \
'(-)*:: :->option-or-argument'
case $state in
(command)
__docker_commands
;;
(option-or-argument)
curcontext=${curcontext%:*:*}:docker-$words[1]:
__docker_subcommand
;;
esac
}
_docker "$@"


@@ -1,23 +1,18 @@
package main
import (
"fmt"
"io"
"log"
"net"
"os"
"os/exec"
"path"
"time"
)
var DOCKERPATH = path.Join(os.Getenv("DOCKERPATH"), "docker")
const DOCKER_PATH = "/home/creack/dotcloud/docker/docker/docker"
// WARNING: this crashTest will 1) crash your host, 2) remove all containers
func runDaemon() (*exec.Cmd, error) {
os.Remove("/var/run/docker.pid")
exec.Command("rm", "-rf", "/var/lib/docker/containers").Run()
cmd := exec.Command(DOCKERPATH, "-d")
cmd := exec.Command(DOCKER_PATH, "-d")
outPipe, err := cmd.StdoutPipe()
if err != nil {
return nil, err
@@ -43,43 +38,19 @@ func crashTest() error {
return err
}
var endpoint string
if ep := os.Getenv("TEST_ENDPOINT"); ep == "" {
endpoint = "192.168.56.1:7979"
} else {
endpoint = ep
}
c := make(chan bool)
var conn io.Writer
go func() {
conn, _ = net.Dial("tcp", endpoint)
c <- false
}()
go func() {
time.Sleep(2 * time.Second)
c <- true
}()
<-c
restartCount := 0
totalTestCount := 1
for {
daemon, err := runDaemon()
if err != nil {
return err
}
restartCount++
// time.Sleep(5000 * time.Millisecond)
var stop bool
go func() error {
stop = false
for i := 0; i < 100 && !stop; {
for i := 0; i < 100 && !stop; i++ {
func() error {
cmd := exec.Command(DOCKERPATH, "run", "ubuntu", "echo", fmt.Sprintf("%d", totalTestCount))
i++
totalTestCount++
cmd := exec.Command(DOCKER_PATH, "run", "base", "echo", "hello", "world")
log.Printf("%d", i)
outPipe, err := cmd.StdoutPipe()
if err != nil {
return err
@@ -91,10 +62,9 @@ func crashTest() error {
if err := cmd.Start(); err != nil {
return err
}
if conn != nil {
go io.Copy(conn, outPipe)
}
go func() {
io.Copy(os.Stdout, outPipe)
}()
// Expecting error, do not check
inPipe.Write([]byte("hello world!!!!!\n"))
go inPipe.Write([]byte("hello world!!!!!\n"))
@@ -116,6 +86,7 @@ func crashTest() error {
return err
}
}
return nil
}
func main() {


@@ -1,11 +0,0 @@
Desktop Integration
===================
The ./contrib/desktop-integration directory contains examples of typical dockerized
desktop applications.
Examples
========
* Data container: ./data/Dockerfile creates a data image sharing /data volume
* Iceweasel: ./iceweasel/Dockerfile shows a way to dockerize a common multimedia application


@@ -1,38 +0,0 @@
# VERSION: 0.1
# DESCRIPTION: Create data image sharing /data volume
# AUTHOR: Daniel Mizyrycki <daniel@dotcloud.com>
# COMMENTS:
# This image is used as base for all data containers.
# /data volume is owned by sysadmin.
# USAGE:
# # Download data Dockerfile
# wget http://raw.github.com/dotcloud/docker/master/contrib/desktop-integration/data/Dockerfile
#
# # Build data image
# docker build -t data .
#
# # Create a data container. (eg: iceweasel-data)
# docker run --name iceweasel-data data true
#
# # List data from it
# docker run --volumes-from iceweasel-data busybox ls -al /data
docker-version 0.6.5
# Smallest base image, just to launch a container
FROM busybox
MAINTAINER Daniel Mizyrycki <daniel@docker.com>
# Create a regular user
RUN echo 'sysadmin:x:1000:1000::/data:/bin/sh' >> /etc/passwd
RUN echo 'sysadmin:x:1000:' >> /etc/group
# Create directory for that user
RUN mkdir /data
RUN chown sysadmin.sysadmin /data
# Add content to /data. This will keep sysadmin ownership
RUN touch /data/init_volume
# Create /data volume
VOLUME /data


@@ -1,41 +0,0 @@
# VERSION: 0.7
# DESCRIPTION: Create iceweasel container with its dependencies
# AUTHOR: Daniel Mizyrycki <daniel@dotcloud.com>
# COMMENTS:
# This file describes how to build an Iceweasel container with all
# dependencies installed. It uses native X11 unix socket and alsa
# sound devices. Tested on Debian 7.2
# USAGE:
# # Download Iceweasel Dockerfile
# wget http://raw.github.com/dotcloud/docker/master/contrib/desktop-integration/iceweasel/Dockerfile
#
# # Build iceweasel image
# docker build -t iceweasel .
#
# # Run stateful data-on-host iceweasel. For ephemeral, remove -v /data/iceweasel:/data
# docker run -v /data/iceweasel:/data -v /tmp/.X11-unix:/tmp/.X11-unix \
# -v /dev/snd:/dev/snd --lxc-conf='lxc.cgroup.devices.allow = c 116:* rwm' \
# -e DISPLAY=unix$DISPLAY iceweasel
#
# # To run stateful dockerized data containers
# docker run --volumes-from iceweasel-data -v /tmp/.X11-unix:/tmp/.X11-unix \
# -v /dev/snd:/dev/snd --lxc-conf='lxc.cgroup.devices.allow = c 116:* rwm' \
# -e DISPLAY=unix$DISPLAY iceweasel
docker-version 0.6.5
# Base docker image
FROM debian:wheezy
MAINTAINER Daniel Mizyrycki <daniel@docker.com>
# Install Iceweasel and "sudo"
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq iceweasel sudo
# create sysadmin account
RUN useradd -m -d /data -p saIVpsc0EVTwA sysadmin
RUN sed -Ei 's/sudo:x:27:/sudo:x:27:sysadmin/' /etc/group
RUN sed -Ei 's/(\%sudo\s+ALL=\(ALL\:ALL\) )ALL/\1 NOPASSWD:ALL/' /etc/sudoers
# Autorun iceweasel. -no-remote is necessary to create a new container, as
# iceweasel appears to communicate with itself through X11.
CMD ["/usr/bin/sudo", "-u", "sysadmin", "-H", "-E", "/usr/bin/iceweasel", "-no-remote"]


@@ -0,0 +1,68 @@
# docker-build: build your software with docker
## Description
docker-build is a script to build docker images from source. It will be deprecated once the 'build' feature is incorporated into docker itself (See https://github.com/dotcloud/docker/issues/278)
Author: Solomon Hykes <solomon@dotcloud.com>
## Install
docker-build requires:
1) A reasonably recent Python setup (tested on 2.7.2).
2) A running docker daemon at version 0.1.4 or more recent (http://www.docker.io/gettingstarted)
## Usage
First create a valid Changefile, which defines a sequence of changes to apply to a base image.
$ cat Changefile
# Start build from a known base image
from base:ubuntu-12.10
# Update ubuntu sources
run echo 'deb http://archive.ubuntu.com/ubuntu quantal main universe multiverse' > /etc/apt/sources.list
run apt-get update
# Install system packages
run DEBIAN_FRONTEND=noninteractive apt-get install -y -q git
run DEBIAN_FRONTEND=noninteractive apt-get install -y -q curl
run DEBIAN_FRONTEND=noninteractive apt-get install -y -q golang
# Insert files from the host (./myscript must be present in the current directory)
copy myscript /usr/local/bin/myscript
Run docker-build, and pass the contents of your Changefile as standard input.
$ IMG=$(./docker-build < Changefile)
This will take a while: for each line of the changefile, docker-build will:
1. Create a new container to execute the given command or insert the given file
2. Wait for the container to complete execution
3. Commit the resulting changes as a new image
4. Use the resulting image as the input of the next step
If all the steps succeed, the result will be an image containing the combined results of each build step.
You can trace back those build steps by inspecting the image's history:
$ docker history $IMG
ID CREATED CREATED BY
1e9e2045de86 A few seconds ago /bin/sh -c cat > /usr/local/bin/myscript; chmod +x /usr/local/bin/myscript
77db140aa62a A few seconds ago /bin/sh -c DEBIAN_FRONTEND=noninteractive apt-get install -y -q golang
77db140aa62a A few seconds ago /bin/sh -c DEBIAN_FRONTEND=noninteractive apt-get install -y -q curl
77db140aa62a A few seconds ago /bin/sh -c DEBIAN_FRONTEND=noninteractive apt-get install -y -q git
83e85d155451 A few seconds ago /bin/sh -c apt-get update
bfd53b36d9d3 A few seconds ago /bin/sh -c echo 'deb http://archive.ubuntu.com/ubuntu quantal main universe multiverse' > /etc/apt/sources.list
base 2 weeks ago /bin/bash
27cf78414709 2 weeks ago
Note that your build started from 'base', as instructed by your Changefile. But that base image itself seems to have been built in 2 steps - hence the extra step in the history.
You can use this build technique to create any image you want: a database, a web application, or anything else that can be built by a sequence of unix commands - in other words, almost anything.
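Under the hood, each 'run' line expands to roughly the following docker invocations (an illustrative sketch; the image name and command are placeholders):
$ CID=$(docker run -d base /bin/sh -c 'apt-get update')
$ docker wait $CID
$ IMG=$(docker commit $CID)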

contrib/docker-build/docker-build (104 lines, new executable file)

@@ -0,0 +1,104 @@
#!/usr/bin/env python
# docker-build is a script to build docker images from source.
# It will be deprecated once the 'build' feature is incorporated into docker itself.
# (See https://github.com/dotcloud/docker/issues/278)
#
# Author: Solomon Hykes <solomon@dotcloud.com>
# First create a valid Changefile, which defines a sequence of changes to apply to a base image.
#
# $ cat Changefile
# # Start build from a known base image
# from base:ubuntu-12.10
# # Update ubuntu sources
# run echo 'deb http://archive.ubuntu.com/ubuntu quantal main universe multiverse' > /etc/apt/sources.list
# run apt-get update
# # Install system packages
# run DEBIAN_FRONTEND=noninteractive apt-get install -y -q git
# run DEBIAN_FRONTEND=noninteractive apt-get install -y -q curl
# run DEBIAN_FRONTEND=noninteractive apt-get install -y -q golang
# # Insert files from the host (./myscript must be present in the current directory)
# copy myscript /usr/local/bin/myscript
#
#
# Run docker-build, and pass the contents of your Changefile as standard input.
#
# $ IMG=$(./docker-build < Changefile)
#
# This will take a while: for each line of the changefile, docker-build will:
#
# 1. Create a new container to execute the given command or insert the given file
# 2. Wait for the container to complete execution
# 3. Commit the resulting changes as a new image
# 4. Use the resulting image as the input of the next step
import sys
import subprocess
import json
import hashlib
def docker(args, stdin=None):
print "# docker " + " ".join(args)
p = subprocess.Popen(["docker"] + list(args), stdin=stdin, stdout=subprocess.PIPE)
return p.stdout
def image_exists(img):
return docker(["inspect", img]).read().strip() != ""
def run_and_commit(img_in, cmd, stdin=None):
run_id = docker(["run"] + (["-i", "-a", "stdin"] if stdin else ["-d"]) + [img_in, "/bin/sh", "-c", cmd], stdin=stdin).read().rstrip()
print "---> Waiting for " + run_id
result=int(docker(["wait", run_id]).read().rstrip())
if result != 0:
print "!!! '{}' return non-zero exit code '{}'. Aborting.".format(cmd, result)
sys.exit(1)
return docker(["commit", run_id]).read().rstrip()
def insert(base, src, dst):
print "COPY {} to {} in {}".format(src, dst, base)
if dst == "":
raise Exception("Missing destination path")
stdin = file(src)
stdin.seek(0)
return run_and_commit(base, "cat > {0}; chmod +x {0}".format(dst), stdin=stdin)
def main():
base=""
steps = []
try:
for line in sys.stdin.readlines():
line = line.strip()
# Skip comments and empty lines
if line == "" or line[0] == "#":
continue
op, param = line.split(" ", 1)
if op == "from":
print "FROM " + param
base = param
steps.append(base)
elif op == "run":
print "RUN " + param
result = run_and_commit(base, param)
steps.append(result)
base = result
print "===> " + base
elif op == "copy":
src, dst = param.split(" ", 1)
result = insert(base, src, dst)
steps.append(result)
base = result
print "===> " + base
else:
print "Skipping uknown op " + op
except:
docker(["rmi"] + steps[1:])
raise
print base
if __name__ == "__main__":
main()


@@ -0,0 +1,11 @@
# Start build from a known base image
from base:ubuntu-12.10
# Update ubuntu sources
run echo 'deb http://archive.ubuntu.com/ubuntu quantal main universe multiverse' > /etc/apt/sources.list
run apt-get update
# Install system packages
run DEBIAN_FRONTEND=noninteractive apt-get install -y -q git
run DEBIAN_FRONTEND=noninteractive apt-get install -y -q curl
run DEBIAN_FRONTEND=noninteractive apt-get install -y -q golang
# Insert files from the host (./myscript must be present in the current directory)
copy myscript /usr/local/bin/myscript


@@ -1,170 +0,0 @@
package main
import (
"flag"
"fmt"
"github.com/dotcloud/docker/runtime/graphdriver/devmapper"
"os"
"path"
"sort"
"strconv"
"strings"
)
func usage() {
fmt.Fprintf(os.Stderr, "Usage: %s <flags> [status] | [list] | [device id] | [resize new-pool-size] | [snap new-id base-id] | [remove id] | [mount id mountpoint]\n", os.Args[0])
flag.PrintDefaults()
os.Exit(1)
}
func byteSizeFromString(arg string) (int64, error) {
digits := ""
rest := ""
last := strings.LastIndexAny(arg, "0123456789")
if last >= 0 {
digits = arg[:last+1]
rest = arg[last+1:]
}
val, err := strconv.ParseInt(digits, 10, 64)
if err != nil {
return val, err
}
rest = strings.ToLower(strings.TrimSpace(rest))
var multiplier int64 = 1
switch rest {
case "":
multiplier = 1
case "k", "kb":
multiplier = 1024
case "m", "mb":
multiplier = 1024 * 1024
case "g", "gb":
multiplier = 1024 * 1024 * 1024
case "t", "tb":
multiplier = 1024 * 1024 * 1024 * 1024
default:
return 0, fmt.Errorf("Unknown size unit: %s", rest)
}
return val * multiplier, nil
}
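// For example, byteSizeFromString("10g") yields 10*1024*1024*1024.
// The unit suffix is case-insensitive and an empty suffix means plain bytes.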
func main() {
root := flag.String("r", "/var/lib/docker", "Docker root dir")
flDebug := flag.Bool("D", false, "Debug mode")
flag.Parse()
if *flDebug {
os.Setenv("DEBUG", "1")
}
if flag.NArg() < 1 {
usage()
}
args := flag.Args()
home := path.Join(*root, "devicemapper")
devices, err := devmapper.NewDeviceSet(home, false)
if err != nil {
fmt.Println("Can't initialize device mapper: ", err)
os.Exit(1)
}
switch args[0] {
case "status":
status := devices.Status()
fmt.Printf("Pool name: %s\n", status.PoolName)
fmt.Printf("Data Loopback file: %s\n", status.DataLoopback)
fmt.Printf("Metadata Loopback file: %s\n", status.MetadataLoopback)
fmt.Printf("Sector size: %d\n", status.SectorSize)
fmt.Printf("Data use: %d of %d (%.1f %%)\n", status.Data.Used, status.Data.Total, 100.0*float64(status.Data.Used)/float64(status.Data.Total))
fmt.Printf("Metadata use: %d of %d (%.1f %%)\n", status.Metadata.Used, status.Metadata.Total, 100.0*float64(status.Metadata.Used)/float64(status.Metadata.Total))
break
case "list":
ids := devices.List()
sort.Strings(ids)
for _, id := range ids {
fmt.Println(id)
}
break
case "device":
if flag.NArg() < 2 {
usage()
}
status, err := devices.GetDeviceStatus(args[1])
if err != nil {
fmt.Println("Can't get device info: ", err)
os.Exit(1)
}
fmt.Printf("Id: %d\n", status.DeviceId)
fmt.Printf("Size: %d\n", status.Size)
fmt.Printf("Transaction Id: %d\n", status.TransactionId)
fmt.Printf("Size in Sectors: %d\n", status.SizeInSectors)
fmt.Printf("Mapped Sectors: %d\n", status.MappedSectors)
fmt.Printf("Highest Mapped Sector: %d\n", status.HighestMappedSector)
break
case "resize":
if flag.NArg() < 2 {
usage()
}
size, err := byteSizeFromString(args[1])
if err != nil {
fmt.Println("Invalid size: ", err)
os.Exit(1)
}
err = devices.ResizePool(size)
if err != nil {
fmt.Println("Error resizeing pool: ", err)
os.Exit(1)
}
break
case "snap":
if flag.NArg() < 3 {
usage()
}
err := devices.AddDevice(args[1], args[2])
if err != nil {
fmt.Println("Can't create snap device: ", err)
os.Exit(1)
}
break
case "remove":
if flag.NArg() < 2 {
usage()
}
err := devices.RemoveDevice(args[1])
if err != nil {
fmt.Println("Can't remove device: ", err)
os.Exit(1)
}
break
case "mount":
if flag.NArg() < 3 {
usage()
}
err := devices.MountDevice(args[1], args[2], false)
if err != nil {
fmt.Println("Can't create snap device: ", err)
os.Exit(1)
}
break
default:
fmt.Printf("Unknown command %s\n", args[0])
usage()
os.Exit(1)
}
return
}
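To make the unit handling in byteSizeFromString above concrete, here is a hedged, standalone sketch of the same parsing rule; parseSize and the sample values are ours, not part of the tool:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseSize mirrors byteSizeFromString: trailing digits, then an optional unit.
func parseSize(arg string) (int64, error) {
	last := strings.LastIndexAny(arg, "0123456789")
	if last < 0 {
		return 0, fmt.Errorf("no digits in %q", arg)
	}
	val, err := strconv.ParseInt(arg[:last+1], 10, 64)
	if err != nil {
		return 0, err
	}
	var multiplier int64 = 1
	switch strings.ToLower(strings.TrimSpace(arg[last+1:])) {
	case "":
	case "k", "kb":
		multiplier = 1 << 10
	case "m", "mb":
		multiplier = 1 << 20
	case "g", "gb":
		multiplier = 1 << 30
	case "t", "tb":
		multiplier = 1 << 40
	default:
		return 0, fmt.Errorf("unknown size unit in %q", arg)
	}
	return val * multiplier, nil
}

func main() {
	// Expected: 512, 65536, 536870912, 4294967296
	for _, s := range []string{"512", "64k", "512m", "4g"} {
		n, _ := parseSize(s)
		fmt.Printf("%-5s -> %d bytes\n", s, n)
	}
}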

View File

@@ -1,27 +0,0 @@
#
# This Dockerfile will create an image that allows to generate upstart and
# systemd scripts (more to come)
#
# docker-version 0.6.2
#
FROM ubuntu:12.10
MAINTAINER Guillaume J. Charmes <guillaume@docker.com>
RUN apt-get update && apt-get install -y wget git mercurial
# Install Go
RUN wget --no-check-certificate https://go.googlecode.com/files/go1.1.2.linux-amd64.tar.gz -O go-1.1.2.tar.gz
RUN tar -xzvf go-1.1.2.tar.gz && mv /go /goroot
RUN mkdir /go
ENV GOROOT /goroot
ENV GOPATH /go
ENV PATH $GOROOT/bin:$PATH
RUN go get github.com/dotcloud/docker && cd /go/src/github.com/dotcloud/docker && git checkout v0.6.3
ADD manager.go /manager/
RUN cd /manager && go build -o /usr/bin/manager
ENTRYPOINT ["/usr/bin/manager"]

View File

@@ -1,4 +0,0 @@
FROM busybox
MAINTAINER Guillaume J. Charmes <guillaume@docker.com>
ADD manager /usr/bin/
ENTRYPOINT ["/usr/bin/manager"]

View File

@@ -1,130 +0,0 @@
package main
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"github.com/dotcloud/docker"
"os"
"strings"
"text/template"
)
var templates = map[string]string{
"upstart": `description "{{.description}}"
author "{{.author}}"
start on filesystem and started lxc-net and started docker
stop on runlevel [!2345]
respawn
exec /home/vagrant/goroot/bin/docker start -a {{.container_id}}
`,
"systemd": `[Unit]
Description={{.description}}
Author={{.author}}
After=docker.service
[Service]
Restart=always
ExecStart=/usr/bin/docker start -a {{.container_id}}
ExecStop=/usr/bin/docker stop -t 2 {{.container_id}}
[Install]
WantedBy=local.target
`,
}
func main() {
// Parse command line for custom options
kind := flag.String("t", "upstart", "Type of manager requested")
author := flag.String("a", "<none>", "Author of the image")
description := flag.String("d", "<none>", "Description of the image")
flag.Usage = func() {
fmt.Fprintf(os.Stderr, "\nUsage: manager <container id>\n\n")
flag.PrintDefaults()
}
flag.Parse()
// We require at least the container ID
if flag.NArg() != 1 {
println(flag.NArg())
flag.Usage()
return
}
// Check that the requested process manager is supported
if _, exists := templates[*kind]; !exists {
panic("Unknown script template")
}
// Load the requested template
tpl, err := template.New("processManager").Parse(templates[*kind])
if err != nil {
panic(err)
}
// Create stdout/stderr buffers
bufOut := bytes.NewBuffer(nil)
bufErr := bytes.NewBuffer(nil)
// Instantiate the Docker CLI
cli := docker.NewDockerCli(nil, bufOut, bufErr, "unix", "/var/run/docker.sock", false, nil)
// Retrieve the container info
if err := cli.CmdInspect(flag.Arg(0)); err != nil {
// As of docker v0.6.3, CmdInspect always returns nil
panic(err)
}
// If there is nothing in the error buffer, then the Docker daemon is there and the container has been found
if bufErr.Len() == 0 {
// Unmarshal the resulting container data
c := []*docker.Container{{}}
if err := json.Unmarshal(bufOut.Bytes(), &c); err != nil {
panic(err)
}
// Reset the buffers
bufOut.Reset()
bufErr.Reset()
// Retrieve the info of the linked image
if err := cli.CmdInspect(c[0].Image); err != nil {
panic(err)
}
// If there is nothing in the error buffer, then the image has been found.
if bufErr.Len() == 0 {
// Unmarshal the resulting image data
img := []*docker.Image{{}}
if err := json.Unmarshal(bufOut.Bytes(), &img); err != nil {
panic(err)
}
// If no author has been set, use the one from the image
if *author == "<none>" && img[0].Author != "" {
*author = strings.Replace(img[0].Author, "\"", "", -1)
}
// If no description has been set, use the comment from the image
if *description == "<none>" && img[0].Comment != "" {
*description = strings.Replace(img[0].Comment, "\"", "", -1)
}
}
}
// Old version: Write the resulting script to a file
// f, err := os.OpenFile(kind, os.O_CREATE|os.O_WRONLY, 0755)
// if err != nil {
// panic(err)
// }
// defer f.Close()
// Create a map with needed data
data := map[string]string{
"author": *author,
"description": *description,
"container_id": flag.Arg(0),
}
// Process the template and output it on Stdout
if err := tpl.Execute(os.Stdout, data); err != nil {
panic(err)
}
}
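Since the template mechanics are easy to miss in the larger program above, here is a small, self-contained sketch of how text/template fills one of these scripts from a data map; the container ID is the same example value used by mkscript.sh below:

package main

import (
	"os"
	"text/template"
)

func main() {
	// Same substitution pattern as the upstart/systemd templates above
	tpl := template.Must(template.New("unit").Parse(
		"ExecStart=/usr/bin/docker start -a {{.container_id}}\n"))
	data := map[string]string{"container_id": "4ec9612a37cd"}
	// Prints: ExecStart=/usr/bin/docker start -a 4ec9612a37cd
	if err := tpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}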

View File

@@ -1,53 +0,0 @@
#!/bin/sh
set -e
usage() {
echo >&2 "usage: $0 [-a author] [-d description] container [manager]"
echo >&2 " ie: $0 -a 'John Smith' 4ec9612a37cd systemd"
echo >&2 " ie: $0 -d 'Super Cool System' 4ec9612a37cd # defaults to upstart"
exit 1
}
auth='<none>'
desc='<none>'
have_auth=
have_desc=
while getopts a:d: opt; do
case "$opt" in
a)
auth="$OPTARG"
have_auth=1
;;
d)
desc="$OPTARG"
have_desc=1
;;
esac
done
shift $(($OPTIND - 1))
[ $# -ge 1 -a $# -le 2 ] || usage
cid="$1"
script="${2:-upstart}"
if [ ! -e "manager/$script" ]; then
echo >&2 "Error: manager type '$script' is unknown (PRs always welcome!)."
echo >&2 'The currently supported types are:'
echo >&2 " $(cd manager && echo *)"
exit 1
fi
# TODO https://github.com/dotcloud/docker/issues/734 (docker inspect formatting)
#if command -v docker > /dev/null 2>&1; then
# image="$(docker inspect -f '{{.Image}}' "$cid")"
# if [ "$image" ]; then
# if [ -z "$have_auth" ]; then
# auth="$(docker inspect -f '{{.Author}}' "$image")"
# fi
# if [ -z "$have_desc" ]; then
# desc="$(docker inspect -f '{{.Comment}}' "$image")"
# fi
# fi
#fi
exec "manager/$script" "$cid" "$auth" "$desc"

View File

@@ -1,20 +0,0 @@
#!/bin/sh
set -e
cid="$1"
auth="$2"
desc="$3"
cat <<-EOF
[Unit]
Description=$desc
Author=$auth
After=docker.service
[Service]
ExecStart=/usr/bin/docker start -a $cid
ExecStop=/usr/bin/docker stop -t 2 $cid
[Install]
WantedBy=local.target
EOF

View File

@@ -1,15 +0,0 @@
#!/bin/sh
set -e
cid="$1"
auth="$2"
desc="$3"
cat <<-EOF
description "$(echo "$desc" | sed 's/"/\\"/g')"
author "$(echo "$auth" | sed 's/"/\\"/g')"
start on filesystem and started lxc-net and started docker
stop on runlevel [!2345]
respawn
exec /usr/bin/docker start -a "$cid"
EOF

View File

@@ -1,13 +0,0 @@
# /etc/conf.d/docker: config file for /etc/init.d/docker
# where the docker daemon output gets piped
#DOCKER_LOGFILE="/var/log/docker.log"
# where docker's pid get stored
#DOCKER_PIDFILE="/run/docker.pid"
# where the docker daemon itself is run from
#DOCKER_BINARY="/usr/bin/docker"
# any other random options you want to pass to docker
DOCKER_OPTS=""

View File

@@ -1,31 +0,0 @@
#!/sbin/runscript
# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# $Header: $
DOCKER_LOGFILE=${DOCKER_LOGFILE:-/var/log/${SVCNAME}.log}
DOCKER_PIDFILE=${DOCKER_PIDFILE:-/run/${SVCNAME}.pid}
DOCKER_BINARY=${DOCKER_BINARY:-/usr/bin/docker}
DOCKER_OPTS=${DOCKER_OPTS:-}
start() {
checkpath -f -m 0644 -o root:docker "$DOCKER_LOGFILE"
ebegin "Starting docker daemon"
start-stop-daemon --start --background \
--exec "$DOCKER_BINARY" \
--pidfile "$DOCKER_PIDFILE" \
--stdout "$DOCKER_LOGFILE" \
--stderr "$DOCKER_LOGFILE" \
-- -d -p "$DOCKER_PIDFILE" \
$DOCKER_OPTS
eend $?
}
stop() {
ebegin "Stopping docker daemon"
start-stop-daemon --stop \
--exec "$DOCKER_BINARY" \
--pidfile "$DOCKER_PIDFILE"
eend $?
}

View File

@@ -1,13 +0,0 @@
[Unit]
Description=Docker Application Container Engine
Documentation=http://docs.docker.io
After=network.target
[Service]
ExecStart=/usr/bin/docker -d
Restart=on-failure
LimitNOFILE=1048576
LimitNPROC=1048576
[Install]
WantedBy=multi-user.target

View File

@@ -1,13 +0,0 @@
[Unit]
Description=Docker Application Container Engine
Documentation=http://docs.docker.io
After=network.target
[Service]
ExecStart=/usr/bin/docker -d -H fd://
Restart=on-failure
LimitNOFILE=1048576
LimitNPROC=1048576
[Install]
WantedBy=multi-user.target

View File

@@ -1,8 +0,0 @@
[Unit]
Description=Docker Socket for the API
[Socket]
ListenStream=/var/run/docker.sock
[Install]
WantedBy=sockets.target

View File

@@ -1,129 +0,0 @@
#!/bin/sh
### BEGIN INIT INFO
# Provides: docker
# Required-Start: $syslog $remote_fs
# Required-Stop: $syslog $remote_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Create lightweight, portable, self-sufficient containers.
# Description:
# Docker is an open-source project to easily create lightweight, portable,
# self-sufficient containers from any application. The same container that a
# developer builds and tests on a laptop can run at scale, in production, on
# VMs, bare metal, OpenStack clusters, public clouds and more.
### END INIT INFO
export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin
BASE=$(basename $0)
# modify these in /etc/default/$BASE (/etc/default/docker)
DOCKER=/usr/bin/$BASE
DOCKER_PIDFILE=/var/run/$BASE.pid
DOCKER_LOGFILE=/var/log/$BASE.log
DOCKER_OPTS=
DOCKER_DESC="Docker"
# Get lsb functions
. /lib/lsb/init-functions
if [ -f /etc/default/$BASE ]; then
. /etc/default/$BASE
fi
# see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it)
if [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; then
log_failure_msg "$DOCKER_DESC is managed via upstart, try using service $BASE $1"
exit 1
fi
# Check docker is present
if [ ! -x $DOCKER ]; then
log_failure_msg "$DOCKER not present or not executable"
exit 1
fi
fail_unless_root() {
if [ "$(id -u)" != '0' ]; then
log_failure_msg "$DOCKER_DESC must be run as root"
exit 1
fi
}
cgroupfs_mount() {
# see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount
if grep -v '^#' /etc/fstab | grep -q cgroup \
|| [ ! -e /proc/cgroups ] \
|| [ ! -d /sys/fs/cgroup ]; then
return
fi
if ! mountpoint -q /sys/fs/cgroup; then
mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup
fi
(
cd /sys/fs/cgroup
for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do
mkdir -p $sys
if ! mountpoint -q $sys; then
if ! mount -n -t cgroup -o $sys cgroup $sys; then
rmdir $sys || true
fi
fi
done
)
}
case "$1" in
start)
fail_unless_root
cgroupfs_mount
touch "$DOCKER_LOGFILE"
chgrp docker "$DOCKER_LOGFILE"
log_begin_msg "Starting $DOCKER_DESC: $BASE"
start-stop-daemon --start --background \
--no-close \
--exec "$DOCKER" \
--pidfile "$DOCKER_PIDFILE" \
-- \
-d -p "$DOCKER_PIDFILE" \
$DOCKER_OPTS \
>> "$DOCKER_LOGFILE" 2>&1
log_end_msg $?
;;
stop)
fail_unless_root
log_begin_msg "Stopping $DOCKER_DESC: $BASE"
start-stop-daemon --stop --pidfile "$DOCKER_PIDFILE"
log_end_msg $?
;;
restart)
fail_unless_root
docker_pid=`cat "$DOCKER_PIDFILE" 2>/dev/null`
[ -n "$docker_pid" ] \
&& ps -p $docker_pid > /dev/null 2>&1 \
&& $0 stop
$0 start
;;
force-reload)
fail_unless_root
$0 restart
;;
status)
status_of_proc -p "$DOCKER_PIDFILE" "$DOCKER" docker
;;
*)
echo "Usage: $0 {start|stop|restart|status}"
exit 1
;;
esac
exit 0

View File

@@ -1,13 +0,0 @@
# Docker Upstart and SysVinit configuration file
# Customize location of Docker binary (especially for development testing).
#DOCKER="/usr/local/bin/docker"
# Use DOCKER_OPTS to modify the daemon startup options.
#DOCKER_OPTS="--dns 8.8.8.8 --dns 8.8.4.4"
# If you need Docker to use an HTTP proxy, it can also be specified here.
#export http_proxy="http://127.0.0.1:3128/"
# This is also a handy place to tweak where Docker's temporary files go.
#export TMPDIR="/mnt/bigdrive/docker-tmp"

View File

@@ -1,123 +0,0 @@
#!/bin/sh
#
# /etc/rc.d/init.d/docker
#
# Daemon for docker.io
#
# chkconfig: 2345 95 95
# description: Daemon for docker.io
### BEGIN INIT INFO
# Provides: docker
# Required-Start: $network cgconfig
# Required-Stop:
# Should-Start:
# Should-Stop:
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: start and stop docker
# Description: Daemon for docker.io
### END INIT INFO
# Source function library.
. /etc/rc.d/init.d/functions
prog="docker"
exec="/usr/bin/$prog"
pidfile="/var/run/$prog.pid"
lockfile="/var/lock/subsys/$prog"
logfile="/var/log/$prog"
[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog
prestart() {
service cgconfig status > /dev/null
if [[ $? != 0 ]]; then
service cgconfig start
fi
}
start() {
[ -x $exec ] || exit 5
if ! [ -f $pidfile ]; then
prestart
printf "Starting $prog:\t"
echo "\n$(date)\n" >> $logfile
$exec -d $other_args &>> $logfile &
pid=$!
touch $lockfile
success
echo
else
failure
echo
printf "$pidfile still exists...\n"
exit 7
fi
}
stop() {
echo -n $"Stopping $prog: "
killproc -p $pidfile $prog
retval=$?
echo
[ $retval -eq 0 ] && rm -f $lockfile
return $retval
}
restart() {
stop
start
}
reload() {
restart
}
force_reload() {
restart
}
rh_status() {
status -p $pidfile $prog
}
rh_status_q() {
rh_status >/dev/null 2>&1
}
case "$1" in
start)
rh_status_q && exit 0
$1
;;
stop)
rh_status_q || exit 0
$1
;;
restart)
$1
;;
reload)
rh_status_q || exit 7
$1
;;
force-reload)
force_reload
;;
status)
rh_status
;;
condrestart|try-restart)
rh_status_q || exit 0
restart
;;
*)
echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"
exit 2
esac
exit $?

View File

@@ -1,7 +0,0 @@
# /etc/sysconfig/docker
#
# Other arguments to pass to the docker daemon process
# These will be parsed by the sysv initscript and appended
# to the arguments list passed to docker -d
other_args=""

View File

@@ -1,41 +0,0 @@
description "Docker daemon"
start on filesystem
stop on runlevel [!2345]
limit nofile 524288 1048576
limit nproc 524288 1048576
respawn
pre-start script
# see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount
if grep -v '^#' /etc/fstab | grep -q cgroup \
|| [ ! -e /proc/cgroups ] \
|| [ ! -d /sys/fs/cgroup ]; then
exit 0
fi
if ! mountpoint -q /sys/fs/cgroup; then
mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup
fi
(
cd /sys/fs/cgroup
for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do
mkdir -p $sys
if ! mountpoint -q $sys; then
if ! mount -n -t cgroup -o $sys cgroup $sys; then
rmdir $sys || true
fi
fi
done
)
end script
script
# modify these in /etc/default/$UPSTART_JOB (/etc/default/docker)
DOCKER=/usr/bin/$UPSTART_JOB
DOCKER_OPTS=
if [ -f /etc/default/$UPSTART_JOB ]; then
. /etc/default/$UPSTART_JOB
fi
exec "$DOCKER" -d $DOCKER_OPTS
end script

55
contrib/install.sh Executable file
View File

@@ -0,0 +1,55 @@
#!/bin/sh
# This script is meant for quick & easy install via 'curl URL-OF-SCRIPT | sh'
# Original version by Jeff Lindsay <progrium@gmail.com>
# Revamped by Jerome Petazzoni <jerome@dotcloud.com>
#
# This script's canonical location is http://get.docker.io/; to update it, run:
# s3cmd put -m text/x-shellscript -P install.sh s3://get.docker.io/index
echo "Ensuring basic dependencies are installed..."
apt-get -qq update
apt-get -qq install lxc wget bsdtar
echo "Looking in /proc/filesystems to see if we have AUFS support..."
if grep -q aufs /proc/filesystems
then
echo "Found."
else
echo "Ahem, it looks like the current kernel does not support AUFS."
echo "Let's see if we can load the AUFS module with modprobe..."
if modprobe aufs
then
echo "Module loaded."
else
echo "Ahem, things didn't turn out as expected."
KPKG=linux-image-extra-$(uname -r)
echo "Trying to install $KPKG..."
if apt-get -qq install $KPKG
then
echo "Installed."
else
echo "Oops, we couldn't install the -extra kernel."
echo "Are you sure you are running a supported version of Ubuntu?"
echo "Proceeding anyway, but Docker will probably NOT WORK!"
fi
fi
fi
echo "Downloading docker binary and uncompressing into /usr/local/bin..."
curl -s http://get.docker.io/builds/$(uname -s)/$(uname -m)/docker-master.tgz |
tar -C /usr/local/bin --strip-components=1 -zxf- \
docker-master/docker
if [ -f /etc/init/dockerd.conf ]
then
echo "Upstart script already exists."
else
echo "Creating /etc/init/dockerd.conf..."
echo "exec env LANG=\"en_US.UTF-8\" /usr/local/bin/docker -d" > /etc/init/dockerd.conf
fi
echo "Starting dockerd..."
start dockerd > /dev/null
echo "Done."
echo

View File

@@ -1,56 +0,0 @@
.\" Process this file with
.\" nroff -man -Tascii docker-attach.1
.\"
.TH "DOCKER" "1" "APRIL 2014" "0.1" "Docker"
.SH NAME
docker-attach \- Attach to a running container
.SH SYNOPSIS
.B docker attach
\fB--no-stdin\fR[=\fIfalse\fR]
\fB--sig-proxy\fR[=\fItrue\fR]
container
.SH DESCRIPTION
If you \fBdocker run\fR a container in detached mode (\fB-d\fR), you can reattach to the detached container with \fBdocker attach\fR using the container's ID or name.
.sp
You can detach from the container again (and leave it running) with CTRL-c (for a quiet exit) or CTRL-\ to get a stacktrace of the Docker client when it quits. When you detach from the container, the exit code will be returned to the client.
.SH "OPTIONS"
.TP
.B --no-stdin=\fItrue\fR|\fIfalse\fR:
When set to true, do not attach to stdin. The default is \fIfalse\fR.
.TP
.B --sig-proxy=\fItrue\fR|\fIfalse\fR:
When set to true, proxy all received signals to the process (even in non-tty mode). The default is \fItrue\fR.
.sp
.SH EXAMPLES
.sp
.PP
.B Attaching to a container
.TP
In this example the top command is run inside a container, from an image called fedora, in detached mode. The ID from the container is passed into the \fBdocker attach\fR command:
.sp
.nf
.RS
# ID=$(sudo docker run -d fedora /usr/bin/top -b)
# sudo docker attach $ID
top - 02:05:52 up 3:05, 0 users, load average: 0.01, 0.02, 0.05
Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie
Cpu(s): 0.1%us, 0.2%sy, 0.0%ni, 99.7%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st
Mem: 373572k total, 355560k used, 18012k free, 27872k buffers
Swap: 786428k total, 0k used, 786428k free, 221740k cached
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
1 root 20 0 17200 1116 912 R 0 0.3 0:00.03 top
top - 02:05:55 up 3:05, 0 users, load average: 0.01, 0.02, 0.05
Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie
Cpu(s): 0.0%us, 0.2%sy, 0.0%ni, 99.8%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st
Mem: 373572k total, 355244k used, 18328k free, 27872k buffers
Swap: 786428k total, 0k used, 786428k free, 221776k cached
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
1 root 20 0 17208 1144 932 R 0 0.3 0:00.03 top
.RE
.fi
.sp
.SH HISTORY
April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.

View File

@@ -1,65 +0,0 @@
.\" Process this file with
.\" nroff -man -Tascii docker-build.1
.\"
.TH "DOCKER" "1" "MARCH 2014" "0.1" "Docker"
.SH NAME
docker-build \- Build a container image from a Dockerfile source at PATH
.SH SYNOPSIS
.B docker build
[\fB--no-cache\fR[=\fIfalse\fR]
[\fB-q\fR|\fB--quiet\fR[=\fIfalse\fR]
[\fB--rm\fR[=\fItrue\fR]]
[\fB-t\fR|\fB--tag\fR=\fItag\fR]
PATH | URL | -
.SH DESCRIPTION
This will read the Dockerfile from the directory specified in \fBPATH\fR. It also sends any other files and directories found in the current directory to the Docker daemon. The contents of this directory will be used by any ADD commands found within the Dockerfile.
Warning: this will send a lot of data to the Docker daemon if the current directory contains a lot of data.
If an absolute path is provided instead of ., only the files and directories required by the ADD commands from the Dockerfile will be added to the context and transferred to the Docker daemon.
.sp
When a single Dockerfile is given as the URL, no context is set. When a Git repository is set as the URL, the repository is used as the context.
.SH "OPTIONS"
.TP
.B -q, --quiet=\fItrue\fR|\fIfalse\fR:
When set to true, suppress verbose build output. Default is \fIfalse\fR.
.TP
.B --rm=\fItrue\fR|\fIfalse\fR:
When true, remove intermediate containers that are created during the build process. The default is \fItrue\fR.
.TP
.B -t, --tag=\fItag\fR:
Tag to be applied to the resulting image on successful completion of the build.
.TP
.B --no-cache=\fItrue\fR|\fIfalse\fR
When set to true, do not use a cache when building the image. The default is \fIfalse\fR.
.sp
.SH EXAMPLES
.sp
.sp
.B Building an image from current directory
.TP
Using a Dockerfile, Docker images are built with the build command:
.sp
.RS
docker build .
.RE
.sp
If, for some reason, you do not want to remove the intermediate containers created during the build, you must set --rm=false.
.sp
.RS
docker build --rm=false .
.sp
.RE
.sp
A good practice is to make a subdirectory with a related name and create the Dockerfile in that directory. E.g. a directory called mongo may contain a Dockerfile for a MongoDB image, or a directory called httpd may contain a Dockerfile for an Apache web server.
.sp
It is also good practice to add the files required for the image to the subdirectory. These files will be then specified with the `ADD` instruction in the Dockerfile. Note: if you include a tar file, which is good practice, then Docker will automatically extract the contents of the tar file specified in the `ADD` instruction into the specified target.
.sp
.B Building an image container using a URL
.TP
This will clone the GitHub repository and use it as the context. The Dockerfile at the root of the repository is used as the Dockerfile. This only works if the GitHub repository is a dedicated repository. Note that you can specify an arbitrary Git repository by using the git:// scheme.
.sp
.RS
docker build github.com/scollier/Fedora-Dockerfiles/tree/master/apache
.RE
.sp
.SH HISTORY
March 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.

View File

@@ -1,84 +0,0 @@
.\" Process this file with
.\" nroff -man -Tascii docker-images.1
.\"
.TH "DOCKER" "1" "April 2014" "0.1" "Docker"
.SH NAME
docker-images \- List the images in the local repository
.SH SYNOPSIS
.B docker images
[\fB-a\fR|\fB--all\fR=\fIfalse\fR]
[\fB--no-trunc\fR[=\fIfalse\fR]
[\fB-q\fR|\fB--quiet\fR[=\fIfalse\fR]
[\fB-t\fR|\fB--tree\fR=\fIfalse\fR]
[\fB-v\fR|\fB--viz\fR=\fIfalse\fR]
[NAME]
.SH DESCRIPTION
This command lists the images stored in the local Docker repository.
.sp
By default, intermediate images, used during builds, are not listed. Some of the output, e.g. image ID, is truncated, for space reasons. However the truncated image ID, and often the first few characters, are enough to be used in other Docker commands that use the image ID. The output includes repository, tag, image ID, date created and the virtual size.
.sp
The title REPOSITORY for the first column may seem confusing. It is essentially the image name. However, because you can tag a specific image, and multiple tags (image instances) can be associated with a single name, the name is really a repository for all tagged images of the same name.
.SH "OPTIONS"
.TP
.B -a, --all=\fItrue\fR|\fIfalse\fR:
When set to true, also include all intermediate images in the list. The default is false.
.TP
.B --no-trunc=\fItrue\fR|\fIfalse\fR:
When set to true, list the full image ID and not the truncated ID. The default is false.
.TP
.B -q, --quiet=\fItrue\fR|\fIfalse\fR:
When set to true, only list the image IDs in the output. The default is false.
.TP
.B -t, --tree=\fItrue\fR|\fIfalse\fR:
When set to true, list the images in a dependency tree (hierarchy) format. The default is false.
.TP
.B -v, --viz=\fItrue\fR|\fIfalse\fR
When set to true, list the graph in graphviz format. The default is \fIfalse\fR.
.sp
.SH EXAMPLES
.sp
.B Listing the images
.TP
To list the images in a local repository (not the registry) run:
.sp
.RS
docker images
.RE
.sp
The list will contain the image repository name, a tag for the image, an image ID, when it was created, and its virtual size. Columns: REPOSITORY, TAG, IMAGE ID, CREATED, and VIRTUAL SIZE.
.sp
To get a verbose list of images which contains all the intermediate images used in builds use \fB-a\fR:
.sp
.RS
docker images -a
.RE
.sp
.B List images dependency tree hierarchy
.TP
To list the images in the local repository (not the registry) in a dependency tree format then use the \fB-t\fR|\fB--tree=true\fR option.
.sp
.RS
docker images -t
.RE
.sp
This displays a staggered hierarchy tree where the less indented image is the oldest with dependent image layers branching inward (to the right) on subsequent lines. The newest or top level image layer is listed last in any tree branch.
.sp
.B List images in GraphViz format
.TP
To display the list in a format consumable by GraphViz tools, run with \fB-v\fR|\fB--viz=true\fR. For example, to produce a .png graph file of the hierarchy, use:
.sp
.RS
docker images --viz | dot -Tpng -o docker.png
.sp
.RE
.sp
.B Listing only the shortened image IDs
.TP
Listing just the shortened image IDs. This can be useful for some automated tools.
.sp
.RS
docker images -q
.RE
.sp
.SH HISTORY
April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.

View File

@@ -1,39 +0,0 @@
.\" Process this file with
.\" nroff -man -Tascii docker-info.1
.\"
.TH "DOCKER" "1" "APRIL 2014" "0.1" "Docker"
.SH NAME
docker-info \- Display system wide information
.SH SYNOPSIS
.B docker info
.SH DESCRIPTION
This command displays system wide information regarding the Docker installation. Information displayed includes the number of containers and images, pool name, data file, metadata file, data space used, total data space, metadata space used, total metadata space, execution driver, and the kernel version.
.sp
The data file is where the images are stored, and the metadata file is where the metadata regarding those images is stored. When run for the first time, Docker allocates a certain amount of data space and metadata space from the space available on the volume where /var/lib/docker is mounted.
.SH "OPTIONS"
There are no available options.
.sp
.SH EXAMPLES
.sp
.B Display Docker system information
.TP
Here is a sample output:
.sp
.RS
# docker info
Containers: 18
Images: 95
Storage Driver: devicemapper
Pool Name: docker-8:1-170408448-pool
Data file: /var/lib/docker/devicemapper/devicemapper/data
Metadata file: /var/lib/docker/devicemapper/devicemapper/metadata
Data Space Used: 9946.3 Mb
Data Space Total: 102400.0 Mb
Metadata Space Used: 9.9 Mb
Metadata Space Total: 2048.0 Mb
Execution Driver: native-0.1
Kernel Version: 3.10.0-116.el7.x86_64
.RE
.sp
.SH HISTORY
April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.

View File

@@ -1,237 +0,0 @@
.\" Process this file with
.\" nroff -man -Tascii docker-inspect.1
.\"
.TH "DOCKER" "1" "APRIL 2014" "0.1" "Docker"
.SH NAME
docker-inspect \- Return low-level information on a container/image
.SH SYNOPSIS
.B docker inspect
[\fB-f\fR|\fB--format\fR=""
CONTAINER|IMAGE [CONTAINER|IMAGE...]
.SH DESCRIPTION
This displays all the information available in Docker for a given container or image. By default, this will render all results in a JSON array. If a format is specified, the given template will be executed for each result.
.SH "OPTIONS"
.TP
.B -f, --format="":
The text/template package of Go describes all the details of the format; see the EXAMPLES section.
.SH EXAMPLES
.sp
.PP
.B Getting information on a container
.TP
To get information on a container, use its ID or instance name:
.sp
.fi
.RS
#docker inspect 1eb5fabf5a03
[{
"ID": "1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b",
"Created": "2014-04-04T21:33:52.02361335Z",
"Path": "/usr/sbin/nginx",
"Args": [],
"Config": {
"Hostname": "1eb5fabf5a03",
"Domainname": "",
"User": "",
"Memory": 0,
"MemorySwap": 0,
"CpuShares": 0,
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"PortSpecs": null,
"ExposedPorts": {
"80/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"HOME=/",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": [
"/usr/sbin/nginx"
],
"Dns": null,
"DnsSearch": null,
"Image": "summit/nginx",
"Volumes": null,
"VolumesFrom": "",
"WorkingDir": "",
"Entrypoint": null,
"NetworkDisabled": false,
"OnBuild": null,
"Context": {
"mount_label": "system_u:object_r:svirt_sandbox_file_t:s0:c0,c650",
"process_label": "system_u:system_r:svirt_lxc_net_t:s0:c0,c650"
}
},
"State": {
"Running": true,
"Pid": 858,
"ExitCode": 0,
"StartedAt": "2014-04-04T21:33:54.16259207Z",
"FinishedAt": "0001-01-01T00:00:00Z",
"Ghost": false
},
"Image": "df53773a4390e25936f9fd3739e0c0e60a62d024ea7b669282b27e65ae8458e6",
"NetworkSettings": {
"IPAddress": "172.17.0.2",
"IPPrefixLen": 16,
"Gateway": "172.17.42.1",
"Bridge": "docker0",
"PortMapping": null,
"Ports": {
"80/tcp": [
{
"HostIp": "0.0.0.0",
"HostPort": "80"
}
]
}
},
"ResolvConfPath": "/etc/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/hostname",
"HostsPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/hosts",
"Name": "/ecstatic_ptolemy",
"Driver": "devicemapper",
"ExecDriver": "native-0.1",
"Volumes": {},
"VolumesRW": {},
"HostConfig": {
"Binds": null,
"ContainerIDFile": "",
"LxcConf": [],
"Privileged": false,
"PortBindings": {
"80/tcp": [
{
"HostIp": "0.0.0.0",
"HostPort": "80"
}
]
},
"Links": null,
"PublishAllPorts": false,
"DriverOptions": {
"lxc": null
},
"CliAddress": ""
}
.RE
.nf
.sp
.B Getting the IP address of a container instance
.TP
To get the IP address of a container use:
.sp
.fi
.RS
# docker inspect --format='{{.NetworkSettings.IPAddress}}' 1eb5fabf5a03
172.17.0.2
.RE
.nf
.sp
.B Listing all port bindings
.TP
One can loop over arrays and maps in the results to produce simple text output:
.sp
.fi
.RS
# docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' 1eb5fabf5a03
80/tcp -> 80
.RE
.nf
.sp
.B Getting information on an image
.TP
Use an image's ID or name (e.g. repository/name[:tag]) to get information on it.
.sp
.fi
.RS
docker inspect 58394af37342
[{
"id": "58394af373423902a1b97f209a31e3777932d9321ef10e64feaaa7b4df609cf9",
"parent": "8abc22fbb04266308ff408ca61cb8f6f4244a59308f7efc64e54b08b496c58db",
"created": "2014-02-03T16:10:40.500814677Z",
"container": "f718f19a28a5147da49313c54620306243734bafa63c76942ef6f8c4b4113bc5",
"container_config": {
"Hostname": "88807319f25e",
"Domainname": "",
"User": "",
"Memory": 0,
"MemorySwap": 0,
"CpuShares": 0,
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"PortSpecs": null,
"ExposedPorts": null,
"Tty": false,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"HOME=/",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": [
"/bin/sh",
"-c",
"#(nop) ADD fedora-20-medium.tar.xz in /"
],
"Dns": null,
"DnsSearch": null,
"Image": "8abc22fbb04266308ff408ca61cb8f6f4244a59308f7efc64e54b08b496c58db",
"Volumes": null,
"VolumesFrom": "",
"WorkingDir": "",
"Entrypoint": null,
"NetworkDisabled": false,
"OnBuild": null,
"Context": null
},
"docker_version": "0.6.3",
"author": "Lokesh Mandvekar \u003clsm5@redhat.com\u003e - ./buildcontainers.sh",
"config": {
"Hostname": "88807319f25e",
"Domainname": "",
"User": "",
"Memory": 0,
"MemorySwap": 0,
"CpuShares": 0,
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"PortSpecs": null,
"ExposedPorts": null,
"Tty": false,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"HOME=/",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Dns": null,
"DnsSearch": null,
"Image": "8abc22fbb04266308ff408ca61cb8f6f4244a59308f7efc64e54b08b496c58db",
"Volumes": null,
"VolumesFrom": "",
"WorkingDir": "",
"Entrypoint": null,
"NetworkDisabled": false,
"OnBuild": null,
"Context": null
},
"architecture": "x86_64",
"Size": 385520098
}]
.RE
.nf
.sp
.SH HISTORY
April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.
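For readers unfamiliar with Go templates, here is a hedged, self-contained sketch of how the range/index expression in the port-bindings example above evaluates; the binding type and values are illustrative stand-ins for the inspect JSON:

package main

import (
	"os"
	"text/template"
)

type binding struct{ HostIp, HostPort string }

func main() {
	// Shaped like the NetworkSettings.Ports map in the inspect output
	ports := map[string][]binding{
		"80/tcp": {{HostIp: "0.0.0.0", HostPort: "80"}},
	}
	tpl := template.Must(template.New("ports").Parse(
		"{{range $p, $conf := .}} {{$p}} -> {{(index $conf 0).HostPort}} {{end}}\n"))
	// Prints: 80/tcp -> 80
	if err := tpl.Execute(os.Stdout, ports); err != nil {
		panic(err)
	}
}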

View File

@@ -1,45 +0,0 @@
.\" Process this file with
.\" nroff -man -Tascii docker-rm.1
.\"
.TH "DOCKER" "1" "MARCH 2014" "0.1" "Docker"
.SH NAME
docker-rm \- Remove one or more containers.
.SH SYNOPSIS
.B docker rm
[\fB-f\fR|\fB--force\fR[=\fIfalse\fR]
[\fB-l\fR|\fB--link\fR[=\fIfalse\fR]
[\fB-v\fR|\fB--volumes\fR[=\fIfalse\fR]
CONTAINER [CONTAINER...]
.SH DESCRIPTION
This will remove one or more containers from the host node. The container name or ID can be used. This does not remove images. You cannot remove a running container unless you use the \fB-f\fR option. To see all containers on a host use the \fBdocker ps -a\fR command.
.SH "OPTIONS"
.TP
.B -f, --force=\fItrue\fR|\fIfalse\fR:
When set to true, force the removal of the container. The default is \fIfalse\fR.
.TP
.B -l, --link=\fItrue\fR|\fIfalse\fR:
When set to true, remove the specified link and not the underlying container. The default is \fIfalse\fR.
.TP
.B -v, --volumes=\fItrue\fR|\fIfalse\fR:
When set to true, remove the volumes associated with the container. The default is \fIfalse\fR.
.SH EXAMPLES
.sp
.PP
.B Removing a container using its ID
.TP
To remove a container using its ID, find the ID either from the \fBdocker ps -a\fR command, from the value returned by the \fBdocker run\fR command, or from the file used to store it via \fBdocker run --cidfile\fR:
.sp
.RS
docker rm abebf7571666
.RE
.sp
.B Removing a container using the container name:
.TP
The name of the container can be found using the \fBdocker ps -a\fR command. Then use that name as follows:
.sp
.RS
docker rm hopeful_morse
.RE
.sp
.SH HISTORY
March 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.

View File

@@ -1,29 +0,0 @@
.\" Process this file with
.\" nroff -man -Tascii docker-run.1
.\"
.TH "DOCKER" "1" "MARCH 2014" "0.1" "Docker"
.SH NAME
docker-rmi \- Remove one or more images.
.SH SYNOPSIS
.B docker rmi
[\fB-f\fR|\fB--force\fR[=\fIfalse\fR]
IMAGE [IMAGE...]
.SH DESCRIPTION
This will remove one or more images from the host node. This does not remove images from a registry. You cannot remove an image of a running container unless you use the \fB-f\fR option. To see all images on a host use the \fBdocker images\fR command.
.SH "OPTIONS"
.TP
.B -f, --force=\fItrue\fR|\fIfalse\fR:
When set to true, force the removal of the image. The default is \fIfalse\fR.
.SH EXAMPLES
.sp
.PP
.B Removing an image
.TP
Here is an example of removing an image:
.sp
.RS
docker rmi fedora/httpd
.RE
.sp
.SH HISTORY
March 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.

View File

@@ -1,277 +0,0 @@
.\" Process this file with
.\" nroff -man -Tascii docker-run.1
.\"
.TH "DOCKER" "1" "MARCH 2014" "0.1" "Docker"
.SH NAME
docker-run \- Run a process in an isolated container
.SH SYNOPSIS
.B docker run
[\fB-a\fR|\fB--attach\fR[=]] [\fB-c\fR|\fB--cpu-shares\fR[=0] [\fB-m\fR|\fB--memory\fR=\fImemory-limit\fR]
[\fB--cidfile\fR=\fIfile\fR] [\fB-d\fR|\fB--detach\fR[=\fIfalse\fR]] [\fB--dns\fR=\fIIP-address\fR]
[\fB--name\fR=\fIname\fR] [\fB-u\fR|\fB--user\fR=\fIusername\fR|\fIuid\fR]
[\fB--link\fR=\fIname\fR:\fIalias\fR]
[\fB-e\fR|\fB--env\fR=\fIenvironment\fR] [\fB--entrypoint\fR=\fIcommand\fR]
[\fB--expose\fR=\fIport\fR] [\fB-P\fR|\fB--publish-all\fR[=\fIfalse\fR]]
[\fB-p\fR|\fB--publish\fR=\fIport-mapping\fR] [\fB-h\fR|\fB--hostname\fR=\fIhostname\fR]
[\fB--rm\fR[=\fIfalse\fR]] [\fB--privileged\fR[=\fIfalse\fR]]
[\fB-i\fR|\fB--interactive\fR[=\fIfalse\fR]]
[\fB-t\fR|\fB--tty\fR[=\fIfalse\fR]] [\fB--lxc-conf\fR=\fIoptions\fR]
[\fB-n\fR|\fB--networking\fR[=\fItrue\fR]]
[\fB-v\fR|\fB--volume\fR=\fIvolume\fR] [\fB--volumes-from\fR=\fIcontainer-id\fR]
[\fB-w\fR|\fB--workdir\fR=\fIdirectory\fR] [\fB--sig-proxy\fR[=\fItrue\fR]]
IMAGE [COMMAND] [ARG...]
.SH DESCRIPTION
.PP
Run a process in a new container. \fBdocker run\fR starts a process with its own file system, its own networking, and its own isolated process tree. The \fIIMAGE\fR which starts the process may define defaults related to the process that will be run in the container, the networking to expose, and more, but \fBdocker run\fR gives final control to the operator or administrator who starts the container from the image. For that reason \fBdocker run\fR has more options than any other docker command.
If the \fIIMAGE\fR is not already loaded then \fBdocker run\fR will pull the \fIIMAGE\fR, and all image dependencies, from the repository in the same way running \fBdocker pull\fR \fIIMAGE\fR, before it starts the container from that image.
.SH "OPTIONS"
.TP
.B -a, --attach=\fIstdin\fR|\fIstdout\fR|\fIstderr\fR:
Attach to stdin, stdout or stderr. In foreground mode (the default when -d is not specified), \fBdocker run\fR can start the process in the container and attach the console to the process's standard input, output, and error. It can even pretend to be a TTY (this is what most command-line executables expect) and pass along signals. The \fB-a\fR option can be set for each of stdin, stdout, and stderr.
.TP
.B -c, --cpu-shares=0:
CPU shares in relative weight. You can increase the priority of a container with the -c option. By default, all containers run at the same priority and get the same proportion of CPU cycles, but you can tell the kernel to give more shares of CPU time to one or more containers when you start them via \fBdocker run\fR.
.TP
.B -m, --memory=\fImemory-limit\fR:
Allows you to constrain the memory available to a container. If the host supports swap memory, then the -m memory setting can be larger than physical RAM. The memory limit format: <number><optional unit>, where unit = b, k, m or g.
.TP
.B --cidfile=\fIfile\fR:
Write the container ID to the file specified.
.TP
.B -d, --detach=\fItrue\fR|\fIfalse\fR:
Detached mode. This runs the container in the background. It outputs the new container's ID and any error messages. At any time you can run \fBdocker ps\fR in another shell to view a list of the running containers. You can reattach to a detached container with \fBdocker attach\fR. If you choose to run a container in detached mode, then you cannot use the \fB--rm\fR option.
.TP
.B --dns=\fIIP-address\fR:
Set custom DNS servers. This option can be used to override the DNS configuration passed to the container. Typically this is necessary when the host DNS configuration is invalid for the container (e.g., 127.0.0.1). When this is the case, the \fB--dns\fR flag is necessary for every run.
.TP
.B -e, --env=\fIenvironment\fR:
Set environment variables. This option allows you to specify arbitrary environment variables that are available for the process that will be launched inside of the container.
.TP
.B --entrypoint=\fIcommand\fR:
This option allows you to overwrite the default entrypoint of the image that is set in the Dockerfile. The ENTRYPOINT of an image is similar to a COMMAND because it specifies what executable to run when the container starts, but it is (purposely) more difficult to override. The ENTRYPOINT gives a container its default nature or behavior, so that when you set an ENTRYPOINT you can run the container as if it were that binary, complete with default options, and you can pass in more options via the COMMAND. But, sometimes an operator may want to run something else inside the container, so you can override the default ENTRYPOINT at runtime by using a \fB--entrypoint\fR and a string to specify the new ENTRYPOINT.
.TP
.B --expose=\fIport\fR:
Expose a port from the container without publishing it to your host. A container's port can be exposed to other containers in three ways: 1) the developer can expose the port using the EXPOSE parameter of the Dockerfile, 2) the operator can use the \fB--expose\fR option with \fBdocker run\fR, or 3) the container can be started with the \fB--link\fR option.
.TP
.B -P, --publish-all=\fItrue\fR|\fIfalse\fR:
When set to true, publish all exposed ports to the host interfaces. The default is false. If the operator uses -P (or -p), then Docker will make the exposed port accessible on the host, and the ports will be available to any client that can reach the host. To find the map between the host ports and the exposed ports, use \fBdocker port\fR.
.TP
.B -p, --publish=[]:
Publish a container's port to the host (format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort) (use 'docker port' to see the actual mapping)
.TP
.B -h , --hostname=\fIhostname\fR:
Sets the container host name that is available inside the container.
.TP
.B -i , --interactive=\fItrue\fR|\fIfalse\fR:
When set to true, keep stdin open even if not attached. The default is false.
.TP
.B --link=\fIname\fR:\fIalias\fR:
Add link to another container. The format is name:alias. If the operator uses \fB--link\fR when starting the new client container, then the client container can access the exposed port via a private networking interface. Docker will set some environment variables in the client container to help indicate which interface and port to use.
.TP
.B -n, --networking=\fItrue\fR|\fIfalse\fR:
By default, all containers have networking enabled (true) and can make outgoing connections. The operator can disable networking by setting \fB--networking\fR to false. This disables all incoming and outgoing networking. In cases like this, I/O can only be performed through files or by using STDIN/STDOUT.
Also by default, the container will use the same DNS servers as the host, but the operator may override this with \fB--dns\fR.
.TP
.B --name=\fIname\fR:
Assign a name to the container. The operator can identify a container in three ways:
.sp
.nf
UUID long identifier (“f78375b1c487e03c9438c729345e54db9d20cfa2ac1fc3494b6eb60872e74778”)
UUID short identifier (“f78375b1c487”)
Name (“jonah”)
.fi
.sp
The UUID identifiers come from the Docker daemon, and if a name is not assigned to the container with \fB--name\fR then the daemon will also generate a random string name. The name is useful when defining links (see \fB--link\fR) (or any other place you need to identify a container). This works for both background and foreground Docker containers.
.TP
.B --privileged=\fItrue\fR|\fIfalse\fR:
Give extended privileges to this container. By default, Docker containers are “unprivileged” (=false) and cannot, for example, run a Docker daemon inside the Docker container. This is because by default a container is not allowed to access any devices. A “privileged” container is given access to all devices.
When the operator executes \fBdocker run --privileged\fR, Docker will enable access to all devices on the host as well as set some configuration in AppArmor (\fB???\fR) to allow the container nearly all the same access to the host as processes running outside of a container on the host.
.TP
.B --rm=\fItrue\fR|\fIfalse\fR:
If set to \fItrue\fR the container is automatically removed when it exits. The default is \fIfalse\fR. This option is incompatible with \fB-d\fR.
.TP
.B --sig-proxy=\fItrue\fR|\fIfalse\fR:
When set to true, proxify all received signals to the process (even in non-tty mode). The default is true.
.TP
.B -t, --tty=\fItrue\fR|\fIfalse\fR:
When set to true, Docker can allocate a pseudo-TTY and attach to the standard input of any container. This can be used, for example, to run a throwaway interactive shell. The default value is false.
.TP
.B -u, --user=\fIusername\fR|\fIuid\fR:
Set a username or UID for the container.
.TP
.B -v, --volume=\fIvolume\fR:
Bind mount a volume to the container. The \fB-v\fR option can be used one or more times to add one or more mounts to a container. These mounts can then be used in other containers using the \fB--volumes-from\fR option. See examples.
.TP
.B --volumes-from=\fIcontainer-id\fR:
Will mount volumes from the specified container identified by container-id. Once a volume is mounted in one container, it can be shared with other containers using the \fB--volumes-from\fR option when running those other containers. The volumes can be shared even if the original container with the mount is not running.
.TP
.B -w, --workdir=\fIdirectory\fR:
Working directory inside the container. The default working directory for running binaries within a container is the root directory (/). The developer can set a different default with the Dockerfile WORKDIR instruction. The operator can override the working directory by using the \fB-w\fR option.
.TP
.B IMAGE:
The image name or ID.
.TP
.B COMMAND:
The command or program to run inside the image.
.TP
.B ARG:
The arguments for the command to be run in the container.
.SH EXAMPLES
.sp
.sp
.B Exposing log messages from the container to the host's log
.TP
If you want messages that are logged in your container to show up in the host's syslog/journal, then you should bind mount the /var/log directory as follows.
.sp
.RS
docker run -v /dev/log:/dev/log -i -t fedora /bin/bash
.RE
.sp
From inside the container you can test this by sending a message to the log.
.sp
.RS
logger "Hello from my container"
.sp
.RE
Then exit and check the journal.
.RS
.sp
exit
.sp
journalctl -b | grep Hello
.RE
.sp
This should list the message sent to logger.
.sp
.B Attaching to one or more from STDIN, STDOUT, STDERR
.TP
If you do not specify -a, then Docker will attach everything (stdin, stdout, stderr). You can specify to which of the three standard streams (stdin, stdout, stderr) you'd like to connect instead, as in:
.sp
.RS
docker run -a stdin -a stdout -i -t fedora /bin/bash
.RE
.sp
.B Linking Containers
.TP
The link feature allows multiple containers to communicate with each other. For example, a container whose Dockerfile has exposed port 80 can be run and named as follows:
.sp
.RS
docker run --name=link-test -d -i -t fedora/httpd
.RE
.sp
.TP
A second container, in this case called linker, can communicate with the httpd container, named link-test, by running with the \fB--link=<name>:<alias>\fR
.sp
.RS
docker run -t -i --link=link-test:lt --name=linker fedora /bin/bash
.RE
.sp
.TP
Now the container linker is linked to container link-test with the alias lt. Running the \fBenv\fR command in the linker container shows environment variables with the LT (alias) context (\fBLT_\fR)
.sp
.nf
.RS
# env
HOSTNAME=668231cb0978
TERM=xterm
LT_PORT_80_TCP=tcp://172.17.0.3:80
LT_PORT_80_TCP_PORT=80
LT_PORT_80_TCP_PROTO=tcp
LT_PORT=tcp://172.17.0.3:80
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
PWD=/
LT_NAME=/linker/lt
SHLVL=1
HOME=/
LT_PORT_80_TCP_ADDR=172.17.0.3
_=/usr/bin/env
.RE
.fi
.sp
.TP
When linking two containers Docker will use the exposed ports of the container to create a secure tunnel for the parent to access.
.TP
.sp
.B Mapping Ports for External Usage
.TP
The exposed port of an application can be mapped to a host port using the \fB-p\fR flag. For example a httpd port 80 can be mapped to the host port 8080 using the following:
.sp
.RS
docker run -p 8080:80 -d -i -t fedora/httpd
.RE
.sp
.TP
.B Creating and Mounting a Data Volume Container
.TP
Many applications require the sharing of persistent data across several containers. Docker allows you to create a Data Volume Container that other containers can mount from. For example, create a named container that contains directories /var/volume1 and /tmp/volume2. The image will need to contain these directories, so a couple of RUN mkdir instructions might be required for your fedora-data image:
.sp
.RS
docker run --name=data -v /var/volume1 -v /tmp/volume2 -i -t fedora-data true
.sp
docker run --volumes-from=data --name=fedora-container1 -i -t fedora bash
.RE
.sp
.TP
Multiple \fB--volumes-from\fR parameters will bring together multiple data volumes from multiple containers. It is also possible to mount the volumes that came from the data container in yet another container via the fedora-container1 intermediary container, allowing you to abstract the actual data source from users of that data:
.sp
.RS
docker run --volumes-from=fedora-container1 --name=fedora-container2 -i -t fedora bash
.RE
.TP
.sp
.B Mounting External Volumes
.TP
To mount a host directory as a container volume, specify the absolute path to the directory and the absolute path for the container directory separated by a colon:
.sp
.RS
docker run -v /var/db:/data1 -i -t fedora bash
.RE
.sp
.TP
When using SELinux, be aware that the host has no knowledge of container SELinux policy. Therefore, in the above example, if SELinux policy is enforced, the /var/db directory is not writable by the container. A "Permission Denied" message will occur, along with an avc: message in the host's syslog.
.sp
.TP
To work around this, at the time of writing this man page, the following command needs to be run so that the proper SELinux policy type label is attached to the host directory:
.sp
.RS
chcon -Rt svirt_sandbox_file_t /var/db
.RE
.sp
.TP
Now, writing to the /data1 volume in the container will be allowed and the changes will also be reflected on the host in /var/db.
.sp
.SH HISTORY
March 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.

View File

@@ -1,49 +0,0 @@
.\" Process this file with
.\" nroff -man -Tascii docker-tag.1
.\"
.TH "DOCKER" "1" "APRIL 2014" "0.1" "Docker"
.SH NAME
docker-tag \- Tag an image in the repository
.SH SYNOPSIS
.B docker tag
[\fB-f\fR|\fB--force\fR[=\fIfalse\fR]
\fBIMAGE\fR [REGISTRYHOST/][USERNAME/]NAME[:TAG]
.SH DESCRIPTION
This will tag an image in the repository.
.SH "OPTIONS"
.TP
.B -f, --force=\fItrue\fR|\fIfalse\fR:
When set to true, force the tag name. The default is \fIfalse\fR.
.TP
.B REGISTRYHOST:
The hostname of the registry if required. This may also include the port separated by a ':'
.TP
.B USERNAME:
The username or other qualifying identifier for the image.
.TP
.B NAME:
The image name.
.TP
.B TAG:
The tag you are assigning to the image.
.SH EXAMPLES
.sp
.PP
.B Tagging an image
.TP
Here is an example where an image is tagged with the tag 'Version-1.0':
.sp
.RS
docker tag 0e5574283393 fedora/httpd:Version-1.0
.RE
.sp
.B Tagging an image for an internal repository
.TP
To push an image to an internal registry, and not the default docker.io registry, you must tag it with the registry hostname and port (if needed).
.sp
.RS
docker tag 0e5574283393 myregistryhost:5000/fedora/httpd:version1.0
.RE
.sp
.SH HISTORY
April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.

View File

@@ -1,172 +0,0 @@
.\" Process this file with
.\" nroff -man -Tascii docker.1
.\"
.TH "DOCKER" "1" "APRIL 2014" "0.1" "Docker"
.SH NAME
docker \- Docker image and container command line interface
.SH SYNOPSIS
.B docker [OPTIONS] [COMMAND] [arg...]
.SH DESCRIPTION
\fBdocker\fR has two distinct functions: it is used for starting the Docker daemon and for running the CLI (i.e., to command the daemon to manage images, containers, etc.). So \fBdocker\fR is both a server, as a daemon, and a client to the daemon through the CLI.
.sp
To run the Docker daemon you do not specify any of the commands listed below, but must specify the \fB-d\fR option. The other options listed below are for the daemon only.
.sp
The Docker CLI has over 30 commands. The commands are listed below, and each has its own man page which explains usage and arguments.
.sp
To see the man page for a command run \fBman docker <command>\fR.
.SH "OPTIONS"
.TP
.B \-D=false:
Enable debug mode
.TP
.B \-H=[unix:///var/run/docker.sock]:
tcp://[host[:port]] to bind or unix://[/path/to/socket] to use.
When host=[0.0.0.0], port=[4243], or path=[/var/run/docker.sock] is omitted, default values are used.
.TP
.B \-\-api-enable-cors=false
Enable CORS headers in the remote API
.TP
.B \-b=""
Attach containers to a pre\-existing network bridge; use 'none' to disable container networking
.TP
.B \-\-bip=""
Use the provided CIDR notation address for the dynamically created bridge (docker0); Mutually exclusive of \-b
.TP
.B \-d=false
Enable daemon mode
.TP
.B \-\-dns=""
Force Docker to use specific DNS servers
.TP
.B \-g="/var/lib/docker"
Path to use as the root of the Docker runtime
.TP
.B \-\-icc=true
Enable inter\-container communication
.TP
.B \-\-ip="0.0.0.0"
Default IP address to use when binding container ports
.TP
.B \-\-iptables=true
Enable Docker's addition of iptables rules
.TP
.B \-\-mtu=1500
Set the containers' network MTU
.TP
.B \-p="/var/run/docker.pid"
Path to use for daemon PID file
.TP
.B \-r=true
Restart previously running containers
.TP
.B \-s=""
Force the Docker runtime to use a specific storage driver
.TP
.B \-v=false
Print version information and quit
.SH "COMMANDS"
.TP
.B attach
Attach to a running container
.TP
.B build
Build a container from a Dockerfile
.TP
.B commit
Create a new image from a container's changes
.TP
.B cp
Copy files/folders from the container's filesystem to the host at path
.TP
.B diff
Inspect changes on a container's filesystem
.TP
.B events
Get real time events from the server
.TP
.B export
Stream the contents of a container as a tar archive
.TP
.B history
Show the history of an image
.TP
.B images
List images
.TP
.B import
Create a new filesystem image from the contents of a tarball
.TP
.B info
Display system-wide information
.TP
.B insert
Insert a file in an image
.TP
.B inspect
Return low-level information on a container
.TP
.B kill
Kill a running container (which includes the wrapper process and everything inside it)
.TP
.B load
Load an image from a tar archive
.TP
.B login
Register or log in to a Docker registry server
.TP
.B logs
Fetch the logs of a container
.TP
.B port
Lookup the public-facing port which is NAT-ed to PRIVATE_PORT
.TP
.B ps
List containers
.TP
.B pull
Pull an image or a repository from a Docker registry server
.TP
.B push
Push an image or a repository to a Docker registry server
.TP
.B restart
Restart a running container
.TP
.B rm
Remove one or more containers
.TP
.B rmi
Remove one or more images
.TP
.B run
Run a command in a new container
.TP
.B save
Save an image to a tar archive
.TP
.B search
Search for an image in the Docker index
.TP
.B start
Start a stopped container
.TP
.B stop
Stop a running container
.TP
.B tag
Tag an image into a repository
.TP
.B top
Lookup the running processes of a container
.TP
.B version
Show the Docker version information
.TP
.B wait
Block until a container stops, then print its exit code
.SH EXAMPLES
.sp
For specific examples please see the man page for the specific Docker command.
.sp
.SH HISTORY
April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.

View File

@@ -1,92 +0,0 @@
#
# /etc/pacman.conf
#
# See the pacman.conf(5) manpage for option and repository directives
#
# GENERAL OPTIONS
#
[options]
# The following paths are commented out with their default values listed.
# If you wish to use different paths, uncomment and update the paths.
#RootDir = /
#DBPath = /var/lib/pacman/
#CacheDir = /var/cache/pacman/pkg/
#LogFile = /var/log/pacman.log
#GPGDir = /etc/pacman.d/gnupg/
HoldPkg = pacman glibc
#XferCommand = /usr/bin/curl -C - -f %u > %o
#XferCommand = /usr/bin/wget --passive-ftp -c -O %o %u
#CleanMethod = KeepInstalled
#UseDelta = 0.7
Architecture = auto
# Pacman won't upgrade packages listed in IgnorePkg and members of IgnoreGroup
#IgnorePkg =
#IgnoreGroup =
#NoUpgrade =
#NoExtract =
# Misc options
#UseSyslog
#Color
#TotalDownload
# We cannot check disk space from within a chroot environment
#CheckSpace
#VerbosePkgLists
# By default, pacman accepts packages signed by keys that its local keyring
# trusts (see pacman-key and its man page), as well as unsigned packages.
SigLevel = Required DatabaseOptional
LocalFileSigLevel = Optional
#RemoteFileSigLevel = Required
# NOTE: You must run `pacman-key --init` before first using pacman; the local
# keyring can then be populated with the keys of all official Arch Linux
# packagers with `pacman-key --populate archlinux`.
#
# REPOSITORIES
# - can be defined here or included from another file
# - pacman will search repositories in the order defined here
# - local/custom mirrors can be added here or in separate files
# - repositories listed first will take precedence when packages
# have identical names, regardless of version number
# - URLs will have $repo replaced by the name of the current repo
# - URLs will have $arch replaced by the name of the architecture
#
# Repository entries are of the format:
# [repo-name]
# Server = ServerName
# Include = IncludePath
#
# The header [repo-name] is crucial - it must be present and
# uncommented to enable the repo.
#
# The testing repositories are disabled by default. To enable, uncomment the
# repo name header and Include lines. You can add preferred servers immediately
# after the header, and they will be used before the default mirrors.
#[testing]
#Include = /etc/pacman.d/mirrorlist
[core]
Include = /etc/pacman.d/mirrorlist
[extra]
Include = /etc/pacman.d/mirrorlist
#[community-testing]
#Include = /etc/pacman.d/mirrorlist
[community]
Include = /etc/pacman.d/mirrorlist
# An example of a custom package repository. See the pacman manpage for
# tips on creating your own repositories.
#[custom]
#SigLevel = Optional TrustAll
#Server = file:///home/custompkgs
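This file is the pacman configuration consumed by the Arch image build script that follows: pacstrap's -C flag reads it in place of the host's /etc/pacman.conf. A minimal sketch of that invocation, assuming the file sits next to the script as mkimage-arch-pacman.conf:
# bootstrap the rootfs using this config rather than /etc/pacman.conf
pacstrap -C ./mkimage-arch-pacman.conf -c -d -G -i "$ROOTFS" base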


@@ -1,63 +0,0 @@
#!/usr/bin/env bash
# Generate a minimal filesystem for archlinux and load it into the local
# docker as "archlinux"
# requires root
set -e
hash pacstrap &>/dev/null || {
echo "Could not find pacstrap. Run pacman -S arch-install-scripts"
exit 1
}
hash expect &>/dev/null || {
echo "Could not find expect. Run pacman -S expect"
exit 1
}
ROOTFS=$(mktemp -d /tmp/rootfs-archlinux-XXXXXXXXXX)
chmod 755 $ROOTFS
# packages to ignore for space savings
PKGIGNORE=linux,jfsutils,lvm2,cryptsetup,groff,man-db,man-pages,mdadm,pciutils,pcmciautils,reiserfsprogs,s-nail,xfsprogs
expect <<EOF
set timeout 60
set send_slow {1 1}
spawn pacstrap -C ./mkimage-arch-pacman.conf -c -d -G -i $ROOTFS base haveged --ignore $PKGIGNORE
expect {
"Install anyway?" { send n\r; exp_continue }
"(default=all)" { send \r; exp_continue }
"Proceed with installation?" { send "\r"; exp_continue }
"skip the above package" {send "y\r"; exp_continue }
"checking" { exp_continue }
"loading" { exp_continue }
"installing" { exp_continue }
}
EOF
arch-chroot $ROOTFS /bin/sh -c "haveged -w 1024; pacman-key --init; pkill haveged; pacman -Rs --noconfirm haveged; pacman-key --populate archlinux"
arch-chroot $ROOTFS /bin/sh -c "ln -s /usr/share/zoneinfo/UTC /etc/localtime"
echo 'en_US.UTF-8 UTF-8' > $ROOTFS/etc/locale.gen
arch-chroot $ROOTFS locale-gen
arch-chroot $ROOTFS /bin/sh -c 'echo "Server = https://mirrors.kernel.org/archlinux/\$repo/os/\$arch" > /etc/pacman.d/mirrorlist'
# udev doesn't work in containers, rebuild /dev
DEV=$ROOTFS/dev
rm -rf $DEV
mkdir -p $DEV
mknod -m 666 $DEV/null c 1 3
mknod -m 666 $DEV/zero c 1 5
mknod -m 666 $DEV/random c 1 8
mknod -m 666 $DEV/urandom c 1 9
mkdir -m 755 $DEV/pts
mkdir -m 1777 $DEV/shm
mknod -m 666 $DEV/tty c 5 0
mknod -m 600 $DEV/console c 5 1
mknod -m 666 $DEV/tty0 c 4 0
mknod -m 666 $DEV/full c 1 7
mknod -m 600 $DEV/initctl p
mknod -m 666 $DEV/ptmx c 5 2
tar --numeric-owner -C $ROOTFS -c . | docker import - archlinux
docker run -i -t archlinux echo Success.
rm -rf $ROOTFS
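A usage sketch for the script above; the pacman package names come from its own dependency checks, root is required (pacstrap insists on it), and the interactive shell at the end assumes bash, which ships in Arch's base group:
# on an Arch host, as root:
pacman -S arch-install-scripts expect
./mkimage-arch.sh
docker run -i -t archlinux /bin/bash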


@@ -1,4 +1,4 @@
#!/usr/bin/env bash
#!/bin/bash
# Generate a very minimal filesystem based on busybox-static,
# and load it into the local docker under the name "busybox".
@@ -35,5 +35,5 @@ do
cp -a /dev/$X dev
done
tar --numeric-owner -cf- . | docker import - busybox
tar -cf- . | docker import - busybox
docker run -i -u root busybox /bin/echo Success.
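A usage sketch for the busybox image script whose diff appears above; busybox-static is assumed installed (the related mkimage-unittest.sh below checks for exactly that package), and root is assumed so the /dev entries can be copied:
sudo ./mkimage-busybox.sh
docker run -i busybox /bin/echo hello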


@@ -1,75 +0,0 @@
#!/usr/bin/env bash
# Generate a minimal filesystem for CRUX/Linux and load it into the local
# docker as "cruxlinux"
# requires root and the crux iso (http://crux.nu)
set -e
die () {
echo >&2 "$@"
exit 1
}
[ "$#" -eq 1 ] || die "1 argument(s) required, $# provided. Usage: ./mkimage-crux.sh /path/to/iso"
ISO=${1}
ROOTFS=$(mktemp -d /tmp/rootfs-crux-XXXXXXXXXX)
CRUX=$(mktemp -d /tmp/crux-XXXXXXXXXX)
TMP=$(mktemp -d /tmp/XXXXXXXXXX)
VERSION=$(basename --suffix=.iso $ISO | sed 's/[^0-9.]*\([0-9.]*\).*/\1/')
# Mount the ISO
mount -o ro,loop $ISO $CRUX
# Extract pkgutils
tar -C $TMP -xf $CRUX/tools/pkgutils#*.pkg.tar.gz
# Put pkgadd in the $PATH
export PATH="$TMP/usr/bin:$PATH"
# Install core packages
mkdir -p $ROOTFS/var/lib/pkg
touch $ROOTFS/var/lib/pkg/db
for pkg in $CRUX/crux/core/*; do
pkgadd -r $ROOTFS $pkg
done
# Remove agetty and inittab config
if (grep agetty ${ROOTFS}/etc/inittab 2>&1 > /dev/null); then
echo "Removing agetty from /etc/inittab ..."
chroot ${ROOTFS} sed -i -e "/agetty/d" /etc/inittab
chroot ${ROOTFS} sed -i -e "/shutdown/d" /etc/inittab
chroot ${ROOTFS} sed -i -e "/^$/N;/^\n$/d" /etc/inittab
fi
# Remove kernel source
rm -rf $ROOTFS/usr/src/*
# udev doesn't work in containers, rebuild /dev
DEV=$ROOTFS/dev
rm -rf $DEV
mkdir -p $DEV
mknod -m 666 $DEV/null c 1 3
mknod -m 666 $DEV/zero c 1 5
mknod -m 666 $DEV/random c 1 8
mknod -m 666 $DEV/urandom c 1 9
mkdir -m 755 $DEV/pts
mkdir -m 1777 $DEV/shm
mknod -m 666 $DEV/tty c 5 0
mknod -m 600 $DEV/console c 5 1
mknod -m 666 $DEV/tty0 c 4 0
mknod -m 666 $DEV/full c 1 7
mknod -m 600 $DEV/initctl p
mknod -m 666 $DEV/ptmx c 5 2
IMAGE_ID=$(tar --numeric-owner -C $ROOTFS -c . | docker import - crux:$VERSION)
docker tag $IMAGE_ID crux:latest
docker run -i -t crux echo Success.
# Cleanup
umount $CRUX
rm -rf $ROOTFS
rm -rf $CRUX
rm -rf $TMP
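A usage sketch matching the script's argument check; the ISO path is illustrative, and the image tag is taken from the version number embedded in the ISO file name:
# requires root and a CRUX ISO from http://crux.nu
sudo ./mkimage-crux.sh /path/to/crux-3.0.iso
docker run -i -t crux echo hello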


@@ -1,293 +0,0 @@
#!/usr/bin/env bash
set -e
variant='minbase'
include='iproute,iputils-ping'
arch='amd64' # intentionally undocumented for now
skipDetection=
strictDebootstrap=
justTar=
usage() {
echo >&2
echo >&2 "usage: $0 [options] repo suite [mirror]"
echo >&2
echo >&2 'options: (not recommended)'
echo >&2 " -p set an http_proxy for debootstrap"
echo >&2 " -v $variant # change default debootstrap variant"
echo >&2 " -i $include # change default package includes"
echo >&2 " -d # strict debootstrap (do not apply any docker-specific tweaks)"
echo >&2 " -s # skip version detection and tagging (ie, precise also tagged as 12.04)"
echo >&2 " # note that this will also skip adding universe and/or security/updates to sources.list"
echo >&2 " -t # just create a tarball, especially for dockerbrew (uses repo as tarball name)"
echo >&2
echo >&2 " ie: $0 username/debian squeeze"
echo >&2 " $0 username/debian squeeze http://ftp.uk.debian.org/debian/"
echo >&2
echo >&2 " ie: $0 username/ubuntu precise"
echo >&2 " $0 username/ubuntu precise http://mirrors.melbourne.co.uk/ubuntu/"
echo >&2
echo >&2 " ie: $0 -t precise.tar.bz2 precise"
echo >&2 " $0 -t wheezy.tgz wheezy"
echo >&2 " $0 -t wheezy-uk.tar.xz wheezy http://ftp.uk.debian.org/debian/"
echo >&2
}
# these should match the names found at http://www.debian.org/releases/
debianStable=wheezy
debianUnstable=sid
# this should match the name found at http://releases.ubuntu.com/
ubuntuLatestLTS=precise
# this should match the name found at http://releases.tanglu.org/
tangluLatest=aequorea
while getopts v:i:a:p:dst name; do
case "$name" in
p)
http_proxy="$OPTARG"
;;
v)
variant="$OPTARG"
;;
i)
include="$OPTARG"
;;
a)
arch="$OPTARG"
;;
d)
strictDebootstrap=1
;;
s)
skipDetection=1
;;
t)
justTar=1
;;
?)
usage
exit 0
;;
esac
done
shift $(($OPTIND - 1))
repo="$1"
suite="$2"
mirror="${3:-}" # stick to the default debootstrap mirror if one is not provided
if [ ! "$repo" ] || [ ! "$suite" ]; then
usage
exit 1
fi
# some rudimentary detection for whether we need to "sudo" our docker calls
docker=''
if docker version > /dev/null 2>&1; then
docker='docker'
elif sudo docker version > /dev/null 2>&1; then
docker='sudo docker'
elif command -v docker > /dev/null 2>&1; then
docker='docker'
else
echo >&2 "warning: either docker isn't installed, or your current user cannot run it;"
echo >&2 " this script is not likely to work as expected"
sleep 3
docker='docker' # give us a command-not-found later
fi
# make sure we have an absolute path to our final tarball so we can still reference it properly after we change directory
if [ "$justTar" ]; then
if [ ! -d "$(dirname "$repo")" ]; then
echo >&2 "error: $(dirname "$repo") does not exist"
exit 1
fi
repo="$(cd "$(dirname "$repo")" && pwd -P)/$(basename "$repo")"
fi
# will be filled in later, if [ -z "$skipDetection" ]
lsbDist=''
target="/tmp/docker-rootfs-debootstrap-$suite-$$-$RANDOM"
cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
returnTo="$(pwd -P)"
if [ "$suite" = 'lucid' ]; then
# lucid's minbase doesn't include gpgv, so "apt-get update" fails
include+=',gpgv'
fi
set -x
# bootstrap
mkdir -p "$target"
sudo http_proxy=$http_proxy debootstrap --verbose --variant="$variant" --include="$include" --arch="$arch" "$suite" "$target" "$mirror"
cd "$target"
if [ -z "$strictDebootstrap" ]; then
# prevent init scripts from running during install/update
# policy-rc.d (for most scripts)
echo $'#!/bin/sh\nexit 101' | sudo tee usr/sbin/policy-rc.d > /dev/null
sudo chmod +x usr/sbin/policy-rc.d
# initctl (for some pesky upstart scripts)
sudo chroot . dpkg-divert --local --rename --add /sbin/initctl
sudo ln -sf /bin/true sbin/initctl
# see https://github.com/dotcloud/docker/issues/446#issuecomment-16953173
# shrink the image, since apt makes us fat (wheezy: ~157.5MB vs ~120MB)
sudo chroot . apt-get clean
if strings usr/bin/dpkg | grep -q unsafe-io; then
# while we're at it, apt is unnecessarily slow inside containers
# this forces dpkg not to call sync() after package extraction and speeds up install
# the benefit is huge on spinning disks, and the penalty is nonexistent on SSD or decent server virtualization
echo 'force-unsafe-io' | sudo tee etc/dpkg/dpkg.cfg.d/02apt-speedup > /dev/null
# we have this wrapped up in an "if" because the "force-unsafe-io"
# option was added in dpkg 1.15.8.6
# (see http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=584254#82),
# and ubuntu lucid/10.04 only has 1.15.5.6
fi
# we want to effectively run "apt-get clean" after every install to keep images small (see output of "apt-get clean -s" for context)
{
aptGetClean='"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true";'
echo "DPkg::Post-Invoke { ${aptGetClean} };"
echo "APT::Update::Post-Invoke { ${aptGetClean} };"
echo 'Dir::Cache::pkgcache ""; Dir::Cache::srcpkgcache "";'
} | sudo tee etc/apt/apt.conf.d/no-cache > /dev/null
# and remove the translations, too
echo 'Acquire::Languages "none";' | sudo tee etc/apt/apt.conf.d/no-languages > /dev/null
# helpful undo lines for each the above tweaks (for lack of a better home to keep track of them):
# rm /usr/sbin/policy-rc.d
# rm /sbin/initctl; dpkg-divert --rename --remove /sbin/initctl
# rm /etc/dpkg/dpkg.cfg.d/02apt-speedup
# rm /etc/apt/apt.conf.d/no-cache
# rm /etc/apt/apt.conf.d/no-languages
if [ -z "$skipDetection" ]; then
# see also rudimentary platform detection in hack/install.sh
lsbDist=''
if [ -r etc/lsb-release ]; then
lsbDist="$(. etc/lsb-release && echo "$DISTRIB_ID")"
fi
if [ -z "$lsbDist" ] && [ -r etc/debian_version ]; then
lsbDist='Debian'
fi
case "$lsbDist" in
Debian)
# add the updates and security repositories
if [ "$suite" != "$debianUnstable" -a "$suite" != 'unstable' ]; then
# ${suite}-updates only applies to non-unstable
sudo sed -i "p; s/ $suite main$/ ${suite}-updates main/" etc/apt/sources.list
# same for security updates
echo "deb http://security.debian.org/ $suite/updates main" | sudo tee -a etc/apt/sources.list > /dev/null
fi
;;
Ubuntu)
# add the universe, updates, and security repositories
sudo sed -i "
s/ $suite main$/ $suite main universe/; p;
s/ $suite main/ ${suite}-updates main/; p;
s/ $suite-updates main/ ${suite}-security main/
" etc/apt/sources.list
;;
Tanglu)
# add the updates repository
if [ "$suite" = "$tangluLatest" ]; then
# ${suite}-updates only applies to stable Tanglu versions
sudo sed -i "p; s/ $suite main$/ ${suite}-updates main/" etc/apt/sources.list
fi
;;
SteamOS)
# add contrib and non-free
sudo sed -i "s/ $suite main$/ $suite main contrib non-free/" etc/apt/sources.list
;;
esac
fi
# make sure our packages lists are as up to date as we can get them
sudo chroot . apt-get update
sudo chroot . apt-get dist-upgrade -y
fi
if [ "$justTar" ]; then
# create the tarball file so it has the right permissions (ie, not root)
touch "$repo"
# fill the tarball
sudo tar --numeric-owner -caf "$repo" .
else
# create the image (and tag $repo:$suite)
sudo tar --numeric-owner -c . | $docker import - $repo:$suite
# test the image
$docker run -i -t $repo:$suite echo success
if [ -z "$skipDetection" ]; then
case "$lsbDist" in
Debian)
if [ "$suite" = "$debianStable" -o "$suite" = 'stable' ] && [ -r etc/debian_version ]; then
# tag latest
$docker tag $repo:$suite $repo:latest
if [ -r etc/debian_version ]; then
# tag the specific debian release version (which is only reasonable to tag on debian stable)
ver=$(cat etc/debian_version)
$docker tag $repo:$suite $repo:$ver
fi
fi
;;
Ubuntu)
if [ "$suite" = "$ubuntuLatestLTS" ]; then
# tag latest
$docker tag $repo:$suite $repo:latest
fi
if [ -r etc/lsb-release ]; then
lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")"
if [ "$lsbRelease" ]; then
# tag specific Ubuntu version number, if available (12.04, etc.)
$docker tag $repo:$suite $repo:$lsbRelease
fi
fi
;;
Tanglu)
if [ "$suite" = "$tangluLatest" ]; then
# tag latest
$docker tag $repo:$suite $repo:latest
fi
if [ -r etc/lsb-release ]; then
lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")"
if [ "$lsbRelease" ]; then
# tag specific Tanglu version number, if available (1.0, 2.0, etc.)
$docker tag $repo:$suite $repo:$lsbRelease
fi
fi
;;
SteamOS)
if [ -r etc/lsb-release ]; then
lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")"
if [ "$lsbRelease" ]; then
# tag specific SteamOS version number, if available (1.0, 2.0, etc.)
$docker tag $repo:$suite $repo:$lsbRelease
fi
fi
;;
esac
fi
fi
# cleanup
cd "$returnTo"
sudo rm -rf "$target"
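Invocation follows the usage text near the top of the script; two sketches with illustrative repo and tarball names (the script shells out to sudo itself where needed):
# build and import an image tagged username/ubuntu:precise
./mkimage-debootstrap.sh username/ubuntu precise
# or only produce a tarball, without importing anything
./mkimage-debootstrap.sh -t wheezy.tgz wheezy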


@@ -1,119 +0,0 @@
#!/usr/bin/env bash
#
# Create a base CentOS Docker image.
# This script is useful on systems with rinse available (e.g.,
# building a CentOS image on Debian). See contrib/mkimage-yum.sh for
# a way to build CentOS images on systems with yum installed.
set -e
repo="$1"
distro="$2"
mirror="$3"
if [ ! "$repo" ] || [ ! "$distro" ]; then
self="$(basename $0)"
echo >&2 "usage: $self repo distro [mirror]"
echo >&2
echo >&2 " ie: $self username/centos centos-5"
echo >&2 " $self username/centos centos-6"
echo >&2
echo >&2 " ie: $self username/slc slc-5"
echo >&2 " $self username/slc slc-6"
echo >&2
echo >&2 " ie: $self username/centos centos-5 http://vault.centos.org/5.8/os/x86_64/CentOS/"
echo >&2 " $self username/centos centos-6 http://vault.centos.org/6.3/os/x86_64/Packages/"
echo >&2
echo >&2 'See /etc/rinse for supported values of "distro" and for examples of'
echo >&2 ' expected values of "mirror".'
echo >&2
echo >&2 'This script is tested to work with the original upstream version of rinse,'
echo >&2 ' found at http://www.steve.org.uk/Software/rinse/ and also in Debian at'
echo >&2 ' http://packages.debian.org/wheezy/rinse -- as always, YMMV.'
echo >&2
exit 1
fi
target="/tmp/docker-rootfs-rinse-$distro-$$-$RANDOM"
cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
returnTo="$(pwd -P)"
rinseArgs=( --arch amd64 --distribution "$distro" --directory "$target" )
if [ "$mirror" ]; then
rinseArgs+=( --mirror "$mirror" )
fi
set -x
mkdir -p "$target"
sudo rinse "${rinseArgs[@]}"
cd "$target"
# rinse fails a little at setting up /dev, so we'll just wipe it out and create our own
sudo rm -rf dev
sudo mkdir -m 755 dev
(
cd dev
sudo ln -sf /proc/self/fd ./
sudo mkdir -m 755 pts
sudo mkdir -m 1777 shm
sudo mknod -m 600 console c 5 1
sudo mknod -m 600 initctl p
sudo mknod -m 666 full c 1 7
sudo mknod -m 666 null c 1 3
sudo mknod -m 666 ptmx c 5 2
sudo mknod -m 666 random c 1 8
sudo mknod -m 666 tty c 5 0
sudo mknod -m 666 tty0 c 4 0
sudo mknod -m 666 urandom c 1 9
sudo mknod -m 666 zero c 1 5
)
# effectively: febootstrap-minimize --keep-zoneinfo --keep-rpmdb --keep-services "$target"
# locales
sudo rm -rf usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive}
# docs
sudo rm -rf usr/share/{man,doc,info,gnome/help}
# cracklib
sudo rm -rf usr/share/cracklib
# i18n
sudo rm -rf usr/share/i18n
# yum cache
sudo rm -rf var/cache/yum
sudo mkdir -p --mode=0755 var/cache/yum
# sln
sudo rm -rf sbin/sln
# ldconfig
#sudo rm -rf sbin/ldconfig
sudo rm -rf etc/ld.so.cache var/cache/ldconfig
sudo mkdir -p --mode=0755 var/cache/ldconfig
# allow networking init scripts inside the container to work without extra steps
echo 'NETWORKING=yes' | sudo tee etc/sysconfig/network > /dev/null
# to restore locales later:
# yum reinstall glibc-common
version=
if [ -r etc/redhat-release ]; then
version="$(sed -E 's/^[^0-9.]*([0-9.]+).*$/\1/' etc/redhat-release)"
elif [ -r etc/SuSE-release ]; then
version="$(awk '/^VERSION/ { print $3 }' etc/SuSE-release)"
fi
if [ -z "$version" ]; then
echo >&2 "warning: cannot autodetect OS version, using $distro as tag"
sleep 20
version="$distro"
fi
sudo tar --numeric-owner -c . | docker import - $repo:$version
docker run -i -t $repo:$version echo success
cd "$returnTo"
sudo rm -rf "$target"
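A usage sketch taken from the script's own usage message; the repo name is illustrative, and the distro argument must be a value rinse knows about (see /etc/rinse). The script imports and smoke-tests the result itself, tagging it with the version detected from /etc/redhat-release (or the distro name if detection fails):
./mkimage-rinse.sh username/centos centos-6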


@@ -1,49 +0,0 @@
#!/usr/bin/env bash
# Generate a very minimal filesystem based on busybox-static,
# and load it into the local docker under the name "docker-ut".
missing_pkg() {
echo "Sorry, I could not locate $1"
echo "Try 'apt-get install ${2:-$1}'?"
exit 1
}
BUSYBOX=$(which busybox)
[ "$BUSYBOX" ] || missing_pkg busybox busybox-static
SOCAT=$(which socat)
[ "$SOCAT" ] || missing_pkg socat
shopt -s extglob
set -ex
ROOTFS=`mktemp -d /tmp/rootfs-busybox.XXXXXXXXXX`
trap "rm -rf $ROOTFS" INT QUIT TERM
cd $ROOTFS
mkdir bin etc dev dev/pts lib proc sys tmp
touch etc/resolv.conf
cp /etc/nsswitch.conf etc/nsswitch.conf
echo root:x:0:0:root:/:/bin/sh > etc/passwd
echo daemon:x:1:1:daemon:/usr/sbin:/bin/sh >> etc/passwd
echo root:x:0: > etc/group
echo daemon:x:1: >> etc/group
ln -s lib lib64
ln -s bin sbin
cp $BUSYBOX $SOCAT bin
for X in $(busybox --list)
do
ln -s busybox bin/$X
done
rm bin/init
ln bin/busybox bin/init
cp -P /lib/x86_64-linux-gnu/lib{pthread*,c*(-*),dl*(-*),nsl*(-*),nss_*,util*(-*),wrap,z}.so* lib
cp /lib/x86_64-linux-gnu/ld-linux-x86-64.so.2 lib
cp -P /usr/lib/x86_64-linux-gnu/lib{crypto,ssl}.so* lib
for X in console null ptmx random stdin stdout stderr tty urandom zero
do
cp -a /dev/$X dev
done
chmod 0755 $ROOTFS # See #486
tar --numeric-owner -cf- . | docker import - docker-ut
docker run -i -u root docker-ut /bin/echo Success.
rm -rf $ROOTFS
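A usage sketch; the package names come from the script's own missing_pkg hints (so a Debian/Ubuntu host is assumed), and root is assumed so the device nodes under /dev can be copied:
sudo apt-get install busybox-static socat
sudo ./mkimage-unittest.sh
docker run -i -u root docker-ut /bin/echo hello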


@@ -1,98 +0,0 @@
#!/usr/bin/env bash
#
# Create a base CentOS Docker image.
#
# This script is useful on systems with yum installed (e.g., building
# a CentOS image on CentOS). See contrib/mkimage-rinse.sh for a way
# to build CentOS images on other systems.
usage() {
cat <<EOOPTS
$(basename $0) [OPTIONS] <name>
OPTIONS:
-y <yumconf> The path to the yum config to install packages from. The
default is /etc/yum.conf.
EOOPTS
exit 1
}
# option defaults
yum_config=/etc/yum.conf
while getopts ":y:h" opt; do
case $opt in
y)
yum_config=$OPTARG
;;
h)
usage
;;
\?)
echo "Invalid option: -$OPTARG"
usage
;;
esac
done
shift $((OPTIND - 1))
name=$1
if [[ -z $name ]]; then
usage
fi
#--------------------
target=$(mktemp -d --tmpdir $(basename $0).XXXXXX)
set -x
mkdir -m 755 "$target"/dev
mknod -m 600 "$target"/dev/console c 5 1
mknod -m 600 "$target"/dev/initctl p
mknod -m 666 "$target"/dev/full c 1 7
mknod -m 666 "$target"/dev/null c 1 3
mknod -m 666 "$target"/dev/ptmx c 5 2
mknod -m 666 "$target"/dev/random c 1 8
mknod -m 666 "$target"/dev/tty c 5 0
mknod -m 666 "$target"/dev/tty0 c 4 0
mknod -m 666 "$target"/dev/urandom c 1 9
mknod -m 666 "$target"/dev/zero c 1 5
yum -c "$yum_config" --installroot="$target" --setopt=tsflags=nodocs \
--setopt=group_package_types=mandatory -y groupinstall Core
yum -c "$yum_config" --installroot="$target" -y clean all
cat > "$target"/etc/sysconfig/network <<EOF
NETWORKING=yes
HOSTNAME=localhost.localdomain
EOF
# effectively: febootstrap-minimize --keep-zoneinfo --keep-rpmdb
# --keep-services "$target". Stolen from mkimage-rinse.sh
# locales
rm -rf "$target"/usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive}
# docs
rm -rf "$target"/usr/share/{man,doc,info,gnome/help}
# cracklib
rm -rf "$target"/usr/share/cracklib
# i18n
rm -rf "$target"/usr/share/i18n
# sln
rm -rf "$target"/sbin/sln
# ldconfig
rm -rf "$target"/etc/ld.so.cache
rm -rf "$target"/var/cache/ldconfig/*
version=
if [ -r "$target"/etc/redhat-release ]; then
version="$(sed 's/^[^0-9\]*\([0-9.]\+\).*$/\1/' "$target"/etc/redhat-release)"
fi
if [ -z "$version" ]; then
echo >&2 "warning: cannot autodetect OS version, using '$name' as tag"
version=$name
fi
tar --numeric-owner -c -C "$target" . | docker import - $name:$version
docker run -i -t $name:$version echo success
rm -rf "$target"


@@ -1,77 +0,0 @@
#!/usr/bin/perl
#
# A simple helper script to help people build seccomp profiles for
# Docker/LXC. The goal is mostly to reduce the attack surface to the
# kernel, by restricting access to rarely used, recently added or not used
# syscalls.
#
# This script processes one or more files which contain the list of system
# calls to be allowed. See mkseccomp.sample for more information how you
# can configure the list of syscalls. When run, this script produces output
# which, when stored in a file, can be passed to docker as follows:
#
# docker run --lxc-conf="lxc.seccomp=$file" <rest of arguments>
#
# The included sample file shows how to cut about a quarter of all syscalls,
# without affecting most applications.
#
# For specific situations it is possible to reduce the list further. By
# reducing the list to just those syscalls required by a certain application
# you can make it difficult for unknown/unexpected code to run.
#
# Run this script as follows:
#
# ./mkseccomp.pl < mkseccomp.sample >syscalls.list
# or
# ./mkseccomp.pl mkseccomp.sample >syscalls.list
#
# Multiple files can be specified, in which case the lists of syscalls are
# combined.
#
# By Martijn van Oosterhout <kleptog@svana.org> Nov 2013
# How it works:
#
# This program basically spawns two processes to form a chain like:
#
# <process data section to prefix __NR_> | cpp | <add header and filter unknown syscalls>
use strict;
use warnings;
if( -t ) {
print STDERR "Helper script to make seccomp filters for Docker/LXC.\n";
print STDERR "Usage: mkseccomp.pl < [files...]\n";
exit 1;
}
my $pid = open(my $in, "-|") // die "Couldn't fork1 ($!)\n";
if($pid == 0) { # Child
$pid = open(my $out, "|-") // die "Couldn't fork2 ($!)\n";
if($pid == 0) { # Child, which execs cpp
exec "cpp" or die "Couldn't exec cpp ($!)\n";
exit 1;
}
# Process the DATA section and output to cpp
print $out "#include <sys/syscall.h>\n";
while(<>) {
if(/^\w/) {
print $out "__NR_$_";
}
}
close $out;
exit 0;
}
# Print header and then process output from cpp.
print "1\n";
print "whitelist\n";
while(<$in>) {
print if( /^[0-9]/ );
}
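A usage sketch assembled from the header comments above; mkseccomp.sample is the sample file those comments refer to, and the profile path, ubuntu image, and /bin/true are illustrative stand-ins:
./mkseccomp.pl mkseccomp.sample > syscalls.list
docker run --lxc-conf="lxc.seccomp=/path/to/syscalls.list" ubuntu /bin/true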

Some files were not shown because too many files have changed in this diff.