Mirror of https://github.com/moby/moby.git (synced 2026-01-13 11:42:02 +00:00)

Compare commits
7 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | 38b8373434 |  |
|  | 03b5f8a585 |  |
|  | bc260f0225 |  |
|  | 45dcd1125b |  |
|  | d2e063d9e1 |  |
|  | 567a484b66 |  |
|  | 5d4b886ad6 |  |
2 .dockerignore
@@ -1,2 +0,0 @@
bundles
.gopath
18 .gitignore (vendored)
@@ -1,6 +1,3 @@
# Docker project generated files to ignore
# if you want to ignore files created by your editor/tools,
# please consider a global .gitignore https://help.github.com/articles/ignoring-files
.vagrant*
bin
docker/docker
@@ -8,22 +5,13 @@ docker/docker
a.out
*.orig
build_src
command-line-arguments.test
.flymake*
docker.test
auth/auth.test
.idea
.DS_Store
docs/_build
docs/_static
docs/_templates
.gopath/
.dotcloud
*.test
bundles/
.hg/
.git/
vendor/pkg/
pyenv
Vagrantfile
docs/AWS_S3_BUCKET
docs/GIT_BRANCH
docs/VERSION
docs/GITCOMMIT
100 .mailmap
@@ -1,99 +1,19 @@
# Generate AUTHORS: hack/generate-authors.sh

# Tip for finding duplicates (besides scanning the output of AUTHORS for name
# duplicates that aren't also email duplicates): scan the output of:
#   git log --format='%aE - %aN' | sort -uf

<charles.hooper@dotcloud.com> <chooper@plumata.com>
# Generate AUTHORS: git log --all --format='%aN <%aE>' | sort -uf | grep -v vagrant-ubuntu-12
<charles.hooper@dotcloud.com> <chooper@plumata.com>
<daniel.mizyrycki@dotcloud.com> <daniel@dotcloud.com>
<daniel.mizyrycki@dotcloud.com> <mzdaniel@glidelink.net>
Guillaume J. Charmes <guillaume.charmes@docker.com> <charmes.guillaume@gmail.com>
<guillaume.charmes@docker.com> <guillaume@dotcloud.com>
<guillaume.charmes@docker.com> <guillaume@docker.com>
<guillaume.charmes@docker.com> <guillaume.charmes@dotcloud.com>
<guillaume.charmes@docker.com> <guillaume@charmes.net>
Guillaume J. Charmes <guillaume.charmes@dotcloud.com> creack <charmes.guillaume@gmail.com>
<guillaume.charmes@dotcloud.com> <guillaume@dotcloud.com>
<kencochrane@gmail.com> <KenCochrane@gmail.com>
Thatcher Peskens <thatcher@docker.com>
Thatcher Peskens <thatcher@docker.com> <thatcher@dotcloud.com>
Thatcher Peskens <thatcher@docker.com> dhrp <thatcher@gmx.net>
<sridharr@activestate.com> <github@srid.name>
Thatcher Peskens <thatcher@dotcloud.com> dhrp <thatcher@dotcloud.com>
Thatcher Peskens <thatcher@dotcloud.com> dhrp <thatcher@gmx.net>
Jérôme Petazzoni <jerome.petazzoni@dotcloud.com> jpetazzo <jerome.petazzoni@dotcloud.com>
Jérôme Petazzoni <jerome.petazzoni@dotcloud.com> <jp@enix.org>
Joffrey F <joffrey@docker.com>
Joffrey F <joffrey@docker.com> <joffrey@dotcloud.com>
Joffrey F <joffrey@docker.com> <f.joffrey@gmail.com>
Joffrey F <joffrey@dotcloud.com>
<joffrey@dotcloud.com> <f.joffrey@gmail.com>
Tim Terhorst <mynamewastaken+git@gmail.com>
Andy Smith <github@anarkystic.com>
<kalessin@kalessin.fr> <louis@dotcloud.com>
<victor.vieux@docker.com> <victor.vieux@dotcloud.com>
<victor.vieux@docker.com> <victor@dotcloud.com>
<victor.vieux@docker.com> <dev@vvieux.com>
<victor.vieux@docker.com> <victor@docker.com>
<victor.vieux@docker.com> <vieux@docker.com>
<victor.vieux@dotcloud.com> <victor@dotcloud.com>
<dominik@honnef.co> <dominikh@fork-bomb.org>
<ehanchrow@ine.com> <eric.hanchrow@gmail.com>
Walter Stanish <walter@pratyeka.org>
<daniel@gasienica.ch> <dgasienica@zynga.com>
Roberto Hashioka <roberto_hashioka@hotmail.com>
Konstantin Pelykh <kpelykh@zettaset.com>
David Sissitka <me@dsissitka.com>
Nolan Darilek <nolan@thewordnerd.info>
<mastahyeti@gmail.com> <mastahyeti@users.noreply.github.com>
Benoit Chesneau <bchesneau@gmail.com>
Jordan Arentsen <blissdev@gmail.com>
Daniel Garcia <daniel@danielgarcia.info>
Miguel Angel Fernández <elmendalerenda@gmail.com>
Bhiraj Butala <abhiraj.butala@gmail.com>
Faiz Khan <faizkhan00@gmail.com>
Victor Lyuboslavsky <victor@victoreda.com>
Jean-Baptiste Barth <jeanbaptiste.barth@gmail.com>
Matthew Mueller <mattmuelle@gmail.com>
<mosoni@ebay.com> <mohitsoni1989@gmail.com>
Shih-Yuan Lee <fourdollars@gmail.com>
Daniel Mizyrycki <daniel.mizyrycki@dotcloud.com> root <root@vagrant-ubuntu-12.10.vagrantup.com>
Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
<proppy@google.com> <proppy@aminche.com>
<michael@docker.com> <michael@crosbymichael.com>
<michael@docker.com> <crosby.michael@gmail.com>
<github@developersupport.net> <github@metaliveblog.com>
<brandon@ifup.org> <brandon@ifup.co>
<dano@spotify.com> <daniel.norberg@gmail.com>
<danny@codeaholics.org> <Danny.Yates@mailonline.co.uk>
<gurjeet@singh.im> <singh.gurjeet@gmail.com>
<shawn@churchofgit.com> <shawnlandden@gmail.com>
<sjoerd-github@linuxonly.nl> <sjoerd@byte.nl>
<solomon@docker.com> <solomon.hykes@dotcloud.com>
<solomon@docker.com> <solomon@dotcloud.com>
Sven Dowideit <SvenDowideit@home.org.au>
Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@fosiki.com>
Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@docker.com>
Sven Dowideit <SvenDowideit@home.org.au> <¨SvenDowideit@home.org.au¨>
unclejack <unclejacksons@gmail.com> <unclejack@users.noreply.github.com>
<alexl@redhat.com> <alexander.larsson@gmail.com>
Alexandr Morozov <lk4d4math@gmail.com>
<git.nivoc@neverbox.com> <kuehnle@online.de>
O.S. Tezer <ostezer@gmail.com>
<ostezer@gmail.com> <ostezer@users.noreply.github.com>
Roberto G. Hashioka <roberto.hashioka@docker.com> <roberto_hashioka@hotmail.com>
<justin.p.simonelis@gmail.com> <justin.simonelis@PTS-JSIMON2.toronto.exclamation.com>
<taim@bosboot.org> <maztaim@users.noreply.github.com>
<viktor.vojnovski@amadeus.com> <vojnovski@gmail.com>
<vbatts@redhat.com> <vbatts@hashbangbash.com>
<altsysrq@gmail.com> <iamironbob@gmail.com>
Sridhar Ratnakumar <sridharr@activestate.com>
Sridhar Ratnakumar <sridharr@activestate.com> <github@srid.name>
Liang-Chi Hsieh <viirya@gmail.com>
Aleksa Sarai <cyphar@cyphar.com>
Will Weaver <monkey@buildingbananas.com>
Timothy Hobbs <timothyhobbs@seznam.cz>
Nathan LeClaire <nathan.leclaire@docker.com> <nathan.leclaire@gmail.com>
Nathan LeClaire <nathan.leclaire@docker.com> <nathanleclaire@gmail.com>
<github@hollensbe.org> <erik+github@hollensbe.org>
<github@albersweb.de> <albers@users.noreply.github.com>
<lsm5@fedoraproject.org> <lsm5@redhat.com>
<marc@marc-abramowitz.com> <msabramo@gmail.com>
Matthew Heon <mheon@redhat.com> <mheon@mheonlaptop.redhat.com>
<bernat@luffy.cx> <vincent@bernat.im>
<p@pwaller.net> <peter@scraperwiki.com>
<andrew.weiss@outlook.com> <andrew.weiss@microsoft.com>
Francisco Carriedo <fcarriedo@gmail.com>
<julienbordellier@gmail.com> <git@julienbordellier.com>
30 .travis.yml
@@ -1,30 +0,0 @@
# Note: right now we don't use go-specific features of travis.
# Later we might automate "go test" etc. (or do it inside a docker container...?)

language: go

go:
# This should match the version in the Dockerfile.
  - 1.3.1
# Test against older versions too, just for a little extra retrocompat.
  - 1.2

# Let us have pretty experimental Docker-based Travis workers.
# (These spin up much faster than the VM-based ones.)
sudo: false

# Disable the normal go build.
install:
  - export DOCKER_BUILDTAGS='exclude_graphdriver_btrfs exclude_graphdriver_devicemapper' # btrfs and devicemapper fail to compile thanks to a couple missing headers (which we can't install thanks to "sudo: false")
  - export AUTO_GOPATH=1

before_script:
  - env | sort

script:
  - hack/make.sh validate-dco
  - hack/make.sh validate-gofmt
  - ./hack/make.sh dynbinary
  - DOCKER_CLIENTONLY=1 ./hack/make.sh dynbinary

# vim:set sw=2 ts=2:
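The `DOCKER_BUILDTAGS` export in the `.travis.yml` above relies on Go's build-constraint mechanism to drop the btrfs and devicemapper graph drivers at compile time. Here is a minimal, self-contained sketch of that mechanism; the file name and message are illustrative, not Docker's actual driver code:

```go
// tags_demo.go: compiles only when the exclude_graphdriver_btrfs tag is
// absent, mirroring how the DOCKER_BUILDTAGS values above drop a graph
// driver from the build. `go run tags_demo.go` prints the message below;
// building the same package with `-tags exclude_graphdriver_btrfs`
// excludes this file entirely, so its code is never compiled.

// +build !exclude_graphdriver_btrfs

package main

import "fmt"

func main() {
	// In Docker itself a file like this would register the btrfs
	// graphdriver from an init() function; printing keeps the sketch
	// self-contained and runnable.
	fmt.Println("btrfs graphdriver compiled in")
}
```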
566 AUTHORS
@@ -1,597 +1,45 @@
# This file lists all individuals having contributed content to the repository.
# For how it is generated, see `hack/generate-authors.sh`.

Aanand Prasad <aanand.prasad@gmail.com>
Aaron Feng <aaron.feng@gmail.com>
Aaron Huslage <huslage@gmail.com>
Abel Muiño <amuino@gmail.com>
Adam Miller <admiller@redhat.com>
Adam Singer <financeCoding@gmail.com>
Aditya <aditya@netroy.in>
Adrian Mouat <adrian.mouat@gmail.com>
Adrien Folie <folie.adrien@gmail.com>
AJ Bowen <aj@gandi.net>
Al Tobey <al@ooyala.com>
alambike <alambike@gmail.com>
Albert Zhang <zhgwenming@gmail.com>
Aleksa Sarai <cyphar@cyphar.com>
Alex Gaynor <alex.gaynor@gmail.com>
Alex Warhawk <ax.warhawk@gmail.com>
Alexander Larsson <alexl@redhat.com>
Alexander Shopov <ash@kambanaria.org>
Alexandr Morozov <lk4d4math@gmail.com>
Alexey Kotlyarov <alexey@infoxchange.net.au>
Alexey Shamrin <shamrin@gmail.com>
Alexis THOMAS <fr.alexisthomas@gmail.com>
almoehi <almoehi@users.noreply.github.com>
amangoel <amangoel@gmail.com>
AnandkumarPatel <anandkumarpatel@gmail.com>
Andre Dublin <81dublin@gmail.com>
Andrea Luzzardi <aluzzardi@gmail.com>
Andrea Turli <andrea.turli@gmail.com>
Andreas Savvides <andreas@editd.com>
Andreas Tiefenthaler <at@an-ti.eu>
Andrew Duckworth <grillopress@gmail.com>
Andrew France <andrew@avito.co.uk>
Andrew Macgregor <andrew.macgregor@agworld.com.au>
Andrew Munsell <andrew@wizardapps.net>
Andrew Weiss <andrew.weiss@outlook.com>
Andrew Williams <williams.andrew@gmail.com>
Andrews Medina <andrewsmedina@gmail.com>
Andy Chambers <anchambers@paypal.com>
andy diller <dillera@gmail.com>
Andy Goldstein <agoldste@redhat.com>
Andy Kipp <andy@rstudio.com>
Andy Rothfusz <github@developersupport.net>
Andy Rothfusz <github@metaliveblog.com>
Andy Smith <github@anarkystic.com>
Anthony Bishopric <git@anthonybishopric.com>
Anton Löfgren <anton.lofgren@gmail.com>
Anton Nikitin <anton.k.nikitin@gmail.com>
Antony Messerli <amesserl@rackspace.com>
apocas <petermdias@gmail.com>
Arnaud Porterie <icecrime@gmail.com>
Asbjørn Enge <asbjorn@hanafjedle.net>
Barnaby Gray <barnaby@pickle.me.uk>
Barry Allard <barry.allard@gmail.com>
Bartłomiej Piotrowski <b@bpiotrowski.pl>
bdevloed <boris.de.vloed@gmail.com>
Ben Firshman <ben@firshman.co.uk>
Ben Sargent <ben@brokendigits.com>
Ben Toews <mastahyeti@gmail.com>
Ben Wiklund <ben@daisyowl.com>
Benjamin Atkin <ben@benatkin.com>
Benoit Chesneau <bchesneau@gmail.com>
Bernerd Schaefer <bj.schaefer@gmail.com>
Bhiraj Butala <abhiraj.butala@gmail.com>
bin liu <liubin0329@users.noreply.github.com>
Bouke Haarsma <bouke@webatoom.nl>
Boyd Hemphill <boyd@feedmagnet.com>
Brandon Liu <bdon@bdon.org>
Brandon Philips <brandon@ifup.org>
Brandon Rhodes <brandon@rhodesmill.org>
Brett Kochendorfer <brett.kochendorfer@gmail.com>
Brian (bex) Exelbierd <bexelbie@redhat.com>
Brian Dorsey <brian@dorseys.org>
Brian Flad <bflad417@gmail.com>
Brian Goff <cpuguy83@gmail.com>
Brian McCallister <brianm@skife.org>
Brian Olsen <brian@maven-group.org>
Brian Shumate <brian@couchbase.com>
Brice Jaglin <bjaglin@teads.tv>
Briehan Lombaard <briehan.lombaard@gmail.com>
Bruno Bigras <bigras.bruno@gmail.com>
Bruno Renié <brutasse@gmail.com>
Bryan Bess <squarejaw@bsbess.com>
Bryan Matsuo <bryan.matsuo@gmail.com>
Bryan Murphy <bmurphy1976@gmail.com>
Caleb Spare <cespare@gmail.com>
Calen Pennington <cale@edx.org>
Cameron Boehmer <cameron.boehmer@gmail.com>
Carl X. Su <bcbcarl@gmail.com>
Charles Hooper <charles.hooper@dotcloud.com>
Charles Lindsay <chaz@chazomatic.us>
Charles Merriam <charles.merriam@gmail.com>
Charlie Lewis <charliel@lab41.org>
Chewey <prosto-chewey@users.noreply.github.com>
Chia-liang Kao <clkao@clkao.org>
Chris Alfonso <calfonso@redhat.com>
Chris Snow <chsnow123@gmail.com>
Chris St. Pierre <chris.a.st.pierre@gmail.com>
chrismckinnel <chris.mckinnel@tangentlabs.co.uk>
Christian Berendt <berendt@b1-systems.de>
ChristoperBiscardi <biscarch@sketcht.com>
Christophe Troestler <christophe.Troestler@umons.ac.be>
Christopher Currie <codemonkey+github@gmail.com>
Christopher Rigor <crigor@gmail.com>
Ciro S. Costa <ciro.costa@usp.br>
Clayton Coleman <ccoleman@redhat.com>
Colin Dunklau <colin.dunklau@gmail.com>
Colin Rice <colin@daedrum.net>
Colin Walters <walters@verbum.org>
Cory Forsyth <cory.forsyth@gmail.com>
cpuguy83 <cpuguy83@gmail.com>
cressie176 <github@stephen-cresswell.net>
Cruceru Calin-Cristian <crucerucalincristian@gmail.com>
Daan van Berkel <daan.v.berkel.1980@gmail.com>
Dafydd Crosby <dtcrsby@gmail.com>
Dan Buch <d.buch@modcloth.com>
Dan Hirsch <thequux@upstandinghackers.com>
Dan Keder <dan.keder@gmail.com>
Dan McPherson <dmcphers@redhat.com>
Dan Stine <sw@stinemail.com>
Dan Walsh <dwalsh@redhat.com>
Dan Williams <me@deedubs.com>
Daniel Exner <dex@dragonslave.de>
Daniel Garcia <daniel@danielgarcia.info>
Daniel Gasienica <daniel@gasienica.ch>
Daniel Mizyrycki <daniel.mizyrycki@dotcloud.com>
Daniel Norberg <dano@spotify.com>
Daniel Nordberg <dnordberg@gmail.com>
Daniel Robinson <gottagetmac@gmail.com>
Daniel Von Fange <daniel@leancoder.com>
Daniel YC Lin <dlin.tw@gmail.com>
Daniel, Dao Quang Minh <dqminh89@gmail.com>
Danny Berger <dpb587@gmail.com>
Danny Yates <danny@codeaholics.org>
Darren Coxall <darren@darrencoxall.com>
Darren Shepherd <darren.s.shepherd@gmail.com>
David Anderson <dave@natulte.net>
David Calavera <david.calavera@gmail.com>
David Corking <dmc-source@dcorking.com>
David Gageot <david@gageot.net>
David Mcanulty <github@hellspark.com>
David Röthlisberger <david@rothlis.net>
David Sissitka <me@dsissitka.com>
Deni Bertovic <deni@kset.org>
Derek <crq@kernel.org>
Deric Crago <deric.crago@gmail.com>
Dinesh Subhraveti <dineshs@altiscale.com>
Djibril Koné <kone.djibril@gmail.com>
dkumor <daniel@dkumor.com>
Dmitry Demeshchuk <demeshchuk@gmail.com>
Dolph Mathews <dolph.mathews@gmail.com>
Dominik Honnef <dominik@honnef.co>
Don Spaulding <donspauldingii@gmail.com>
Doug Davis <dug@us.ibm.com>
doug tangren <d.tangren@gmail.com>
Dr Nic Williams <drnicwilliams@gmail.com>
Dražen Lučanin <kermit666@gmail.com>
Dustin Sallings <dustin@spy.net>
Edmund Wagner <edmund-wagner@web.de>
Eiichi Tsukata <devel@etsukata.com>
Eivind Uggedal <eivind@uggedal.com>
Elias Probst <mail@eliasprobst.eu>
Emil Hernvall <emil@quench.at>
Emily Rose <emily@contactvibe.com>
Eric Hanchrow <ehanchrow@ine.com>
Eric Lee <thenorthsecedes@gmail.com>
Eric Myhre <hash@exultant.us>
Eric Windisch <eric@windisch.us>
Eric Windisch <ewindisch@docker.com>
Erik Hollensbe <github@hollensbe.org>
Erik Inge Bolsø <knan@redpill-linpro.com>
Erno Hopearuoho <erno.hopearuoho@gmail.com>
eugenkrizo <eugen.krizo@gmail.com>
Evan Hazlett <ejhazlett@gmail.com>
Evan Krall <krall@yelp.com>
Evan Phoenix <evan@fallingsnow.net>
Evan Wies <evan@neomantra.net>
evanderkoogh <info@erronis.nl>
Eystein Måløy Stenberg <eystein.maloy.stenberg@cfengine.com>
ezbercih <cem.ezberci@gmail.com>
Fabio Falci <fabiofalci@gmail.com>
Fabio Rehm <fgrehm@gmail.com>
Fabrizio Regini <freegenie@gmail.com>
Faiz Khan <faizkhan00@gmail.com>
Fareed Dudhia <fareeddudhia@googlemail.com>
Felix Rabe <felix@rabe.io>
Fernando <fermayo@gmail.com>
Flavio Castelli <fcastelli@suse.com>
FLGMwt <ryan.stelly@live.com>
Francisco Carriedo <fcarriedo@gmail.com>
Francisco Souza <f@souza.cc>
Frank Macreery <frank@macreery.com>
Fred Lifton <fred.lifton@docker.com>
Frederick F. Kautz IV <fkautz@alumni.cmu.edu>
Frederik Loeffert <frederik@zitrusmedia.de>
Freek Kalter <freek@kalteronline.org>
Gabe Rosenhouse <gabe@missionst.com>
Gabor Nagy <mail@aigeruth.hu>
Gabriel Monroy <gabriel@opdemand.com>
Galen Sampson <galen.sampson@gmail.com>
Gareth Rushgrove <gareth@morethanseven.net>
Geoffrey Bachelet <grosfrais@gmail.com>
Gereon Frey <gereon.frey@dynport.de>
German DZ <germ@ndz.com.ar>
Gert van Valkenhoef <g.h.m.van.valkenhoef@rug.nl>
Giuseppe Mazzotta <gdm85@users.noreply.github.com>
Gleb Fotengauer-Malinovskiy <glebfm@altlinux.org>
Glyn Normington <gnormington@gopivotal.com>
Goffert van Gool <goffert@phusion.nl>
Graydon Hoare <graydon@pobox.com>
Greg Thornton <xdissent@me.com>
grunny <mwgrunny@gmail.com>
Guilherme Salgado <gsalgado@gmail.com>
Guillaume J. Charmes <guillaume.charmes@docker.com>
Gurjeet Singh <gurjeet@singh.im>
Guruprasad <lgp171188@gmail.com>
Harald Albers <github@albersweb.de>
Harley Laue <losinggeneration@gmail.com>
Hector Castro <hectcastro@gmail.com>
Henning Sprang <henning.sprang@gmail.com>
Hobofan <goisser94@gmail.com>
Hollie Teal <hollie.teal@docker.com>
Hollie Teal <hollietealok@users.noreply.github.com>
hollietealok <hollie@docker.com>
Guillaume J. Charmes <guillaume.charmes@dotcloud.com>
Hunter Blanks <hunter@twilio.com>
hyeongkyu.lee <hyeongkyu.lee@navercorp.com>
Ian Babrou <ibobrik@gmail.com>
Ian Bull <irbull@gmail.com>
Ian Main <imain@redhat.com>
Ian Truslove <ian.truslove@gmail.com>
ILYA Khlopotov <ilya.khlopotov@gmail.com>
inglesp <peter.inglesby@gmail.com>
Isaac Dupree <antispam@idupree.com>
Isabel Jimenez <contact.isabeljimenez@gmail.com>
Isao Jonas <isao.jonas@gmail.com>
Ivan Fraixedes <ifcdev@gmail.com>
Jack Danger Canty <jackdanger@squareup.com>
Jake Moshenko <jake@devtable.com>
jakedt <jake@devtable.com>
James Allen <jamesallen0108@gmail.com>
James Carr <james.r.carr@gmail.com>
James DeFelice <james.defelice@ishisystems.com>
James Harrison Fisher <jameshfisher@gmail.com>
James Kyle <james@jameskyle.org>
James Mills <prologic@shortcircuit.net.au>
James Turnbull <james@lovedthanlost.net>
Jan Pazdziora <jpazdziora@redhat.com>
Jan Toebes <jan@toebes.info>
Jaroslaw Zabiello <hipertracker@gmail.com>
jaseg <jaseg@jaseg.net>
Jason Giedymin <jasong@apache.org>
Jason Hall <imjasonh@gmail.com>
Jason Livesay <ithkuil@gmail.com>
Jason McVetta <jason.mcvetta@gmail.com>
Jason Plum <jplum@devonit.com>
Jean-Baptiste Barth <jeanbaptiste.barth@gmail.com>
Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
Jeff Lindsay <progrium@gmail.com>
Jeff Welch <whatthejeff@gmail.com>
Jeffrey Bolle <jeffreybolle@gmail.com>
Jeremy Grosser <jeremy@synack.me>
Jesse Dubay <jesse@thefortytwo.net>
Jezeniel Zapanta <jpzapanta22@gmail.com>
Jilles Oldenbeuving <ojilles@gmail.com>
Jim Alateras <jima@comware.com.au>
Jim Perrin <jperrin@centos.org>
Jimmy Cuadra <jimmy@jimmycuadra.com>
Jiří Župka <jzupka@redhat.com>
Joe Beda <joe.github@bedafamily.com>
Joe Shaw <joe@joeshaw.org>
Joe Van Dyk <joe@tanga.com>
Joel Handwell <joelhandwell@gmail.com>
Joffrey F <joffrey@docker.com>
Johan Euphrosine <proppy@google.com>
Johan Rydberg <johan.rydberg@gmail.com>
Johannes 'fish' Ziemke <github@freigeist.org>
Joffrey F <joffrey@dotcloud.com>
John Costa <john.costa@gmail.com>
John Feminella <jxf@jxf.me>
John Gardiner Myers <jgmyers@proofpoint.com>
John OBrien III <jobrieniii@yahoo.com>
John Warwick <jwarwick@gmail.com>
Jon Wedaman <jweede@gmail.com>
Jonas Pfenniger <jonas@pfenniger.name>
Jonathan Boulle <jonathanboulle@gmail.com>
Jonathan Camp <jonathan@irondojo.com>
Jonathan McCrohan <jmccrohan@gmail.com>
Jonathan Mueller <j.mueller@apoveda.ch>
Jonathan Pares <jonathanpa@users.noreply.github.com>
Jonathan Rudenberg <jonathan@titanous.com>
Joost Cassee <joost@cassee.net>
Jordan Arentsen <blissdev@gmail.com>
Jordan Sissel <jls@semicomplete.com>
Joseph Anthony Pasquale Holsten <joseph@josephholsten.com>
Joseph Hager <ajhager@gmail.com>
Josh <jokajak@gmail.com>
Josh Hawn <josh.hawn@docker.com>
Josh Poimboeuf <jpoimboe@redhat.com>
JP <jpellerin@leapfrogonline.com>
Julien Barbier <write0@gmail.com>
Julien Bordellier <julienbordellier@gmail.com>
Julien Dubois <julien.dubois@gmail.com>
Justin Force <justin.force@gmail.com>
Justin Plock <jplock@users.noreply.github.com>
Justin Simonelis <justin.p.simonelis@gmail.com>
Jérôme Petazzoni <jerome.petazzoni@dotcloud.com>
Karan Lyons <karan@karanlyons.com>
Karl Grzeszczak <karlgrz@gmail.com>
Kato Kazuyoshi <kato.kazuyoshi@gmail.com>
Kawsar Saiyeed <kawsar.saiyeed@projiris.com>
Keli Hu <dev@keli.hu>
Ken Cochrane <kencochrane@gmail.com>
Ken ICHIKAWA <ichikawa.ken@jp.fujitsu.com>
Kevin "qwazerty" Houdebert <kevin.houdebert@gmail.com>
Kevin Clark <kevin.clark@gmail.com>
Kevin J. Lynagh <kevin@keminglabs.com>
Kevin Menard <kevin@nirvdrum.com>
Kevin Wallace <kevin@pentabarf.net>
Keyvan Fatehi <keyvanfatehi@gmail.com>
kies <lleelm@gmail.com>
Kim BKC Carlbacker <kim.carlbacker@gmail.com>
kim0 <email.ahmedkamal@googlemail.com>
Kimbro Staken <kstaken@kstaken.com>
Kiran Gangadharan <kiran.daredevil@gmail.com>
knappe <tyler.knappe@gmail.com>
Kohei Tsuruta <coheyxyz@gmail.com>
Konstantin Pelykh <kpelykh@zettaset.com>
Kyle Conroy <kyle.j.conroy@gmail.com>
kyu <leehk1227@gmail.com>
Lachlan Coote <lcoote@vmware.com>
lalyos <lalyos@yahoo.com>
Lance Chen <cyen0312@gmail.com>
Lars R. Damerow <lars@pixar.com>
Laurie Voss <github@seldo.com>
leeplay <hyeongkyu.lee@navercorp.com>
Len Weincier <len@cloudafrica.net>
Levi Gross <levi@levigross.com>
Lewis Peckover <lew+github@lew.io>
Liang-Chi Hsieh <viirya@gmail.com>
Lokesh Mandvekar <lsm5@fedoraproject.org>
Louis Opter <kalessin@kalessin.fr>
lukaspustina <lukas.pustina@centerdevice.com>
lukemarsden <luke@digital-crocus.com>
Mahesh Tiyyagura <tmahesh@gmail.com>
Manfred Zabarauskas <manfredas@zabarauskas.com>
Manuel Meurer <manuel@krautcomputing.com>
Manuel Woelker <github@manuel.woelker.org>
Marc Abramowitz <marc@marc-abramowitz.com>
Marc Kuo <kuomarc2@gmail.com>
Marc Tamsky <mtamsky@gmail.com>
Marco Hennings <marco.hennings@freiheit.com>
Marcus Farkas <toothlessgear@finitebox.com>
Marcus Ramberg <marcus@nordaaker.com>
marcuslinke <marcus.linke@gmx.de>
Marek Goldmann <marek.goldmann@gmail.com>
Marius Voila <marius.voila@gmail.com>
Mark Allen <mrallen1@yahoo.com>
Mark McGranaghan <mmcgrana@gmail.com>
Marko Mikulicic <mmikulicic@gmail.com>
Markus Fix <lispmeister@gmail.com>
Martijn van Oosterhout <kleptog@svana.org>
Martin Redmond <martin@tinychat.com>
Mason Malone <mason.malone@gmail.com>
Mateusz Sulima <sulima.mateusz@gmail.com>
Mathieu Le Marec - Pasquet <kiorky@cryptelium.net>
Matt Apperson <me@mattapperson.com>
Matt Bachmann <bachmann.matt@gmail.com>
Matt Haggard <haggardii@gmail.com>
Matthew Heon <mheon@redhat.com>
Matthew Mueller <mattmuelle@gmail.com>
Matthias Klumpp <matthias@tenstral.net>
Matthias Kühnle <git.nivoc@neverbox.com>
mattymo <raytrac3r@gmail.com>
mattyw <mattyw@me.com>
Max Shytikov <mshytikov@gmail.com>
Maxim Treskin <zerthurd@gmail.com>
Maxime Petazzoni <max@signalfuse.com>
meejah <meejah@meejah.ca>
Michael Brown <michael@netdirect.ca>
Michael Crosby <michael@docker.com>
Michael Gorsuch <gorsuch@github.com>
Michael Neale <michael.neale@gmail.com>
Michael Prokop <github@michael-prokop.at>
Michael Stapelberg <michael+gh@stapelberg.de>
Michaël Pailloncy <mpapo.dev@gmail.com>
Michiel@unhosted <michiel@unhosted.org>
Miguel Angel Fernández <elmendalerenda@gmail.com>
Mike Chelen <michael.chelen@gmail.com>
Mike Gaffney <mike@uberu.com>
Mike MacCana <mike.maccana@gmail.com>
Mike Naberezny <mike@naberezny.com>
Mike Snitzer <snitzer@redhat.com>
Mikhail Sobolev <mss@mawhrin.net>
Mohit Soni <mosoni@ebay.com>
Morgante Pell <morgante.pell@morgante.net>
Morten Siebuhr <sbhr@sbhr.dk>
Mrunal Patel <mrunalp@gmail.com>
Nan Monnand Deng <monnand@gmail.com>
Naoki Orii <norii@cs.cmu.edu>
Nate Jones <nate@endot.org>
Nathan Kleyn <nathan@nathankleyn.com>
Nathan LeClaire <nathan.leclaire@docker.com>
Nelson Chen <crazysim@gmail.com>
Niall O'Higgins <niallo@unworkable.org>
Nick Payne <nick@kurai.co.uk>
Nick Stenning <nick.stenning@digital.cabinet-office.gov.uk>
Nick Stinemates <nick@stinemates.org>
Nicolas Dudebout <nicolas.dudebout@gatech.edu>
Nicolas Kaiser <nikai@nikai.net>
NikolaMandic <mn080202@gmail.com>
noducks <onemannoducks@gmail.com>
Nolan Darilek <nolan@thewordnerd.info>
O.S. Tezer <ostezer@gmail.com>
OddBloke <daniel@daniel-watkins.co.uk>
odk- <github@odkurzacz.org>
Oguz Bilgic <fisyonet@gmail.com>
Ole Reifschneider <mail@ole-reifschneider.de>
Olivier Gambier <dmp42@users.noreply.github.com>
pandrew <letters@paulnotcom.se>
Pascal Borreli <pascal@borreli.com>
Patrick Hemmer <patrick.hemmer@gmail.com>
pattichen <craftsbear@gmail.com>
Paul <paul9869@gmail.com>
Paul Annesley <paul@annesley.cc>
Paul Bowsher <pbowsher@globalpersonals.co.uk>
Paul Hammond <paul@paulhammond.org>
Paul Jimenez <pj@place.org>
Paul Lietar <paul@lietar.net>
Paul Morie <pmorie@gmail.com>
Paul Nasrat <pnasrat@gmail.com>
Paul Weaver <pauweave@cisco.com>
Peter Bourgon <peter@bourgon.org>
Peter Braden <peterbraden@peterbraden.co.uk>
Peter Waller <p@pwaller.net>
Phil <underscorephil@gmail.com>
Phil Spitler <pspitler@gmail.com>
Phillip Alexander <git@phillipalexander.io>
Piergiuliano Bossi <pgbossi@gmail.com>
Pierre-Alain RIVIERE <pariviere@ippon.fr>
Piotr Bogdan <ppbogdan@gmail.com>
pysqz <randomq@126.com>
Quentin Brossard <qbrossard@gmail.com>
r0n22 <cameron.regan@gmail.com>
Rafal Jeczalik <rjeczalik@gmail.com>
Rajat Pandit <rp@rajatpandit.com>
Rajdeep Dua <dua_rajdeep@yahoo.com>
Ralph Bean <rbean@redhat.com>
Ramkumar Ramachandra <artagnon@gmail.com>
Ramon van Alteren <ramon@vanalteren.nl>
Renato Riccieri Santos Zannon <renato.riccieri@gmail.com>
rgstephens <greg@udon.org>
Rhys Hiltner <rhys@twitch.tv>
Richard Harvey <richard@squarecows.com>
Richo Healey <richo@psych0tik.net>
Rick Bradley <rick@users.noreply.github.com>
Rick van de Loo <rickvandeloo@gmail.com>
Robert Bachmann <rb@robertbachmann.at>
Robert Obryk <robryk@gmail.com>
Roberto G. Hashioka <roberto.hashioka@docker.com>
Robin Speekenbrink <robin@kingsquare.nl>
robpc <rpcann@gmail.com>
Rodrigo Vaz <rodrigo.vaz@gmail.com>
Roel Van Nyen <roel.vannyen@gmail.com>
Roger Peppe <rogpeppe@gmail.com>
Rohit Jnagal <jnagal@google.com>
Roland Huß <roland@jolokia.org>
Roland Moriz <rmoriz@users.noreply.github.com>
Ron Smits <ron.smits@gmail.com>
Rovanion Luckey <rovanion.luckey@gmail.com>
Rudolph Gottesheim <r.gottesheim@loot.at>
Ryan Anderson <anderson.ryanc@gmail.com>
Ryan Aslett <github@mixologic.com>
Ryan Fowler <rwfowler@gmail.com>
Ryan O'Donnell <odonnellryanc@gmail.com>
Ryan Seto <ryanseto@yak.net>
Ryan Thomas <rthomas@atlassian.com>
s-ko <aleks@s-ko.net>
Sam Alba <sam.alba@gmail.com>
Sam Bailey <cyprix@cyprix.com.au>
Sam J Sharpe <sam.sharpe@digital.cabinet-office.gov.uk>
Sam Reis <sreis@atlassian.com>
Sam Rijs <srijs@airpost.net>
Samuel Andaya <samuel@andaya.net>
satoru <satorulogic@gmail.com>
Satoshi Amemiya <satoshi_amemiya@voyagegroup.com>
Scott Bessler <scottbessler@gmail.com>
Scott Collier <emailscottcollier@gmail.com>
Sean Cronin <seancron@gmail.com>
Sean P. Kane <skane@newrelic.com>
Sebastiaan van Stijn <github@gone.nl>
Sebastiaan van Stijn <thaJeztah@users.noreply.github.com>
Senthil Kumar Selvaraj <senthil.thecoder@gmail.com>
SeongJae Park <sj38.park@gmail.com>
Shane Canon <scanon@lbl.gov>
shaunol <shaunol@gmail.com>
Shawn Landden <shawn@churchofgit.com>
Shawn Siefkas <shawn.siefkas@meredith.com>
Shih-Yuan Lee <fourdollars@gmail.com>
Silas Sewell <silas@sewell.org>
Simon Taranto <simon.taranto@gmail.com>
Sindhu S <sindhus@live.in>
Sjoerd Langkemper <sjoerd-github@linuxonly.nl>
Solomon Hykes <solomon@docker.com>
Song Gao <song@gao.io>
Soulou <leo@unbekandt.eu>
soulshake <amy@gandi.net>
Sridatta Thatipamala <sthatipamala@gmail.com>
Solomon Hykes <solomon@dotcloud.com>
Sridhar Ratnakumar <sridharr@activestate.com>
Steeve Morin <steeve.morin@gmail.com>
Stefan Praszalowicz <stefan@greplin.com>
Stephen Crosby <stevecrozz@gmail.com>
Steven Burgess <steven.a.burgess@hotmail.com>
sudosurootdev <sudosurootdev@gmail.com>
Sven Dowideit <svendowideit@home.org.au>
Sylvain Bellemare <sylvain.bellemare@ezeep.com>
Sébastien <sebastien@yoozio.com>
Sébastien Luttringer <seblu@seblu.net>
Sébastien Stormacq <sebsto@users.noreply.github.com>
tang0th <tang0th@gmx.com>
Tatsuki Sugiura <sugi@nemui.org>
Tehmasp Chaudhri <tehmasp@gmail.com>
Thatcher Peskens <thatcher@docker.com>
Thermionix <bond711@gmail.com>
Thijs Terlouw <thijsterlouw@gmail.com>
Thomas Bikeev <thomas.bikeev@mac.com>
Thomas Frössman <thomasf@jossystem.se>
Thomas Hansen <thomas.hansen@gmail.com>
Thomas LEVEIL <thomasleveil@gmail.com>
Thomas Schroeter <thomas@cliqz.com>
Tianon Gravi <admwiggin@gmail.com>
Tibor Vass <teabee89@gmail.com>
Tim Bosse <taim@bosboot.org>
Tim Ruffles <oi@truffles.me.uk>
Tim Ruffles <timruffles@googlemail.com>
Thatcher Peskens <thatcher@dotcloud.com>
Tim Terhorst <mynamewastaken+git@gmail.com>
Timothy Hobbs <timothyhobbs@seznam.cz>
tjmehta <tj@init.me>
Tobias Bieniek <Tobias.Bieniek@gmx.de>
Tobias Gesellchen <tobias@gesellix.de>
Tobias Schmidt <ts@soundcloud.com>
Tobias Schwab <tobias.schwab@dynport.de>
Todd Lunter <tlunter@gmail.com>
Tom Fotherby <tom+github@peopleperhour.com>
Tom Hulihan <hulihan.tom159@gmail.com>
Tom Maaswinkel <tom.maaswinkel@12wiki.eu>
Tommaso Visconti <tommaso.visconti@gmail.com>
Tony Daws <tony@daws.ca>
tpng <benny.tpng@gmail.com>
Travis Cline <travis.cline@gmail.com>
Trent Ogren <tedwardo2@gmail.com>
Tyler Brock <tyler.brock@gmail.com>
Tzu-Jung Lee <roylee17@gmail.com>
Ulysse Carion <ulyssecarion@gmail.com>
Troy Howard <thoward37@gmail.com>
unclejack <unclejacksons@gmail.com>
vgeta <gopikannan.venugopalsamy@gmail.com>
Victor Coisne <victor.coisne@dotcloud.com>
Victor Lyuboslavsky <victor@victoreda.com>
Victor Marmol <vmarmol@google.com>
Victor Vieux <victor.vieux@docker.com>
Viktor Vojnovski <viktor.vojnovski@amadeus.com>
Vincent Batts <vbatts@redhat.com>
Vincent Bernat <bernat@luffy.cx>
Vincent Mayers <vincent.mayers@inbloom.org>
Vincent Woo <me@vincentwoo.com>
Vinod Kulkarni <vinod.kulkarni@gmail.com>
Vishnu Kannan <vishnuk@google.com>
Vitor Monteiro <vmrmonteiro@gmail.com>
Victor Vieux <victor.vieux@dotcloud.com>
Vivek Agarwal <me@vivek.im>
Vladimir Bulyga <xx@ccxx.cc>
Vladimir Kirillov <proger@wilab.org.ua>
Vladimir Rutsky <altsysrq@gmail.com>
waitingkuo <waitingkuo0527@gmail.com>
Walter Leibbrandt <github@wrl.co.za>
Walter Stanish <walter@pratyeka.org>
WarheadsSE <max@warheads.net>
Wes Morgan <cap10morgan@gmail.com>
Will Dietz <w@wdtz.org>
Will Rouesnel <w.rouesnel@gmail.com>
Will Weaver <monkey@buildingbananas.com>
William Delanoue <william.delanoue@gmail.com>
William Henry <whenry@redhat.com>
William Riancho <wr.wllm@gmail.com>
William Thurston <thurstw@amazon.com>
wyc <wayne@neverfear.org>
Xiuming Chen <cc@cxm.cc>
Yang Bai <hamo.by@gmail.com>
Yasunori Mahata <nori@mahata.net>
Yurii Rashkovskii <yrashk@gmail.com>
Zac Dover <zdover@redhat.com>
Zain Memon <zain@inzain.net>
Zaiste! <oh@zaiste.net>
Zane DeGraffenried <zane.deg@gmail.com>
Zilin Du <zilin.du@gmail.com>
zimbatm <zimbatm@zimbatm.com>
Zoltan Tombol <zoltan.tombol@gmail.com>
zqh <zqhxuyuan@gmail.com>
Álvaro Lázaro <alvaro.lazaro.g@gmail.com>
1545 CHANGELOG.md
File diff suppressed because it is too large.
277 CONTRIBUTING.md
@@ -1,61 +1,10 @@
# Contributing to Docker

Want to hack on Docker? Awesome! Here are instructions to get you
started. They are probably not perfect, please let us know if anything
feels wrong or incomplete.
Want to hack on Docker? Awesome! There are instructions to get you
started on the website: http://docker.io/gettingstarted.html

## Topics

* [Security Reports](#security-reports)
* [Design and Cleanup Proposals](#design-and-cleanup-proposals)
* [Reporting Issues](#reporting-issues)
* [Build Environment](#build-environment)
* [Contribution Guidelines](#contribution-guidelines)
* [Community Guidelines](#docker-community-guidelines)

## Security Reports

Please **DO NOT** file an issue for security related issues. Please send your
reports to [security@docker.com](mailto:security@docker.com) instead.

## Design and Cleanup Proposals

When considering a design proposal, we are looking for:

* A description of the problem this design proposal solves
* An issue -- not a pull request -- that describes what you will take action on
* Please prefix your issue with `Proposal:` in the title
* Please review [the existing Proposals](https://github.com/dotcloud/docker/issues?direction=asc&labels=Proposal&page=1&sort=created&state=open)
  before reporting a new issue. You can always pair with someone if you both
  have the same idea.

When considering a cleanup task, we are looking for:

* A description of the refactors made
* Please note any logic changes if necessary
* A pull request with the code
* Please prefix your PR's title with `Cleanup:` so we can quickly address it.
* Your pull request must remain up to date with master, so rebase as necessary.

## Reporting Issues

When reporting [issues](https://github.com/docker/docker/issues) on
GitHub please include your host OS (Ubuntu 12.04, Fedora 19, etc).
Please include:

* The output of `uname -a`.
* The output of `docker version`.
* The output of `docker -D info`.

Please also include the steps required to reproduce the problem if
possible and applicable. This information will help us review and fix
your issue faster.

## Build Environment

For instructions on setting up your development environment, please
see our dedicated [dev environment setup
docs](http://docs.docker.com/contributing/devenvironment/).
They are probably not perfect, please let us know if anything feels
wrong or incomplete.

## Contribution guidelines

@@ -72,12 +21,12 @@ received feedback on what to improve.
We're trying very hard to keep Docker lean and focused. We don't want it
to do everything for everybody. This means that we might decide against
incorporating a new feature. However, there might be a way to implement
that feature *on top of* Docker.
that feature *on top of* docker.

### Discuss your design on the mailing list

We recommend discussing your plans [on the mailing
list](https://groups.google.com/forum/?fromgroups#!forum/docker-dev)
list](https://groups.google.com/forum/?fromgroups#!forum/docker-club)
before starting to code - especially for more ambitious contributions.
This gives other contributors a chance to point you in the right
direction, give feedback on your design, and maybe point out if someone
@@ -85,8 +34,8 @@ else is working on the same thing.

### Create issues...

Any significant improvement should be documented as [a GitHub
issue](https://github.com/docker/docker/issues) before anybody
Any significant improvement should be documented as [a github
issue](https://github.com/dotcloud/docker/issues) before anybody
starts working on it.

### ...but check for existing issues first!

@@ -98,219 +47,47 @@ help prioritize the most common problems and requests.

### Conventions

Fork the repository and make changes on your fork in a feature branch:
Fork the repo and make changes on your fork in a feature branch:

- If it's a bug fix branch, name it XXXX-something where XXXX is the number of the
  issue.
- If it's a bugfix branch, name it XXX-something where XXX is the number of the
  issue
- If it's a feature branch, create an enhancement issue to announce your
  intentions, and name it XXXX-something where XXXX is the number of the issue.
  intentions, and name it XXX-something where XXX is the number of the issue.

Submit unit tests for your changes. Go has a great test framework built in; use
it! Take a look at existing tests for inspiration. Run the full test suite on
your branch before submitting a pull request.

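For contributors new to the built-in test framework the paragraph above refers to, here is a minimal sketch of the convention `go test` discovers; the package, file, and function names are hypothetical, not taken from the Docker tree:

```go
// reverse_test.go: any file ending in _test.go whose TestXxx functions
// take *testing.T is discovered and run automatically by `go test`.
package reverse

import "testing"

// Reverse is a throwaway function under test, defined here only to
// keep the example self-contained.
func Reverse(s string) string {
	r := []rune(s)
	for i, j := 0, len(r)-1; i < j; i, j = i+1, j-1 {
		r[i], r[j] = r[j], r[i]
	}
	return string(r)
}

func TestReverse(t *testing.T) {
	if got := Reverse("docker"); got != "rekcod" {
		t.Errorf(`Reverse("docker") = %q, want "rekcod"`, got)
	}
}
```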
Update the documentation when creating or modifying features. Test
your documentation changes for clarity, concision, and correctness, as
well as a clean documentation build. See `docs/README.md` for more
information on building the docs and how they get released.
Make sure you include relevant updates or additions to documentation when
creating or modifying features.

Write clean code. Universally formatted code promotes ease of writing, reading,
and maintenance. Always run `gofmt -s -w file.go` on each changed file before
committing your changes. Most editors have plug-ins that do this automatically.
and maintenance. Always run `go fmt` before committing your changes. Most
editors have plugins that do this automatically, and there's also a git
pre-commit hook:

```
curl -o .git/hooks/pre-commit https://raw.github.com/edsrzf/gofmt-git-hook/master/fmt-check && chmod +x .git/hooks/pre-commit
```

Pull request descriptions should be as clear as possible and include a
reference to all the issues that they address.

Commit messages must start with a capitalized and short summary (max. 50
chars) written in the imperative, followed by an optional, more detailed
explanatory text which is separated from the summary by an empty line.

Code review comments may be added to your pull request. Discuss, then make the
suggested modifications and push additional commits to your feature branch. Be
sure to post a comment after pushing. The new commits will show up in the pull
request automatically, but the reviewers will not be notified unless you
comment.

Pull requests must be cleanly rebased on top of master without multiple branches
mixed into the PR.

**Git tip**: If your PR no longer merges cleanly, use `rebase master` in your
feature branch to update your pull request rather than `merge master`.

Before the pull request is merged, make sure that you squash your commits into
logical units of work using `git rebase -i` and `git push -f`. After every
commit the test suite should be passing. Include documentation changes in the
same commit so that a revert would remove all traces of the feature or fix.

Commits that fix or close an issue should include a reference like
`Closes #XXXX` or `Fixes #XXXX`, which will automatically close the
issue when merged.

Please do not add yourself to the `AUTHORS` file, as it is regenerated
regularly from the Git history.

### Merge approval

Docker maintainers use LGTM (Looks Good To Me) in comments on the code review
to indicate acceptance.

A change requires LGTMs from an absolute majority of the maintainers of each
component affected. For example, if a change affects `docs/` and `registry/`, it
needs an absolute majority from the maintainers of `docs/` AND, separately, an
absolute majority of the maintainers of `registry/`.

For more details see [MAINTAINERS.md](hack/MAINTAINERS.md)

### Sign your work

The sign-off is a simple line at the end of the explanation for the
patch, which certifies that you wrote it or otherwise have the right to
pass it on as an open-source patch. The rules are pretty simple: if you
can certify the below (from
[developercertificate.org](http://developercertificate.org/)):

```
Developer Certificate of Origin
Version 1.1

Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA

Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.

Developer's Certificate of Origin 1.1

By making a contribution to this project, I certify that:

(a) The contribution was created in whole or in part by me and I
    have the right to submit it under the open source license
    indicated in the file; or

(b) The contribution is based upon previous work that, to the best
    of my knowledge, is covered under an appropriate open source
    license and I have the right under that license to submit that
    work with modifications, whether created in whole or in part
    by me, under the same open source license (unless I am
    permitted to submit under a different license), as indicated
    in the file; or

(c) The contribution was provided directly to me by some other
    person who certified (a), (b) or (c) and I have not modified
    it.

(d) I understand and agree that this project and the contribution
    are public and that a record of the contribution (including all
    personal information I submit with it, including my sign-off) is
    maintained indefinitely and may be redistributed consistent with
    this project or the open source license(s) involved.
```

Then you just add a line to every git commit message:

    Signed-off-by: Joe Smith <joe.smith@email.com>

Using your real name (sorry, no pseudonyms or anonymous contributions.)

If you set your `user.name` and `user.email` git configs, you can sign your
commit automatically with `git commit -s`.

Note that the old-style `Docker-DCO-1.1-Signed-off-by: ...` format is still
accepted, so there is no need to update outstanding pull requests to the new
format right away, but please do adjust your processes for future contributions.

#### Small patch exception

There are several exceptions to the signing requirement. Currently these are:

* Your patch fixes spelling or grammar errors.
* Your patch is a single line change to documentation contained in the
  `docs` directory.
* Your patch fixes Markdown formatting or syntax errors in the
  documentation contained in the `docs` directory.

If you have any questions, please refer to the FAQ in the [docs](http://docs.docker.com)

### How can I become a maintainer?

* Step 1: Learn the component inside out
* Step 2: Make yourself useful by contributing code, bug fixes, support etc.
* Step 3: Volunteer on the IRC channel (#docker at Freenode)
* Step 4: Propose yourself at a scheduled docker meeting in #docker-dev

Don't forget: being a maintainer is a time investment. Make sure you
will have time to make yourself available. You don't have to be a
maintainer to make a difference on the project!

### IRC Meetings

There are two monthly meetings taking place on #docker-dev IRC to accommodate all timezones.
Anybody can ask for a topic to be discussed prior to the meeting.

If you feel the conversation is going off-topic, feel free to point it out.

For the exact dates and times, have a look at [the irc-minutes repo](https://github.com/docker/irc-minutes).
They also contain all the notes from previous meetings.

## Docker Community Guidelines

We want to keep the Docker community awesome, growing and collaborative. We
need your help to keep it that way. To help with this we've come up with some
general guidelines for the community as a whole:

* Be nice: Be courteous, respectful and polite to fellow community members: no
  regional, racial, gender, or other abuse will be tolerated. We like nice people
  way better than mean ones!

* Encourage diversity and participation: Make everyone in our community
  feel welcome, regardless of their background and the extent of their
  contributions, and do everything possible to encourage participation in
  our community.

* Keep it legal: Basically, don't get us in trouble. Share only content that
  you own, do not share private or sensitive information, and don't break the
  law.

* Stay on topic: Make sure that you are posting to the correct channel
  and avoid off-topic discussions. Remember when you update an issue or
  respond to an email you are potentially sending to a large number of
  people. Please consider this before you update. Also remember that
  nobody likes spam.

### Guideline Violations — 3 Strikes Method

The point of this section is not to find opportunities to punish people, but we
do need a fair way to deal with people who are making our community suck.

1. First occurrence: We'll give you a friendly, but public reminder that the
   behavior is inappropriate according to our guidelines.

2. Second occurrence: We will send you a private message with a warning that
   any additional violations will result in removal from the community.

3. Third occurrence: Depending on the violation, we may need to delete or ban
   your account.

**Notes:**

* Obvious spammers are banned on first occurrence. If we don't do this, we'll
  have spam all over the place.

* Violations are forgiven after 6 months of good behavior, and we won't
  hold a grudge.

* People who commit minor infractions will get some education,
  rather than hammering them in the 3 strikes process.

* The rules apply equally to everyone in the community, no matter how
  much you've contributed.

* Extreme violations of a threatening, abusive, destructive or illegal nature
  will be addressed immediately and are not subject to 3 strikes or
  forgiveness.

* Contact james@docker.com to report abuse or appeal violations. In the case of
  appeals, we know that mistakes happen, and we'll work with you to come up with
  a fair solution if there has been a misunderstanding.
Commits that fix or close an issue should include a reference like `Closes #XXX`
or `Fixes #XXX`, which will automatically close the issue when merged.

Add your name to the AUTHORS file, but make sure the list is sorted and your
name and email address match your git configuration. The AUTHORS file is
regenerated occasionally from the git commit history, so a mismatch may result
in your changes being overwritten.

110 Dockerfile
@@ -1,110 +0,0 @@
# This file describes the standard way to build Docker, using docker
#
# Usage:
#
# # Assemble the full dev environment. This is slow the first time.
# docker build -t docker .
#
# # Mount your source in an interactive container for quick testing:
# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash
#
# # Run the test suite:
# docker run --privileged docker hack/make.sh test
#
# # Publish a release:
# docker run --privileged \
#  -e AWS_S3_BUCKET=baz \
#  -e AWS_ACCESS_KEY=foo \
#  -e AWS_SECRET_KEY=bar \
#  -e GPG_PASSPHRASE=gloubiboulga \
#  docker hack/release.sh
#
# Note: Apparmor used to mess with privileged mode, but this is no longer
# the case. Therefore, you don't have to disable it anymore.
#

docker-version 0.6.1
FROM ubuntu:14.04
MAINTAINER Tianon Gravi <admwiggin@gmail.com> (@tianon)

# Packaged dependencies
RUN apt-get update && apt-get install -y \
	aufs-tools \
	automake \
	btrfs-tools \
	build-essential \
	curl \
	dpkg-sig \
	git \
	iptables \
	libapparmor-dev \
	libcap-dev \
	libsqlite3-dev \
	lxc=1.0* \
	mercurial \
	parallel \
	reprepro \
	ruby1.9.1 \
	ruby1.9.1-dev \
	s3cmd=1.1.0* \
	--no-install-recommends

# Get lvm2 source for compiling statically
RUN git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103
# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags
# note: we don't use "git clone -b" above because it then spews big nasty warnings about 'detached HEAD' state that we can't silence as easily as we can silence them using "git checkout" directly

# Compile and install lvm2
RUN cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper
# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL

# Install Go
RUN curl -sSL https://golang.org/dl/go1.3.1.src.tar.gz | tar -v -C /usr/local -xz
ENV PATH /usr/local/go/bin:$PATH
ENV GOPATH /go:/go/src/github.com/docker/docker/vendor
ENV PATH /go/bin:$PATH
RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1

# Compile Go for cross compilation
ENV DOCKER_CROSSPLATFORMS \
	linux/386 linux/arm \
	darwin/amd64 darwin/386 \
	freebsd/amd64 freebsd/386 freebsd/arm
# (set an explicit GOARM of 5 for maximum compatibility)
ENV GOARM 5
RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'

# Grab Go's cover tool for dead-simple code coverage testing
RUN go get code.google.com/p/go.tools/cmd/cover

# TODO replace FPM with some very minimal debhelper stuff
RUN gem install --no-rdoc --no-ri fpm --version 1.0.2

# Install man page generator
RUN mkdir -p /go/src/github.com/cpuguy83 \
	&& git clone -b v1 https://github.com/cpuguy83/go-md2man.git /go/src/github.com/cpuguy83/go-md2man \
	&& cd /go/src/github.com/cpuguy83/go-md2man \
	&& go get -v ./...

# Get the "busybox" image source so we can build locally instead of pulling
RUN git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox

# Setup s3cmd config
RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg

# Set user.email so crosbymichael's in-container merge commits go smoothly
RUN git config --global user.email 'docker-dummy@example.com'

# Add an unprivileged user to be used for tests which need it
RUN groupadd -r docker
RUN useradd --create-home --gid docker unprivilegeduser

VOLUME /var/lib/docker
WORKDIR /go/src/github.com/docker/docker
ENV DOCKER_BUILDTAGS apparmor selinux

# Wrap all commands in the "docker-in-docker" script to allow nested containers
ENTRYPOINT ["hack/dind"]

# Upload docker source
COPY . /go/src/github.com/docker/docker
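The cross-compilation loop in the Dockerfile above works because GOOS and GOARCH are ordinary environment inputs to the Go toolchain; on Go 1.3 each target additionally needs the per-platform make.bash pass the loop performs. A tiny probe, assuming nothing beyond a working toolchain, shows that a binary carries the platform it was built for:

```go
// platform.go: prints the platform this binary was compiled for, not
// the machine it happens to run on, because runtime.GOOS and
// runtime.GOARCH are fixed at compile time. For example:
//   GOOS=freebsd GOARCH=386 go build platform.go
// produces a FreeBSD/386 binary from any host.
package main

import (
	"fmt"
	"runtime"
)

func main() {
	fmt.Printf("built for %s/%s\n", runtime.GOOS, runtime.GOARCH)
}
```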
13 LICENSE
@@ -176,7 +176,18 @@

END OF TERMS AND CONDITIONS

Copyright 2014 Docker, Inc.
APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@@ -1,9 +0,0 @@
Solomon Hykes <solomon@docker.com> (@shykes)
Victor Vieux <vieux@docker.com> (@vieux)
Michael Crosby <michael@crosbymichael.com> (@crosbymichael)
.mailmap: Tianon Gravi <admwiggin@gmail.com> (@tianon)
.travis.yml: Tianon Gravi <admwiggin@gmail.com> (@tianon)
AUTHORS: Tianon Gravi <admwiggin@gmail.com> (@tianon)
Dockerfile: Tianon Gravi <admwiggin@gmail.com> (@tianon)
Makefile: Tianon Gravi <admwiggin@gmail.com> (@tianon)
.dockerignore: Tianon Gravi <admwiggin@gmail.com> (@tianon)
108 Makefile
@@ -1,68 +1,78 @@
.PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration test-integration-cli validate
DOCKER_PACKAGE := github.com/dotcloud/docker
RELEASE_VERSION := $(shell git tag | grep -E "v[0-9\.]+$$" | sort -nr | head -n 1)
SRCRELEASE := docker-$(RELEASE_VERSION)
BINRELEASE := docker-$(RELEASE_VERSION).tgz

# to allow `make BINDDIR=. shell` or `make BINDDIR= test`
# (default to no bind mount if DOCKER_HOST is set)
BINDDIR := $(if $(DOCKER_HOST),,bundles)
# to allow `make DOCSPORT=9000 docs`
DOCSPORT := 8000
GIT_ROOT := $(shell git rev-parse --show-toplevel)
BUILD_DIR := $(CURDIR)/.gopath

GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null)
DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH))
DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH),:$(GIT_BRANCH))
DOCKER_MOUNT := $(if $(BINDDIR),-v "$(CURDIR)/$(BINDDIR):/go/src/github.com/docker/docker/$(BINDDIR)")
GOPATH ?= $(BUILD_DIR)
export GOPATH

DOCKER_RUN_DOCKER := docker run --rm -it --privileged -e TESTFLAGS -e TESTDIRS -e DOCKER_GRAPHDRIVER -e DOCKER_EXECDRIVER $(DOCKER_MOUNT) "$(DOCKER_IMAGE)"
# to allow `make DOCSDIR=docs docs-shell`
DOCKER_RUN_DOCS := docker run --rm -it $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR)) -e AWS_S3_BUCKET
GO_OPTIONS ?=
ifeq ($(VERBOSE), 1)
GO_OPTIONS += -v
endif

default: binary
GIT_COMMIT = $(shell git rev-parse --short HEAD)
GIT_STATUS = $(shell test -n "`git status --porcelain`" && echo "+CHANGES")

all: build
	$(DOCKER_RUN_DOCKER) hack/make.sh
BUILD_OPTIONS = -ldflags "-X main.GIT_COMMIT $(GIT_COMMIT)$(GIT_STATUS)"

binary: build
	$(DOCKER_RUN_DOCKER) hack/make.sh binary
SRC_DIR := $(GOPATH)/src

cross: build
	$(DOCKER_RUN_DOCKER) hack/make.sh binary cross
DOCKER_DIR := $(SRC_DIR)/$(DOCKER_PACKAGE)
DOCKER_MAIN := $(DOCKER_DIR)/docker

docs: docs-build
	$(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" mkdocs serve
DOCKER_BIN_RELATIVE := bin/docker
DOCKER_BIN := $(CURDIR)/$(DOCKER_BIN_RELATIVE)

docs-shell: docs-build
	$(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" bash
.PHONY: all clean test hack release srcrelease $(BINRELEASE) $(SRCRELEASE) $(DOCKER_BIN) $(DOCKER_DIR)

docs-release: docs-build
	$(DOCKER_RUN_DOCS) -e BUILD_ROOT "$(DOCKER_DOCS_IMAGE)" ./release.sh
all: $(DOCKER_BIN)

test: build
	$(DOCKER_RUN_DOCKER) hack/make.sh binary cross test-unit test-integration test-integration-cli
$(DOCKER_BIN): $(DOCKER_DIR)
	@mkdir -p $(dir $@)
	@(cd $(DOCKER_MAIN); go build $(GO_OPTIONS) $(BUILD_OPTIONS) -o $@)
	@echo $(DOCKER_BIN_RELATIVE) is created.

test-unit: build
	$(DOCKER_RUN_DOCKER) hack/make.sh test-unit
$(DOCKER_DIR):
	@mkdir -p $(dir $@)
	@rm -f $@
	@ln -sf $(CURDIR)/ $@
	@(cd $(DOCKER_MAIN); go get $(GO_OPTIONS))

test-integration: build
	$(DOCKER_RUN_DOCKER) hack/make.sh test-integration
whichrelease:
	echo $(RELEASE_VERSION)

test-integration-cli: build
	$(DOCKER_RUN_DOCKER) hack/make.sh binary test-integration-cli
release: $(BINRELEASE)
srcrelease: $(SRCRELEASE)
deps: $(DOCKER_DIR)

validate: build
	$(DOCKER_RUN_DOCKER) hack/make.sh validate-gofmt validate-dco
# A clean checkout of $RELEASE_VERSION, with vendored dependencies
$(SRCRELEASE):
	rm -fr $(SRCRELEASE)
	git clone $(GIT_ROOT) $(SRCRELEASE)
	cd $(SRCRELEASE); git checkout -q $(RELEASE_VERSION)

shell: build
	$(DOCKER_RUN_DOCKER) bash
# A binary release ready to be uploaded to a mirror
$(BINRELEASE): $(SRCRELEASE)
	rm -f $(BINRELEASE)
	cd $(SRCRELEASE); make; cp -R bin docker-$(RELEASE_VERSION); tar -f ../$(BINRELEASE) -zv -c docker-$(RELEASE_VERSION)

build: bundles
	docker build -t "$(DOCKER_IMAGE)" .
clean:
	@rm -rf $(dir $(DOCKER_BIN))
ifeq ($(GOPATH), $(BUILD_DIR))
	@rm -rf $(BUILD_DIR)
else ifneq ($(DOCKER_DIR), $(realpath $(DOCKER_DIR)))
	@rm -f $(DOCKER_DIR)
endif

docs-build:
	cp ./VERSION docs/VERSION
	echo "$(GIT_BRANCH)" > docs/GIT_BRANCH
	echo "$(AWS_S3_BUCKET)" > docs/AWS_S3_BUCKET
	echo "$(GITCOMMIT)" > docs/GITCOMMIT
	docker build -t "$(DOCKER_DOCS_IMAGE)" docs
test: all
	@(cd $(DOCKER_DIR); sudo -E go test $(GO_OPTIONS))

bundles:
	mkdir bundles
fmt:
	@gofmt -s -l -w .

hack:
	cd $(CURDIR)/buildbot && vagrant up
19 NOTICE
@@ -1,19 +1,6 @@
Docker
Copyright 2012-2014 Docker, Inc.
Copyright 2012-2013 dotCloud, inc.

This product includes software developed at Docker, Inc. (http://www.docker.com).
This product includes software developed at dotCloud, inc. (http://www.dotcloud.com).

This product contains software (https://github.com/kr/pty) developed
by Keith Rarick, licensed under the MIT License.

The following is courtesy of our legal counsel:

Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.

For more information, please see http://www.bis.doc.gov

See also http://www.apache.org/dev/crypto.html and/or seek legal counsel.
This product contains software (https://github.com/kr/pty) developed by Keith Rarick, licensed under the MIT License.
444 README.md
@@ -1,205 +1,317 @@
Docker: the Linux container engine
==================================
Docker: the Linux container runtime
===================================

Docker is an open source project to pack, ship and run any application
as a lightweight container
Docker complements LXC with a high-level API which operates at the process level. It runs unix processes with strong guarantees of isolation and repeatability across servers.

Docker containers are both *hardware-agnostic* and *platform-agnostic*.
This means that they can run anywhere, from your laptop to the largest
EC2 compute instance and everything in between - and they don't require
that you use a particular language, framework or packaging system. That
makes them great building blocks for deploying and scaling web apps,
databases and backend services without depending on a particular stack
or provider.
Docker is a great building block for automating distributed systems: large-scale web deployments, database clusters, continuous deployment systems, private PaaS, service-oriented architectures, etc.

Docker is an open-source implementation of the deployment engine which
powers [dotCloud](http://dotcloud.com), a popular Platform-as-a-Service.
It benefits directly from the experience accumulated over several years
of large-scale operation and support of hundreds of thousands of
applications and databases.



* *Heterogeneous payloads*: any combination of binaries, libraries, configuration files, scripts, virtualenvs, jars, gems, tarballs, you name it. No more juggling between domain-specific tools. Docker can deploy and run them all.

## Security Disclosure
* *Any server*: docker can run on any x64 machine with a modern linux kernel - whether it's a laptop, a bare metal server or a VM. This makes it perfect for multi-cloud deployments.

Security is very important to us. If you have any issue regarding security,
please disclose the information responsibly by sending an email to
security@docker.com and not by creating a github issue.
* *Isolation*: docker isolates processes from each other and from the underlying host, using lightweight containers.

## Better than VMs

A common method for distributing applications and sandboxing their
execution is to use virtual machines, or VMs. Typical VM formats are
VMWare's vmdk, Oracle Virtualbox's vdi, and Amazon EC2's ami. In theory
these formats should allow every developer to automatically package
their application into a "machine" for easy distribution and deployment.
In practice, that almost never happens, for a few reasons:

* *Size*: VMs are very large which makes them impractical to store
  and transfer.
* *Performance*: running VMs consumes significant CPU and memory,
  which makes them impractical in many scenarios, for example local
  development of multi-tier applications, and large-scale deployment
  of cpu and memory-intensive applications on large numbers of
  machines.
* *Portability*: competing VM environments don't play well with each
  other. Although conversion tools do exist, they are limited and
  add even more overhead.
* *Hardware-centric*: VMs were designed with machine operators in
  mind, not software developers. As a result, they offer very
  limited tooling for what developers need most: building, testing
  and running their software. For example, VMs offer no facilities
  for application versioning, monitoring, configuration, logging or
  service discovery.

By contrast, Docker relies on a different sandboxing method known as
*containerization*. Unlike traditional virtualization, containerization
takes place at the kernel level. Most modern operating system kernels
now support the primitives necessary for containerization, including
Linux with [openvz](http://openvz.org),
[vserver](http://linux-vserver.org) and more recently
[lxc](http://lxc.sourceforge.net), Solaris with
[zones](http://docs.oracle.com/cd/E26502_01/html/E29024/preface-1.html#scrolltoc)
and FreeBSD with
[Jails](http://www.freebsd.org/doc/handbook/jails.html).

Docker builds on top of these low-level primitives to offer developers a
portable format and runtime environment that solves all 4 problems.
Docker containers are small (and their transfer can be optimized with
layers), they have basically zero memory and cpu overhead, they are
completely portable and are designed from the ground up with an
application-centric design.

The best part: because Docker operates at the OS level, it can still be
run inside a VM!

## Plays well with others

Docker does not require that you buy into a particular programming
language, framework, packaging system or configuration language.

Is your application a Unix process? Does it use files, tcp connections,
environment variables, standard Unix streams and command-line arguments
as inputs and outputs? Then Docker can run it.

Can your application's build be expressed as a sequence of such
commands? Then Docker can build it.
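
As a quick illustrative sketch (the image name below is just an example, not part of the original text), running an arbitrary Unix command in a container looks like this:

```bash
# Any Unix process is fair game: stdin/stdout/stderr, environment
# variables and the exit code all pass through as usual.
docker run ubuntu /bin/echo "hello from a container"
```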

## Escape dependency hell

A common problem for developers is the difficulty of managing all
their application's dependencies in a simple and automated way.

This is usually difficult for several reasons:

* *Cross-platform dependencies*. Modern applications often depend on
  a combination of system libraries and binaries, language-specific
  packages, framework-specific modules, internal components
  developed for another project, etc. These dependencies live in
  different "worlds" and require different tools - these tools
  typically don't work well with each other, requiring awkward
  custom integrations.

* Conflicting dependencies. Different applications may depend on
  different versions of the same dependency. Packaging tools handle
  these situations with various degrees of ease - but they all
  handle them in different and incompatible ways, which again forces
  the developer to do extra work.

* Custom dependencies. A developer may need to prepare a custom
  version of their application's dependency. Some packaging systems
  can handle custom versions of a dependency, others can't - and all
  of them handle it differently.
* *Repeatability*: because containers are isolated in their own filesystem, they behave the same regardless of where, when, and alongside what they run.


Docker solves dependency hell by giving the developer a simple way to
express *all* their application's dependencies in one place, and
streamline the process of assembling them. If this makes you think of
[XKCD 927](http://xkcd.com/927/), don't worry. Docker doesn't
*replace* your favorite packaging systems. It simply orchestrates
their use in a simple and repeatable way. How does it do that? With
layers.
Notable features
-----------------

Docker defines a build as running a sequence of Unix commands, one
after the other, in the same container. Build commands modify the
contents of the container (usually by installing new files on the
filesystem), the next command modifies it some more, etc. Since each
build command inherits the result of the previous commands, the
*order* in which the commands are executed expresses *dependencies*.
* Filesystem isolation: each process container runs in a completely separate root filesystem.

Here's a typical Docker build process:
* Resource isolation: system resources like cpu and memory can be allocated differently to each process container, using cgroups.

* Network isolation: each process container runs in its own network namespace, with a virtual interface and IP address of its own.

* Copy-on-write: root filesystems are created using copy-on-write, which makes deployment extremely fast, memory-cheap and disk-cheap.

* Logging: the standard streams (stdout/stderr/stdin) of each process container are collected and logged for real-time or batch retrieval.

* Change management: changes to a container's filesystem can be committed into a new image and re-used to create more containers. No templating or manual configuration required.

* Interactive shell: docker can allocate a pseudo-tty and attach to the standard input of any container, for example to run a throwaway interactive shell.

Install instructions
==================

Quick install on Ubuntu 12.04 and 12.10
---------------------------------------

```bash
FROM ubuntu:12.04
RUN apt-get update && apt-get install -y python python-pip curl
RUN curl -sSL https://github.com/shykes/helloflask/archive/master.tar.gz | tar -xzv
RUN cd helloflask-master && pip install -r requirements.txt
curl get.docker.io | sh -x
```

Note that Docker doesn't care *how* dependencies are built - as long
as they can be built by running a Unix command in a container.
Binary installs
----------------

Docker supports the following binary installation methods.
Note that some methods are community contributions and not yet officially supported.

Getting started
===============
* [Ubuntu 12.04 and 12.10 (officially supported)](http://docs.docker.io/en/latest/installation/ubuntulinux/)
* [Arch Linux](http://docs.docker.io/en/latest/installation/archlinux/)
* [MacOS X (with Vagrant)](http://docs.docker.io/en/latest/installation/macos/)
* [Windows (with Vagrant)](http://docs.docker.io/en/latest/installation/windows/)
* [Amazon EC2 (with Vagrant)](http://docs.docker.io/en/latest/installation/amazon/)

Docker can be installed on your local machine as well as servers - both
bare metal and virtualized. It is available as a binary on most modern
Linux systems, or as a VM on Windows, Mac and other systems.
Installing from source
----------------------

We also offer an [interactive tutorial](http://www.docker.com/tryit/)
for quickly learning the basics of using Docker.
1. Make sure you have a [Go language](http://golang.org/doc/install) compiler and [git](http://git-scm.com) installed.

For up-to-date install instructions, see the [Docs](http://docs.docker.com).
2. Checkout the source code

```bash
git clone http://github.com/dotcloud/docker
```

3. Build the docker binary

```bash
cd docker
make VERBOSE=1
sudo cp ./bin/docker /usr/local/bin/docker
```

Usage examples
==============

Docker can be used to run short-lived commands, long-running daemons
(app servers, databases etc.), interactive shell sessions, etc.
First run the docker daemon
---------------------------

You can find a [list of real-world
examples](http://docs.docker.com/examples/) in the
documentation.
All the examples assume your machine is running the docker daemon. To run the docker daemon in the background, simply type:

```bash
# On a production system you want this running in an init script
sudo docker -d &
```

Now you can run docker in client mode: all commands will be forwarded to the docker daemon, so the client can run from any account.

```bash
# Now you can run docker commands from any account.
docker help
```


Throwaway shell in a base ubuntu image
--------------------------------------

```bash
docker pull ubuntu:12.10

# Run an interactive shell, allocate a tty, attach stdin and stdout
# To detach the tty without exiting the shell, use the escape sequence Ctrl-p + Ctrl-q
docker run -i -t ubuntu:12.10 /bin/bash
```

Starting a long-running worker process
--------------------------------------

```bash
# Start a very useful long-running process
JOB=$(docker run -d ubuntu /bin/sh -c "while true; do echo Hello world; sleep 1; done")

# Collect the output of the job so far
docker logs $JOB

# Kill the job
docker kill $JOB
```

Running an irc bouncer
----------------------

```bash
BOUNCER_ID=$(docker run -d -p 6667 -u irc shykes/znc $USER $PASSWORD)
echo "Configure your irc client to connect to port $(docker port $BOUNCER_ID 6667) of this machine"
```

Running Redis
-------------

```bash
REDIS_ID=$(docker run -d -p 6379 shykes/redis redis-server)
echo "Configure your redis client to connect to port $(docker port $REDIS_ID 6379) of this machine"
```

Share your own image!
---------------------

```bash
CONTAINER=$(docker run -d ubuntu:12.10 apt-get install -y curl)
docker commit -m "Installed curl" $CONTAINER $USER/betterbase
docker push $USER/betterbase
```

A list of publicly available images is [available here](https://github.com/dotcloud/docker/wiki/Public-docker-images).

Expose a service on a TCP port
------------------------------

```bash
# Expose port 4444 of this container, and tell netcat to listen on it
JOB=$(docker run -d -p 4444 base /bin/nc -l -p 4444)

# Which public port is NATed to my container?
PORT=$(docker port $JOB 4444)

# Connect to the public port via the host's public address
# Please note that because of how routing works connecting to localhost or 127.0.0.1 $PORT will not work.
IP=$(ifconfig eth0 | perl -n -e 'if (m/inet addr:([\d\.]+)/g) { print $1 }')
echo hello world | nc $IP $PORT

# Verify that the network connection worked
echo "Daemon received: $(docker logs $JOB)"
```

Under the hood
--------------

Under the hood, Docker is built on the following components:

* The
  [cgroup](http://blog.dotcloud.com/kernel-secrets-from-the-paas-garage-part-24-c)
  and
  [namespacing](http://blog.dotcloud.com/under-the-hood-linux-kernels-on-dotcloud-part)
  capabilities of the Linux kernel;
* The [Go](http://golang.org) programming language.

* The [cgroup](http://blog.dotcloud.com/kernel-secrets-from-the-paas-garage-part-24-c) and [namespacing](http://blog.dotcloud.com/under-the-hood-linux-kernels-on-dotcloud-part) capabilities of the Linux kernel;

* [AUFS](http://aufs.sourceforge.net/aufs.html), a powerful union filesystem with copy-on-write capabilities;

* The [Go](http://golang.org) programming language;

* [lxc](http://lxc.sourceforge.net/), a set of convenience scripts to simplify the creation of linux containers.


Contributing to Docker
======================

[![GoDoc](https://godoc.org/github.com/docker/docker?status.png)](https://godoc.org/github.com/docker/docker)
[![Travis](https://travis-ci.org/docker/docker.svg?branch=master)](https://travis-ci.org/docker/docker)
Want to hack on Docker? Awesome! There are instructions to get you started on the website: http://docs.docker.io/en/latest/contributing/contributing/

Want to hack on Docker? Awesome! There are instructions to get you
started [here](CONTRIBUTING.md).

They are probably not perfect, please let us know if anything feels
wrong or incomplete.

### Legal

*Brought to you courtesy of our legal counsel. For more context,
please see the Notice document.*

Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.

For more information, please see http://www.bis.doc.gov
They are probably not perfect, please let us know if anything feels wrong or incomplete.


Licensing
=========
Docker is licensed under the Apache License, Version 2.0. See LICENSE for full license text.
Note
----

We also keep the documentation in this repository. The website documentation is generated using sphinx using these sources.
Please find it under docs/sources/ and read more about it https://github.com/dotcloud/docker/master/docs/README.md

Please feel free to fix / update the documentation and send us pull requests. More tutorials are also welcome.


Setting up a dev environment
----------------------------

Instructions that have been verified to work on Ubuntu 12.10,

```bash
sudo apt-get -y install lxc wget bsdtar curl golang git

export GOPATH=~/go/
export PATH=$GOPATH/bin:$PATH

mkdir -p $GOPATH/src/github.com/dotcloud
cd $GOPATH/src/github.com/dotcloud
git clone git@github.com:dotcloud/docker.git
cd docker

go get -v github.com/dotcloud/docker/...
go install -v github.com/dotcloud/docker/...
```

Then run the docker daemon,

```bash
sudo $GOPATH/bin/docker -d
```

Run the `go install` command (above) to recompile docker.


What is a Standard Container?
=============================

Docker defines a unit of software delivery called a Standard Container. The goal of a Standard Container is to encapsulate a software component and all its dependencies in
a format that is self-describing and portable, so that any compliant runtime can run it without extra dependencies, regardless of the underlying machine and the contents of the container.

The spec for Standard Containers is currently a work in progress, but it is very straightforward. It mostly defines 1) an image format, 2) a set of standard operations, and 3) an execution environment.

A great analogy for this is the shipping container. Just like Standard Containers are a fundamental unit of software delivery, shipping containers (http://bricks.argz.com/ins/7823-1/12) are a fundamental unit of physical delivery.

### 1. STANDARD OPERATIONS

Just like shipping containers, Standard Containers define a set of STANDARD OPERATIONS. Shipping containers can be lifted, stacked, locked, loaded, unloaded and labelled. Similarly, standard containers can be started, stopped, copied, snapshotted, downloaded, uploaded and tagged.
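
For illustration, here is a rough sketch of how those standard operations map onto the docker command line (container and image names are placeholders, and exact flags may vary between docker versions):

```bash
# start and stop
JOB=$(docker run -d ubuntu /bin/sh -c "sleep 3600")
docker stop $JOB

# snapshot, upload, download
docker commit $JOB $USER/snapshot
docker push $USER/snapshot
docker pull ubuntu
```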

### 2. CONTENT-AGNOSTIC

Just like shipping containers, Standard Containers are CONTENT-AGNOSTIC: all standard operations have the same effect regardless of the contents. A shipping container will be stacked in exactly the same way whether it contains Vietnamese powder coffee or spare Maserati parts. Similarly, Standard Containers are started or uploaded in the same way whether they contain a postgres database, a php application with its dependencies and application server, or Java build artifacts.


### 3. INFRASTRUCTURE-AGNOSTIC

Both types of containers are INFRASTRUCTURE-AGNOSTIC: they can be transported to thousands of facilities around the world, and manipulated by a wide variety of equipment. A shipping container can be packed in a factory in Ukraine, transported by truck to the nearest routing center, stacked onto a train, loaded into a German boat by an Australian-built crane, stored in a warehouse at a US facility, etc. Similarly, a standard container can be bundled on my laptop, uploaded to S3, downloaded, run and snapshotted by a build server at Equinix in Virginia, uploaded to 10 staging servers in a home-made Openstack cluster, then sent to 30 production instances across 3 EC2 regions.


### 4. DESIGNED FOR AUTOMATION

Because they offer the same standard operations regardless of content and infrastructure, Standard Containers, just like their physical counterpart, are extremely well-suited for automation. In fact, you could say automation is their secret weapon.

Many things that once required time-consuming and error-prone human effort can now be programmed. Before shipping containers, a bag of powder coffee was hauled, dragged, dropped, rolled and stacked by 10 different people in 10 different locations by the time it reached its destination. 1 out of 50 disappeared. 1 out of 20 was damaged. The process was slow, inefficient and cost a fortune - and was entirely different depending on the facility and the type of goods.

Similarly, before Standard Containers, by the time a software component ran in production, it had been individually built, configured, bundled, documented, patched, vendored, templated, tweaked and instrumented by 10 different people on 10 different computers. Builds failed, libraries conflicted, mirrors crashed, post-it notes were lost, logs were misplaced, cluster updates were half-broken. The process was slow, inefficient and cost a fortune - and was entirely different depending on the language and infrastructure provider.


### 5. INDUSTRIAL-GRADE DELIVERY

There are 17 million shipping containers in existence, packed with every physical good imaginable. Every single one of them can be loaded on the same boats, by the same cranes, in the same facilities, and sent anywhere in the World with incredible efficiency. It is embarrassing to think that a 30 ton shipment of coffee can safely travel half-way across the World in *less time* than it takes a software team to deliver its code from one datacenter to another sitting 10 miles away.

With Standard Containers we can put an end to that embarrassment, by making INDUSTRIAL-GRADE DELIVERY of software a reality.


Standard Container Specification
--------------------------------

(TODO)

### Image format

### Standard operations

* Copy
* Run
* Stop
* Wait
* Commit
* Attach standard streams
* List filesystem changes
* ...

### Execution environment

#### Root filesystem

#### Environment variables

#### Process arguments

#### Networking

#### Process namespacing

#### Resource limits

#### Process monitoring

#### Logging

#### Signals

#### Pseudo-terminal allocation

#### Security
71 SPECS/data-volumes.md Normal file
@@ -0,0 +1,71 @@
## Spec for data volumes

Spec owner: Solomon Hykes <solomon@dotcloud.com>

Data volumes (issue #111) are a much-requested feature which trigger much discussion and debate. Below is the current authoritative spec for implementing data volumes.
This spec will be deprecated once the feature is fully implemented.

Discussion, requests, trolls, demands, offerings, threats and other forms of supplications concerning this spec should be addressed to Solomon here: https://github.com/dotcloud/docker/issues/111


### 1. Creating data volumes

At container creation, parts of a container's filesystem can be mounted as separate data volumes. Volumes are defined with the -v flag.

For example:

```bash
$ docker run -v /var/lib/postgres -v /var/log postgres /usr/bin/postgres
```

In this example, a new container is created from the 'postgres' image. At the same time, docker creates 2 new data volumes: one will be mapped to the container at /var/lib/postgres, the other at /var/log.

2 important notes:

1) Volumes don't have top-level names. At no point does the user provide a name, or is a name given to him. Volumes are identified by the path at which they are mounted inside their container.

2) The user doesn't choose the source of the volume. Docker only mounts volumes it created itself, in the same way that it only runs containers that it created itself. That is by design.


### 2. Sharing data volumes

Instead of creating its own volumes, a container can share another container's volumes. For example:

```bash
$ docker run --volumes-from $OTHER_CONTAINER_ID postgres /usr/local/bin/postgres-backup
```

In this example, a new container is created from the 'postgres' image. At the same time, docker will *re-use* the 2 data volumes created in the previous example. One volume will be mounted on the /var/lib/postgres of *both* containers, and the other will be mounted on the /var/log of both containers.

### 3. Under the hood

Docker stores volumes in /var/lib/docker/volumes. Each volume receives a globally unique ID at creation, and is stored at /var/lib/docker/volumes/ID.

At creation, volumes are attached to a single container - the source of truth for this mapping will be the container's configuration.

Mounting a volume consists of calling "mount --bind" from the volume's directory to the appropriate sub-directory of the container mountpoint. This may be done by Docker itself, or farmed out to lxc (which supports mount-binding) if possible.
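
As a rough sketch of that mechanism (the volume ID and container rootfs path below are hypothetical placeholders, not the exact layout docker uses):

```bash
# A volume lives in its own directory on the host; mounting it into a
# container amounts to a bind mount into the container's filesystem.
VOLUME_ID=0123456789abcdef                                        # hypothetical ID
CONTAINER_ROOTFS=/var/lib/docker/containers/$CONTAINER_ID/rootfs  # hypothetical path
mount --bind /var/lib/docker/volumes/$VOLUME_ID $CONTAINER_ROOTFS/var/lib/postgres
```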

### 4. Backups, transfers and other volume operations

Volumes sometimes need to be backed up, transferred between hosts, synchronized, etc. These operations typically are application-specific or site-specific, eg. rsync vs. S3 upload vs. replication vs...

Rather than attempting to implement all these scenarios directly, Docker will allow for custom implementations using an extension mechanism.

### 5. Custom volume handlers

Docker allows for arbitrary code to be executed against a container's volumes, to implement any custom action: backup, transfer, synchronization across hosts, etc.

Here's an example:

```bash
$ DB=$(docker run -d -v /var/lib/postgres -v /var/log postgres /usr/bin/postgres)

$ BACKUP_JOB=$(docker run -d --volumes-from $DB shykes/backuper /usr/local/bin/backup-postgres --s3creds=$S3CREDS)

$ docker wait $BACKUP_JOB
```

Congratulations, you just implemented a custom volume handler, using Docker's built-in ability to 1) execute arbitrary code and 2) share volumes between containers.
82 Vagrantfile vendored Normal file
@@ -0,0 +1,82 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

def v10(config)
  config.vm.box = 'precise64'
  config.vm.box_url = 'http://files.vagrantup.com/precise64.box'

  # Install ubuntu packaging dependencies and create ubuntu packages
  config.vm.provision :shell, :inline => "echo 'deb http://ppa.launchpad.net/dotcloud/lxc-docker/ubuntu precise main' >>/etc/apt/sources.list"
  config.vm.provision :shell, :inline => 'export DEBIAN_FRONTEND=noninteractive; apt-get -qq update; apt-get install -qq -y --force-yes lxc-docker'
end

Vagrant::VERSION < "1.1.0" and Vagrant::Config.run do |config|
  v10(config)
end

Vagrant::VERSION >= "1.1.0" and Vagrant.configure("1") do |config|
  v10(config)
end

Vagrant::VERSION >= "1.1.0" and Vagrant.configure("2") do |config|
  config.vm.provider :aws do |aws|
    config.vm.box = "dummy"
    config.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
    aws.access_key_id = ENV["AWS_ACCESS_KEY_ID"]
    aws.secret_access_key = ENV["AWS_SECRET_ACCESS_KEY"]
    aws.keypair_name = ENV["AWS_KEYPAIR_NAME"]
    aws.ssh_private_key_path = ENV["AWS_SSH_PRIVKEY"]
    aws.region = "us-east-1"
    aws.ami = "ami-d0f89fb9"
    aws.ssh_username = "ubuntu"
    aws.instance_type = "t1.micro"
  end

  config.vm.provider :rackspace do |rs|
    config.vm.box = "dummy"
    config.vm.box_url = "https://github.com/mitchellh/vagrant-rackspace/raw/master/dummy.box"
    config.ssh.private_key_path = ENV["RS_PRIVATE_KEY"]
    rs.username = ENV["RS_USERNAME"]
    rs.api_key = ENV["RS_API_KEY"]
    rs.public_key_path = ENV["RS_PUBLIC_KEY"]
    rs.flavor = /512MB/
    rs.image = /Ubuntu/
  end

  config.vm.provider :virtualbox do |vb|
    config.vm.box = 'precise64'
    config.vm.box_url = 'http://files.vagrantup.com/precise64.box'
  end
end

Vagrant::VERSION >= "1.2.0" and Vagrant.configure("2") do |config|
  config.vm.provider :aws do |aws, override|
    config.vm.box = "dummy"
    config.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
    aws.access_key_id = ENV["AWS_ACCESS_KEY_ID"]
    aws.secret_access_key = ENV["AWS_SECRET_ACCESS_KEY"]
    aws.keypair_name = ENV["AWS_KEYPAIR_NAME"]
    override.ssh.private_key_path = ENV["AWS_SSH_PRIVKEY"]
    override.ssh.username = "ubuntu"
    aws.region = "us-east-1"
    aws.ami = "ami-d0f89fb9"
    aws.instance_type = "t1.micro"
  end

  config.vm.provider :rackspace do |rs|
    config.vm.box = "dummy"
    config.vm.box_url = "https://github.com/mitchellh/vagrant-rackspace/raw/master/dummy.box"
    config.ssh.private_key_path = ENV["RS_PRIVATE_KEY"]
    rs.username = ENV["RS_USERNAME"]
    rs.api_key = ENV["RS_API_KEY"]
    rs.public_key_path = ENV["RS_PUBLIC_KEY"]
    rs.flavor = /512MB/
    rs.image = /Ubuntu/
  end

  config.vm.provider :virtualbox do |vb|
    config.vm.box = 'precise64'
    config.vm.box_url = 'http://files.vagrantup.com/precise64.box'
  end

end
@@ -1 +0,0 @@
Victor Vieux <vieux@docker.com> (@vieux)
@@ -1,5 +0,0 @@
This directory contains code pertaining to the Docker API:

- Used by the docker client when communicating with the docker daemon

- Used by third party tools wishing to interface with the docker daemon
@@ -1,19 +0,0 @@
package api

import (
	"testing"
)

func TestJsonContentType(t *testing.T) {
	if !MatchesContentType("application/json", "application/json") {
		t.Fail()
	}

	if !MatchesContentType("application/json; charset=utf-8", "application/json") {
		t.Fail()
	}

	if MatchesContentType("dockerapplication/json", "application/json") {
		t.Fail()
	}
}
@@ -1,113 +0,0 @@
package client

import (
	"crypto/tls"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"reflect"
	"strings"
	"text/template"

	flag "github.com/docker/docker/pkg/mflag"
	"github.com/docker/docker/pkg/term"
	"github.com/docker/docker/registry"
)

type DockerCli struct {
	proto      string
	addr       string
	configFile *registry.ConfigFile
	in         io.ReadCloser
	out        io.Writer
	err        io.Writer
	isTerminal bool
	terminalFd uintptr
	tlsConfig  *tls.Config
	scheme     string
}

var funcMap = template.FuncMap{
	"json": func(v interface{}) string {
		a, _ := json.Marshal(v)
		return string(a)
	},
}

func (cli *DockerCli) getMethod(name string) (func(...string) error, bool) {
	if len(name) == 0 {
		return nil, false
	}
	methodName := "Cmd" + strings.ToUpper(name[:1]) + strings.ToLower(name[1:])
	method := reflect.ValueOf(cli).MethodByName(methodName)
	if !method.IsValid() {
		return nil, false
	}
	return method.Interface().(func(...string) error), true
}

// Cmd executes the specified command
func (cli *DockerCli) Cmd(args ...string) error {
	if len(args) > 0 {
		method, exists := cli.getMethod(args[0])
		if !exists {
			fmt.Println("Error: Command not found:", args[0])
			return cli.CmdHelp(args[1:]...)
		}
		return method(args[1:]...)
	}
	return cli.CmdHelp(args...)
}

func (cli *DockerCli) Subcmd(name, signature, description string) *flag.FlagSet {
	flags := flag.NewFlagSet(name, flag.ContinueOnError)
	flags.Usage = func() {
		fmt.Fprintf(cli.err, "\nUsage: docker %s %s\n\n%s\n\n", name, signature, description)
		flags.PrintDefaults()
		os.Exit(2)
	}
	return flags
}

func (cli *DockerCli) LoadConfigFile() (err error) {
	cli.configFile, err = registry.LoadConfig(os.Getenv("HOME"))
	if err != nil {
		fmt.Fprintf(cli.err, "WARNING: %s\n", err)
	}
	return err
}

func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string, tlsConfig *tls.Config) *DockerCli {
	var (
		isTerminal = false
		terminalFd uintptr
		scheme     = "http"
	)

	if tlsConfig != nil {
		scheme = "https"
	}

	if in != nil {
		if file, ok := out.(*os.File); ok {
			terminalFd = file.Fd()
			isTerminal = term.IsTerminal(terminalFd)
		}
	}

	if err == nil {
		err = out
	}
	return &DockerCli{
		proto:      proto,
		addr:       addr,
		in:         in,
		out:        out,
		err:        err,
		isTerminal: isTerminal,
		terminalFd: terminalFd,
		tlsConfig:  tlsConfig,
		scheme:     scheme,
	}
}
File diff suppressed because it is too large
@@ -1,134 +0,0 @@
package client

import (
	"crypto/tls"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/http/httputil"
	"os"
	"runtime"
	"strings"

	"github.com/docker/docker/api"
	"github.com/docker/docker/dockerversion"
	"github.com/docker/docker/pkg/log"
	"github.com/docker/docker/pkg/term"
	"github.com/docker/docker/utils"
)

func (cli *DockerCli) dial() (net.Conn, error) {
	if cli.tlsConfig != nil && cli.proto != "unix" {
		return tls.Dial(cli.proto, cli.addr, cli.tlsConfig)
	}
	return net.Dial(cli.proto, cli.addr)
}

func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.ReadCloser, stdout, stderr io.Writer, started chan io.Closer) error {
	defer func() {
		if started != nil {
			close(started)
		}
	}()

	req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), nil)
	if err != nil {
		return err
	}
	req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION)
	req.Header.Set("Content-Type", "plain/text")
	req.Host = cli.addr

	dial, err := cli.dial()
	if err != nil {
		if strings.Contains(err.Error(), "connection refused") {
			return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
		}
		return err
	}
	clientconn := httputil.NewClientConn(dial, nil)
	defer clientconn.Close()

	// Server hijacks the connection, error 'connection closed' expected
	clientconn.Do(req)

	rwc, br := clientconn.Hijack()
	defer rwc.Close()

	if started != nil {
		started <- rwc
	}

	var receiveStdout chan error

	var oldState *term.State

	if in != nil && setRawTerminal && cli.isTerminal && os.Getenv("NORAW") == "" {
		oldState, err = term.SetRawTerminal(cli.terminalFd)
		if err != nil {
			return err
		}
		defer term.RestoreTerminal(cli.terminalFd, oldState)
	}

	if stdout != nil || stderr != nil {
		receiveStdout = utils.Go(func() (err error) {
			defer func() {
				if in != nil {
					if setRawTerminal && cli.isTerminal {
						term.RestoreTerminal(cli.terminalFd, oldState)
					}
					// For some reason this Close call blocks on darwin..
					// As the client exits right after, simply discard the close
					// until we find a better solution.
					if runtime.GOOS != "darwin" {
						in.Close()
					}
				}
			}()

			// When TTY is ON, use regular copy
			if setRawTerminal && stdout != nil {
				_, err = io.Copy(stdout, br)
			} else {
				_, err = utils.StdCopy(stdout, stderr, br)
			}
			log.Debugf("[hijack] End of stdout")
			return err
		})
	}

	sendStdin := utils.Go(func() error {
		if in != nil {
			io.Copy(rwc, in)
			log.Debugf("[hijack] End of stdin")
		}
		if tcpc, ok := rwc.(*net.TCPConn); ok {
			if err := tcpc.CloseWrite(); err != nil {
				log.Debugf("Couldn't send EOF: %s", err)
			}
		} else if unixc, ok := rwc.(*net.UnixConn); ok {
			if err := unixc.CloseWrite(); err != nil {
				log.Debugf("Couldn't send EOF: %s", err)
			}
		}
		// Discard errors due to pipe interruption
		return nil
	})

	if stdout != nil || stderr != nil {
		if err := <-receiveStdout; err != nil {
			log.Debugf("Error receiveStdout: %s", err)
			return err
		}
	}

	if !cli.isTerminal {
		if err := <-sendStdin; err != nil {
			log.Debugf("Error sendStdin: %s", err)
			return err
		}
	}
	return nil
}
@@ -1,261 +0,0 @@
package client

import (
	"bytes"
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"net"
	"net/http"
	"net/url"
	"os"
	gosignal "os/signal"
	"strconv"
	"strings"
	"syscall"

	"github.com/docker/docker/api"
	"github.com/docker/docker/dockerversion"
	"github.com/docker/docker/engine"
	"github.com/docker/docker/pkg/log"
	"github.com/docker/docker/pkg/term"
	"github.com/docker/docker/registry"
	"github.com/docker/docker/utils"
)

var (
	ErrConnectionRefused = errors.New("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
)

func (cli *DockerCli) HTTPClient() *http.Client {
	tr := &http.Transport{
		TLSClientConfig: cli.tlsConfig,
		Dial: func(network, addr string) (net.Conn, error) {
			return net.Dial(cli.proto, cli.addr)
		},
	}
	return &http.Client{Transport: tr}
}

func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo bool) (io.ReadCloser, int, error) {
	params := bytes.NewBuffer(nil)
	if data != nil {
		if env, ok := data.(engine.Env); ok {
			if err := env.Encode(params); err != nil {
				return nil, -1, err
			}
		} else {
			buf, err := json.Marshal(data)
			if err != nil {
				return nil, -1, err
			}
			if _, err := params.Write(buf); err != nil {
				return nil, -1, err
			}
		}
	}

	req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), params)
	if err != nil {
		return nil, -1, err
	}
	if passAuthInfo {
		cli.LoadConfigFile()
		// Resolve the Auth config relevant for this server
		authConfig := cli.configFile.ResolveAuthConfig(registry.IndexServerAddress())
		getHeaders := func(authConfig registry.AuthConfig) (map[string][]string, error) {
			buf, err := json.Marshal(authConfig)
			if err != nil {
				return nil, err
			}
			registryAuthHeader := []string{
				base64.URLEncoding.EncodeToString(buf),
			}
			return map[string][]string{"X-Registry-Auth": registryAuthHeader}, nil
		}
		if headers, err := getHeaders(authConfig); err == nil && headers != nil {
			for k, v := range headers {
				req.Header[k] = v
			}
		}
	}
	req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION)
	req.URL.Host = cli.addr
	req.URL.Scheme = cli.scheme
	if data != nil {
		req.Header.Set("Content-Type", "application/json")
	} else if method == "POST" {
		req.Header.Set("Content-Type", "plain/text")
	}
	resp, err := cli.HTTPClient().Do(req)
	if err != nil {
		if strings.Contains(err.Error(), "connection refused") {
			return nil, -1, ErrConnectionRefused
		}
		return nil, -1, err
	}

	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return nil, -1, err
		}
		if len(body) == 0 {
			return nil, resp.StatusCode, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(resp.StatusCode), req.URL)
		}
		return nil, resp.StatusCode, fmt.Errorf("Error response from daemon: %s", bytes.TrimSpace(body))
	}
	return resp.Body, resp.StatusCode, nil
}

func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, headers map[string][]string) error {
	return cli.streamHelper(method, path, true, in, out, nil, headers)
}

func (cli *DockerCli) streamHelper(method, path string, setRawTerminal bool, in io.Reader, stdout, stderr io.Writer, headers map[string][]string) error {
	if (method == "POST" || method == "PUT") && in == nil {
		in = bytes.NewReader([]byte{})
	}

	req, err := http.NewRequest(method, fmt.Sprintf("http://v%s%s", api.APIVERSION, path), in)
	if err != nil {
		return err
	}
	req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION)
	req.URL.Host = cli.addr
	req.URL.Scheme = cli.scheme
	if method == "POST" {
		req.Header.Set("Content-Type", "plain/text")
	}

	if headers != nil {
		for k, v := range headers {
			req.Header[k] = v
		}
	}
	resp, err := cli.HTTPClient().Do(req)
	if err != nil {
		if strings.Contains(err.Error(), "connection refused") {
			return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
		}
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return err
		}
		if len(body) == 0 {
			return fmt.Errorf("Error :%s", http.StatusText(resp.StatusCode))
		}
		return fmt.Errorf("Error: %s", bytes.TrimSpace(body))
	}

	if api.MatchesContentType(resp.Header.Get("Content-Type"), "application/json") {
		return utils.DisplayJSONMessagesStream(resp.Body, stdout, cli.terminalFd, cli.isTerminal)
	}
	if stdout != nil || stderr != nil {
		// When TTY is ON, use regular copy
		if setRawTerminal {
			_, err = io.Copy(stdout, resp.Body)
		} else {
			_, err = utils.StdCopy(stdout, stderr, resp.Body)
		}
		log.Debugf("[stream] End of stdout")
		return err
	}
	return nil
}

func (cli *DockerCli) resizeTty(id string) {
	height, width := cli.getTtySize()
	if height == 0 && width == 0 {
		return
	}
	v := url.Values{}
	v.Set("h", strconv.Itoa(height))
	v.Set("w", strconv.Itoa(width))
	if _, _, err := readBody(cli.call("POST", "/containers/"+id+"/resize?"+v.Encode(), nil, false)); err != nil {
		log.Debugf("Error resize: %s", err)
	}
}

func waitForExit(cli *DockerCli, containerId string) (int, error) {
	stream, _, err := cli.call("POST", "/containers/"+containerId+"/wait", nil, false)
	if err != nil {
		return -1, err
	}

	var out engine.Env
	if err := out.Decode(stream); err != nil {
		return -1, err
	}
	return out.GetInt("StatusCode"), nil
}

// getExitCode performs an inspect on the container. It returns
// the running state and the exit code.
func getExitCode(cli *DockerCli, containerId string) (bool, int, error) {
	steam, _, err := cli.call("GET", "/containers/"+containerId+"/json", nil, false)
	if err != nil {
		// If we can't connect, then the daemon probably died.
		if err != ErrConnectionRefused {
			return false, -1, err
		}
		return false, -1, nil
	}

	var result engine.Env
	if err := result.Decode(steam); err != nil {
		return false, -1, err
	}

	state := result.GetSubEnv("State")
	return state.GetBool("Running"), state.GetInt("ExitCode"), nil
}

func (cli *DockerCli) monitorTtySize(id string) error {
	cli.resizeTty(id)

	sigchan := make(chan os.Signal, 1)
	gosignal.Notify(sigchan, syscall.SIGWINCH)
	go func() {
		for _ = range sigchan {
			cli.resizeTty(id)
		}
	}()
	return nil
}

func (cli *DockerCli) getTtySize() (int, int) {
	if !cli.isTerminal {
		return 0, 0
	}
	ws, err := term.GetWinsize(cli.terminalFd)
	if err != nil {
		log.Debugf("Error getting size: %s", err)
		if ws == nil {
			return 0, 0
		}
	}
	return int(ws.Height), int(ws.Width)
}

func readBody(stream io.ReadCloser, statusCode int, err error) ([]byte, int, error) {
	if stream != nil {
		defer stream.Close()
	}
	if err != nil {
		return nil, statusCode, err
	}
	body, err := ioutil.ReadAll(stream)
	if err != nil {
		return nil, -1, err
	}
	return body, statusCode, nil
}
@@ -1,49 +0,0 @@
package api

import (
	"fmt"
	"mime"
	"strings"

	"github.com/docker/docker/engine"
	"github.com/docker/docker/pkg/log"
	"github.com/docker/docker/pkg/parsers"
	"github.com/docker/docker/pkg/version"
)

const (
	APIVERSION        version.Version = "1.14"
	DEFAULTHTTPHOST                   = "127.0.0.1"
	DEFAULTUNIXSOCKET                 = "/var/run/docker.sock"
)

func ValidateHost(val string) (string, error) {
	host, err := parsers.ParseHost(DEFAULTHTTPHOST, DEFAULTUNIXSOCKET, val)
	if err != nil {
		return val, err
	}
	return host, nil
}

//TODO remove, used on < 1.5 in getContainersJSON
func DisplayablePorts(ports *engine.Table) string {
	result := []string{}
	ports.SetKey("PublicPort")
	ports.Sort()
	for _, port := range ports.Data {
		if port.Get("IP") == "" {
			result = append(result, fmt.Sprintf("%d/%s", port.GetInt("PrivatePort"), port.Get("Type")))
		} else {
			result = append(result, fmt.Sprintf("%s:%d->%d/%s", port.Get("IP"), port.GetInt("PublicPort"), port.GetInt("PrivatePort"), port.Get("Type")))
		}
	}
	return strings.Join(result, ", ")
}

func MatchesContentType(contentType, expectedType string) bool {
	mimetype, _, err := mime.ParseMediaType(contentType)
	if err != nil {
		log.Errorf("Error parsing media type: %s error: %s", contentType, err.Error())
	}
	return err == nil && mimetype == expectedType
}
@@ -1,3 +0,0 @@
Victor Vieux <vieux@docker.com> (@vieux)
# off the grid until september
# Johan Euphrosine <proppy@google.com> (@proppy)
1380 api/server/server.go
File diff suppressed because it is too large
@@ -1,555 +0,0 @@
package server

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"reflect"
	"strings"
	"testing"

	"github.com/docker/docker/api"
	"github.com/docker/docker/engine"
	"github.com/docker/docker/pkg/version"
)

func TestGetBoolParam(t *testing.T) {
	if ret, err := getBoolParam("true"); err != nil || !ret {
		t.Fatalf("true -> true, nil | got %t %s", ret, err)
	}
	if ret, err := getBoolParam("True"); err != nil || !ret {
		t.Fatalf("True -> true, nil | got %t %s", ret, err)
	}
	if ret, err := getBoolParam("1"); err != nil || !ret {
		t.Fatalf("1 -> true, nil | got %t %s", ret, err)
	}
	if ret, err := getBoolParam(""); err != nil || ret {
		t.Fatalf("\"\" -> false, nil | got %t %s", ret, err)
	}
	if ret, err := getBoolParam("false"); err != nil || ret {
		t.Fatalf("false -> false, nil | got %t %s", ret, err)
	}
	if ret, err := getBoolParam("0"); err != nil || ret {
		t.Fatalf("0 -> false, nil | got %t %s", ret, err)
	}
	if ret, err := getBoolParam("faux"); err == nil || ret {
		t.Fatalf("faux -> false, err | got %t %s", ret, err)
	}
}

// Go's test runner only recognizes functions named TestXxx with an
// upper-case letter after "Test", so the name must be TestHttpError
// (not TesthttpError) for this test to run at all.
func TestHttpError(t *testing.T) {
	r := httptest.NewRecorder()

	httpError(r, fmt.Errorf("No such method"))
	if r.Code != http.StatusNotFound {
		t.Fatalf("Expected %d, got %d", http.StatusNotFound, r.Code)
	}

	httpError(r, fmt.Errorf("This account hasn't been activated"))
	if r.Code != http.StatusForbidden {
		t.Fatalf("Expected %d, got %d", http.StatusForbidden, r.Code)
	}

	httpError(r, fmt.Errorf("Some error"))
	if r.Code != http.StatusInternalServerError {
		t.Fatalf("Expected %d, got %d", http.StatusInternalServerError, r.Code)
	}
}
func TestGetVersion(t *testing.T) {
	eng := engine.New()
	var called bool
	eng.Register("version", func(job *engine.Job) engine.Status {
		called = true
		v := &engine.Env{}
		v.SetJson("Version", "42.1")
		v.Set("ApiVersion", "1.1.1.1.1")
		v.Set("GoVersion", "2.42")
		v.Set("Os", "Linux")
		v.Set("Arch", "x86_64")
		if _, err := v.WriteTo(job.Stdout); err != nil {
			return job.Error(err)
		}
		return engine.StatusOK
	})
	r := serveRequest("GET", "/version", nil, eng, t)
	if !called {
		t.Fatalf("handler was not called")
	}
	v := readEnv(r.Body, t)
	if v.Get("Version") != "42.1" {
		t.Fatalf("%#v\n", v)
	}
	if r.HeaderMap.Get("Content-Type") != "application/json" {
		t.Fatalf("%#v\n", r)
	}
}

func TestGetInfo(t *testing.T) {
	eng := engine.New()
	var called bool
	eng.Register("info", func(job *engine.Job) engine.Status {
		called = true
		v := &engine.Env{}
		v.SetInt("Containers", 1)
		v.SetInt("Images", 42000)
		if _, err := v.WriteTo(job.Stdout); err != nil {
			return job.Error(err)
		}
		return engine.StatusOK
	})
	r := serveRequest("GET", "/info", nil, eng, t)
	if !called {
		t.Fatalf("handler was not called")
	}
	v := readEnv(r.Body, t)
	if v.GetInt("Images") != 42000 {
		t.Fatalf("%#v\n", v)
	}
	if v.GetInt("Containers") != 1 {
		t.Fatalf("%#v\n", v)
	}
	assertContentType(r, "application/json", t)
}

func TestGetImagesJSON(t *testing.T) {
	eng := engine.New()
	var called bool
	eng.Register("images", func(job *engine.Job) engine.Status {
		called = true
		v := createEnvFromGetImagesJSONStruct(sampleImage)
		if _, err := v.WriteTo(job.Stdout); err != nil {
			return job.Error(err)
		}
		return engine.StatusOK
	})
	r := serveRequest("GET", "/images/json", nil, eng, t)
	if !called {
		t.Fatal("handler was not called")
	}
	assertHttpNotError(r, t)
	assertContentType(r, "application/json", t)
	var observed getImagesJSONStruct
	if err := json.Unmarshal(r.Body.Bytes(), &observed); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(observed, sampleImage) {
		t.Errorf("Expected %#v but got %#v", sampleImage, observed)
	}
}

func TestGetImagesJSONFilter(t *testing.T) {
	eng := engine.New()
	filter := "nothing"
	eng.Register("images", func(job *engine.Job) engine.Status {
		filter = job.Getenv("filter")
		return engine.StatusOK
	})
	serveRequest("GET", "/images/json?filter=aaaa", nil, eng, t)
	if filter != "aaaa" {
		t.Errorf("%#v", filter)
	}
}

func TestGetImagesJSONFilters(t *testing.T) {
	eng := engine.New()
	filter := "nothing"
	eng.Register("images", func(job *engine.Job) engine.Status {
		filter = job.Getenv("filters")
		return engine.StatusOK
	})
	serveRequest("GET", "/images/json?filters=nnnn", nil, eng, t)
	if filter != "nnnn" {
		t.Errorf("%#v", filter)
	}
}

func TestGetImagesJSONAll(t *testing.T) {
	eng := engine.New()
	allFilter := "-1"
	eng.Register("images", func(job *engine.Job) engine.Status {
		allFilter = job.Getenv("all")
		return engine.StatusOK
	})
	serveRequest("GET", "/images/json?all=1", nil, eng, t)
	if allFilter != "1" {
		t.Errorf("%#v", allFilter)
	}
}

func TestGetImagesJSONLegacyFormat(t *testing.T) {
	eng := engine.New()
	var called bool
	eng.Register("images", func(job *engine.Job) engine.Status {
		called = true
		outsLegacy := engine.NewTable("Created", 0)
		outsLegacy.Add(createEnvFromGetImagesJSONStruct(sampleImage))
		if _, err := outsLegacy.WriteListTo(job.Stdout); err != nil {
			return job.Error(err)
		}
		return engine.StatusOK
	})
	r := serveRequestUsingVersion("GET", "/images/json", "1.6", nil, eng, t)
	if !called {
		t.Fatal("handler was not called")
	}
	assertHttpNotError(r, t)
	assertContentType(r, "application/json", t)
	images := engine.NewTable("Created", 0)
	if _, err := images.ReadListFrom(r.Body.Bytes()); err != nil {
		t.Fatal(err)
	}
	if images.Len() != 1 {
		t.Fatalf("Expected 1 image, %d found", images.Len())
	}
	image := images.Data[0]
	if image.Get("Tag") != "test-tag" {
		t.Errorf("Expected tag 'test-tag', found '%s'", image.Get("Tag"))
	}
	if image.Get("Repository") != "test-name" {
		t.Errorf("Expected repository 'test-name', found '%s'", image.Get("Repository"))
	}
}

func TestGetContainersByName(t *testing.T) {
	eng := engine.New()
	name := "container_name"
	var called bool
	eng.Register("container_inspect", func(job *engine.Job) engine.Status {
		called = true
		if job.Args[0] != name {
			t.Errorf("name != '%s': %#v", name, job.Args[0])
		}
		if api.APIVERSION.LessThan("1.12") && !job.GetenvBool("dirty") {
			t.Errorf("dirty env variable not set")
		} else if api.APIVERSION.GreaterThanOrEqualTo("1.12") && job.GetenvBool("dirty") {
			t.Errorf("dirty env variable set when it shouldn't")
		}
		v := &engine.Env{}
		v.SetBool("dirty", true)
		if _, err := v.WriteTo(job.Stdout); err != nil {
			return job.Error(err)
		}
		return engine.StatusOK
	})
	r := serveRequest("GET", "/containers/"+name+"/json", nil, eng, t)
	if !called {
		t.Fatal("handler was not called")
	}
	assertContentType(r, "application/json", t)
	var stdoutJson interface{}
	if err := json.Unmarshal(r.Body.Bytes(), &stdoutJson); err != nil {
		t.Fatalf("%#v", err)
	}
	if stdoutJson.(map[string]interface{})["dirty"].(float64) != 1 {
		t.Fatalf("%#v", stdoutJson)
	}
}

func TestGetEvents(t *testing.T) {
	eng := engine.New()
	var called bool
	eng.Register("events", func(job *engine.Job) engine.Status {
		called = true
		since := job.Getenv("since")
		if since != "1" {
			t.Fatalf("'since' should be 1, found %#v instead", since)
		}
		until := job.Getenv("until")
		if until != "0" {
			t.Fatalf("'until' should be 0, found %#v instead", until)
		}
		v := &engine.Env{}
		v.Set("since", since)
		v.Set("until", until)
		if _, err := v.WriteTo(job.Stdout); err != nil {
			return job.Error(err)
		}
		return engine.StatusOK
	})
	r := serveRequest("GET", "/events?since=1&until=0", nil, eng, t)
	if !called {
		t.Fatal("handler was not called")
	}
	assertContentType(r, "application/json", t)
	var stdout_json struct {
		Since int
		Until int
	}
	if err := json.Unmarshal(r.Body.Bytes(), &stdout_json); err != nil {
		t.Fatal(err)
	}
	if stdout_json.Since != 1 {
		t.Errorf("since != 1: %#v", stdout_json.Since)
	}
	if stdout_json.Until != 0 {
		t.Errorf("until != 0: %#v", stdout_json.Until)
	}
}

func TestLogs(t *testing.T) {
	eng := engine.New()
	var inspect bool
	var logs bool
	eng.Register("container_inspect", func(job *engine.Job) engine.Status {
		inspect = true
		if len(job.Args) == 0 {
			t.Fatal("Job arguments are empty")
		}
		if job.Args[0] != "test" {
			t.Fatalf("Container name %s, must be test", job.Args[0])
		}
		return engine.StatusOK
	})
	expected := "logs"
	eng.Register("logs", func(job *engine.Job) engine.Status {
		logs = true
		if len(job.Args) == 0 {
			t.Fatal("Job arguments are empty")
		}
		if job.Args[0] != "test" {
			t.Fatalf("Container name %s, must be test", job.Args[0])
		}
		follow := job.Getenv("follow")
		if follow != "1" {
			t.Fatalf("follow: %s, must be 1", follow)
		}
		stdout := job.Getenv("stdout")
		if stdout != "1" {
			t.Fatalf("stdout %s, must be 1", stdout)
		}
		stderr := job.Getenv("stderr")
		if stderr != "" {
			t.Fatalf("stderr %s, must be empty", stderr)
		}
		timestamps := job.Getenv("timestamps")
		if timestamps != "1" {
			t.Fatalf("timestamps %s, must be 1", timestamps)
		}
		job.Stdout.Write([]byte(expected))
		return engine.StatusOK
	})
	r := serveRequest("GET", "/containers/test/logs?follow=1&stdout=1&timestamps=1", nil, eng, t)
	if r.Code != http.StatusOK {
		t.Fatalf("Got status %d, expected %d", r.Code, http.StatusOK)
	}
	if !inspect {
		t.Fatal("container_inspect job was not called")
	}
	if !logs {
		t.Fatal("logs job was not called")
	}
	res := r.Body.String()
	if res != expected {
		t.Fatalf("Output %s, expected %s", res, expected)
	}
}

func TestLogsNoStreams(t *testing.T) {
	eng := engine.New()
	var inspect bool
	var logs bool
	eng.Register("container_inspect", func(job *engine.Job) engine.Status {
		inspect = true
		if len(job.Args) == 0 {
			t.Fatal("Job arguments are empty")
		}
		if job.Args[0] != "test" {
			t.Fatalf("Container name %s, must be test", job.Args[0])
		}
		return engine.StatusOK
	})
	eng.Register("logs", func(job *engine.Job) engine.Status {
		logs = true
		return engine.StatusOK
	})
	r := serveRequest("GET", "/containers/test/logs", nil, eng, t)
	if r.Code != http.StatusBadRequest {
		t.Fatalf("Got status %d, expected %d", r.Code, http.StatusBadRequest)
	}
	if inspect {
		t.Fatal("container_inspect job was called, but it shouldn't")
	}
	if logs {
		t.Fatal("logs job was called, but it shouldn't")
	}
	res := strings.TrimSpace(r.Body.String())
	expected := "Bad parameters: you must choose at least one stream"
	if !strings.Contains(res, expected) {
		t.Fatalf("Output %s, expected %s in it", res, expected)
	}
}

func TestGetImagesHistory(t *testing.T) {
	eng := engine.New()
	imageName := "docker-test-image"
	var called bool
	eng.Register("history", func(job *engine.Job) engine.Status {
		called = true
		if len(job.Args) == 0 {
			t.Fatal("Job arguments are empty")
		}
		if job.Args[0] != imageName {
			t.Fatalf("name != '%s': %#v", imageName, job.Args[0])
		}
		v := &engine.Env{}
		if _, err := v.WriteTo(job.Stdout); err != nil {
			return job.Error(err)
		}
		return engine.StatusOK
	})
	r := serveRequest("GET", "/images/"+imageName+"/history", nil, eng, t)
	if !called {
		t.Fatalf("handler was not called")
	}
	if r.Code != http.StatusOK {
		t.Fatalf("Got status %d, expected %d", r.Code, http.StatusOK)
	}
	if r.HeaderMap.Get("Content-Type") != "application/json" {
		t.Fatalf("%#v\n", r)
	}
}

func TestGetImagesByName(t *testing.T) {
	eng := engine.New()
	name := "image_name"
	var called bool
	eng.Register("image_inspect", func(job *engine.Job) engine.Status {
		called = true
		if job.Args[0] != name {
			t.Fatalf("name != '%s': %#v", name, job.Args[0])
		}
		if api.APIVERSION.LessThan("1.12") && !job.GetenvBool("dirty") {
			t.Fatal("dirty env variable not set")
		} else if api.APIVERSION.GreaterThanOrEqualTo("1.12") && job.GetenvBool("dirty") {
			t.Fatal("dirty env variable set when it shouldn't")
		}
		v := &engine.Env{}
		v.SetBool("dirty", true)
		if _, err := v.WriteTo(job.Stdout); err != nil {
			return job.Error(err)
		}
		return engine.StatusOK
	})
	r := serveRequest("GET", "/images/"+name+"/json", nil, eng, t)
	if !called {
		t.Fatal("handler was not called")
	}
	if r.HeaderMap.Get("Content-Type") != "application/json" {
		t.Fatalf("%#v\n", r)
	}
	var stdoutJson interface{}
	if err := json.Unmarshal(r.Body.Bytes(), &stdoutJson); err != nil {
		t.Fatalf("%#v", err)
	}
	if stdoutJson.(map[string]interface{})["dirty"].(float64) != 1 {
		t.Fatalf("%#v", stdoutJson)
	}
}

func TestDeleteContainers(t *testing.T) {
	eng := engine.New()
	name := "foo"
	var called bool
	eng.Register("delete", func(job *engine.Job) engine.Status {
		called = true
		if len(job.Args) == 0 {
			t.Fatalf("Job arguments are empty")
		}
		if job.Args[0] != name {
			t.Fatalf("name != '%s': %#v", name, job.Args[0])
		}
		return engine.StatusOK
	})
	r := serveRequest("DELETE", "/containers/"+name, nil, eng, t)
	if !called {
		t.Fatalf("handler was not called")
	}
	if r.Code != http.StatusNoContent {
		t.Fatalf("Got status %d, expected %d", r.Code, http.StatusNoContent)
	}
}

func serveRequest(method, target string, body io.Reader, eng *engine.Engine, t *testing.T) *httptest.ResponseRecorder {
	return serveRequestUsingVersion(method, target, api.APIVERSION, body, eng, t)
}

func serveRequestUsingVersion(method, target string, version version.Version, body io.Reader, eng *engine.Engine, t *testing.T) *httptest.ResponseRecorder {
	r := httptest.NewRecorder()
	req, err := http.NewRequest(method, target, body)
	if err != nil {
		t.Fatal(err)
	}
	if err := ServeRequest(eng, version, r, req); err != nil {
		t.Fatal(err)
	}
	return r
}

func readEnv(src io.Reader, t *testing.T) *engine.Env {
	out := engine.NewOutput()
	v, err := out.AddEnv()
	if err != nil {
		t.Fatal(err)
	}
	if _, err := io.Copy(out, src); err != nil {
		t.Fatal(err)
	}
	out.Close()
	return v
}

func toJson(data interface{}, t *testing.T) io.Reader {
	var buf bytes.Buffer
	if err := json.NewEncoder(&buf).Encode(data); err != nil {
		t.Fatal(err)
	}
	return &buf
}

func assertContentType(recorder *httptest.ResponseRecorder, content_type string, t *testing.T) {
	if recorder.HeaderMap.Get("Content-Type") != content_type {
		t.Fatalf("%#v\n", recorder)
	}
}

// XXX: Duplicated from integration/utils_test.go, but maybe that's OK as that
// file should die as soon as we have converted all the integration tests?
// assertHttpNotError expects the given response not to carry an error.
// Otherwise it causes the test to fail.
func assertHttpNotError(r *httptest.ResponseRecorder, t *testing.T) {
	// Non-error http status are [200, 400)
	if r.Code < http.StatusOK || r.Code >= http.StatusBadRequest {
		t.Fatal(fmt.Errorf("Unexpected http error: %v", r.Code))
	}
}

func createEnvFromGetImagesJSONStruct(data getImagesJSONStruct) *engine.Env {
	v := &engine.Env{}
	v.SetList("RepoTags", data.RepoTags)
	v.Set("Id", data.Id)
	v.SetInt64("Created", data.Created)
	v.SetInt64("Size", data.Size)
	v.SetInt64("VirtualSize", data.VirtualSize)
	return v
}

type getImagesJSONStruct struct {
	RepoTags    []string
	Id          string
	Created     int64
	Size        int64
	VirtualSize int64
}

var sampleImage = getImagesJSONStruct{
	RepoTags:    []string{"test-name:test-tag"},
	Id:          "ID",
	Created:     999,
	Size:        777,
	VirtualSize: 666,
}
124	archive.go
Normal file
@@ -0,0 +1,124 @@
package docker

import (
	"errors"
	"io"
	"io/ioutil"
	"os"
	"os/exec"
)

type Archive io.Reader

type Compression uint32

const (
	Uncompressed Compression = iota
	Bzip2
	Gzip
	Xz
)

func (compression *Compression) Flag() string {
	switch *compression {
	case Bzip2:
		return "j"
	case Gzip:
		return "z"
	case Xz:
		return "J"
	}
	return ""
}

func Tar(path string, compression Compression) (io.Reader, error) {
	cmd := exec.Command("bsdtar", "-f", "-", "-C", path, "-c"+compression.Flag(), ".")
	return CmdStream(cmd)
}

func Untar(archive io.Reader, path string) error {
	cmd := exec.Command("bsdtar", "-f", "-", "-C", path, "-x")
	cmd.Stdin = archive
	output, err := cmd.CombinedOutput()
	if err != nil {
		return errors.New(err.Error() + ": " + string(output))
	}
	return nil
}

// CmdStream executes a command, and returns its stdout as a stream.
// If the command fails to run or doesn't complete successfully, an error
// will be returned, including anything written on stderr.
func CmdStream(cmd *exec.Cmd) (io.Reader, error) {
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return nil, err
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return nil, err
	}
	pipeR, pipeW := io.Pipe()
	errChan := make(chan []byte)
	// Collect stderr; we'll attach it to any error we return
	go func() {
		errText, e := ioutil.ReadAll(stderr)
		if e != nil {
			errText = []byte("(...couldn't fetch stderr: " + e.Error() + ")")
		}
		errChan <- errText
	}()
	// Copy stdout to the returned pipe
	go func() {
		_, err := io.Copy(pipeW, stdout)
		if err != nil {
			pipeW.CloseWithError(err)
		}
		errText := <-errChan
		if err := cmd.Wait(); err != nil {
			pipeW.CloseWithError(errors.New(err.Error() + ": " + string(errText)))
		} else {
			pipeW.Close()
		}
	}()
	// Run the command and return the pipe
	if err := cmd.Start(); err != nil {
		return nil, err
	}
	return pipeR, nil
}
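CmdStream's contract is easiest to see from the caller's side: stdout arrives as a plain stream, and a nonzero exit surfaces as a read error carrying stderr. A minimal sketch, assuming it lives in the same package as CmdStream above and that /bin/sh is available (the helper name demoCmdStream is made up):

package docker

import (
	"fmt"
	"io/ioutil"
	"os/exec"
)

// demoCmdStream is a hypothetical illustration of CmdStream's behavior.
func demoCmdStream() {
	// Success: stdout is readable as a plain stream.
	out, err := CmdStream(exec.Command("/bin/sh", "-c", "echo hello"))
	if err != nil {
		panic(err)
	}
	data, _ := ioutil.ReadAll(out)
	fmt.Printf("stdout: %q\n", data) // "hello\n"

	// Failure: the error surfaces on read, with stderr and the exit status.
	out, err = CmdStream(exec.Command("/bin/sh", "-c", "echo >&2 boom; exit 1"))
	if err != nil {
		panic(err)
	}
	if _, err := ioutil.ReadAll(out); err != nil {
		fmt.Println("read failed as expected:", err) // e.g. "exit status 1: boom"
	}
}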

// NewTempArchive reads the content of src into a temporary file, and returns the contents
// of that file as an archive. The archive can only be read once - as soon as reading completes,
// the file will be deleted.
func NewTempArchive(src Archive, dir string) (*TempArchive, error) {
	f, err := ioutil.TempFile(dir, "")
	if err != nil {
		return nil, err
	}
	if _, err := io.Copy(f, src); err != nil {
		return nil, err
	}
	if _, err := f.Seek(0, 0); err != nil {
		return nil, err
	}
	st, err := f.Stat()
	if err != nil {
		return nil, err
	}
	size := st.Size()
	return &TempArchive{f, size}, nil
}

type TempArchive struct {
	*os.File
	Size int64 // Pre-computed from Stat().Size() as a convenience
}

func (archive *TempArchive) Read(data []byte) (int, error) {
	n, err := archive.File.Read(data)
	if err != nil {
		os.Remove(archive.File.Name())
	}
	return n, err
}
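TempArchive's read-once behavior is easy to miss: the backing file is removed as soon as any Read returns an error, including io.EOF, so the archive cannot be rewound. A small lifecycle sketch, assuming it sits in the same package as NewTempArchive above (demoTempArchive is a made-up name):

package docker

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"os"
)

// demoTempArchive is a hypothetical illustration of the read-once lifecycle.
func demoTempArchive() {
	src := bytes.NewBufferString("payload that will be spooled to disk")
	tmp, err := NewTempArchive(src, "") // "" means the default temp dir
	if err != nil {
		panic(err)
	}
	name := tmp.File.Name()

	// First (and only) pass: draining the archive hits io.EOF, which
	// triggers the os.Remove inside TempArchive.Read.
	data, _ := ioutil.ReadAll(tmp)
	fmt.Printf("read %d bytes, Size=%d\n", len(data), tmp.Size)

	// The backing file is already gone.
	if _, err := os.Stat(name); os.IsNotExist(err) {
		fmt.Println("temp file already deleted")
	}
}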
@@ -1,2 +0,0 @@
Cristian Staretu <cristian.staretu@gmail.com> (@unclejack)
Tibor Vass <teabee89@gmail.com> (@tiborvass)
@@ -1,3 +0,0 @@
This code provides helper functions for dealing with archive files.

**TODO**: Move this to either `pkg` or (if not possible) to `utils`.
@@ -1,685 +0,0 @@
package archive

import (
	"bufio"
	"bytes"
	"compress/bzip2"
	"compress/gzip"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"strings"
	"syscall"

	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"

	"github.com/docker/docker/pkg/log"
	"github.com/docker/docker/pkg/system"
	"github.com/docker/docker/utils"
)

type (
	Archive       io.ReadCloser
	ArchiveReader io.Reader
	Compression   int
	TarOptions    struct {
		Includes    []string
		Excludes    []string
		Compression Compression
		NoLchown    bool
	}
)

var (
	ErrNotImplemented = errors.New("Function not implemented")
)

const (
	Uncompressed Compression = iota
	Bzip2
	Gzip
	Xz
)

func IsArchive(header []byte) bool {
	compression := DetectCompression(header)
	if compression != Uncompressed {
		return true
	}
	r := tar.NewReader(bytes.NewBuffer(header))
	_, err := r.Next()
	return err == nil
}

func DetectCompression(source []byte) Compression {
	for compression, m := range map[Compression][]byte{
		Bzip2: {0x42, 0x5A, 0x68},
		Gzip:  {0x1F, 0x8B, 0x08},
		Xz:    {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
	} {
		if len(source) < len(m) {
			log.Debugf("Len too short")
			continue
		}
		if bytes.Equal(m, source[:len(m)]) {
			return compression
		}
	}
	return Uncompressed
}
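The detection above is pure magic-byte matching on the first few bytes of the stream. A self-contained sketch of the same idea, with the prefixes copied from the table above:

package main

import (
	"bytes"
	"fmt"
)

// Magic prefixes, as in DetectCompression above.
var magics = map[string][]byte{
	"bzip2": {0x42, 0x5A, 0x68},                   // "BZh"
	"gzip":  {0x1F, 0x8B, 0x08},                   // gzip header, deflate method
	"xz":    {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, // 0xFD "7zXZ" 0x00
}

func detect(header []byte) string {
	for name, m := range magics {
		if len(header) >= len(m) && bytes.Equal(m, header[:len(m)]) {
			return name
		}
	}
	return "uncompressed"
}

func main() {
	fmt.Println(detect([]byte{0x1F, 0x8B, 0x08, 0x00})) // gzip
	fmt.Println(detect([]byte("just a tar header")))    // uncompressed
}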

func xzDecompress(archive io.Reader) (io.ReadCloser, error) {
	args := []string{"xz", "-d", "-c", "-q"}

	return CmdStream(exec.Command(args[0], args[1:]...), archive)
}

func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
	buf := bufio.NewReader(archive)
	bs, err := buf.Peek(10)
	if err != nil {
		return nil, err
	}
	log.Debugf("[tar autodetect] n: %v", bs)

	compression := DetectCompression(bs)

	switch compression {
	case Uncompressed:
		return ioutil.NopCloser(buf), nil
	case Gzip:
		return gzip.NewReader(buf)
	case Bzip2:
		return ioutil.NopCloser(bzip2.NewReader(buf)), nil
	case Xz:
		return xzDecompress(buf)
	default:
		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
	}
}
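The key trick in DecompressStream is the bufio.Reader: Peek inspects the magic bytes without consuming them, so the same buffered reader can then be handed straight to the decompressor. A standard-library-only sketch of that buffering pattern:

package main

import (
	"bufio"
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"
)

func main() {
	// Build a gzipped payload in memory (errors ignored for brevity).
	var raw bytes.Buffer
	zw := gzip.NewWriter(&raw)
	zw.Write([]byte("hello"))
	zw.Close()

	// Peek does not consume input, so the sniffed bytes are still
	// available to the decompressor reading from the same bufio.Reader.
	buf := bufio.NewReader(&raw)
	hdr, _ := buf.Peek(3)
	if hdr[0] == 0x1F && hdr[1] == 0x8B {
		zr, err := gzip.NewReader(buf)
		if err != nil {
			panic(err)
		}
		out, _ := ioutil.ReadAll(zr)
		fmt.Printf("%s\n", out) // hello
	}
}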

func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) {
	switch compression {
	case Uncompressed:
		return utils.NopWriteCloser(dest), nil
	case Gzip:
		return gzip.NewWriter(dest), nil
	case Bzip2, Xz:
		// archive/bzip2 does not support writing, and there is no xz support at all
		// However, this is not a problem as docker only currently generates gzipped tars
		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
	default:
		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
	}
}

func (compression *Compression) Extension() string {
	switch *compression {
	case Uncompressed:
		return "tar"
	case Bzip2:
		return "tar.bz2"
	case Gzip:
		return "tar.gz"
	case Xz:
		return "tar.xz"
	}
	return ""
}

func addTarFile(path, name string, tw *tar.Writer, twBuf *bufio.Writer) error {
	fi, err := os.Lstat(path)
	if err != nil {
		return err
	}

	link := ""
	if fi.Mode()&os.ModeSymlink != 0 {
		if link, err = os.Readlink(path); err != nil {
			return err
		}
	}

	hdr, err := tar.FileInfoHeader(fi, link)
	if err != nil {
		return err
	}

	if fi.IsDir() && !strings.HasSuffix(name, "/") {
		name = name + "/"
	}

	hdr.Name = name

	stat, ok := fi.Sys().(*syscall.Stat_t)
	if ok {
		// Currently go does not fill in the major/minors
		if stat.Mode&syscall.S_IFBLK == syscall.S_IFBLK ||
			stat.Mode&syscall.S_IFCHR == syscall.S_IFCHR {
			hdr.Devmajor = int64(major(uint64(stat.Rdev)))
			hdr.Devminor = int64(minor(uint64(stat.Rdev)))
		}
	}

	capability, _ := system.Lgetxattr(path, "security.capability")
	if capability != nil {
		hdr.Xattrs = make(map[string]string)
		hdr.Xattrs["security.capability"] = string(capability)
	}

	if err := tw.WriteHeader(hdr); err != nil {
		return err
	}

	if hdr.Typeflag == tar.TypeReg {
		file, err := os.Open(path)
		if err != nil {
			return err
		}

		twBuf.Reset(tw)
		_, err = io.Copy(twBuf, file)
		file.Close()
		if err != nil {
			return err
		}
		err = twBuf.Flush()
		if err != nil {
			return err
		}
		twBuf.Reset(nil)
	}

	return nil
}

func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool) error {
	// hdr.Mode is in linux format, which we can use for syscalls,
	// but for os.Foo() calls we need the mode converted to os.FileMode,
	// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
	hdrInfo := hdr.FileInfo()

	switch hdr.Typeflag {
	case tar.TypeDir:
		// Create directory unless it exists as a directory already.
		// In that case we just want to merge the two
		if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
			if err := os.Mkdir(path, hdrInfo.Mode()); err != nil {
				return err
			}
		}

	case tar.TypeReg, tar.TypeRegA:
		// Source is regular file
		file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
		if err != nil {
			return err
		}
		if _, err := io.Copy(file, reader); err != nil {
			file.Close()
			return err
		}
		file.Close()

	case tar.TypeBlock, tar.TypeChar, tar.TypeFifo:
		mode := uint32(hdr.Mode & 07777)
		switch hdr.Typeflag {
		case tar.TypeBlock:
			mode |= syscall.S_IFBLK
		case tar.TypeChar:
			mode |= syscall.S_IFCHR
		case tar.TypeFifo:
			mode |= syscall.S_IFIFO
		}

		if err := syscall.Mknod(path, mode, int(mkdev(hdr.Devmajor, hdr.Devminor))); err != nil {
			return err
		}

	case tar.TypeLink:
		if err := os.Link(filepath.Join(extractDir, hdr.Linkname), path); err != nil {
			return err
		}

	case tar.TypeSymlink:
		if err := os.Symlink(hdr.Linkname, path); err != nil {
			return err
		}

	case tar.TypeXGlobalHeader:
		log.Debugf("PAX Global Extended Headers found and ignored")
		return nil

	default:
		return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag)
	}

	if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil && Lchown {
		return err
	}

	for key, value := range hdr.Xattrs {
		if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
			return err
		}
	}

	// There is no LChmod, so ignore mode for symlink. Also, this
	// must happen after chown, as that can modify the file mode
	if hdr.Typeflag != tar.TypeSymlink {
		if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
			return err
		}
	}

	ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
	// syscall.UtimesNano doesn't support a NOFOLLOW flag atm, so fall back
	// to LUtimesNano for symlinks
	if hdr.Typeflag != tar.TypeSymlink {
		if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
			return err
		}
	} else {
		if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
			return err
		}
	}
	return nil
}

// Tar creates an archive from the directory at `path`, and returns it as a
// stream of bytes.
func Tar(path string, compression Compression) (io.ReadCloser, error) {
	return TarWithOptions(path, &TarOptions{Compression: compression})
}

func escapeName(name string) string {
	escaped := make([]byte, 0)
	for i, c := range []byte(name) {
		if i == 0 && c == '/' {
			continue
		}
		// all printable chars except "-" which is 0x2d
		if (0x20 <= c && c <= 0x7E) && c != 0x2d {
			escaped = append(escaped, c)
		} else {
			escaped = append(escaped, fmt.Sprintf("\\%03o", c)...)
		}
	}
	return string(escaped)
}

// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
// paths are included in `options.Includes` (if non-nil) or not in `options.Excludes`.
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
	pipeReader, pipeWriter := io.Pipe()

	compressWriter, err := CompressStream(pipeWriter, options.Compression)
	if err != nil {
		return nil, err
	}

	tw := tar.NewWriter(compressWriter)

	go func() {
		// In general we log errors here but ignore them because
		// during e.g. a diff operation the container can continue
		// mutating the filesystem and we can see transient errors
		// from this

		if options.Includes == nil {
			options.Includes = []string{"."}
		}

		twBuf := bufio.NewWriterSize(nil, twBufSize)

		for _, include := range options.Includes {
			filepath.Walk(filepath.Join(srcPath, include), func(filePath string, f os.FileInfo, err error) error {
				if err != nil {
					log.Debugf("Tar: Can't stat file %s to tar: %s", filePath, err)
					return nil
				}

				relFilePath, err := filepath.Rel(srcPath, filePath)
				if err != nil {
					return nil
				}

				skip, err := utils.Matches(relFilePath, options.Excludes)
				if err != nil {
					log.Debugf("Error matching %s: %s", relFilePath, err)
					return err
				}

				if skip {
					if f.IsDir() {
						return filepath.SkipDir
					}
					return nil
				}

				if err := addTarFile(filePath, relFilePath, tw, twBuf); err != nil {
					log.Debugf("Can't add file %s to tar: %s", filePath, err)
				}
				return nil
			})
		}

		// Make sure to check the error on Close.
		if err := tw.Close(); err != nil {
			log.Debugf("Can't close tar writer: %s", err)
		}
		if err := compressWriter.Close(); err != nil {
			log.Debugf("Can't close compress writer: %s", err)
		}
		if err := pipeWriter.Close(); err != nil {
			log.Debugf("Can't close pipe writer: %s", err)
		}
	}()

	return pipeReader, nil
}
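A quick sketch of how TarWithOptions is typically driven: the returned ReadCloser streams lazily while a goroutine walks the tree, so the caller just copies from it. The import path is the one this package lives at in this tree, and the paths and exclude pattern are made up for illustration:

package main

import (
	"io/ioutil"

	"github.com/docker/docker/pkg/archive" // the package defined above
)

func main() {
	// Stream /var/lib/app as an uncompressed tar, skipping the cache dir.
	// (Hypothetical paths; Excludes holds relative paths, as matched by
	// utils.Matches in the walk above.)
	rd, err := archive.TarWithOptions("/var/lib/app", &archive.TarOptions{
		Compression: archive.Uncompressed,
		Excludes:    []string{"cache"},
	})
	if err != nil {
		panic(err)
	}
	defer rd.Close()

	// The archive is produced by the walking goroutine; consuming the
	// reader is what drives it forward.
	data, err := ioutil.ReadAll(rd)
	if err != nil {
		panic(err)
	}
	_ = data // e.g. hand off to Untar, or write to disk
}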

// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `path`.
// The archive may be compressed with one of the following algorithms:
// identity (uncompressed), gzip, bzip2, xz.
// FIXME: specify behavior when target path exists vs. doesn't exist.
func Untar(archive io.Reader, dest string, options *TarOptions) error {
	if options == nil {
		options = &TarOptions{}
	}

	if archive == nil {
		return fmt.Errorf("Empty archive")
	}

	if options.Excludes == nil {
		options.Excludes = []string{}
	}

	decompressedArchive, err := DecompressStream(archive)
	if err != nil {
		return err
	}
	defer decompressedArchive.Close()

	tr := tar.NewReader(decompressedArchive)
	trBuf := bufio.NewReaderSize(nil, trBufSize)

	var dirs []*tar.Header

	// Iterate through the files in the archive.
loop:
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// end of tar archive
			break
		}
		if err != nil {
			return err
		}

		// Normalize name, for safety and for a simple is-root check
		hdr.Name = filepath.Clean(hdr.Name)

		for _, exclude := range options.Excludes {
			if strings.HasPrefix(hdr.Name, exclude) {
				continue loop
			}
		}

		if !strings.HasSuffix(hdr.Name, "/") {
			// Not the root directory, ensure that the parent directory exists
			parent := filepath.Dir(hdr.Name)
			parentPath := filepath.Join(dest, parent)
			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
				err = os.MkdirAll(parentPath, 0777)
				if err != nil {
					return err
				}
			}
		}

		path := filepath.Join(dest, hdr.Name)

		// If path exists we almost always just want to remove and replace it.
		// The only exception is when it is a directory *and* the file from
		// the layer is also a directory. Then we want to merge them (i.e.
		// just apply the metadata from the layer).
		if fi, err := os.Lstat(path); err == nil {
			if fi.IsDir() && hdr.Name == "." {
				continue
			}
			if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
				if err := os.RemoveAll(path); err != nil {
					return err
				}
			}
		}
		trBuf.Reset(tr)
		if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown); err != nil {
			return err
		}

		// Directory mtimes must be handled at the end to avoid further
		// file creation in them to modify the directory mtime
		if hdr.Typeflag == tar.TypeDir {
			dirs = append(dirs, hdr)
		}
	}

	for _, hdr := range dirs {
		path := filepath.Join(dest, hdr.Name)
		ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
		if err := syscall.UtimesNano(path, ts); err != nil {
			return err
		}
	}

	return nil
}

// TarUntar is a convenience function which calls Tar and Untar, with
// the output of one piped into the other. If either Tar or Untar fails,
// TarUntar aborts and returns the error.
func TarUntar(src string, dst string) error {
	log.Debugf("TarUntar(%s %s)", src, dst)
	archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
	if err != nil {
		return err
	}
	defer archive.Close()
	return Untar(archive, dst, nil)
}

// UntarPath is a convenience function which looks for an archive
// at filesystem path `src`, and unpacks it at `dst`.
func UntarPath(src, dst string) error {
	archive, err := os.Open(src)
	if err != nil {
		return err
	}
	defer archive.Close()
	if err := Untar(archive, dst, nil); err != nil {
		return err
	}
	return nil
}
// CopyWithTar creates a tar archive of filesystem path `src`, and
// unpacks it at filesystem path `dst`.
// The archive is streamed directly with fixed buffering and no
// intermediary disk IO.
func CopyWithTar(src, dst string) error {
	srcSt, err := os.Stat(src)
	if err != nil {
		return err
	}
	if !srcSt.IsDir() {
		return CopyFileWithTar(src, dst)
	}
	// Create dst, copy src's content into it
	log.Debugf("Creating dest directory: %s", dst)
	if err := os.MkdirAll(dst, 0755); err != nil && !os.IsExist(err) {
		return err
	}
	log.Debugf("Calling TarUntar(%s, %s)", src, dst)
	return TarUntar(src, dst)
}

// CopyFileWithTar emulates the behavior of the 'cp' command-line
// for a single file. It copies a regular file from path `src` to
// path `dst`, and preserves all its metadata.
//
// If `dst` ends with a trailing slash '/', the final destination path
// will be `dst/base(src)`.
func CopyFileWithTar(src, dst string) (err error) {
	log.Debugf("CopyFileWithTar(%s, %s)", src, dst)
	srcSt, err := os.Stat(src)
	if err != nil {
		return err
	}
	if srcSt.IsDir() {
		return fmt.Errorf("Can't copy a directory")
	}
	// Clean up the trailing /
	if dst[len(dst)-1] == '/' {
		dst = path.Join(dst, filepath.Base(src))
	}
	// Create the holding directory if necessary
	if err := os.MkdirAll(filepath.Dir(dst), 0700); err != nil && !os.IsExist(err) {
		return err
	}

	r, w := io.Pipe()
	errC := utils.Go(func() error {
		defer w.Close()

		srcF, err := os.Open(src)
		if err != nil {
			return err
		}
		defer srcF.Close()

		hdr, err := tar.FileInfoHeader(srcSt, "")
		if err != nil {
			return err
		}
		hdr.Name = filepath.Base(dst)
		tw := tar.NewWriter(w)
		defer tw.Close()
		if err := tw.WriteHeader(hdr); err != nil {
			return err
		}
		if _, err := io.Copy(tw, srcF); err != nil {
			return err
		}
		return nil
	})
	defer func() {
		// Surface a writer-side error, but never overwrite a real error
		// from Untar with a nil one.
		if er := <-errC; er != nil && err == nil {
			err = er
		}
	}()
	return Untar(r, filepath.Dir(dst), nil)
}
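A short usage sketch of the trailing-slash rule, which mirrors cp: a directory-like destination gets the source basename appended. This assumes the same package scope as CopyFileWithTar above; the helper name and paths are made up:

package archive

// demoCopyFileWithTar is a hypothetical illustration; the paths are invented.
func demoCopyFileWithTar() error {
	// Explicit destination name: the copy lands at exactly this path.
	if err := CopyFileWithTar("/etc/app/config", "/etc/app/config.copy"); err != nil {
		return err
	}
	// Trailing slash: behaves like `cp config /backup/`, producing
	// /backup/config with the source file's metadata preserved.
	return CopyFileWithTar("/etc/app/config", "/backup/")
}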

// CmdStream executes a command, and returns its stdout as a stream.
// If the command fails to run or doesn't complete successfully, an error
// will be returned, including anything written on stderr.
func CmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) {
	if input != nil {
		stdin, err := cmd.StdinPipe()
		if err != nil {
			return nil, err
		}
		// Write stdin if any
		go func() {
			io.Copy(stdin, input)
			stdin.Close()
		}()
	}
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return nil, err
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return nil, err
	}
	pipeR, pipeW := io.Pipe()
	errChan := make(chan []byte)
	// Collect stderr; we'll attach it to any error we return
	go func() {
		errText, e := ioutil.ReadAll(stderr)
		if e != nil {
			errText = []byte("(...couldn't fetch stderr: " + e.Error() + ")")
		}
		errChan <- errText
	}()
	// Copy stdout to the returned pipe
	go func() {
		_, err := io.Copy(pipeW, stdout)
		if err != nil {
			pipeW.CloseWithError(err)
		}
		errText := <-errChan
		if err := cmd.Wait(); err != nil {
			pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errText))
		} else {
			pipeW.Close()
		}
	}()
	// Run the command and return the pipe
	if err := cmd.Start(); err != nil {
		return nil, err
	}
	return pipeR, nil
}

// NewTempArchive reads the content of src into a temporary file, and returns the contents
// of that file as an archive. The archive can only be read once - as soon as reading completes,
// the file will be deleted.
func NewTempArchive(src Archive, dir string) (*TempArchive, error) {
	f, err := ioutil.TempFile(dir, "")
	if err != nil {
		return nil, err
	}
	if _, err := io.Copy(f, src); err != nil {
		return nil, err
	}
	if err = f.Sync(); err != nil {
		return nil, err
	}
	if _, err := f.Seek(0, 0); err != nil {
		return nil, err
	}
	st, err := f.Stat()
	if err != nil {
		return nil, err
	}
	size := st.Size()
	return &TempArchive{f, size}, nil
}

type TempArchive struct {
	*os.File
	Size int64 // Pre-computed from Stat().Size() as a convenience
}

func (archive *TempArchive) Read(data []byte) (int, error) {
	n, err := archive.File.Read(data)
	if err != nil {
		os.Remove(archive.File.Name())
	}
	return n, err
}
@@ -1,244 +0,0 @@
package archive

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"os/exec"
	"path"
	"testing"
	"time"

	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
)

func TestCmdStreamLargeStderr(t *testing.T) {
	cmd := exec.Command("/bin/sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello")
	out, err := CmdStream(cmd, nil)
	if err != nil {
		t.Fatalf("Failed to start command: %s", err)
	}
	errCh := make(chan error)
	go func() {
		_, err := io.Copy(ioutil.Discard, out)
		errCh <- err
	}()
	select {
	case err := <-errCh:
		if err != nil {
			t.Fatalf("Command should not have failed (err=%.100s...)", err)
		}
	case <-time.After(5 * time.Second):
		t.Fatalf("Command did not complete in 5 seconds; probable deadlock")
	}
}

func TestCmdStreamBad(t *testing.T) {
	badCmd := exec.Command("/bin/sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1")
	out, err := CmdStream(badCmd, nil)
	if err != nil {
		t.Fatalf("Failed to start command: %s", err)
	}
	if output, err := ioutil.ReadAll(out); err == nil {
		t.Fatalf("Command should have failed")
	} else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" {
		t.Fatalf("Wrong error value (%s)", err)
	} else if s := string(output); s != "hello\n" {
		t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output)
	}
}

func TestCmdStreamGood(t *testing.T) {
	cmd := exec.Command("/bin/sh", "-c", "echo hello; exit 0")
	out, err := CmdStream(cmd, nil)
	if err != nil {
		t.Fatal(err)
	}
	if output, err := ioutil.ReadAll(out); err != nil {
		t.Fatalf("Command should not have failed (err=%s)", err)
	} else if s := string(output); s != "hello\n" {
		t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output)
	}
}

func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) {
	archive, err := TarWithOptions(origin, options)
	if err != nil {
		t.Fatal(err)
	}
	defer archive.Close()

	buf := make([]byte, 10)
	if _, err := archive.Read(buf); err != nil {
		return nil, err
	}
	wrap := io.MultiReader(bytes.NewReader(buf), archive)

	detectedCompression := DetectCompression(buf)
	compression := options.Compression
	if detectedCompression.Extension() != compression.Extension() {
		return nil, fmt.Errorf("Wrong compression detected. Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension())
	}

	tmp, err := ioutil.TempDir("", "docker-test-untar")
	if err != nil {
		return nil, err
	}
	defer os.RemoveAll(tmp)
	if err := Untar(wrap, tmp, nil); err != nil {
		return nil, err
	}
	if _, err := os.Stat(tmp); err != nil {
		return nil, err
	}

	return ChangesDirs(origin, tmp)
}

func TestTarUntar(t *testing.T) {
	origin, err := ioutil.TempDir("", "docker-test-untar-origin")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(origin)
	if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
		t.Fatal(err)
	}
	if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil {
		t.Fatal(err)
	}
	if err := ioutil.WriteFile(path.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil {
		t.Fatal(err)
	}

	for _, c := range []Compression{
		Uncompressed,
		Gzip,
	} {
		changes, err := tarUntar(t, origin, &TarOptions{
			Compression: c,
			Excludes:    []string{"3"},
		})

		if err != nil {
			t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err)
		}

		if len(changes) != 1 || changes[0].Path != "/3" {
			t.Fatalf("Unexpected differences after tarUntar: %v", changes)
		}
	}
}

func TestTarWithOptions(t *testing.T) {
	origin, err := ioutil.TempDir("", "docker-test-untar-origin")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(origin)
	if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
		t.Fatal(err)
	}
	if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil {
		t.Fatal(err)
	}

	cases := []struct {
		opts       *TarOptions
		numChanges int
	}{
		{&TarOptions{Includes: []string{"1"}}, 1},
		{&TarOptions{Excludes: []string{"2"}}, 1},
	}
	for _, testCase := range cases {
		changes, err := tarUntar(t, origin, testCase.opts)
		if err != nil {
			t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err)
		}
		if len(changes) != testCase.numChanges {
			t.Errorf("Expected %d changes, got %d for %+v:",
				testCase.numChanges, len(changes), testCase.opts)
		}
	}
}

// Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz
// use PAX Global Extended Headers.
// Failing here would prevent the archives from being uncompressed during ADD
func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) {
	hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader}
	err := createTarFile("pax_global_header", "some_dir", &hdr, nil, true)
	if err != nil {
		t.Fatal(err)
	}
}

// Some tars have both GNU-specific (huge uid) and Ustar-specific (long name) entries.
// Not supposed to happen (should use PAX instead of Ustar for long names) but it does, and it should still work.
func TestUntarUstarGnuConflict(t *testing.T) {
	f, err := os.Open("testdata/broken.tar")
	if err != nil {
		t.Fatal(err)
	}
	found := false
	tr := tar.NewReader(f)
	// Iterate through the files in the archive.
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// end of tar archive
			break
		}
		if err != nil {
			t.Fatal(err)
		}
		if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" {
			found = true
			break
		}
	}
	if !found {
		t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm")
	}
}

func prepareUntarSourceDirectory(numberOfFiles int, targetPath string) (int, error) {
	fileData := []byte("fooo")
	for n := 0; n < numberOfFiles; n++ {
		fileName := fmt.Sprintf("file-%d", n)
		if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
			return 0, err
		}
	}
	totalSize := numberOfFiles * len(fileData)
	return totalSize, nil
}

func BenchmarkTarUntar(b *testing.B) {
	origin, err := ioutil.TempDir("", "docker-test-untar-origin")
	if err != nil {
		b.Fatal(err)
	}
	tempDir, err := ioutil.TempDir("", "docker-test-untar-destination")
	if err != nil {
		b.Fatal(err)
	}
	target := path.Join(tempDir, "dest")
	n, err := prepareUntarSourceDirectory(100, origin)
	if err != nil {
		b.Fatal(err)
	}
	b.ResetTimer()
	b.SetBytes(int64(n))
	defer os.RemoveAll(origin)
	defer os.RemoveAll(tempDir)
	for n := 0; n < b.N; n++ {
		err := TarUntar(origin, target)
		if err != nil {
			b.Fatal(err)
		}
		os.RemoveAll(target)
	}
}
@@ -1,384 +0,0 @@
package archive

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"
	"syscall"
	"time"

	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"

	"github.com/docker/docker/pkg/log"
	"github.com/docker/docker/pkg/system"
)

type ChangeType int

const (
	ChangeModify = iota
	ChangeAdd
	ChangeDelete
)

type Change struct {
	Path string
	Kind ChangeType
}

func (change *Change) String() string {
	var kind string
	switch change.Kind {
	case ChangeModify:
		kind = "C"
	case ChangeAdd:
		kind = "A"
	case ChangeDelete:
		kind = "D"
	}
	return fmt.Sprintf("%s %s", kind, change.Path)
}

// GNU tar and the Go tar writer don't have sub-second mtime precision,
// which is problematic when we apply changes via tar files. We handle
// this by treating two times as equal if they match exactly, *or* if
// their second counts match and either a or b has exactly 0 nanoseconds.
func sameFsTime(a, b time.Time) bool {
	return a == b ||
		(a.Unix() == b.Unix() &&
			(a.Nanosecond() == 0 || b.Nanosecond() == 0))
}
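The rule is easiest to see with concrete values; a minimal check with made-up timestamps:

package main

import (
	"fmt"
	"time"
)

// sameFsTime, as defined above.
func sameFsTime(a, b time.Time) bool {
	return a == b ||
		(a.Unix() == b.Unix() &&
			(a.Nanosecond() == 0 || b.Nanosecond() == 0))
}

func main() {
	base := time.Unix(1400000000, 123456789)
	truncated := time.Unix(1400000000, 0) // what survives a tar round trip

	// Same second, one side lost its nanoseconds: treated as equal.
	fmt.Println(sameFsTime(base, truncated)) // true

	// Same second but both carry (different) nanoseconds: a real difference.
	fmt.Println(sameFsTime(base, time.Unix(1400000000, 5))) // false
}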

func sameFsTimeSpec(a, b syscall.Timespec) bool {
	return a.Sec == b.Sec &&
		(a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
}

func Changes(layers []string, rw string) ([]Change, error) {
	var changes []Change
	err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		// Rebase path
		path, err = filepath.Rel(rw, path)
		if err != nil {
			return err
		}
		path = filepath.Join("/", path)

		// Skip root
		if path == "/" {
			return nil
		}

		// Skip AUFS metadata
		if matched, err := filepath.Match("/.wh..wh.*", path); err != nil || matched {
			return err
		}

		change := Change{
			Path: path,
		}

		// Find out what kind of modification happened
		file := filepath.Base(path)
		// If there is a whiteout, then the file was removed
		if strings.HasPrefix(file, ".wh.") {
			originalFile := file[len(".wh."):]
			change.Path = filepath.Join(filepath.Dir(path), originalFile)
			change.Kind = ChangeDelete
		} else {
			// Otherwise, the file was added
			change.Kind = ChangeAdd

			// ...Unless it already existed in a top layer, in which case, it's a modification
			for _, layer := range layers {
				stat, err := os.Stat(filepath.Join(layer, path))
				if err != nil && !os.IsNotExist(err) {
					return err
				}
				if err == nil {
					// The file existed in the top layer, so that's a modification

					// However, if it's a directory, maybe it wasn't actually modified.
					// If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
					if stat.IsDir() && f.IsDir() {
						if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
							// Both directories are the same, don't record the change
							return nil
						}
					}
					change.Kind = ChangeModify
					break
				}
			}
		}

		// Record change
		changes = append(changes, change)
		return nil
	})
	if err != nil && !os.IsNotExist(err) {
		return nil, err
	}
	return changes, nil
}
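The whiteout convention above (a `.wh.` prefix on the basename in the read-write layer marks a deletion from a lower layer) can be exercised in isolation. A small sketch with made-up paths:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// classify mirrors the whiteout logic in Changes above: a ".wh." prefix on
// the basename means "this path was deleted from a lower layer".
func classify(path string) (string, string) {
	file := filepath.Base(path)
	if strings.HasPrefix(file, ".wh.") {
		original := file[len(".wh."):]
		return filepath.Join(filepath.Dir(path), original), "delete"
	}
	return path, "add-or-modify"
}

func main() {
	fmt.Println(classify("/etc/.wh.passwd")) // /etc/passwd delete
	fmt.Println(classify("/etc/hosts"))      // /etc/hosts add-or-modify
}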

type FileInfo struct {
	parent     *FileInfo
	name       string
	stat       syscall.Stat_t
	children   map[string]*FileInfo
	capability []byte
}

func (root *FileInfo) LookUp(path string) *FileInfo {
	parent := root
	if path == "/" {
		return root
	}

	pathElements := strings.Split(path, "/")
	for _, elem := range pathElements {
		if elem != "" {
			child := parent.children[elem]
			if child == nil {
				return nil
			}
			parent = child
		}
	}
	return parent
}

func (info *FileInfo) path() string {
	if info.parent == nil {
		return "/"
	}
	return filepath.Join(info.parent.path(), info.name)
}

func (info *FileInfo) isDir() bool {
	return info.parent == nil || info.stat.Mode&syscall.S_IFDIR == syscall.S_IFDIR
}

func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
	if oldInfo == nil {
		// add
		change := Change{
			Path: info.path(),
			Kind: ChangeAdd,
		}
		*changes = append(*changes, change)
	}

	// We make a copy so we can modify it to detect additions
	// also, we only recurse on the old dir if the new info is a directory
	// otherwise any previous delete/change is considered recursive
	oldChildren := make(map[string]*FileInfo)
	if oldInfo != nil && info.isDir() {
		for k, v := range oldInfo.children {
			oldChildren[k] = v
		}
	}

	for name, newChild := range info.children {
		oldChild := oldChildren[name]
		if oldChild != nil {
			// change?
			oldStat := &oldChild.stat
			newStat := &newChild.stat
			// Note: We can't compare inode or ctime or blocksize here, because these change
			// when copying a file into a container. However, that is not generally a problem
			// because any content change will change mtime, and any status change should
			// be visible when actually comparing the stat fields. The only time this
			// breaks down is if some code intentionally hides a change by setting
			// back mtime
			if oldStat.Mode != newStat.Mode ||
				oldStat.Uid != newStat.Uid ||
				oldStat.Gid != newStat.Gid ||
				oldStat.Rdev != newStat.Rdev ||
				// Don't look at size for dirs, it's not a good measure of change
				(oldStat.Size != newStat.Size && oldStat.Mode&syscall.S_IFDIR != syscall.S_IFDIR) ||
				!sameFsTimeSpec(system.GetLastModification(oldStat), system.GetLastModification(newStat)) ||
				!bytes.Equal(oldChild.capability, newChild.capability) {
				change := Change{
					Path: newChild.path(),
					Kind: ChangeModify,
				}
				*changes = append(*changes, change)
			}

			// Remove from copy so we can detect deletions
			delete(oldChildren, name)
		}

		newChild.addChanges(oldChild, changes)
	}
	for _, oldChild := range oldChildren {
		// delete
		change := Change{
			Path: oldChild.path(),
			Kind: ChangeDelete,
		}
		*changes = append(*changes, change)
	}
}

func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
	var changes []Change

	info.addChanges(oldInfo, &changes)

	return changes
}
func newRootFileInfo() *FileInfo {
	root := &FileInfo{
		name:     "/",
		children: make(map[string]*FileInfo),
	}
	return root
}

func collectFileInfo(sourceDir string) (*FileInfo, error) {
	root := newRootFileInfo()

	err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		// Rebase path
		relPath, err := filepath.Rel(sourceDir, path)
		if err != nil {
			return err
		}
		relPath = filepath.Join("/", relPath)

		if relPath == "/" {
			return nil
		}

		parent := root.LookUp(filepath.Dir(relPath))
		if parent == nil {
			return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
		}

		info := &FileInfo{
			name:     filepath.Base(relPath),
			children: make(map[string]*FileInfo),
			parent:   parent,
		}

		if err := syscall.Lstat(path, &info.stat); err != nil {
			return err
		}

		info.capability, _ = system.Lgetxattr(path, "security.capability")

		parent.children[info.name] = info

		return nil
	})
	if err != nil {
		return nil, err
	}
	return root, nil
}
// Compare two directories and generate an array of Change objects describing the changes
func ChangesDirs(newDir, oldDir string) ([]Change, error) {
	var (
		oldRoot, newRoot *FileInfo
		err1, err2       error
		errs             = make(chan error, 2)
	)
	go func() {
		oldRoot, err1 = collectFileInfo(oldDir)
		errs <- err1
	}()
	go func() {
		newRoot, err2 = collectFileInfo(newDir)
		errs <- err2
	}()
	for i := 0; i < 2; i++ {
		if err := <-errs; err != nil {
			return nil, err
		}
	}

	return newRoot.Changes(oldRoot), nil
}
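
A minimal usage sketch of ChangesDirs (the directory paths are placeholders, and the error handling is illustrative only):

	changes, err := ChangesDirs("/tmp/container-rootfs", "/tmp/image-rootfs")
	if err != nil {
		log.Fatal(err) // hypothetical handling, assuming the standard library log package
	}
	for _, c := range changes {
		fmt.Println(c.String()) // e.g. "A /newfile", "C /etc/passwd", "D /tmp/old"
	}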
func ChangesSize(newDir string, changes []Change) int64 {
	var size int64
	for _, change := range changes {
		if change.Kind == ChangeModify || change.Kind == ChangeAdd {
			file := filepath.Join(newDir, change.Path)
			fileInfo, _ := os.Lstat(file)
			if fileInfo != nil && !fileInfo.IsDir() {
				size += fileInfo.Size()
			}
		}
	}
	return size
}
func major(device uint64) uint64 {
	return (device >> 8) & 0xfff
}

func minor(device uint64) uint64 {
	return (device & 0xff) | ((device >> 12) & 0xfff00)
}
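
For example, decoding the traditional device number 0x0801 (the 8:1 pair used for /dev/sda1 on most Linux systems), a sketch:

	dev := uint64(0x0801)
	fmt.Println(major(dev), minor(dev)) // 8 1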
func ExportChanges(dir string, changes []Change) (Archive, error) {
	reader, writer := io.Pipe()
	tw := tar.NewWriter(writer)

	go func() {
		twBuf := bufio.NewWriterSize(nil, twBufSize)
		// In general we log errors here but ignore them because
		// during e.g. a diff operation the container can continue
		// mutating the filesystem and we can see transient errors
		// from this
		for _, change := range changes {
			if change.Kind == ChangeDelete {
				whiteOutDir := filepath.Dir(change.Path)
				whiteOutBase := filepath.Base(change.Path)
				whiteOut := filepath.Join(whiteOutDir, ".wh."+whiteOutBase)
				timestamp := time.Now()
				hdr := &tar.Header{
					Name:       whiteOut[1:],
					Size:       0,
					ModTime:    timestamp,
					AccessTime: timestamp,
					ChangeTime: timestamp,
				}
				if err := tw.WriteHeader(hdr); err != nil {
					log.Debugf("Can't write whiteout header: %s", err)
				}
			} else {
				path := filepath.Join(dir, change.Path)
				if err := addTarFile(path, change.Path[1:], tw, twBuf); err != nil {
					log.Debugf("Can't add file %s to tar: %s", path, err)
				}
			}
		}

		// Make sure to check the error on Close.
		if err := tw.Close(); err != nil {
			log.Debugf("Can't close layer: %s", err)
		}
		writer.Close()
	}()
	return reader, nil
}
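
For reference, a short sketch of the whiteout naming convention that ExportChanges emits for deletions (the path is an arbitrary example):

	change := Change{Path: "/etc/hosts", Kind: ChangeDelete}
	whiteOut := filepath.Join(filepath.Dir(change.Path), ".wh."+filepath.Base(change.Path))
	fmt.Println(whiteOut[1:]) // "etc/.wh.hosts", the tar entry name with the leading "/" stripped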
@@ -1,301 +0,0 @@
package archive

import (
	"io/ioutil"
	"os"
	"os/exec"
	"path"
	"sort"
	"testing"
	"time"
)

func max(x, y int) int {
	if x >= y {
		return x
	}
	return y
}

func copyDir(src, dst string) error {
	cmd := exec.Command("cp", "-a", src, dst)
	if err := cmd.Run(); err != nil {
		return err
	}
	return nil
}

// Helper to sort []Change by path
type byPath struct{ changes []Change }

func (b byPath) Less(i, j int) bool { return b.changes[i].Path < b.changes[j].Path }
func (b byPath) Len() int           { return len(b.changes) }
func (b byPath) Swap(i, j int)      { b.changes[i], b.changes[j] = b.changes[j], b.changes[i] }

type FileType uint32

const (
	Regular FileType = iota
	Dir
	Symlink
)

type FileData struct {
	filetype    FileType
	path        string
	contents    string
	permissions os.FileMode
}

func createSampleDir(t *testing.T, root string) {
	files := []FileData{
		{Regular, "file1", "file1\n", 0600},
		{Regular, "file2", "file2\n", 0666},
		{Regular, "file3", "file3\n", 0404},
		{Regular, "file4", "file4\n", 0600},
		{Regular, "file5", "file5\n", 0600},
		{Regular, "file6", "file6\n", 0600},
		{Regular, "file7", "file7\n", 0600},
		{Dir, "dir1", "", 0740},
		{Regular, "dir1/file1-1", "file1-1\n", 01444},
		{Regular, "dir1/file1-2", "file1-2\n", 0666},
		{Dir, "dir2", "", 0700},
		{Regular, "dir2/file2-1", "file2-1\n", 0666},
		{Regular, "dir2/file2-2", "file2-2\n", 0666},
		{Dir, "dir3", "", 0700},
		{Regular, "dir3/file3-1", "file3-1\n", 0666},
		{Regular, "dir3/file3-2", "file3-2\n", 0666},
		{Dir, "dir4", "", 0700},
		{Regular, "dir4/file3-1", "file4-1\n", 0666},
		{Regular, "dir4/file3-2", "file4-2\n", 0666},
		{Symlink, "symlink1", "target1", 0666},
		{Symlink, "symlink2", "target2", 0666},
	}

	now := time.Now()
	for _, info := range files {
		p := path.Join(root, info.path)
		if info.filetype == Dir {
			if err := os.MkdirAll(p, info.permissions); err != nil {
				t.Fatal(err)
			}
		} else if info.filetype == Regular {
			if err := ioutil.WriteFile(p, []byte(info.contents), info.permissions); err != nil {
				t.Fatal(err)
			}
		} else if info.filetype == Symlink {
			if err := os.Symlink(info.contents, p); err != nil {
				t.Fatal(err)
			}
		}

		if info.filetype != Symlink {
			// Set a consistent ctime, atime for all files and dirs
			if err := os.Chtimes(p, now, now); err != nil {
				t.Fatal(err)
			}
		}
	}
}

// Create a directory, copy it, make sure we report no changes between the two
func TestChangesDirsEmpty(t *testing.T) {
	src, err := ioutil.TempDir("", "docker-changes-test")
	if err != nil {
		t.Fatal(err)
	}
	createSampleDir(t, src)
	dst := src + "-copy"
	if err := copyDir(src, dst); err != nil {
		t.Fatal(err)
	}
	changes, err := ChangesDirs(dst, src)
	if err != nil {
		t.Fatal(err)
	}

	if len(changes) != 0 {
		t.Fatalf("Reported changes for identical dirs: %v", changes)
	}
	os.RemoveAll(src)
	os.RemoveAll(dst)
}

func mutateSampleDir(t *testing.T, root string) {
	// Remove a regular file
	if err := os.RemoveAll(path.Join(root, "file1")); err != nil {
		t.Fatal(err)
	}

	// Remove a directory
	if err := os.RemoveAll(path.Join(root, "dir1")); err != nil {
		t.Fatal(err)
	}

	// Remove a symlink
	if err := os.RemoveAll(path.Join(root, "symlink1")); err != nil {
		t.Fatal(err)
	}

	// Rewrite a file
	if err := ioutil.WriteFile(path.Join(root, "file2"), []byte("fileNN\n"), 0777); err != nil {
		t.Fatal(err)
	}

	// Replace a file
	if err := os.RemoveAll(path.Join(root, "file3")); err != nil {
		t.Fatal(err)
	}
	if err := ioutil.WriteFile(path.Join(root, "file3"), []byte("fileMM\n"), 0404); err != nil {
		t.Fatal(err)
	}

	// Touch file
	if err := os.Chtimes(path.Join(root, "file4"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil {
		t.Fatal(err)
	}

	// Replace file with dir
	if err := os.RemoveAll(path.Join(root, "file5")); err != nil {
		t.Fatal(err)
	}
	if err := os.MkdirAll(path.Join(root, "file5"), 0666); err != nil {
		t.Fatal(err)
	}

	// Create new file
	if err := ioutil.WriteFile(path.Join(root, "filenew"), []byte("filenew\n"), 0777); err != nil {
		t.Fatal(err)
	}

	// Create new dir
	if err := os.MkdirAll(path.Join(root, "dirnew"), 0766); err != nil {
		t.Fatal(err)
	}

	// Create a new symlink
	if err := os.Symlink("targetnew", path.Join(root, "symlinknew")); err != nil {
		t.Fatal(err)
	}

	// Change a symlink
	if err := os.RemoveAll(path.Join(root, "symlink2")); err != nil {
		t.Fatal(err)
	}
	if err := os.Symlink("target2change", path.Join(root, "symlink2")); err != nil {
		t.Fatal(err)
	}

	// Replace dir with file
	if err := os.RemoveAll(path.Join(root, "dir2")); err != nil {
		t.Fatal(err)
	}
	if err := ioutil.WriteFile(path.Join(root, "dir2"), []byte("dir2\n"), 0777); err != nil {
		t.Fatal(err)
	}

	// Touch dir
	if err := os.Chtimes(path.Join(root, "dir3"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil {
		t.Fatal(err)
	}
}

func TestChangesDirsMutated(t *testing.T) {
	src, err := ioutil.TempDir("", "docker-changes-test")
	if err != nil {
		t.Fatal(err)
	}
	createSampleDir(t, src)
	dst := src + "-copy"
	if err := copyDir(src, dst); err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(src)
	defer os.RemoveAll(dst)

	mutateSampleDir(t, dst)

	changes, err := ChangesDirs(dst, src)
	if err != nil {
		t.Fatal(err)
	}

	sort.Sort(byPath{changes})

	expectedChanges := []Change{
		{"/dir1", ChangeDelete},
		{"/dir2", ChangeModify},
		{"/dir3", ChangeModify},
		{"/dirnew", ChangeAdd},
		{"/file1", ChangeDelete},
		{"/file2", ChangeModify},
		{"/file3", ChangeModify},
		{"/file4", ChangeModify},
		{"/file5", ChangeModify},
		{"/filenew", ChangeAdd},
		{"/symlink1", ChangeDelete},
		{"/symlink2", ChangeModify},
		{"/symlinknew", ChangeAdd},
	}

	for i := 0; i < max(len(changes), len(expectedChanges)); i++ {
		if i >= len(expectedChanges) {
			t.Fatalf("unexpected change %s\n", changes[i].String())
		}
		if i >= len(changes) {
			t.Fatalf("no change for expected change %s\n", expectedChanges[i].String())
		}
		if changes[i].Path == expectedChanges[i].Path {
			if changes[i] != expectedChanges[i] {
				t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, expectedChanges[i].String(), changes[i].String())
			}
		} else if changes[i].Path < expectedChanges[i].Path {
			t.Fatalf("unexpected change %s\n", changes[i].String())
		} else {
			t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String())
		}
	}
}

func TestApplyLayer(t *testing.T) {
	src, err := ioutil.TempDir("", "docker-changes-test")
	if err != nil {
		t.Fatal(err)
	}
	createSampleDir(t, src)
	defer os.RemoveAll(src)
	dst := src + "-copy"
	if err := copyDir(src, dst); err != nil {
		t.Fatal(err)
	}
	mutateSampleDir(t, dst)
	defer os.RemoveAll(dst)

	changes, err := ChangesDirs(dst, src)
	if err != nil {
		t.Fatal(err)
	}

	layer, err := ExportChanges(dst, changes)
	if err != nil {
		t.Fatal(err)
	}

	layerCopy, err := NewTempArchive(layer, "")
	if err != nil {
		t.Fatal(err)
	}

	if err := ApplyLayer(src, layerCopy); err != nil {
		t.Fatal(err)
	}

	changes2, err := ChangesDirs(src, dst)
	if err != nil {
		t.Fatal(err)
	}

	if len(changes2) != 0 {
		t.Fatalf("Unexpected differences after reapplying mutation: %v", changes2)
	}
}
@@ -1,4 +0,0 @@
package archive

const twBufSize = 32 * 1024
const trBufSize = 32 * 1024
154	archive/diff.go
@@ -1,154 +0,0 @@
package archive

import (
	"bufio"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
	"syscall"

	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
)

// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes.
// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major,
// then the top 12 bits of the minor
func mkdev(major int64, minor int64) uint32 {
	return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
}

// ApplyLayer parses a diff in the standard layer format from `layer`, and
// applies it to the directory `dest`.
func ApplyLayer(dest string, layer ArchiveReader) error {
	// We need to be able to set any perms
	oldmask := syscall.Umask(0)
	defer syscall.Umask(oldmask)

	layer, err := DecompressStream(layer)
	if err != nil {
		return err
	}

	tr := tar.NewReader(layer)
	trBuf := bufio.NewReaderSize(nil, trBufSize)

	var dirs []*tar.Header

	aufsTempdir := ""
	aufsHardlinks := make(map[string]*tar.Header)

	// Iterate through the files in the archive.
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// end of tar archive
			break
		}
		if err != nil {
			return err
		}

		// Normalize name, for safety and for a simple is-root check
		hdr.Name = filepath.Clean(hdr.Name)

		if !strings.HasSuffix(hdr.Name, "/") {
			// Not the root directory, ensure that the parent directory exists.
			// This happened in some tests where an image had a tarfile without any
			// parent directories.
			parent := filepath.Dir(hdr.Name)
			parentPath := filepath.Join(dest, parent)
			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
				err = os.MkdirAll(parentPath, 0600)
				if err != nil {
					return err
				}
			}
		}

		// Skip AUFS metadata dirs
		if strings.HasPrefix(hdr.Name, ".wh..wh.") {
			// Regular files inside /.wh..wh.plnk can be used as hardlink targets.
			// We don't want this directory, but we need the files in it so that
			// such hardlinks can be resolved.
			if strings.HasPrefix(hdr.Name, ".wh..wh.plnk") && hdr.Typeflag == tar.TypeReg {
				basename := filepath.Base(hdr.Name)
				aufsHardlinks[basename] = hdr
				if aufsTempdir == "" {
					if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
						return err
					}
					defer os.RemoveAll(aufsTempdir)
				}
				if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true); err != nil {
					return err
				}
			}
			continue
		}

		path := filepath.Join(dest, hdr.Name)
		base := filepath.Base(path)
		if strings.HasPrefix(base, ".wh.") {
			originalBase := base[len(".wh."):]
			originalPath := filepath.Join(filepath.Dir(path), originalBase)
			if err := os.RemoveAll(originalPath); err != nil {
				return err
			}
		} else {
			// If path exists we almost always just want to remove and replace it.
			// The only exception is when it is a directory *and* the file from
			// the layer is also a directory. Then we want to merge them (i.e.
			// just apply the metadata from the layer).
			if fi, err := os.Lstat(path); err == nil {
				if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
					if err := os.RemoveAll(path); err != nil {
						return err
					}
				}
			}

			trBuf.Reset(tr)
			srcData := io.Reader(trBuf)
			srcHdr := hdr

			// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
			// we manually retarget these into the temporary files we extracted them into
			if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), ".wh..wh.plnk") {
				linkBasename := filepath.Base(hdr.Linkname)
				srcHdr = aufsHardlinks[linkBasename]
				if srcHdr == nil {
					return fmt.Errorf("Invalid aufs hardlink")
				}
				tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
				if err != nil {
					return err
				}
				defer tmpFile.Close()
				srcData = tmpFile
			}

			if err := createTarFile(path, dest, srcHdr, srcData, true); err != nil {
				return err
			}

			// Directory mtimes must be handled at the end to avoid further
			// file creation inside them from modifying the directory mtime
			if hdr.Typeflag == tar.TypeDir {
				dirs = append(dirs, hdr)
			}
		}
	}

	for _, hdr := range dirs {
		path := filepath.Join(dest, hdr.Name)
		ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
		if err := syscall.UtimesNano(path, ts); err != nil {
			return err
		}
	}

	return nil
}
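
As a sanity check on the bit layout described above mkdev, composing it with the major/minor helpers from archive/changes.go round-trips (a sketch, not part of the commit):

	dev := uint64(mkdev(8, 1))          // 0x801
	fmt.Println(major(dev), minor(dev)) // 8 1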
BIN	archive/testdata/broken.tar	vendored
Binary file not shown.
@@ -1,16 +0,0 @@
package archive

import (
	"syscall"
	"time"
)

func timeToTimespec(time time.Time) (ts syscall.Timespec) {
	if time.IsZero() {
		// Return the UTIME_OMIT special value, (1 << 30) - 2, which tells
		// utimensat(2) to leave the corresponding timestamp unchanged.
		ts.Sec = 0
		ts.Nsec = ((1 << 30) - 2)
		return
	}
	return syscall.NsecToTimespec(time.UnixNano())
}
@@ -1,16 +0,0 @@
// +build !linux

package archive

import (
	"syscall"
	"time"
)

func timeToTimespec(time time.Time) (ts syscall.Timespec) {
	nsec := int64(0)
	if !time.IsZero() {
		nsec = time.UnixNano()
	}
	return syscall.NsecToTimespec(nsec)
}
@@ -1,59 +0,0 @@
package archive

import (
	"bytes"
	"io/ioutil"

	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
)

// Generate generates a new archive from the content provided
// as input.
//
// `files` is a sequence of path/content pairs. A new file is
// added to the archive for each pair.
// If the last pair is incomplete, the file is created with an
// empty content. For example:
//
//	Generate("foo.txt", "hello world", "emptyfile")
//
// The above call will return an archive with 2 files:
//  * ./foo.txt with content "hello world"
//  * ./emptyfile with empty content
//
// FIXME: stream content instead of buffering
// FIXME: specify permissions and other archive metadata
func Generate(input ...string) (Archive, error) {
	files := parseStringPairs(input...)
	buf := new(bytes.Buffer)
	tw := tar.NewWriter(buf)
	for _, file := range files {
		name, content := file[0], file[1]
		hdr := &tar.Header{
			Name: name,
			Size: int64(len(content)),
		}
		if err := tw.WriteHeader(hdr); err != nil {
			return nil, err
		}
		if _, err := tw.Write([]byte(content)); err != nil {
			return nil, err
		}
	}
	if err := tw.Close(); err != nil {
		return nil, err
	}
	return ioutil.NopCloser(buf), nil
}

func parseStringPairs(input ...string) (output [][2]string) {
	output = make([][2]string, 0, len(input)/2+1)
	for i := 0; i < len(input); i += 2 {
		var pair [2]string
		pair[0] = input[i]
		if i+1 < len(input) {
			pair[1] = input[i+1]
		}
		output = append(output, pair)
	}
	return
}
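
A minimal usage sketch of Generate; the destination path and error handling are placeholders:

	archive, err := Generate("foo.txt", "hello world", "emptyfile")
	if err != nil {
		panic(err) // illustrative only
	}
	// The result is an io.Reader over a tar stream; it could, for example,
	// be fed to Untar(archive, "/tmp/dest") from this package.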
77	archive_test.go	Normal file
@@ -0,0 +1,77 @@
package docker

import (
	"io"
	"io/ioutil"
	"os"
	"os/exec"
	"testing"
	"time"
)

func TestCmdStreamLargeStderr(t *testing.T) {
	cmd := exec.Command("/bin/sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello")
	out, err := CmdStream(cmd)
	if err != nil {
		t.Fatal("Failed to start command: " + err.Error())
	}
	errCh := make(chan error)
	go func() {
		_, err := io.Copy(ioutil.Discard, out)
		errCh <- err
	}()
	select {
	case err := <-errCh:
		if err != nil {
			t.Fatalf("Command should not have failed (err=%s...)", err.Error()[:100])
		}
	case <-time.After(5 * time.Second):
		t.Fatal("Command did not complete in 5 seconds; probable deadlock")
	}
}

func TestCmdStreamBad(t *testing.T) {
	badCmd := exec.Command("/bin/sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1")
	out, err := CmdStream(badCmd)
	if err != nil {
		t.Fatal("Failed to start command: " + err.Error())
	}
	if output, err := ioutil.ReadAll(out); err == nil {
		t.Fatal("Command should have failed")
	} else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" {
		t.Fatalf("Wrong error value (%s)", err.Error())
	} else if s := string(output); s != "hello\n" {
		t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output)
	}
}

func TestCmdStreamGood(t *testing.T) {
	cmd := exec.Command("/bin/sh", "-c", "echo hello; exit 0")
	out, err := CmdStream(cmd)
	if err != nil {
		t.Fatal(err)
	}
	if output, err := ioutil.ReadAll(out); err != nil {
		t.Fatalf("Command should not have failed (err=%s)", err)
	} else if s := string(output); s != "hello\n" {
		t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output)
	}
}

func TestTarUntar(t *testing.T) {
	archive, err := Tar(".", Uncompressed)
	if err != nil {
		t.Fatal(err)
	}
	tmp, err := ioutil.TempDir("", "docker-test-untar")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmp)
	if err := Untar(archive, tmp); err != nil {
		t.Fatal(err)
	}
	if _, err := os.Stat(tmp); err != nil {
		t.Fatalf("Error stat'ing %s: %s", tmp, err.Error())
	}
}
168	auth/auth.go	Normal file
@@ -0,0 +1,168 @@
package auth

import (
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"net/http"
	"os"
	"path"
	"strings"
)

// Where we store the config file
const CONFIGFILE = ".dockercfg"

// the registry server we want to log in against
const REGISTRY_SERVER = "https://registry.docker.io"

type AuthConfig struct {
	Username string `json:"username"`
	Password string `json:"password"`
	Email    string `json:"email"`
	rootPath string `json:"-"`
}

func NewAuthConfig(username, password, email, rootPath string) *AuthConfig {
	return &AuthConfig{
		Username: username,
		Password: password,
		Email:    email,
		rootPath: rootPath,
	}
}

// create a base64 encoded auth string to store in config
func EncodeAuth(authConfig *AuthConfig) string {
	authStr := authConfig.Username + ":" + authConfig.Password
	msg := []byte(authStr)
	encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg)))
	base64.StdEncoding.Encode(encoded, msg)
	return string(encoded)
}

// decode the auth string
func DecodeAuth(authStr string) (*AuthConfig, error) {
	decLen := base64.StdEncoding.DecodedLen(len(authStr))
	decoded := make([]byte, decLen)
	authByte := []byte(authStr)
	n, err := base64.StdEncoding.Decode(decoded, authByte)
	if err != nil {
		return nil, err
	}
	if n > decLen {
		return nil, fmt.Errorf("Something went wrong decoding auth config")
	}
	arr := strings.Split(string(decoded), ":")
	if len(arr) != 2 {
		return nil, fmt.Errorf("Invalid auth configuration file")
	}
	password := strings.Trim(arr[1], "\x00")
	return &AuthConfig{Username: arr[0], Password: password}, nil
}

// load up the auth config information and return values
// FIXME: use the internal golang config parser
func LoadConfig(rootPath string) (*AuthConfig, error) {
	confFile := path.Join(rootPath, CONFIGFILE)
	if _, err := os.Stat(confFile); err != nil {
		return &AuthConfig{}, fmt.Errorf("The Auth config file is missing")
	}
	b, err := ioutil.ReadFile(confFile)
	if err != nil {
		return nil, err
	}
	arr := strings.Split(string(b), "\n")
	origAuth := strings.Split(arr[0], " = ")
	origEmail := strings.Split(arr[1], " = ")
	authConfig, err := DecodeAuth(origAuth[1])
	if err != nil {
		return nil, err
	}
	authConfig.Email = origEmail[1]
	authConfig.rootPath = rootPath
	return authConfig, nil
}

// save the auth config
func saveConfig(rootPath, authStr string, email string) error {
	lines := "auth = " + authStr + "\n" + "email = " + email + "\n"
	b := []byte(lines)
	err := ioutil.WriteFile(path.Join(rootPath, CONFIGFILE), b, 0600)
	if err != nil {
		return err
	}
	return nil
}

// try to register/login to the registry server
func Login(authConfig *AuthConfig) (string, error) {
	storeConfig := false
	reqStatusCode := 0
	var status string
	var errMsg string
	var reqBody []byte
	jsonBody, err := json.Marshal(authConfig)
	if err != nil {
		errMsg = fmt.Sprintf("Config Error: %s", err)
		return "", errors.New(errMsg)
	}

	// using `bytes.NewReader(jsonBody)` here causes the server to respond with a 411 status.
	b := strings.NewReader(string(jsonBody))
	req1, err := http.Post(REGISTRY_SERVER+"/v1/users", "application/json; charset=utf-8", b)
	if err != nil {
		errMsg = fmt.Sprintf("Server Error: %s", err)
		return "", errors.New(errMsg)
	}

	reqStatusCode = req1.StatusCode
	defer req1.Body.Close()
	reqBody, err = ioutil.ReadAll(req1.Body)
	if err != nil {
		errMsg = fmt.Sprintf("Server Error: [%#v] %s", reqStatusCode, err)
		return "", errors.New(errMsg)
	}

	if reqStatusCode == 201 {
		status = "Account Created\n"
		storeConfig = true
	} else if reqStatusCode == 400 {
		// FIXME: This should be 'exists', not 'exist'. Need to change on the server first.
		if string(reqBody) == "Username or email already exist" {
			client := &http.Client{}
			req, err := http.NewRequest("GET", REGISTRY_SERVER+"/v1/users", nil)
			if err != nil {
				return "", err
			}
			req.SetBasicAuth(authConfig.Username, authConfig.Password)
			resp, err := client.Do(req)
			if err != nil {
				return "", err
			}
			defer resp.Body.Close()
			body, err := ioutil.ReadAll(resp.Body)
			if err != nil {
				return "", err
			}
			if resp.StatusCode == 200 {
				status = "Login Succeeded\n"
				storeConfig = true
			} else {
				status = fmt.Sprintf("Login: %s", body)
				return "", errors.New(status)
			}
		} else {
			status = fmt.Sprintf("Registration: %s", reqBody)
			return "", errors.New(status)
		}
	} else {
		status = fmt.Sprintf("[%d] : %s", reqStatusCode, reqBody)
		return "", errors.New(status)
	}
	if storeConfig {
		authStr := EncodeAuth(authConfig)
		saveConfig(authConfig.rootPath, authStr, authConfig.Email)
	}
	return status, nil
}
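
For reference, a small sketch (not part of the commit) of the two-line config that saveConfig writes and LoadConfig parses back, using the sample credentials from the test below:

	// "ken:test" base64-encoded, as EncodeAuth above would produce.
	auth := base64.StdEncoding.EncodeToString([]byte("ken:test"))
	fmt.Printf("auth = %s\nemail = %s\n", auth, "test@example.com")
	// Output:
	// auth = a2VuOnRlc3Q=
	// email = test@example.com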
23	auth/auth_test.go	Normal file
@@ -0,0 +1,23 @@
package auth

import (
	"testing"
)

func TestEncodeAuth(t *testing.T) {
	newAuthConfig := &AuthConfig{Username: "ken", Password: "test", Email: "test@example.com"}
	authStr := EncodeAuth(newAuthConfig)
	decAuthConfig, err := DecodeAuth(authStr)
	if err != nil {
		t.Fatal(err)
	}
	if newAuthConfig.Username != decAuthConfig.Username {
		t.Fatal("Encoded Username doesn't match decoded Username")
	}
	if newAuthConfig.Password != decAuthConfig.Password {
		t.Fatal("Encoded Password doesn't match decoded Password")
	}
	if authStr != "a2VuOnRlc3Q=" {
		t.Fatal("AuthString encoding isn't correct.")
	}
}
20	buildbot/README.rst	Normal file
@@ -0,0 +1,20 @@
Buildbot
========

Buildbot is a continuous integration system designed to automate the
build/test cycle. By automatically rebuilding and testing the tree each time
something has changed, build problems are pinpointed quickly, before other
developers are inconvenienced by the failure.

Running 'make hack' from the docker root directory spawns a virtual
machine in the background running a buildbot instance, and adds a git
post-commit hook that automatically runs the docker tests for you.

You can check your buildbot instance at http://192.168.33.21:8010/waterfall


Buildbot dependencies
---------------------

vagrant, virtualbox packages and the python package requests
28	buildbot/Vagrantfile	vendored	Normal file
@@ -0,0 +1,28 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

$BUILDBOT_IP = '192.168.33.21'

def v10(config)
  config.vm.box = "quantal64_3.5.0-25"
  config.vm.box_url = "http://get.docker.io/vbox/ubuntu/12.10/quantal64_3.5.0-25.box"
  config.vm.share_folder 'v-data', '/data/docker', File.dirname(__FILE__) + '/..'
  config.vm.network :hostonly, $BUILDBOT_IP

  # Ensure puppet is installed on the instance
  config.vm.provision :shell, :inline => 'apt-get -qq update; apt-get install -y puppet'

  config.vm.provision :puppet do |puppet|
    puppet.manifests_path = '.'
    puppet.manifest_file = 'buildbot.pp'
    puppet.options = ['--templatedir', '.']
  end
end

Vagrant::VERSION < '1.1.0' and Vagrant::Config.run do |config|
  v10(config)
end

Vagrant::VERSION >= '1.1.0' and Vagrant.configure('1') do |config|
  v10(config)
end
43	buildbot/buildbot-cfg/buildbot-cfg.sh	Executable file
@@ -0,0 +1,43 @@
#!/bin/bash

# Auto setup of buildbot configuration. Package installation is done
# by buildbot.pp
# Dependencies: buildbot, buildbot-slave, supervisor

SLAVE_NAME='buildworker'
SLAVE_SOCKET='localhost:9989'
BUILDBOT_PWD='pass-docker'
USER='vagrant'
ROOT_PATH='/data/buildbot'
DOCKER_PATH='/data/docker'
BUILDBOT_CFG="$DOCKER_PATH/buildbot/buildbot-cfg"
IP=$(grep BUILDBOT_IP /data/docker/buildbot/Vagrantfile | awk -F "'" '{ print $2; }')

function run { su $USER -c "$1"; }

export PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin

# Exit if buildbot has already been installed
[ -d "$ROOT_PATH" ] && exit 0

# Setup buildbot
run "mkdir -p ${ROOT_PATH}"
cd ${ROOT_PATH}
run "buildbot create-master master"
run "cp $BUILDBOT_CFG/master.cfg master"
run "sed -i 's/localhost/$IP/' master/master.cfg"
run "buildslave create-slave slave $SLAVE_SOCKET $SLAVE_NAME $BUILDBOT_PWD"

# Allow buildbot subprocesses (docker tests) to properly run in containers,
# in particular with docker -u
run "sed -i 's/^umask = None/umask = 000/' ${ROOT_PATH}/slave/buildbot.tac"

# Setup supervisor
cp $BUILDBOT_CFG/buildbot.conf /etc/supervisor/conf.d/buildbot.conf
sed -i "s/^chmod=0700.*0700./chmod=0770\nchown=root:$USER/" /etc/supervisor/supervisord.conf
kill -HUP `pgrep -f "/usr/bin/python /usr/bin/supervisord"`

# Add git hook
cp $BUILDBOT_CFG/post-commit $DOCKER_PATH/.git/hooks
sed -i "s/localhost/$IP/" $DOCKER_PATH/.git/hooks/post-commit
18	buildbot/buildbot-cfg/buildbot.conf	Normal file
@@ -0,0 +1,18 @@
[program:buildmaster]
command=su vagrant -c "buildbot start master"
directory=/data/buildbot
chown=root:root
redirect_stderr=true
stdout_logfile=/var/log/supervisor/buildbot-master.log
stderr_logfile=/var/log/supervisor/buildbot-master.log

[program:buildworker]
command=buildslave start slave
directory=/data/buildbot
chown=root:root
redirect_stderr=true
stdout_logfile=/var/log/supervisor/buildbot-slave.log
stderr_logfile=/var/log/supervisor/buildbot-slave.log

[group:buildbot]
programs=buildmaster,buildworker
46	buildbot/buildbot-cfg/master.cfg	Normal file
@@ -0,0 +1,46 @@
import os
from buildbot.buildslave import BuildSlave
from buildbot.schedulers.forcesched import ForceScheduler
from buildbot.config import BuilderConfig
from buildbot.process.factory import BuildFactory
from buildbot.steps.shell import ShellCommand
from buildbot.status import html
from buildbot.status.web import authz, auth

PORT_WEB = 8010          # Buildbot webserver port
PORT_MASTER = 9989       # Port where the buildbot master listens for buildworkers
TEST_USER = 'buildbot'   # Credential to authenticate build triggers
TEST_PWD = 'docker'      # Credential to authenticate build triggers
BUILDER_NAME = 'docker'
BUILDPASSWORD = 'pass-docker'  # Credential to authenticate buildworkers
DOCKER_PATH = '/data/docker'


c = BuildmasterConfig = {}

c['title'] = "Docker"
c['titleURL'] = "waterfall"
c['buildbotURL'] = "http://localhost:{0}/".format(PORT_WEB)
c['db'] = {'db_url': "sqlite:///state.sqlite"}
c['slaves'] = [BuildSlave('buildworker', BUILDPASSWORD)]
c['slavePortnum'] = PORT_MASTER

c['schedulers'] = [ForceScheduler(name='trigger', builderNames=[BUILDER_NAME])]

# Docker test command
test_cmd = """(
  cd {0}/..; rm -rf docker-tmp; git clone docker docker-tmp;
  cd docker-tmp; make test; exit_status=$?;
  cd ..; rm -rf docker-tmp; exit $exit_status)""".format(DOCKER_PATH)

# Builder
factory = BuildFactory()
factory.addStep(ShellCommand(description='Docker', logEnviron=False,
    usePTY=True, command=test_cmd))
c['builders'] = [BuilderConfig(name=BUILDER_NAME, slavenames=['buildworker'],
    factory=factory)]

# Status
authz_cfg = authz.Authz(auth=auth.BasicAuth([(TEST_USER, TEST_PWD)]),
    forceBuild='auth')
c['status'] = [html.WebStatus(http_port=PORT_WEB, authz=authz_cfg)]
21	buildbot/buildbot-cfg/post-commit	Executable file
@@ -0,0 +1,21 @@
#!/usr/bin/env python

'''Trigger buildbot docker test build

post-commit git hook designed to automatically trigger buildbot on
the provided vagrant docker VM.'''

import requests

USERNAME = 'buildbot'
PASSWORD = 'docker'
BASE_URL = 'http://localhost:8010'
path = lambda s: BASE_URL + '/' + s

try:
    session = requests.session()
    session.post(path('login'), data={'username': USERNAME, 'passwd': PASSWORD})
    session.post(path('builders/docker/force'),
        data={'forcescheduler': 'trigger', 'reason': 'Test commit'})
except Exception:
    pass  # never block the commit if buildbot is unreachable
32	buildbot/buildbot.pp	Normal file
@@ -0,0 +1,32 @@
node default {
  $USER = 'vagrant'
  $ROOT_PATH = '/data/buildbot'
  $DOCKER_PATH = '/data/docker'

  exec {'apt_update': command => '/usr/bin/apt-get update' }
  Package { require => Exec['apt_update'] }
  group {'puppet': ensure => 'present'}

  # Install dependencies
  Package { ensure => 'installed' }
  package { ['python-dev','python-pip','supervisor','lxc','bsdtar','git','golang']: }

  file {['/data']:
    owner => $USER, group => $USER, ensure => 'directory' }

  file {'/var/tmp/requirements.txt':
    content => template('requirements.txt') }

  exec {'requirements':
    require => [ Package['python-dev'], Package['python-pip'],
      File['/var/tmp/requirements.txt'] ],
    cwd     => '/var/tmp',
    command => "/bin/sh -c '(/usr/bin/pip install -r requirements.txt;
      rm /var/tmp/requirements.txt)'" }

  exec {'buildbot-cfg-sh':
    require => [ Package['supervisor'], Exec['requirements']],
    path    => '/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin',
    cwd     => '/data',
    command => "$DOCKER_PATH/buildbot/buildbot-cfg/buildbot-cfg.sh" }
}
6	buildbot/requirements.txt	Normal file
@@ -0,0 +1,6 @@
sqlalchemy<=0.7.9
sqlalchemy-migrate>=0.7.2
buildbot==0.8.7p1
buildbot_slave==0.8.7p1
nose==1.2.1
requests==1.1.0
169	builder.go	Normal file
@@ -0,0 +1,169 @@
package docker

import (
	"bufio"
	"fmt"
	"io"
	"strings"
)

type Builder struct {
	runtime *Runtime
}

func NewBuilder(runtime *Runtime) *Builder {
	return &Builder{
		runtime: runtime,
	}
}

func (builder *Builder) Run(image *Image, cmd ...string) (*Container, error) {
	// FIXME: pass a NopWriter instead of nil
	config, err := ParseRun(append([]string{"-d", image.Id}, cmd...), nil, builder.runtime.capabilities)
	if err != nil {
		return nil, err
	}
	if config.Image == "" {
		return nil, fmt.Errorf("Image not specified")
	}
	if len(config.Cmd) == 0 {
		return nil, fmt.Errorf("Command not specified")
	}
	if config.Tty {
		return nil, fmt.Errorf("The tty mode is not supported within the builder")
	}

	// Create new container
	container, err := builder.runtime.Create(config)
	if err != nil {
		return nil, err
	}
	if err := container.Start(); err != nil {
		return nil, err
	}
	return container, nil
}

func (builder *Builder) Commit(container *Container, repository, tag, comment, author string) (*Image, error) {
	return builder.runtime.Commit(container.Id, repository, tag, comment, author)
}

func (builder *Builder) clearTmp(containers, images map[string]struct{}) {
	for c := range containers {
		tmp := builder.runtime.Get(c)
		builder.runtime.Destroy(tmp)
		Debugf("Removing container %s", c)
	}
	for i := range images {
		builder.runtime.graph.Delete(i)
		Debugf("Removing image %s", i)
	}
}

func (builder *Builder) Build(dockerfile io.Reader, stdout io.Writer) error {
	var (
		image, base   *Image
		tmpContainers = make(map[string]struct{})
		tmpImages     = make(map[string]struct{})
	)
	defer builder.clearTmp(tmpContainers, tmpImages)

	file := bufio.NewReader(dockerfile)
	for {
		line, err := file.ReadString('\n')
		if err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		line = strings.TrimSpace(line)
		// Skip comments and empty lines
		if len(line) == 0 || line[0] == '#' {
			continue
		}
		tmp := strings.SplitN(line, " ", 2)
		if len(tmp) != 2 {
			return fmt.Errorf("Invalid Dockerfile format")
		}
		switch tmp[0] {
		case "from":
			fmt.Fprintf(stdout, "FROM %s\n", tmp[1])
			image, err = builder.runtime.repositories.LookupImage(tmp[1])
			if err != nil {
				return err
			}
		case "run":
			fmt.Fprintf(stdout, "RUN %s\n", tmp[1])
			if image == nil {
				return fmt.Errorf("Please provide a source image with `from` prior to run")
			}

			// Create the container and start it
			c, err := builder.Run(image, "/bin/sh", "-c", tmp[1])
			if err != nil {
				return err
			}
			tmpContainers[c.Id] = struct{}{}

			// Wait for it to finish
			if result := c.Wait(); result != 0 {
				return fmt.Errorf("!!! '%s' returned non-zero exit code '%d'. Aborting.", tmp[1], result)
			}

			// Commit the container
			base, err = builder.Commit(c, "", "", "", "")
			if err != nil {
				return err
			}
			tmpImages[base.Id] = struct{}{}

			fmt.Fprintf(stdout, "===> %s\n", base.ShortId())
		case "copy":
			if image == nil {
				return fmt.Errorf("Please provide a source image with `from` prior to copy")
			}
			tmp2 := strings.SplitN(tmp[1], " ", 2)
			if len(tmp2) != 2 {
				return fmt.Errorf("Invalid COPY format")
			}
			fmt.Fprintf(stdout, "COPY %s to %s in %s\n", tmp2[0], tmp2[1], base.ShortId())

			file, err := Download(tmp2[0], stdout)
			if err != nil {
				return err
			}
			defer file.Body.Close()

			c, err := builder.Run(base, "echo", "insert", tmp2[0], tmp2[1])
			if err != nil {
				return err
			}

			if err := c.Inject(file.Body, tmp2[1]); err != nil {
				return err
			}

			base, err = builder.Commit(c, "", "", "", "")
			if err != nil {
				return err
			}
			fmt.Fprintf(stdout, "===> %s\n", base.ShortId())
		default:
			fmt.Fprintf(stdout, "Skipping unknown op %s\n", tmp[0])
		}
	}
	if base != nil {
		// The build succeeded: clear the cleanup maps so the deferred
		// clearTmp call keeps the intermediate containers and images.
		for i := range tmpImages {
			delete(tmpImages, i)
		}
		for i := range tmpContainers {
			delete(tmpContainers, i)
		}
		fmt.Fprintf(stdout, "Build finished. image id: %s\n", base.ShortId())
	} else {
		fmt.Fprintf(stdout, "An error occurred during the build\n")
	}
	return nil
}
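
A sketch of driving this early builder with an in-memory Dockerfile; note the lowercase instructions matched by the switch above, and assume runtime is an initialized *Runtime from the surrounding daemon setup:

	dockerfile := strings.NewReader("from base\nrun touch /hello\n")
	builder := NewBuilder(runtime) // runtime: assumed to exist
	if err := builder.Build(dockerfile, os.Stdout); err != nil {
		Debugf("build failed: %s", err) // illustrative handling only
	}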
@@ -1,75 +0,0 @@
package builtins

import (
	"runtime"

	"github.com/docker/docker/api"
	apiserver "github.com/docker/docker/api/server"
	"github.com/docker/docker/daemon/networkdriver/bridge"
	"github.com/docker/docker/dockerversion"
	"github.com/docker/docker/engine"
	"github.com/docker/docker/events"
	"github.com/docker/docker/pkg/parsers/kernel"
	"github.com/docker/docker/registry"
)

func Register(eng *engine.Engine) error {
	if err := daemon(eng); err != nil {
		return err
	}
	if err := remote(eng); err != nil {
		return err
	}
	if err := events.New().Install(eng); err != nil {
		return err
	}
	if err := eng.Register("version", dockerVersion); err != nil {
		return err
	}
	return registry.NewService().Install(eng)
}

// remote: a RESTful api for cross-docker communication
func remote(eng *engine.Engine) error {
	if err := eng.Register("serveapi", apiserver.ServeApi); err != nil {
		return err
	}
	return eng.Register("acceptconnections", apiserver.AcceptConnections)
}

// daemon: a default execution and storage backend for Docker on Linux,
// with the following underlying components:
//
// * Pluggable storage drivers including aufs, vfs, lvm and btrfs.
// * Pluggable execution drivers including lxc and chroot.
//
// In practice `daemon` still includes most core Docker components, including:
//
// * The reference registry client implementation
// * Image management
// * The build facility
// * Logging
//
// These components should be broken off into plugins of their own.
//
func daemon(eng *engine.Engine) error {
	return eng.Register("init_networkdriver", bridge.InitDriver)
}

// builtin jobs independent of any subsystem
func dockerVersion(job *engine.Job) engine.Status {
	v := &engine.Env{}
	v.SetJson("Version", dockerversion.VERSION)
	v.SetJson("ApiVersion", api.APIVERSION)
	v.Set("GitCommit", dockerversion.GITCOMMIT)
	v.Set("GoVersion", runtime.Version())
	v.Set("Os", runtime.GOOS)
	v.Set("Arch", runtime.GOARCH)
	if kernelVersion, err := kernel.GetKernelVersion(); err == nil {
		v.Set("KernelVersion", kernelVersion.String())
	}
	if _, err := v.WriteTo(job.Stdout); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
106	changes.go	Normal file
@@ -0,0 +1,106 @@
package docker

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

type ChangeType int

const (
	ChangeModify = iota
	ChangeAdd
	ChangeDelete
)

type Change struct {
	Path string
	Kind ChangeType
}

func (change *Change) String() string {
	var kind string
	switch change.Kind {
	case ChangeModify:
		kind = "C"
	case ChangeAdd:
		kind = "A"
	case ChangeDelete:
		kind = "D"
	}
	return fmt.Sprintf("%s %s", kind, change.Path)
}

func Changes(layers []string, rw string) ([]Change, error) {
	var changes []Change
	err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		// Rebase path
		path, err = filepath.Rel(rw, path)
		if err != nil {
			return err
		}
		path = filepath.Join("/", path)

		// Skip root
		if path == "/" {
			return nil
		}

		// Skip AUFS metadata
		if matched, err := filepath.Match("/.wh..wh.*", path); err != nil || matched {
			return err
		}

		change := Change{
			Path: path,
		}

		// Find out what kind of modification happened
		file := filepath.Base(path)
		// If there is a whiteout, then the file was removed
		if strings.HasPrefix(file, ".wh.") {
			originalFile := strings.TrimPrefix(file, ".wh.")
			change.Path = filepath.Join(filepath.Dir(path), originalFile)
			change.Kind = ChangeDelete
		} else {
			// Otherwise, the file was added
			change.Kind = ChangeAdd

			// ...Unless it already existed in a top layer, in which case, it's a modification
			for _, layer := range layers {
				stat, err := os.Stat(filepath.Join(layer, path))
				if err != nil && !os.IsNotExist(err) {
					return err
				}
				if err == nil {
					// The file existed in the top layer, so that's a modification

					// However, if it's a directory, maybe it wasn't actually modified.
					// If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
					if stat.IsDir() && f.IsDir() {
						if f.Size() == stat.Size() && f.Mode() == stat.Mode() && f.ModTime().Equal(stat.ModTime()) {
							// Both directories are the same, don't record the change
							return nil
						}
					}
					change.Kind = ChangeModify
					break
				}
			}
		}

		// Record change
		changes = append(changes, change)
		return nil
	})
	if err != nil {
		return nil, err
	}
	return changes, nil
}
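
The one-letter codes produced by Change.String mirror the output of `docker diff`; a quick sketch:

	fmt.Println((&Change{Path: "/etc/passwd", Kind: ChangeModify}).String()) // C /etc/passwd
	fmt.Println((&Change{Path: "/newfile", Kind: ChangeAdd}).String())       // A /newfile
	fmt.Println((&Change{Path: "/oldfile", Kind: ChangeDelete}).String())    // D /oldfile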
1083	commands.go	Normal file
File diff suppressed because it is too large.
397	commands_test.go	Normal file
@@ -0,0 +1,397 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"github.com/dotcloud/docker/rcli"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func closeWrap(args ...io.Closer) error {
|
||||
e := false
|
||||
ret := fmt.Errorf("Error closing elements")
|
||||
for _, c := range args {
|
||||
if err := c.Close(); err != nil {
|
||||
e = true
|
||||
ret = fmt.Errorf("%s\n%s", ret, err)
|
||||
}
|
||||
}
|
||||
if e {
|
||||
return ret
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func setTimeout(t *testing.T, msg string, d time.Duration, f func()) {
|
||||
c := make(chan bool)
|
||||
|
||||
// Make sure we are not too long
|
||||
go func() {
|
||||
time.Sleep(d)
|
||||
c <- true
|
||||
}()
|
||||
go func() {
|
||||
f()
|
||||
c <- false
|
||||
}()
|
||||
if <-c {
|
||||
t.Fatal(msg)
|
||||
}
|
||||
}
|
||||
|
||||
func assertPipe(input, output string, r io.Reader, w io.Writer, count int) error {
|
||||
for i := 0; i < count; i++ {
|
||||
if _, err := w.Write([]byte(input)); err != nil {
|
||||
return err
|
||||
}
|
||||
o, err := bufio.NewReader(r).ReadString('\n')
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if strings.Trim(o, " \r\n") != output {
|
||||
return fmt.Errorf("Unexpected output. Expected [%s], received [%s]", output, o)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func cmdWait(srv *Server, container *Container) error {
|
||||
stdout, stdoutPipe := io.Pipe()
|
||||
|
||||
go func() {
|
||||
srv.CmdWait(nil, stdoutPipe, container.Id)
|
||||
}()
|
||||
|
||||
if _, err := bufio.NewReader(stdout).ReadString('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
// Cleanup pipes
|
||||
return closeWrap(stdout, stdoutPipe)
|
||||
}
|
||||
|
||||
// TestRunHostname checks that 'docker run -h' correctly sets a custom hostname
|
||||
func TestRunHostname(t *testing.T) {
|
||||
runtime, err := newTestRuntime()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer nuke(runtime)
|
||||
|
||||
srv := &Server{runtime: runtime}
|
||||
|
||||
stdin, _ := io.Pipe()
|
||||
stdout, stdoutPipe := io.Pipe()
|
||||
|
||||
c := make(chan struct{})
|
||||
go func() {
|
||||
if err := srv.CmdRun(stdin, rcli.NewDockerLocalConn(stdoutPipe), "-h", "foobar", GetTestImage(runtime).Id, "hostname"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
close(c)
|
||||
}()
|
||||
cmdOutput, err := bufio.NewReader(stdout).ReadString('\n')
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if cmdOutput != "foobar\n" {
|
||||
t.Fatalf("'hostname' should display '%s', not '%s'", "foobar\n", cmdOutput)
|
||||
}
|
||||
|
||||
setTimeout(t, "CmdRun timed out", 2*time.Second, func() {
|
||||
<-c
|
||||
cmdWait(srv, srv.runtime.List()[0])
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func TestRunExit(t *testing.T) {
|
||||
runtime, err := newTestRuntime()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer nuke(runtime)
|
||||
|
||||
srv := &Server{runtime: runtime}
|
||||
|
||||
stdin, stdinPipe := io.Pipe()
|
||||
stdout, stdoutPipe := io.Pipe()
|
||||
c1 := make(chan struct{})
|
||||
go func() {
|
||||
srv.CmdRun(stdin, rcli.NewDockerLocalConn(stdoutPipe), "-i", GetTestImage(runtime).Id, "/bin/cat")
|
||||
close(c1)
|
||||
}()
|
||||
|
||||
setTimeout(t, "Read/Write assertion timed out", 2*time.Second, func() {
|
||||
if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 15); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
})
|
||||
|
||||
container := runtime.List()[0]
|
||||
|
||||
// Closing /bin/cat stdin, expect it to exit
|
||||
p, err := container.StdinPipe()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := p.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// as the process exited, CmdRun must finish and unblock. Wait for it
|
||||
setTimeout(t, "Waiting for CmdRun timed out", 2*time.Second, func() {
|
||||
<-c1
|
||||
cmdWait(srv, container)
|
||||
})
|
||||
|
||||
// Make sure that the client has been disconnected
|
||||
setTimeout(t, "The client should have been disconnected once the remote process exited.", 2*time.Second, func() {
|
||||
// Expecting pipe i/o error, just check that read does not block
|
||||
stdin.Read([]byte{})
|
||||
})
|
||||
|
||||
// Cleanup pipes
|
||||
if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Expected behaviour: the process dies when the client disconnects
|
||||
func TestRunDisconnect(t *testing.T) {
|
||||
runtime, err := newTestRuntime()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer nuke(runtime)
|
||||
|
||||
srv := &Server{runtime: runtime}
|
||||
|
||||
stdin, stdinPipe := io.Pipe()
|
||||
stdout, stdoutPipe := io.Pipe()
|
||||
c1 := make(chan struct{})
|
||||
go func() {
|
||||
// We're simulating a disconnect so the return value doesn't matter. What matters is the
|
||||
// fact that CmdRun returns.
|
||||
srv.CmdRun(stdin, rcli.NewDockerLocalConn(stdoutPipe), "-i", GetTestImage(runtime).Id, "/bin/cat")
|
||||
close(c1)
|
||||
}()
|
||||
|
||||
setTimeout(t, "Read/Write assertion timed out", 2*time.Second, func() {
|
||||
if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 15); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
})
|
||||
|
||||
// Close pipes (simulate disconnect)
|
||||
if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// as the pipes are close, we expect the process to die,
|
||||
// therefore CmdRun to unblock. Wait for CmdRun
|
||||
setTimeout(t, "Waiting for CmdRun timed out", 2*time.Second, func() {
|
||||
<-c1
|
||||
})
|
||||
|
||||
// Client disconnect after run -i should cause stdin to be closed, which should
|
||||
// cause /bin/cat to exit.
|
||||
setTimeout(t, "Waiting for /bin/cat to exit timed out", 2*time.Second, func() {
|
||||
container := runtime.List()[0]
|
||||
container.Wait()
|
||||
if container.State.Running {
|
||||
t.Fatalf("/bin/cat is still running after closing stdin")
|
||||
}
|
||||
})
|
||||
}

// Expected behaviour: in TTY mode, the process stays alive when the client disconnects
func TestRunDisconnectTty(t *testing.T) {
	runtime, err := newTestRuntime()
	if err != nil {
		t.Fatal(err)
	}
	defer nuke(runtime)

	srv := &Server{runtime: runtime}

	stdin, stdinPipe := io.Pipe()
	stdout, stdoutPipe := io.Pipe()
	c1 := make(chan struct{})
	go func() {
		// We're simulating a disconnect so the return value doesn't matter. What matters is the
		// fact that CmdRun returns.
		srv.CmdRun(stdin, rcli.NewDockerLocalConn(stdoutPipe), "-i", "-t", GetTestImage(runtime).Id, "/bin/cat")
		close(c1)
	}()

	setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() {
		for {
			// Client disconnect after run -i should keep stdin open in TTY mode
			l := runtime.List()
			if len(l) == 1 && l[0].State.Running {
				break
			}

			time.Sleep(10 * time.Millisecond)
		}
	})

	// Client disconnect after run -i should keep stdin open in TTY mode
	container := runtime.List()[0]

	setTimeout(t, "Read/Write assertion timed out", 2*time.Second, func() {
		if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 15); err != nil {
			t.Fatal(err)
		}
	})

	// Close pipes (simulate disconnect)
	if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil {
		t.Fatal(err)
	}

	// In TTY mode, we expect the process to stay alive even after the client's stdin closes.
	// Do not wait for run to finish.

	// Give the monitor some time to do its thing
	container.WaitTimeout(500 * time.Millisecond)
	if !container.State.Running {
		t.Fatalf("/bin/cat should still be running after closing stdin (tty mode)")
	}
}

// TestRunAttachStdin checks attaching to stdin without stdout and stderr.
// 'docker run -i -a stdin' should send the client's stdin to the command,
// then detach from it and print the container id.
func TestRunAttachStdin(t *testing.T) {
	runtime, err := newTestRuntime()
	if err != nil {
		t.Fatal(err)
	}
	defer nuke(runtime)
	srv := &Server{runtime: runtime}

	stdin, stdinPipe := io.Pipe()
	stdout, stdoutPipe := io.Pipe()

	ch := make(chan struct{})
	go func() {
		srv.CmdRun(stdin, rcli.NewDockerLocalConn(stdoutPipe), "-i", "-a", "stdin", GetTestImage(runtime).Id, "sh", "-c", "echo hello; cat")
		close(ch)
	}()

	// Send input to the command, then close stdin
	setTimeout(t, "Write timed out", 2*time.Second, func() {
		if _, err := stdinPipe.Write([]byte("hi there\n")); err != nil {
			t.Fatal(err)
		}
		if err := stdinPipe.Close(); err != nil {
			t.Fatal(err)
		}
	})

	container := runtime.List()[0]

	// Check output
	cmdOutput, err := bufio.NewReader(stdout).ReadString('\n')
	if err != nil {
		t.Fatal(err)
	}
	if cmdOutput != container.ShortId()+"\n" {
		t.Fatalf("Wrong output: should be '%s', not '%s'\n", container.ShortId()+"\n", cmdOutput)
	}

	// Wait for CmdRun to return
	setTimeout(t, "Waiting for CmdRun timed out", 2*time.Second, func() {
		<-ch
	})

	setTimeout(t, "Waiting for command to exit timed out", 2*time.Second, func() {
		container.Wait()
	})

	// Check logs
	if cmdLogs, err := container.ReadLog("stdout"); err != nil {
		t.Fatal(err)
	} else {
		if output, err := ioutil.ReadAll(cmdLogs); err != nil {
			t.Fatal(err)
		} else {
			expectedLog := "hello\nhi there\n"
			if string(output) != expectedLog {
				t.Fatalf("Unexpected logs: should be '%s', not '%s'\n", expectedLog, output)
			}
		}
	}
}

// Expected behaviour: the process stays alive when the client disconnects
func TestAttachDisconnect(t *testing.T) {
	runtime, err := newTestRuntime()
	if err != nil {
		t.Fatal(err)
	}
	defer nuke(runtime)

	srv := &Server{runtime: runtime}

	container, err := runtime.Create(
		&Config{
			Image:     GetTestImage(runtime).Id,
			Memory:    33554432,
			Cmd:       []string{"/bin/cat"},
			OpenStdin: true,
		},
	)
	if err != nil {
		t.Fatal(err)
	}
	defer runtime.Destroy(container)

	// Start the process
	if err := container.Start(); err != nil {
		t.Fatal(err)
	}

	stdin, stdinPipe := io.Pipe()
	stdout, stdoutPipe := io.Pipe()

	// Attach to it
	c1 := make(chan struct{})
	go func() {
		// We're simulating a disconnect so the return value doesn't matter. What matters is the
		// fact that CmdAttach returns.
		srv.CmdAttach(stdin, rcli.NewDockerLocalConn(stdoutPipe), container.Id)
		close(c1)
	}()

	setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() {
		if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 15); err != nil {
			t.Fatal(err)
		}
	})
	// Close pipes (client disconnects)
	if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil {
		t.Fatal(err)
	}

	// Wait for attach to finish: the client disconnected, therefore Attach finished its job
	setTimeout(t, "Waiting for CmdAttach timed out", 2*time.Second, func() {
		<-c1
	})

	// We closed stdin, expect /bin/cat to still be running
	// Wait a little bit to make sure container.monitor() did its thing
	err = container.WaitTimeout(500 * time.Millisecond)
	if err == nil || !container.State.Running {
		t.Fatalf("/bin/cat is not running after closing stdin")
	}

	// Try to avoid the timeout in destroy. Best effort, don't check the error.
	cStdin, _ := container.StdinPipe()
	cStdin.Close()
}
816
container.go
Normal file
@@ -0,0 +1,816 @@
package docker

import (
	"encoding/json"
	"fmt"
	"github.com/dotcloud/docker/rcli"
	"github.com/kr/pty"
	"io"
	"io/ioutil"
	"log"
	"os"
	"os/exec"
	"path"
	"sort"
	"strconv"
	"strings"
	"syscall"
	"time"
)

type Container struct {
	root string

	Id string

	Created time.Time

	Path string
	Args []string

	Config *Config
	State  State
	Image  string

	network         *NetworkInterface
	NetworkSettings *NetworkSettings

	SysInitPath    string
	ResolvConfPath string

	cmd       *exec.Cmd
	stdout    *writeBroadcaster
	stderr    *writeBroadcaster
	stdin     io.ReadCloser
	stdinPipe io.WriteCloser
	ptyMaster io.Closer

	runtime *Runtime

	waitLock chan struct{}
}

type Config struct {
	Hostname     string
	User         string
	Memory       int64 // Memory limit (in bytes)
	MemorySwap   int64 // Total memory usage (memory + swap); set `-1' to disable swap
	AttachStdin  bool
	AttachStdout bool
	AttachStderr bool
	PortSpecs    []string
	Tty          bool // Attach standard streams to a tty, including stdin if it is not closed.
	OpenStdin    bool // Open stdin
	StdinOnce    bool // If true, close stdin after the first attached client disconnects.
	Env          []string
	Cmd          []string
	Dns          []string
	Image        string // Name of the image as it was passed by the operator (eg. could be symbolic)
}

func ParseRun(args []string, stdout io.Writer, capabilities *Capabilities) (*Config, error) {
	cmd := rcli.Subcmd(stdout, "run", "[OPTIONS] IMAGE COMMAND [ARG...]", "Run a command in a new container")
	if len(args) > 0 && args[0] != "--help" {
		cmd.SetOutput(ioutil.Discard)
	}

	flHostname := cmd.String("h", "", "Container host name")
	flUser := cmd.String("u", "", "Username or UID")
	flDetach := cmd.Bool("d", false, "Detached mode: leave the container running in the background")
	flAttach := NewAttachOpts()
	cmd.Var(flAttach, "a", "Attach to stdin, stdout or stderr.")
	flStdin := cmd.Bool("i", false, "Keep stdin open even if not attached")
	flTty := cmd.Bool("t", false, "Allocate a pseudo-tty")
	flMemory := cmd.Int64("m", 0, "Memory limit (in bytes)")

	if *flMemory > 0 && !capabilities.MemoryLimit {
		fmt.Fprintf(stdout, "WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
		*flMemory = 0
	}

	var flPorts ListOpts
	cmd.Var(&flPorts, "p", "Expose a container's port to the host (use 'docker port' to see the actual mapping)")

	var flEnv ListOpts
	cmd.Var(&flEnv, "e", "Set environment variables")

	var flDns ListOpts
	cmd.Var(&flDns, "dns", "Set custom dns servers")

	if err := cmd.Parse(args); err != nil {
		return nil, err
	}
	if *flDetach && len(flAttach) > 0 {
		return nil, fmt.Errorf("Conflicting options: -a and -d")
	}
	// If neither -d nor -a is set, attach to everything by default
	if len(flAttach) == 0 && !*flDetach {
		if !*flDetach {
			flAttach.Set("stdout")
			flAttach.Set("stderr")
			if *flStdin {
				flAttach.Set("stdin")
			}
		}
	}
	parsedArgs := cmd.Args()
	runCmd := []string{}
	image := ""
	if len(parsedArgs) >= 1 {
		image = cmd.Arg(0)
	}
	if len(parsedArgs) > 1 {
		runCmd = parsedArgs[1:]
	}
	config := &Config{
		Hostname:     *flHostname,
		PortSpecs:    flPorts,
		User:         *flUser,
		Tty:          *flTty,
		OpenStdin:    *flStdin,
		Memory:       *flMemory,
		AttachStdin:  flAttach.Get("stdin"),
		AttachStdout: flAttach.Get("stdout"),
		AttachStderr: flAttach.Get("stderr"),
		Env:          flEnv,
		Cmd:          runCmd,
		Dns:          flDns,
		Image:        image,
	}

	if *flMemory > 0 && !capabilities.SwapLimit {
		fmt.Fprintf(stdout, "WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
		config.MemorySwap = -1
	}

	// When allocating stdin in attached mode, close stdin at client disconnect
	if config.OpenStdin && config.AttachStdin {
		config.StdinOnce = true
	}
	return config, nil
}
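
// A minimal usage sketch of ParseRun, assuming a *Capabilities value is at
// hand (Start() below reads the same capability flags from the runtime):
//
//	caps := &Capabilities{MemoryLimit: true, SwapLimit: true}
//	config, err := ParseRun([]string{"-i", "-t", "base", "/bin/bash"}, os.Stdout, caps)
//	if err != nil {
//		log.Fatal(err)
//	}
//	// config.Image == "base", config.Cmd == []string{"/bin/bash"},
//	// Tty and OpenStdin are true, and with no -a given the attach flags
//	// default to stdin/stdout/stderr in attached mode.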

type NetworkSettings struct {
	IpAddress   string
	IpPrefixLen int
	Gateway     string
	Bridge      string
	PortMapping map[string]string
}

// PortMappingHuman returns a human-readable description of the port mapping defined in the settings
func (settings *NetworkSettings) PortMappingHuman() string {
	var mapping []string
	for private, public := range settings.PortMapping {
		mapping = append(mapping, fmt.Sprintf("%s->%s", public, private))
	}
	sort.Strings(mapping)
	return strings.Join(mapping, ", ")
}

// Inject the io.Reader at the given path. Note: do not close the reader
func (container *Container) Inject(file io.Reader, pth string) error {
	// Make sure the directory exists
	if err := os.MkdirAll(path.Join(container.rwPath(), path.Dir(pth)), 0755); err != nil {
		return err
	}
	// FIXME: Handle permissions/already existing dest
	dest, err := os.Create(path.Join(container.rwPath(), pth))
	if err != nil {
		return err
	}
	if _, err := io.Copy(dest, file); err != nil {
		return err
	}
	return nil
}

func (container *Container) Cmd() *exec.Cmd {
	return container.cmd
}

func (container *Container) When() time.Time {
	return container.Created
}

func (container *Container) FromDisk() error {
	data, err := ioutil.ReadFile(container.jsonPath())
	if err != nil {
		return err
	}
	// Load container settings
	if err := json.Unmarshal(data, container); err != nil {
		return err
	}
	return nil
}

func (container *Container) ToDisk() (err error) {
	data, err := json.Marshal(container)
	if err != nil {
		return
	}
	return ioutil.WriteFile(container.jsonPath(), data, 0666)
}

func (container *Container) generateLXCConfig() error {
	fo, err := os.Create(container.lxcConfigPath())
	if err != nil {
		return err
	}
	defer fo.Close()
	if err := LxcTemplateCompiled.Execute(fo, container); err != nil {
		return err
	}
	return nil
}

func (container *Container) startPty() error {
	ptyMaster, ptySlave, err := pty.Open()
	if err != nil {
		return err
	}
	container.ptyMaster = ptyMaster
	container.cmd.Stdout = ptySlave
	container.cmd.Stderr = ptySlave

	// Copy the PTYs to our broadcasters
	go func() {
		defer container.stdout.CloseWriters()
		Debugf("[startPty] Begin of stdout pipe")
		io.Copy(container.stdout, ptyMaster)
		Debugf("[startPty] End of stdout pipe")
	}()

	// stdin
	if container.Config.OpenStdin {
		container.cmd.Stdin = ptySlave
		container.cmd.SysProcAttr = &syscall.SysProcAttr{Setctty: true, Setsid: true}
		go func() {
			defer container.stdin.Close()
			Debugf("[startPty] Begin of stdin pipe")
			io.Copy(ptyMaster, container.stdin)
			Debugf("[startPty] End of stdin pipe")
		}()
	}
	if err := container.cmd.Start(); err != nil {
		return err
	}
	ptySlave.Close()
	return nil
}
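
// A self-contained sketch of the pty wiring used above, assuming
// github.com/kr/pty exposes Open() (master, slave *os.File, err error):
//
//	master, slave, err := pty.Open()
//	if err != nil {
//		log.Fatal(err)
//	}
//	cmd := exec.Command("/bin/sh", "-c", "echo hello from a tty")
//	cmd.Stdout = slave
//	cmd.Stderr = slave
//	if err := cmd.Start(); err != nil {
//		log.Fatal(err)
//	}
//	slave.Close()              // the child holds its own copy of the slave fd
//	io.Copy(os.Stdout, master) // read the child's output from the master side
//	cmd.Wait()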

func (container *Container) start() error {
	container.cmd.Stdout = container.stdout
	container.cmd.Stderr = container.stderr
	if container.Config.OpenStdin {
		stdin, err := container.cmd.StdinPipe()
		if err != nil {
			return err
		}
		go func() {
			defer stdin.Close()
			Debugf("Begin of stdin pipe [start]")
			io.Copy(stdin, container.stdin)
			Debugf("End of stdin pipe [start]")
		}()
	}
	return container.cmd.Start()
}

func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, stdout io.Writer, stderr io.Writer) chan error {
	var cStdout, cStderr io.ReadCloser

	var nJobs int
	errors := make(chan error, 3)
	if stdin != nil && container.Config.OpenStdin {
		nJobs += 1
		if cStdin, err := container.StdinPipe(); err != nil {
			errors <- err
		} else {
			go func() {
				Debugf("[start] attach stdin\n")
				defer Debugf("[end] attach stdin\n")
				// No matter what, when stdin is closed (io.Copy unblocks), close stdout and stderr
				if cStdout != nil {
					defer cStdout.Close()
				}
				if cStderr != nil {
					defer cStderr.Close()
				}
				if container.Config.StdinOnce && !container.Config.Tty {
					defer cStdin.Close()
				}
				if container.Config.Tty {
					_, err = CopyEscapable(cStdin, stdin)
				} else {
					_, err = io.Copy(cStdin, stdin)
				}
				if err != nil {
					Debugf("[error] attach stdin: %s\n", err)
				}
				// Discard the error; we are expecting a pipe error here
				errors <- nil
			}()
		}
	}
	if stdout != nil {
		nJobs += 1
		if p, err := container.StdoutPipe(); err != nil {
			errors <- err
		} else {
			cStdout = p
			go func() {
				Debugf("[start] attach stdout\n")
				defer Debugf("[end] attach stdout\n")
				// If we are in StdinOnce mode, then close stdin
				if container.Config.StdinOnce {
					if stdin != nil {
						defer stdin.Close()
					}
					if stdinCloser != nil {
						defer stdinCloser.Close()
					}
				}
				_, err := io.Copy(stdout, cStdout)
				if err != nil {
					Debugf("[error] attach stdout: %s\n", err)
				}
				errors <- err
			}()
		}
	}
	if stderr != nil {
		nJobs += 1
		if p, err := container.StderrPipe(); err != nil {
			errors <- err
		} else {
			cStderr = p
			go func() {
				Debugf("[start] attach stderr\n")
				defer Debugf("[end] attach stderr\n")
				// If we are in StdinOnce mode, then close stdin
				if container.Config.StdinOnce {
					if stdin != nil {
						defer stdin.Close()
					}
					if stdinCloser != nil {
						defer stdinCloser.Close()
					}
				}
				_, err := io.Copy(stderr, cStderr)
				if err != nil {
					Debugf("[error] attach stderr: %s\n", err)
				}
				errors <- err
			}()
		}
	}
	return Go(func() error {
		if cStdout != nil {
			defer cStdout.Close()
		}
		if cStderr != nil {
			defer cStderr.Close()
		}
		// FIXME: how do we clean up the stdin goroutine without the unwanted side effect
		// of closing the passed stdin? Add an intermediary io.Pipe?
		for i := 0; i < nJobs; i += 1 {
			Debugf("Waiting for job %d/%d\n", i+1, nJobs)
			if err := <-errors; err != nil {
				Debugf("Job %d returned error %s. Aborting all jobs\n", i+1, err)
				return err
			}
			Debugf("Job %d completed successfully\n", i+1)
		}
		Debugf("All jobs completed successfully\n")
		return nil
	})
}
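
// A minimal sketch of driving Attach with in-memory pipes, in the spirit of
// the tests earlier in this diff (assumes a started container with OpenStdin
// set; separate buffers avoid concurrent writes from the stdout/stderr jobs):
//
//	stdinR, stdinW := io.Pipe()
//	var outBuf, errBuf bytes.Buffer
//	errs := container.Attach(stdinR, nil, &outBuf, &errBuf)
//	stdinW.Write([]byte("hello\n"))
//	stdinW.Close() // with StdinOnce set, this lets the attach jobs finish
//	if err := <-errs; err != nil {
//		log.Fatal(err)
//	}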

func (container *Container) Start() error {
	container.State.lock()
	defer container.State.unlock()

	if container.State.Running {
		return fmt.Errorf("The container %s is already running.", container.Id)
	}
	if err := container.EnsureMounted(); err != nil {
		return err
	}
	if err := container.allocateNetwork(); err != nil {
		return err
	}

	// Make sure the config is compatible with the current kernel
	if container.Config.Memory > 0 && !container.runtime.capabilities.MemoryLimit {
		log.Printf("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
		container.Config.Memory = 0
	}
	if container.Config.Memory > 0 && !container.runtime.capabilities.SwapLimit {
		log.Printf("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
		container.Config.MemorySwap = -1
	}

	if err := container.generateLXCConfig(); err != nil {
		return err
	}
	params := []string{
		"-n", container.Id,
		"-f", container.lxcConfigPath(),
		"--",
		"/sbin/init",
	}

	// Networking
	params = append(params, "-g", container.network.Gateway.String())

	// User
	if container.Config.User != "" {
		params = append(params, "-u", container.Config.User)
	}

	if container.Config.Tty {
		params = append(params, "-e", "TERM=xterm")
	}

	// Setup environment
	params = append(params,
		"-e", "HOME=/",
		"-e", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
	)

	for _, elem := range container.Config.Env {
		params = append(params, "-e", elem)
	}

	// Program
	params = append(params, "--", container.Path)
	params = append(params, container.Args...)

	container.cmd = exec.Command("lxc-start", params...)

	// Setup logging of stdout and stderr to disk
	if err := container.runtime.LogToDisk(container.stdout, container.logPath("stdout")); err != nil {
		return err
	}
	if err := container.runtime.LogToDisk(container.stderr, container.logPath("stderr")); err != nil {
		return err
	}

	var err error
	if container.Config.Tty {
		err = container.startPty()
	} else {
		err = container.start()
	}
	if err != nil {
		return err
	}
	// FIXME: save state on disk *first*, then converge.
	// This way disk state is used as a journal, eg. we can restore after a crash etc.
	container.State.setRunning(container.cmd.Process.Pid)

	// Init the lock
	container.waitLock = make(chan struct{})
	container.ToDisk()
	go container.monitor()
	return nil
}

func (container *Container) Run() error {
	if err := container.Start(); err != nil {
		return err
	}
	container.Wait()
	return nil
}

func (container *Container) Output() (output []byte, err error) {
	pipe, err := container.StdoutPipe()
	if err != nil {
		return nil, err
	}
	defer pipe.Close()
	if err := container.Start(); err != nil {
		return nil, err
	}
	output, err = ioutil.ReadAll(pipe)
	container.Wait()
	return output, err
}

// StdinPipe returns a pipe connected to the standard input of the container's
// active process.
func (container *Container) StdinPipe() (io.WriteCloser, error) {
	return container.stdinPipe, nil
}

func (container *Container) StdoutPipe() (io.ReadCloser, error) {
	reader, writer := io.Pipe()
	container.stdout.AddWriter(writer)
	return newBufReader(reader), nil
}

func (container *Container) StderrPipe() (io.ReadCloser, error) {
	reader, writer := io.Pipe()
	container.stderr.AddWriter(writer)
	return newBufReader(reader), nil
}

func (container *Container) allocateNetwork() error {
	iface, err := container.runtime.networkManager.Allocate()
	if err != nil {
		return err
	}
	container.NetworkSettings.PortMapping = make(map[string]string)
	for _, spec := range container.Config.PortSpecs {
		if nat, err := iface.AllocatePort(spec); err != nil {
			iface.Release()
			return err
		} else {
			container.NetworkSettings.PortMapping[strconv.Itoa(nat.Backend)] = strconv.Itoa(nat.Frontend)
		}
	}
	container.network = iface
	container.NetworkSettings.Bridge = container.runtime.networkManager.bridgeIface
	container.NetworkSettings.IpAddress = iface.IPNet.IP.String()
	container.NetworkSettings.IpPrefixLen, _ = iface.IPNet.Mask.Size()
	container.NetworkSettings.Gateway = iface.Gateway.String()
	return nil
}

func (container *Container) releaseNetwork() {
	container.network.Release()
	container.network = nil
	container.NetworkSettings = &NetworkSettings{}
}

// FIXME: replace this with a control socket within docker-init
func (container *Container) waitLxc() error {
	for {
		if output, err := exec.Command("lxc-info", "-n", container.Id).CombinedOutput(); err != nil {
			return err
		} else {
			if !strings.Contains(string(output), "RUNNING") {
				return nil
			}
		}
		time.Sleep(500 * time.Millisecond)
	}
	return nil
}

func (container *Container) monitor() {
	// Wait for the program to exit
	Debugf("Waiting for process")

	// If the command does not exist, try to wait via lxc
	if container.cmd == nil {
		if err := container.waitLxc(); err != nil {
			Debugf("%s: Process: %s", container.Id, err)
		}
	} else {
		if err := container.cmd.Wait(); err != nil {
			// Discard the error, as any signal or non-zero return will generate an error
			Debugf("%s: Process: %s", container.Id, err)
		}
	}
	Debugf("Process finished")

	var exitCode int = -1
	if container.cmd != nil {
		exitCode = container.cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
	}

	// Cleanup
	container.releaseNetwork()
	if container.Config.OpenStdin {
		if err := container.stdin.Close(); err != nil {
			Debugf("%s: Error closing stdin: %s", container.Id, err)
		}
	}
	if err := container.stdout.CloseWriters(); err != nil {
		Debugf("%s: Error closing stdout: %s", container.Id, err)
	}
	if err := container.stderr.CloseWriters(); err != nil {
		Debugf("%s: Error closing stderr: %s", container.Id, err)
	}

	if container.ptyMaster != nil {
		if err := container.ptyMaster.Close(); err != nil {
			Debugf("%s: Error closing pty master: %s", container.Id, err)
		}
	}

	if err := container.Unmount(); err != nil {
		log.Printf("%v: Failed to umount filesystem: %v", container.Id, err)
	}

	// Re-create a brand new stdin pipe once the container exited
	if container.Config.OpenStdin {
		container.stdin, container.stdinPipe = io.Pipe()
	}

	// Report status back
	container.State.setStopped(exitCode)

	// Release the lock
	close(container.waitLock)

	if err := container.ToDisk(); err != nil {
		// FIXME: there is a race condition here which causes this to fail during the unit tests.
		// If another goroutine was waiting for Wait() to return before removing the container's root
		// from the filesystem... At this point it may already have done so.
		// This is because State.setStopped() has already been called, and has caused Wait()
		// to return.
		// FIXME: why are we serializing running state to disk in the first place?
		//log.Printf("%s: Failed to dump configuration to the disk: %s", container.Id, err)
	}
}

func (container *Container) kill() error {
	if !container.State.Running {
		return nil
	}

	// 1. Send SIGKILL to the process via lxc
	output, err := exec.Command("lxc-kill", "-n", container.Id, "9").CombinedOutput()
	if err != nil {
		log.Printf("error killing container %s (%s, %s)", container.Id, output, err)
	}

	// 2. Wait for the process to die; as a last resort, try to kill the process directly
	if err := container.WaitTimeout(10 * time.Second); err != nil {
		if container.cmd == nil {
			return fmt.Errorf("lxc-kill failed, impossible to kill the container %s", container.Id)
		}
		log.Printf("Container %s failed to exit within 10 seconds of lxc SIGKILL - trying direct SIGKILL", container.Id)
		if err := container.cmd.Process.Kill(); err != nil {
			return err
		}
	}

	// Wait for the container to be actually stopped
	container.Wait()
	return nil
}

func (container *Container) Kill() error {
	container.State.lock()
	defer container.State.unlock()
	if !container.State.Running {
		return nil
	}
	return container.kill()
}

func (container *Container) Stop(seconds int) error {
	container.State.lock()
	defer container.State.unlock()
	if !container.State.Running {
		return nil
	}

	// 1. Send a SIGTERM
	if output, err := exec.Command("lxc-kill", "-n", container.Id, "15").CombinedOutput(); err != nil {
		log.Print(string(output))
		log.Print("Failed to send SIGTERM to the process, force killing")
		if err := container.kill(); err != nil {
			return err
		}
	}

	// 2. Wait for the process to exit on its own
	if err := container.WaitTimeout(time.Duration(seconds) * time.Second); err != nil {
		log.Printf("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.Id, seconds)
		if err := container.kill(); err != nil {
			return err
		}
	}
	return nil
}

func (container *Container) Restart(seconds int) error {
	if err := container.Stop(seconds); err != nil {
		return err
	}
	if err := container.Start(); err != nil {
		return err
	}
	return nil
}

// Wait blocks until the container stops running, then returns its exit code.
func (container *Container) Wait() int {
	<-container.waitLock
	return container.State.ExitCode
}

func (container *Container) ExportRw() (Archive, error) {
	return Tar(container.rwPath(), Uncompressed)
}

func (container *Container) Export() (Archive, error) {
	if err := container.EnsureMounted(); err != nil {
		return nil, err
	}
	return Tar(container.RootfsPath(), Uncompressed)
}

func (container *Container) WaitTimeout(timeout time.Duration) error {
	done := make(chan bool)
	go func() {
		container.Wait()
		done <- true
	}()

	select {
	case <-time.After(timeout):
		return fmt.Errorf("Timed Out")
	case <-done:
		return nil
	}
	panic("unreachable")
}
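
// The select/time.After pattern above is how the tests earlier in this diff
// bound how long they wait on a container, e.g.:
//
//	if err := container.WaitTimeout(500 * time.Millisecond); err != nil {
//		// still running after the deadline; decide whether that is a failure
//	}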

func (container *Container) EnsureMounted() error {
	if mounted, err := container.Mounted(); err != nil {
		return err
	} else if mounted {
		return nil
	}
	return container.Mount()
}

func (container *Container) Mount() error {
	image, err := container.GetImage()
	if err != nil {
		return err
	}
	return image.Mount(container.RootfsPath(), container.rwPath())
}

func (container *Container) Changes() ([]Change, error) {
	image, err := container.GetImage()
	if err != nil {
		return nil, err
	}
	return image.Changes(container.rwPath())
}

func (container *Container) GetImage() (*Image, error) {
	if container.runtime == nil {
		return nil, fmt.Errorf("Can't get image of unregistered container")
	}
	return container.runtime.graph.Get(container.Image)
}

func (container *Container) Mounted() (bool, error) {
	return Mounted(container.RootfsPath())
}

func (container *Container) Unmount() error {
	return Unmount(container.RootfsPath())
}

// ShortId returns a shorthand version of the container's id for convenience.
// A collision with other container shorthands is very unlikely, but possible.
// In case of a collision a lookup with Runtime.Get() will fail, and the caller
// will need to use a longer prefix, or the full-length container Id.
func (container *Container) ShortId() string {
	return TruncateId(container.Id)
}
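
// TruncateId is defined elsewhere and is not part of this diff; a plausible
// sketch, assuming short ids are a fixed-length prefix of the full id:
//
//	func TruncateId(id string) string {
//		shortLen := 12 // assumption: short ids are 12 characters
//		if len(id) < shortLen {
//			shortLen = len(id)
//		}
//		return id[:shortLen]
//	}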

func (container *Container) logPath(name string) string {
	return path.Join(container.root, fmt.Sprintf("%s-%s.log", container.Id, name))
}

func (container *Container) ReadLog(name string) (io.Reader, error) {
	return os.Open(container.logPath(name))
}

func (container *Container) jsonPath() string {
	return path.Join(container.root, "config.json")
}

func (container *Container) lxcConfigPath() string {
	return path.Join(container.root, "config.lxc")
}

// This method must be exported to be used from the lxc template
func (container *Container) RootfsPath() string {
	return path.Join(container.root, "rootfs")
}

func (container *Container) rwPath() string {
	return path.Join(container.root, "rw")
}

func validateId(id string) error {
	if id == "" {
		return fmt.Errorf("Invalid empty id")
	}
	return nil
}
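
// A minimal end-to-end sketch tying the pieces above together, assuming a
// *Runtime is at hand (as in the tests earlier in this diff):
//
//	container, err := runtime.Create(&Config{
//		Image: GetTestImage(runtime).Id,
//		Cmd:   []string{"echo", "-n", "hello"},
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer runtime.Destroy(container)
//	output, err := container.Output() // Start, read stdout, then Wait
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Printf("%s (exit code %d)\n", output, container.State.ExitCode)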
1084
container_test.go
Normal file
File diff suppressed because it is too large
@@ -1 +0,0 @@
Tianon Gravi <admwiggin@gmail.com> (@tianon)
@@ -1,170 +0,0 @@
#!/usr/bin/env bash
set -e

# bits of this were adapted from lxc-checkconfig
# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in

possibleConfigs=(
	'/proc/config.gz'
	"/boot/config-$(uname -r)"
	"/usr/src/linux-$(uname -r)/.config"
	'/usr/src/linux/.config'
)
: ${CONFIG:="${possibleConfigs[0]}"}

if ! command -v zgrep &> /dev/null; then
	zgrep() {
		zcat "$2" | grep "$1"
	}
fi

is_set() {
	zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null
}

# see http://en.wikipedia.org/wiki/ANSI_escape_code#Colors
declare -A colors=(
	[black]=30
	[red]=31
	[green]=32
	[yellow]=33
	[blue]=34
	[magenta]=35
	[cyan]=36
	[white]=37
)
color() {
	color=()
	if [ "$1" = 'bold' ]; then
		color+=( '1' )
		shift
	fi
	if [ $# -gt 0 ] && [ "${colors[$1]}" ]; then
		color+=( "${colors[$1]}" )
	fi
	local IFS=';'
	echo -en '\033['"${color[*]}"m
}
wrap_color() {
	text="$1"
	shift
	color "$@"
	echo -n "$text"
	color reset
	echo
}

wrap_good() {
	echo "$(wrap_color "$1" white): $(wrap_color "$2" green)"
}
wrap_bad() {
	echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)"
}
wrap_warning() {
	wrap_color >&2 "$*" red
}

check_flag() {
	if is_set "$1"; then
		wrap_good "CONFIG_$1" 'enabled'
	else
		wrap_bad "CONFIG_$1" 'missing'
	fi
}

check_flags() {
	for flag in "$@"; do
		echo "- $(check_flag "$flag")"
	done
}
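
# A small usage sketch of the helpers above. Given a hypothetical config that
# enables namespaces but lacks swap accounting, the output would look like:
#
#   $ check_flags NAMESPACES MEMCG_SWAP
#   - CONFIG_NAMESPACES: enabled
#   - CONFIG_MEMCG_SWAP: missing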

if [ ! -e "$CONFIG" ]; then
	wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config..."
	for tryConfig in "${possibleConfigs[@]}"; do
		if [ -e "$tryConfig" ]; then
			CONFIG="$tryConfig"
			break
		fi
	done
	if [ ! -e "$CONFIG" ]; then
		wrap_warning "error: cannot find kernel config"
		wrap_warning "  try running this script again, specifying the kernel config:"
		wrap_warning "  CONFIG=/path/to/kernel/.config $0"
		exit 1
	fi
fi

wrap_color "info: reading kernel config from $CONFIG ..." white
echo

echo 'Generally Necessary:'

echo -n '- '
cgroupSubsystemDir="$(awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' /proc/mounts | head -n1)"
cgroupDir="$(dirname "$cgroupSubsystemDir")"
if [ -d "$cgroupDir/cpu" -o -d "$cgroupDir/cpuacct" -o -d "$cgroupDir/cpuset" -o -d "$cgroupDir/devices" -o -d "$cgroupDir/freezer" -o -d "$cgroupDir/memory" ]; then
	echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]"
else
	if [ "$cgroupSubsystemDir" ]; then
		echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]"
	else
		echo "$(wrap_bad 'cgroup hierarchy' 'nonexistent??')"
	fi
	echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)"
fi

if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then
	echo -n '- '
	if command -v apparmor_parser &> /dev/null; then
		echo "$(wrap_good 'apparmor' 'enabled and tools installed')"
	else
		echo "$(wrap_bad 'apparmor' 'enabled, but apparmor_parser missing')"
		echo -n ' '
		if command -v apt-get &> /dev/null; then
			echo "$(wrap_color '(use "apt-get install apparmor" to fix this)')"
		elif command -v yum &> /dev/null; then
			echo "$(wrap_color '(your best bet is "yum install apparmor-parser")')"
		else
			echo "$(wrap_color '(look for an "apparmor" package for your distribution)')"
		fi
	fi
fi

flags=(
	NAMESPACES {NET,PID,IPC,UTS}_NS
	DEVPTS_MULTIPLE_INSTANCES
	CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED
	MACVLAN VETH BRIDGE
	NF_NAT_IPV4 IP_NF_TARGET_MASQUERADE
	NETFILTER_XT_MATCH_{ADDRTYPE,CONNTRACK}
	NF_NAT NF_NAT_NEEDED
)
check_flags "${flags[@]}"
echo

echo 'Optional Features:'
flags=(
	MEMCG_SWAP
	RESOURCE_COUNTERS
)
check_flags "${flags[@]}"

echo '- Storage Drivers:'
{
	echo '- "'$(wrap_color 'aufs' blue)'":'
	check_flags AUFS_FS | sed 's/^/ /'
	if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then
		echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)"
	fi

	echo '- "'$(wrap_color 'btrfs' blue)'":'
	check_flags BTRFS_FS | sed 's/^/ /'

	echo '- "'$(wrap_color 'devicemapper' blue)'":'
	check_flags BLK_DEV_DM DM_THIN_PROVISIONING EXT4_FS | sed 's/^/ /'
} | sed 's/^/ /'
echo

#echo 'Potential Future Features:'
#check_flags USER_NS
#echo
@@ -1,721 +0,0 @@
#!bash
#
# bash completion file for core docker commands
#
# This script provides support for completion of:
#  - commands and their options
#  - container ids and names
#  - image repos and tags
#  - filepaths
#
# To enable the completions either:
#  - place this file in /etc/bash_completion.d
#  or
#  - copy this file and add the line below to your .bashrc after
#    bash completion features are loaded
#     . docker.bash
#
# Note:
# Currently, the completions will not work if the docker daemon is not
# bound to the default communication port/socket.
# If the docker daemon is using a unix socket for communication your user
# must have access to the socket for the completions to function correctly.

__docker_q() {
	docker 2>/dev/null "$@"
}

__docker_containers_all()
{
	local containers="$( __docker_q ps -a -q )"
	local names="$( __docker_q inspect --format '{{.Name}}' $containers | sed 's,^/,,' )"
	COMPREPLY=( $( compgen -W "$names $containers" -- "$cur" ) )
}

__docker_containers_running()
{
	local containers="$( __docker_q ps -q )"
	local names="$( __docker_q inspect --format '{{.Name}}' $containers | sed 's,^/,,' )"
	COMPREPLY=( $( compgen -W "$names $containers" -- "$cur" ) )
}

__docker_containers_stopped()
{
	local containers="$( { __docker_q ps -a -q; __docker_q ps -q; } | sort | uniq -u )"
	local names="$( __docker_q inspect --format '{{.Name}}' $containers | sed 's,^/,,' )"
	COMPREPLY=( $( compgen -W "$names $containers" -- "$cur" ) )
}

__docker_image_repos()
{
	local repos="$( __docker_q images | awk 'NR>1{print $1}' | grep -v '^<none>$' )"
	COMPREPLY=( $( compgen -W "$repos" -- "$cur" ) )
}

__docker_image_repos_and_tags()
{
	local repos="$( __docker_q images | awk 'NR>1{print $1}' | grep -v '^<none>$' )"
	local images="$( __docker_q images | awk 'NR>1{print $1":"$2}' | grep -v '^<none>:' )"
	COMPREPLY=( $( compgen -W "$repos $images" -- "$cur" ) )
	__ltrim_colon_completions "$cur"
}

__docker_image_repos_and_tags_and_ids()
{
	local repos="$( __docker_q images | awk 'NR>1{print $1}' | grep -v '^<none>$' )"
	local images="$( __docker_q images | awk 'NR>1{print $1":"$2}' | grep -v '^<none>:' )"
	local ids="$( __docker_q images -a -q )"
	COMPREPLY=( $( compgen -W "$repos $images $ids" -- "$cur" ) )
	__ltrim_colon_completions "$cur"
}

__docker_containers_and_images()
{
	local containers="$( __docker_q ps -a -q )"
	local names="$( __docker_q inspect --format '{{.Name}}' $containers | sed 's,^/,,' )"
	local repos="$( __docker_q images | awk 'NR>1{print $1}' | grep -v '^<none>$' )"
	local images="$( __docker_q images | awk 'NR>1{print $1":"$2}' | grep -v '^<none>:' )"
	local ids="$( __docker_q images -a -q )"
	COMPREPLY=( $( compgen -W "$containers $names $repos $images $ids" -- "$cur" ) )
	__ltrim_colon_completions "$cur"
}

__docker_pos_first_nonflag()
{
	local argument_flags=$1

	local counter=$cpos
	while [ $counter -le $cword ]; do
		if [ -n "$argument_flags" ] && eval "case '${words[$counter]}' in $argument_flags) true ;; *) false ;; esac"; then
			(( counter++ ))
		else
			case "${words[$counter]}" in
				-*)
					;;
				*)
					break
					;;
			esac
		fi
		(( counter++ ))
	done

	echo $counter
}
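
# __docker_pos_first_nonflag walks the words of the current command line and
# echoes the index of the first positional (non-flag) word, skipping any flag
# listed in $1 together with the argument that follows it. For example, for
# the hypothetical input `docker run -m 512m ubuntu echo hi` with
# argument_flags '-m', it reports the index of 'ubuntu', which lets the
# completion functions below decide whether the cursor is on the image name
# or already past it.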

_docker_docker()
{
	case "$prev" in
		-H)
			return
			;;
		*)
			;;
	esac

	case "$cur" in
		-*)
			COMPREPLY=( $( compgen -W "-H" -- "$cur" ) )
			;;
		*)
			COMPREPLY=( $( compgen -W "$commands help" -- "$cur" ) )
			;;
	esac
}

_docker_attach()
{
	case "$cur" in
		-*)
			COMPREPLY=( $( compgen -W "--no-stdin --sig-proxy" -- "$cur" ) )
			;;
		*)
			local counter="$(__docker_pos_first_nonflag)"
			if [ $cword -eq $counter ]; then
				__docker_containers_running
			fi
			;;
	esac
}

_docker_build()
{
	case "$prev" in
		-t|--tag)
			__docker_image_repos_and_tags
			return
			;;
		*)
			;;
	esac

	case "$cur" in
		-*)
			COMPREPLY=( $( compgen -W "-t --tag -q --quiet --no-cache --rm" -- "$cur" ) )
			;;
		*)
			local counter="$(__docker_pos_first_nonflag '-t|--tag')"
			if [ $cword -eq $counter ]; then
				_filedir -d
			fi
			;;
	esac
}

_docker_commit()
{
	case "$prev" in
		-m|--message|-a|--author|--run)
			return
			;;
		*)
			;;
	esac

	case "$cur" in
		-*)
			COMPREPLY=( $( compgen -W "-m --message -a --author --run" -- "$cur" ) )
			;;
		*)
			local counter=$(__docker_pos_first_nonflag '-m|--message|-a|--author|--run')

			if [ $cword -eq $counter ]; then
				__docker_containers_all
				return
			fi
			(( counter++ ))

			if [ $cword -eq $counter ]; then
				__docker_image_repos_and_tags
				return
			fi
			;;
	esac
}

_docker_cp()
{
	local counter=$(__docker_pos_first_nonflag)
	if [ $cword -eq $counter ]; then
		case "$cur" in
			*:)
				return
				;;
			*)
				__docker_containers_all
				COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) )
				compopt -o nospace
				return
				;;
		esac
	fi
	(( counter++ ))

	if [ $cword -eq $counter ]; then
		_filedir
		return
	fi
}

_docker_diff()
{
	local counter=$(__docker_pos_first_nonflag)
	if [ $cword -eq $counter ]; then
		__docker_containers_all
	fi
}

_docker_events()
{
	case "$prev" in
		--since)
			return
			;;
		*)
			;;
	esac

	case "$cur" in
		-*)
			COMPREPLY=( $( compgen -W "--since" -- "$cur" ) )
			;;
		*)
			;;
	esac
}

_docker_export()
{
	local counter=$(__docker_pos_first_nonflag)
	if [ $cword -eq $counter ]; then
		__docker_containers_all
	fi
}

_docker_help()
{
	local counter=$(__docker_pos_first_nonflag)
	if [ $cword -eq $counter ]; then
		COMPREPLY=( $( compgen -W "$commands" -- "$cur" ) )
	fi
}

_docker_history()
{
	case "$cur" in
		-*)
			COMPREPLY=( $( compgen -W "-q --quiet --no-trunc" -- "$cur" ) )
			;;
		*)
			local counter=$(__docker_pos_first_nonflag)
			if [ $cword -eq $counter ]; then
				__docker_image_repos_and_tags_and_ids
			fi
			;;
	esac
}

_docker_images()
{
	case "$cur" in
		-*)
			COMPREPLY=( $( compgen -W "-q --quiet -a --all --no-trunc -v --viz -t --tree" -- "$cur" ) )
			;;
		*)
			local counter=$(__docker_pos_first_nonflag)
			if [ $cword -eq $counter ]; then
				__docker_image_repos
			fi
			;;
	esac
}

_docker_import()
{
	local counter=$(__docker_pos_first_nonflag)
	if [ $cword -eq $counter ]; then
		return
	fi
	(( counter++ ))

	if [ $cword -eq $counter ]; then
		__docker_image_repos_and_tags
		return
	fi
}

_docker_info()
{
	return
}

_docker_inspect()
{
	case "$prev" in
		-f|--format)
			return
			;;
		*)
			;;
	esac

	case "$cur" in
		-*)
			COMPREPLY=( $( compgen -W "-f --format" -- "$cur" ) )
			;;
		*)
			__docker_containers_and_images
			;;
	esac
}

_docker_kill()
{
	__docker_containers_running
}

_docker_load()
{
	return
}

_docker_login()
{
	case "$prev" in
		-u|--username|-p|--password|-e|--email)
			return
			;;
		*)
			;;
	esac

	case "$cur" in
		-*)
			COMPREPLY=( $( compgen -W "-u --username -p --password -e --email" -- "$cur" ) )
			;;
		*)
			;;
	esac
}

_docker_logs()
{
	case "$cur" in
		-*)
			COMPREPLY=( $( compgen -W "-f --follow" -- "$cur" ) )
			;;
		*)
			local counter=$(__docker_pos_first_nonflag)
			if [ $cword -eq $counter ]; then
				__docker_containers_all
			fi
			;;
	esac
}

_docker_port()
{
	local counter=$(__docker_pos_first_nonflag)
	if [ $cword -eq $counter ]; then
		__docker_containers_all
	fi
}

_docker_ps()
{
	case "$prev" in
		--since|--before)
			__docker_containers_all
			;;
		-n)
			return
			;;
		*)
			;;
	esac

	case "$cur" in
		-*)
			COMPREPLY=( $( compgen -W "-q --quiet -s --size -a --all --no-trunc -l --latest --since --before -n" -- "$cur" ) )
			;;
		*)
			;;
	esac
}

_docker_pull()
{
	case "$prev" in
		-t|--tag)
			return
			;;
		*)
			;;
	esac

	case "$cur" in
		-*)
			COMPREPLY=( $( compgen -W "-t --tag" -- "$cur" ) )
			;;
		*)
			local counter=$(__docker_pos_first_nonflag '-t|--tag')
			if [ $cword -eq $counter ]; then
				__docker_image_repos_and_tags
			fi
			;;
	esac
}

_docker_push()
{
	local counter=$(__docker_pos_first_nonflag)
	if [ $cword -eq $counter ]; then
		__docker_image_repos_and_tags
	fi
}

_docker_restart()
{
	case "$prev" in
		-t|--time)
			return
			;;
		*)
			;;
	esac

	case "$cur" in
		-*)
			COMPREPLY=( $( compgen -W "-t --time" -- "$cur" ) )
			;;
		*)
			__docker_containers_all
			;;
	esac
}

_docker_rm()
{
	case "$cur" in
		-*)
			COMPREPLY=( $( compgen -W "-f --force -l --link -v --volumes" -- "$cur" ) )
			return
			;;
		*)
			local force=
			for arg in "${COMP_WORDS[@]}"; do
				case "$arg" in
					-f|--force)
						__docker_containers_all
						return
						;;
				esac
			done
			__docker_containers_stopped
			return
			;;
	esac
}

_docker_rmi()
{
	__docker_image_repos_and_tags_and_ids
}

_docker_run()
{
	case "$prev" in
		-a|--attach)
			COMPREPLY=( $( compgen -W 'stdin stdout stderr' -- "$cur" ) )
			return
			;;
		--cidfile|--env-file)
			_filedir
			return
			;;
		--volumes-from)
			__docker_containers_all
			return
			;;
		-v|--volume)
			case "$cur" in
				*:*)
					# TODO somehow do _filedir for stuff inside the image, if it's already specified (which is also somewhat difficult to determine)
					;;
				'')
					COMPREPLY=( $( compgen -W '/' -- "$cur" ) )
					compopt -o nospace
					;;
				/*)
					_filedir
					compopt -o nospace
					;;
			esac
			return
			;;
		-e|--env)
			COMPREPLY=( $( compgen -e -- "$cur" ) )
			compopt -o nospace
			return
			;;
		--link)
			case "$cur" in
				*:*)
					;;
				*)
					__docker_containers_running
					COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) )
					compopt -o nospace
					;;
			esac
			return
			;;
		--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|-c|--cpu-shares|-n|--name|-p|--publish|--expose|--dns|--lxc-conf)
			return
			;;
		*)
			;;
	esac

	case "$cur" in
		-*)
			COMPREPLY=( $( compgen -W "--rm -d --detach -n --networking --privileged -P --publish-all -i --interactive -t --tty --cidfile --entrypoint -h --hostname -m --memory -u --user -w --workdir -c --cpu-shares --sig-proxy --name -a --attach -v --volume --link -e --env -p --publish --expose --dns --volumes-from --lxc-conf" -- "$cur" ) )
			;;
		*)
			local counter=$(__docker_pos_first_nonflag '--cidfile|--volumes-from|-v|--volume|-e|--env|--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf')

			if [ $cword -eq $counter ]; then
				__docker_image_repos_and_tags_and_ids
			fi
			;;
	esac
}

_docker_save()
{
	local counter=$(__docker_pos_first_nonflag)
	if [ $cword -eq $counter ]; then
		__docker_image_repos_and_tags_and_ids
	fi
}

_docker_search()
{
	case "$prev" in
		-s|--stars)
			return
			;;
		*)
			;;
	esac

	case "$cur" in
		-*)
			COMPREPLY=( $( compgen -W "--no-trunc --automated -s --stars" -- "$cur" ) )
			;;
		*)
			;;
	esac
}

_docker_start()
{
	case "$cur" in
		-*)
			COMPREPLY=( $( compgen -W "-a --attach -i --interactive" -- "$cur" ) )
			;;
		*)
			__docker_containers_stopped
			;;
	esac
}

_docker_stop()
{
	case "$prev" in
		-t|--time)
			return
			;;
		*)
			;;
	esac

	case "$cur" in
		-*)
			COMPREPLY=( $( compgen -W "-t --time" -- "$cur" ) )
			;;
		*)
			__docker_containers_running
			;;
	esac
}

_docker_tag()
{
	case "$cur" in
		-*)
			COMPREPLY=( $( compgen -W "-f --force" -- "$cur" ) )
			;;
		*)
			local counter=$(__docker_pos_first_nonflag)

			if [ $cword -eq $counter ]; then
				__docker_image_repos_and_tags
				return
			fi
			(( counter++ ))

			if [ $cword -eq $counter ]; then
				__docker_image_repos_and_tags
				return
			fi
			;;
	esac
}

_docker_top()
{
	local counter=$(__docker_pos_first_nonflag)
	if [ $cword -eq $counter ]; then
		__docker_containers_running
	fi
}

_docker_version()
{
	return
}

_docker_wait()
{
	__docker_containers_all
}

_docker()
{
	local commands="
			attach
			build
			commit
			cp
			diff
			events
			export
			history
			images
			import
			info
			insert
			inspect
			kill
			load
			login
			logs
			port
			ps
			pull
			push
			restart
			rm
			rmi
			run
			save
			search
			start
			stop
			tag
			top
			version
			wait
		"

	COMPREPLY=()
	local cur prev words cword
	_get_comp_words_by_ref -n : cur prev words cword

	local command='docker'
	local counter=1
	while [ $counter -lt $cword ]; do
		case "${words[$counter]}" in
			-H)
				(( counter++ ))
				;;
			-*)
				;;
			*)
				command="${words[$counter]}"
				cpos=$counter
				(( cpos++ ))
				break
				;;
		esac
		(( counter++ ))
	done

	local completions_func=_docker_${command}
	declare -F $completions_func >/dev/null && $completions_func

	return 0
}

complete -F _docker docker
@@ -1,257 +0,0 @@
# docker.fish - docker completions for fish shell
#
# This file is generated by gen_docker_fish_completions.py from:
# https://github.com/barnybug/docker-fish-completion
#
# To install the completions:
# mkdir -p ~/.config/fish/completions
# cp docker.fish ~/.config/fish/completions
#
# Completion supported:
# - parameters
# - commands
# - containers
# - images
# - repositories

function __fish_docker_no_subcommand --description 'Test if docker has yet to be given the subcommand'
	for i in (commandline -opc)
		if contains -- $i attach build commit cp diff events export history images import info insert inspect kill load login logs port ps pull push restart rm rmi run save search start stop tag top version wait
			return 1
		end
	end
	return 0
end

function __fish_print_docker_containers --description 'Print a list of docker containers' -a select
	switch $select
		case running
			docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS=" +"}; $5 ~ "^Up" {print $1 "\n" $(NF-1)}' | tr ',' '\n'
		case stopped
			docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS=" +"}; $5 ~ "^Exit" {print $1 "\n" $(NF-1)}' | tr ',' '\n'
		case all
			docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS=" +"}; {print $1 "\n" $(NF-1)}' | tr ',' '\n'
	end
end

function __fish_print_docker_images --description 'Print a list of docker images'
	docker images | command awk 'NR>1' | command grep -v '<none>' | command awk '{print $1":"$2}'
end

function __fish_print_docker_repositories --description 'Print a list of docker repositories'
	docker images | command awk 'NR>1' | command grep -v '<none>' | command awk '{print $1}' | command sort | command uniq
end
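
# Each helper above prints one candidate per line, which fish then offers
# through `complete -a`. A hypothetical session, assuming two local images:
#
#   > __fish_print_docker_repositories
#   base
#   ubuntu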

# common options
complete -c docker -f -n '__fish_docker_no_subcommand' -s D -l debug -d 'Enable debug mode'
complete -c docker -f -n '__fish_docker_no_subcommand' -s G -l group -d "Group to assign the unix socket specified by -H when running in daemon mode; use '' (the empty string) to disable setting of a group"
complete -c docker -f -n '__fish_docker_no_subcommand' -s H -l host -d 'tcp://host:port, unix://path/to/socket, fd://* or fd://socketfd to use in daemon mode. Multiple sockets can be specified'
complete -c docker -f -n '__fish_docker_no_subcommand' -l api-enable-cors -d 'Enable CORS headers in the remote API'
complete -c docker -f -n '__fish_docker_no_subcommand' -s b -l bridge -d "Attach containers to a pre-existing network bridge; use 'none' to disable container networking"
complete -c docker -f -n '__fish_docker_no_subcommand' -l bip -d "Use this CIDR notation address for the network bridge's IP, not compatible with -b"
complete -c docker -f -n '__fish_docker_no_subcommand' -s d -l daemon -d 'Enable daemon mode'
complete -c docker -f -n '__fish_docker_no_subcommand' -l dns -d 'Force docker to use specific DNS servers'
complete -c docker -f -n '__fish_docker_no_subcommand' -s e -l exec-driver -d 'Force the docker runtime to use a specific exec driver'
complete -c docker -f -n '__fish_docker_no_subcommand' -s g -l graph -d 'Path to use as the root of the docker runtime'
complete -c docker -f -n '__fish_docker_no_subcommand' -l icc -d 'Enable inter-container communication'
complete -c docker -f -n '__fish_docker_no_subcommand' -l ip -d 'Default IP address to use when binding container ports'
complete -c docker -f -n '__fish_docker_no_subcommand' -l ip-forward -d 'Disable enabling of net.ipv4.ip_forward'
complete -c docker -f -n '__fish_docker_no_subcommand' -l iptables -d "Disable docker's addition of iptables rules"
complete -c docker -f -n '__fish_docker_no_subcommand' -l mtu -d 'Set the containers network MTU; if no value is provided: default to the default route MTU or 1500 if no default route is available'
complete -c docker -f -n '__fish_docker_no_subcommand' -s p -l pidfile -d 'Path to use for daemon PID file'
complete -c docker -f -n '__fish_docker_no_subcommand' -s r -l restart -d 'Restart previously running containers'
complete -c docker -f -n '__fish_docker_no_subcommand' -s s -l storage-driver -d 'Force the docker runtime to use a specific storage driver'
complete -c docker -f -n '__fish_docker_no_subcommand' -s v -l version -d 'Print version information and quit'

# subcommands
# attach
complete -c docker -f -n '__fish_docker_no_subcommand' -a attach -d 'Attach to a running container'
complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l no-stdin -d 'Do not attach stdin'
complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l sig-proxy -d 'Proxify all received signal to the process (even in non-tty mode)'
complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -a '(__fish_print_docker_containers running)' -d "Container"

# build
complete -c docker -f -n '__fish_docker_no_subcommand' -a build -d 'Build an image from a Dockerfile'
complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l no-cache -d 'Do not use cache when building the image'
complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s q -l quiet -d 'Suppress the verbose output generated by the containers'
complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l rm -d 'Remove intermediate containers after a successful build'
complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s t -l tag -d 'Repository name (and optionally a tag) to be applied to the resulting image in case of success'

# commit
complete -c docker -f -n '__fish_docker_no_subcommand' -a commit -d "Create a new image from a container's changes"
complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s a -l author -d 'Author (e.g., "John Hannibal Smith <hannibal@a-team.com>"'
complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s m -l message -d 'Commit message'
complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -l run -d 'Config automatically applied when the image is run. (ex: -run=\'{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}\')'
complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -a '(__fish_print_docker_containers all)' -d "Container"

# cp
complete -c docker -f -n '__fish_docker_no_subcommand' -a cp -d "Copy files/folders from a container's filesystem to the host path"
|
||||
|
||||
# diff
|
||||
complete -c docker -f -n '__fish_docker_no_subcommand' -a diff -d "Inspect changes on a container's filesystem"
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from diff' -a '(__fish_print_docker_containers all)' -d "Container"
|
||||
|
||||
# events
|
||||
complete -c docker -f -n '__fish_docker_no_subcommand' -a events -d 'Get real time events from the server'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l since -d 'Show previously created events and then stream.'
|
||||
|
||||
# export
|
||||
complete -c docker -f -n '__fish_docker_no_subcommand' -a export -d 'Stream the contents of a container as a tar archive'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from export' -a '(__fish_print_docker_containers all)' -d "Container"
|
||||
|
||||
# history
|
||||
complete -c docker -f -n '__fish_docker_no_subcommand' -a history -d 'Show the history of an image'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from history' -l no-trunc -d "Don't truncate output"
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from history' -s q -l quiet -d 'Only show numeric IDs'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from history' -a '(__fish_print_docker_images)' -d "Image"
|
||||
|
||||
# images
|
||||
complete -c docker -f -n '__fish_docker_no_subcommand' -a images -d 'List images'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s a -l all -d 'Show all images (by default filter out the intermediate image layers)'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from images' -l no-trunc -d "Don't truncate output"
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s q -l quiet -d 'Only show numeric IDs'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s t -l tree -d 'Output graph in tree format'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s v -l viz -d 'Output graph in graphviz format'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from images' -a '(__fish_print_docker_repositories)' -d "Repository"
|
||||
|
||||
# import
|
||||
complete -c docker -f -n '__fish_docker_no_subcommand' -a import -d 'Create a new filesystem image from the contents of a tarball'
|
||||
|
||||
# info
|
||||
complete -c docker -f -n '__fish_docker_no_subcommand' -a info -d 'Display system-wide information'
|
||||
|
||||
# inspect
|
||||
complete -c docker -f -n '__fish_docker_no_subcommand' -a inspect -d 'Return low-level information on a container'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -s f -l format -d 'Format the output using the given go template.'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_images)' -d "Image"
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_containers all)' -d "Container"
|
||||
|
||||
# kill
|
||||
complete -c docker -f -n '__fish_docker_no_subcommand' -a kill -d 'Kill a running container'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -s s -l signal -d 'Signal to send to the container'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -a '(__fish_print_docker_containers running)' -d "Container"
|
||||
|
||||
# load
|
||||
complete -c docker -f -n '__fish_docker_no_subcommand' -a load -d 'Load an image from a tar archive'
|
||||
|
||||
# login
|
||||
complete -c docker -f -n '__fish_docker_no_subcommand' -a login -d 'Register or Login to the docker registry server'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s e -l email -d 'Email'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s p -l password -d 'Password'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s u -l username -d 'Username'
|
||||
|
||||
# logs
|
||||
complete -c docker -f -n '__fish_docker_no_subcommand' -a logs -d 'Fetch the logs of a container'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -s f -l follow -d 'Follow log output'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -a '(__fish_print_docker_containers running)' -d "Container"
|
||||
|
||||
# port
|
||||
complete -c docker -f -n '__fish_docker_no_subcommand' -a port -d 'Lookup the public-facing port which is NAT-ed to PRIVATE_PORT'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from port' -a '(__fish_print_docker_containers running)' -d "Container"
|
||||
|
||||
# ps
|
||||
complete -c docker -f -n '__fish_docker_no_subcommand' -a ps -d 'List containers'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s a -l all -d 'Show all containers. Only running containers are shown by default.'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l before -d 'Show only container created before Id or Name, include non-running ones.'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s l -l latest -d 'Show only the latest created container, include non-running ones.'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s n -d 'Show n last created containers, include non-running ones.'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l no-trunc -d "Don't truncate output"
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s q -l quiet -d 'Only display numeric IDs'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s s -l size -d 'Display sizes'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l since -d 'Show only containers created since Id or Name, include non-running ones.'
|
||||
|
||||
# pull
|
||||
complete -c docker -f -n '__fish_docker_no_subcommand' -a pull -d 'Pull an image or a repository from the docker registry server'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -s t -l tag -d 'Download tagged image in repository'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -a '(__fish_print_docker_images)' -d "Image"
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -a '(__fish_print_docker_repositories)' -d "Repository"
|
||||
|
||||
# push
|
||||
complete -c docker -f -n '__fish_docker_no_subcommand' -a push -d 'Push an image or a repository to the docker registry server'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from push' -a '(__fish_print_docker_images)' -d "Image"
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from push' -a '(__fish_print_docker_repositories)' -d "Repository"
|
||||
|
||||
# restart
|
||||
complete -c docker -f -n '__fish_docker_no_subcommand' -a restart -d 'Restart a running container'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -s t -l time -d 'Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default=10'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -a '(__fish_print_docker_containers running)' -d "Container"
|
||||
|
||||
# rm
|
||||
complete -c docker -f -n '__fish_docker_no_subcommand' -a rm -d 'Remove one or more containers'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s f -l force -d 'Force removal of running container'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s l -l link -d 'Remove the specified link and not the underlying container'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s v -l volumes -d 'Remove the volumes associated to the container'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -a '(__fish_print_docker_containers stopped)' -d "Container"
|
||||
|
||||
# rmi
|
||||
complete -c docker -f -n '__fish_docker_no_subcommand' -a rmi -d 'Remove one or more images'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -s f -l force -d 'Force'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -a '(__fish_print_docker_images)' -d "Image"
|
||||
|
||||
# run
|
||||
complete -c docker -f -n '__fish_docker_no_subcommand' -a run -d 'Run a command in a new container'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s P -l publish-all -d 'Publish all exposed ports to the host interfaces'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s a -l attach -d 'Attach to stdin, stdout or stderr.'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s c -l cpu-shares -d 'CPU shares (relative weight)'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cidfile -d 'Write the container ID to the file'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s d -l detach -d 'Detached mode: Run container in the background, print new container id'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns -d 'Set custom dns servers'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s e -l env -d 'Set environment variables'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l entrypoint -d 'Overwrite the default entrypoint of the image'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l expose -d 'Expose a port from the container without publishing it to your host'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s h -l hostname -d 'Container host name'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s i -l interactive -d 'Keep stdin open even if not attached'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l link -d 'Add link to another container (name:alias)'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l lxc-conf -d 'Add custom lxc options -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s m -l memory -d 'Memory limit (format: <number><optional unit>, where unit = b, k, m or g)'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s n -l networking -d 'Enable networking for this container'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l name -d 'Assign a name to the container'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s p -l publish -d "Publish a container's port to the host (format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort) (use 'docker port' to see the actual mapping)"
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l privileged -d 'Give extended privileges to this container'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l rm -d 'Automatically remove the container when it exits (incompatible with -d)'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l sig-proxy -d 'Proxify all received signal to the process (even in non-tty mode)'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s t -l tty -d 'Allocate a pseudo-tty'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s u -l user -d 'Username or UID'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s v -l volume -d 'Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l volumes-from -d 'Mount volumes from the specified container(s)'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s w -l workdir -d 'Working directory inside the container'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -a '(__fish_print_docker_images)' -d "Image"
|
||||
|
||||
# save
|
||||
complete -c docker -f -n '__fish_docker_no_subcommand' -a save -d 'Save an image to a tar archive'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from save' -a '(__fish_print_docker_images)' -d "Image"
|
||||
|
||||
# search
|
||||
complete -c docker -f -n '__fish_docker_no_subcommand' -a search -d 'Search for an image in the docker index'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l no-trunc -d "Don't truncate output"
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from search' -s s -l stars -d 'Only displays with at least xxx stars'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l automated -d 'Only show automated builds'
|
||||
|
||||
# start
|
||||
complete -c docker -f -n '__fish_docker_no_subcommand' -a start -d 'Start a stopped container'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from start' -s a -l attach -d "Attach container's stdout/stderr and forward all signals to the process"
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from start' -s i -l interactive -d "Attach container's stdin"
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from start' -a '(__fish_print_docker_containers stopped)' -d "Container"
|
||||
|
||||
# stop
|
||||
complete -c docker -f -n '__fish_docker_no_subcommand' -a stop -d 'Stop a running container'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -s t -l time -d 'Number of seconds to wait for the container to stop before killing it.'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -a '(__fish_print_docker_containers running)' -d "Container"
|
||||
|
||||
# tag
|
||||
complete -c docker -f -n '__fish_docker_no_subcommand' -a tag -d 'Tag an image into a repository'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from tag' -s f -l force -d 'Force'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from tag' -a '(__fish_print_docker_images)' -d "Image"
|
||||
|
||||
# top
|
||||
complete -c docker -f -n '__fish_docker_no_subcommand' -a top -d 'Lookup the running processes of a container'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from top' -a '(__fish_print_docker_containers running)' -d "Container"
|
||||
|
||||
# version
|
||||
complete -c docker -f -n '__fish_docker_no_subcommand' -a version -d 'Show the docker version information'
|
||||
|
||||
# wait
|
||||
complete -c docker -f -n '__fish_docker_no_subcommand' -a wait -d 'Block until a container stops, then print its exit code'
|
||||
complete -c docker -A -f -n '__fish_seen_subcommand_from wait' -a '(__fish_print_docker_containers running)' -d "Container"
|
||||
|
||||
|
||||
@@ -1,410 +0,0 @@
#compdef docker
#
# zsh completion for docker (http://docker.com)
#
# version: 0.3.0
# github: https://github.com/felixr/docker-zsh-completion
#
# contributors:
#   - Felix Riedel
#   - Vincent Bernat
#
# license:
#
# Copyright (c) 2013, Felix Riedel
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of the <organization> nor the
#       names of its contributors may be used to endorse or promote products
#       derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

# Parse the column-aligned output of `docker ps`: locate column offsets from
# the header row, then emit one "ID:CREATED, IMAGE" entry per container (and
# one per container name); the trailing sed abbreviates the time units.
__parse_docker_list() {
    awk '
NR == 1 {
    idx=1;i=0;f[i]=0
    header=$0
    while ( match(header, /  ([A-Z]+|[A-Z]+ [A-Z]+)/) ) {
        idx += RSTART+1
        f[++i]=idx
        header = substr($0,idx)
    }
    f[++i]=999
}

NR > 1 '"$1"' {
    for(j=0;j<i;j++) {
        x[j] = substr($0, f[j], f[j+1]-f[j]-1)
        gsub(/[ ]+$/, "", x[j])
    }
    printf("%s:%7s, %s\n", x[0], x[3], x[1])
    if (x[6] != "") {
        split(x[6], names, /,/)
        for (name in names) printf("%s:%7s, %s\n", names[name], x[3], x[1])
    }
}
'| sed -e 's/ \([hdwm]\)\(inutes\|ays\|ours\|eeks\)/\1/'
}

__docker_stoppedcontainers() {
    local expl
    declare -a stoppedcontainers
    stoppedcontainers=(${(f)"$(_call_program commands docker ps -a | __parse_docker_list '&& / Exit/')"})
    _describe -t containers-stopped "Stopped Containers" stoppedcontainers "$@"
}

__docker_runningcontainers() {
    local expl
    declare -a containers

    containers=(${(f)"$(_call_program commands docker ps | __parse_docker_list)"})
    _describe -t containers-active "Running Containers" containers "$@"
}

__docker_containers () {
    __docker_stoppedcontainers "$@"
    __docker_runningcontainers "$@"
}

__docker_images () {
    local expl
    declare -a images
    images=(${(f)"$(_call_program commands docker images | awk '(NR > 1 && $1 != "<none>"){printf("%s", $1);if ($2 != "<none>") printf("\\:%s", $2); printf("\n")}')"})
    images=($images ${(f)"$(_call_program commands docker images | awk '(NR > 1){printf("%s:%-15s in %s\n", $3,$2,$1)}')"})
    _describe -t docker-images "Images" images
}

__docker_tags() {
    local expl
    declare -a tags
    tags=(${(f)"$(_call_program commands docker images | awk '(NR>1){print $2}'| sort | uniq)"})
    _describe -t docker-tags "tags" tags
}

__docker_repositories_with_tags() {
    if compset -P '*:'; then
        __docker_tags
    else
        __docker_repositories -qS ":"
    fi
}

__docker_search() {
    # declare -a dockersearch
    local cache_policy
    zstyle -s ":completion:${curcontext}:" cache-policy cache_policy
    if [[ -z "$cache_policy" ]]; then
        zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy
    fi

    local searchterm cachename
    searchterm="${words[$CURRENT]%/}"
    cachename=_docker-search-$searchterm

    local expl
    local -a result
    if ( [[ ${(P)+cachename} -eq 0 ]] || _cache_invalid ${cachename#_} ) \
        && ! _retrieve_cache ${cachename#_}; then
        _message "Searching for ${searchterm}..."
        result=(${(f)"$(_call_program commands docker search ${searchterm} | awk '(NR>2){print $1}')"})
        _store_cache ${cachename#_} result
    fi
    _wanted dockersearch expl 'Available images' compadd -a result
}

__docker_caching_policy()
{
    # oldp=( "$1"(Nmh+24) ) # 24 hours
    oldp=( "$1"(Nmh+1) ) # 1 hour
    (( $#oldp ))
}


__docker_repositories () {
    local expl
    declare -a repos
    repos=(${(f)"$(_call_program commands docker images | sed -e '1d' -e 's/[ ].*//' | sort | uniq)"})
    _describe -t docker-repos "Repositories" repos "$@"
}

__docker_commands () {
    # local -a _docker_subcommands
    local cache_policy

    zstyle -s ":completion:${curcontext}:" cache-policy cache_policy
    if [[ -z "$cache_policy" ]]; then
        zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy
    fi

    if ( [[ ${+_docker_subcommands} -eq 0 ]] || _cache_invalid docker_subcommands) \
        && ! _retrieve_cache docker_subcommands;
    then
        _docker_subcommands=(${${(f)"$(_call_program commands
        docker 2>&1 | sed -e '1,6d' -e '/^[ ]*$/d' -e 's/[ ]*\([^ ]\+\)\s*\([^ ].*\)/\1:\2/' )"}})
        _docker_subcommands=($_docker_subcommands 'help:Show help for a command')
        _store_cache docker_subcommands _docker_subcommands
    fi
    _describe -t docker-commands "docker command" _docker_subcommands
}

__docker_subcommand () {
    local -a _command_args
    case "$words[1]" in
        (attach)
            _arguments \
                '--no-stdin[Do not attach stdin]' \
                '--sig-proxy[Proxy all received signals]' \
                ':containers:__docker_runningcontainers'
            ;;
        (build)
            _arguments \
                '--no-cache[Do not use cache when building the image]' \
                '-q[Suppress verbose build output]' \
                '--rm[Remove intermediate containers after a successful build]' \
                '-t=-:repository:__docker_repositories_with_tags' \
                ':path or URL:_directories'
            ;;
        (commit)
            _arguments \
                '--author=-[Author]:author: ' \
                '-m=-[Commit message]:message: ' \
                '--run=-[Configuration automatically applied when the image is run]:configuration: ' \
                ':container:__docker_containers' \
                ':repository:__docker_repositories_with_tags'
            ;;
        (cp)
            _arguments \
                ':container:->container' \
                ':hostpath:_files'
            case $state in
                (container)
                    if compset -P '*:'; then
                        _files
                    else
                        __docker_containers -qS ":"
                    fi
                    ;;
            esac
            ;;
        (diff|export)
            _arguments '*:containers:__docker_containers'
            ;;
        (history)
            _arguments \
                '--no-trunc[Do not truncate output]' \
                '-q[Only show numeric IDs]' \
                '*:images:__docker_images'
            ;;
        (images)
            _arguments \
                '-a[Show all images]' \
                '--no-trunc[Do not truncate output]' \
                '-q[Only show numeric IDs]' \
                '--tree[Output graph in tree format]' \
                '--viz[Output graph in graphviz format]' \
                ':repository:__docker_repositories'
            ;;
        (inspect)
            _arguments \
                '--format=-[Format the output using the given go template]:template: ' \
                '*:containers:__docker_containers'
            ;;
        (import)
            _arguments \
                ':URL:(- http:// file://)' \
                ':repository:__docker_repositories_with_tags'
            ;;
        (info)
            ;;
        (insert)
            _arguments '1:containers:__docker_containers' \
                '2:URL:(http:// file://)' \
                '3:file:_files'
            ;;
        (kill)
            _arguments '*:containers:__docker_runningcontainers'
            ;;
        (load)
            ;;
        (login)
            _arguments \
                '-e=-[Email]:email: ' \
                '-p=-[Password]:password: ' \
                '-u=-[Username]:username: ' \
                ':server: '
            ;;
        (logs)
            _arguments \
                '-f[Follow log output]' \
                '*:containers:__docker_containers'
            ;;
        (port)
            _arguments \
                '1:containers:__docker_runningcontainers' \
                '2:port:_ports'
            ;;
        (start)
            _arguments \
                '-a[Attach container'"'"'s stdout/stderr and forward all signals]' \
                '-i[Attach container'"'"'s stdin]' \
                '*:containers:__docker_stoppedcontainers'
            ;;
        (rm)
            _arguments \
                '--link[Remove the specified link and not the underlying container]' \
                '-v[Remove the volumes associated with the container]' \
                '*:containers:__docker_stoppedcontainers'
            ;;
        (rmi)
            _arguments \
                '*:images:__docker_images'
            ;;
        (restart|stop)
            _arguments '-t=-[Number of seconds to try to stop for before killing the container]:seconds before killing:(1 5 10 30 60)' \
                '*:containers:__docker_runningcontainers'
            ;;
        (top)
            _arguments \
                '1:containers:__docker_runningcontainers' \
                '(-)*:: :->ps-arguments'
            case $state in
                (ps-arguments)
                    _ps
                    ;;
            esac

            ;;
        (ps)
            _arguments \
                '-a[Show all containers]' \
                '--before=-[Show only containers created before...]:containers:__docker_containers' \
                '-l[Show only the latest created container]' \
                '-n=-[Show n last created containers, including non-running ones]:n:(1 5 10 25 50)' \
                '--no-trunc[Do not truncate output]' \
                '-q[Only show numeric IDs]' \
                '-s[Display sizes]' \
                '--since=-[Show only containers created since...]:containers:__docker_containers'
            ;;
        (tag)
            _arguments \
                '-f[force]'\
                ':image:__docker_images'\
                ':repository:__docker_repositories_with_tags'
            ;;
        (run)
            _arguments \
                '-P[Publish all exposed ports to the host]' \
                '-a[Attach to stdin, stdout or stderr]' \
                '-c=-[CPU shares (relative weight)]:CPU shares:(0 10 100 200 500 800 1000)' \
                '--cidfile=-[Write the container ID to the file]:CID file:_files' \
                '-d[Detached mode: leave the container running in the background]' \
                '*--dns=-[Set custom dns servers]:dns server: ' \
                '*-e=-[Set environment variables]:environment variable: ' \
                '--entrypoint=-[Overwrite the default entrypoint of the image]:entry point: ' \
                '*--expose=-[Expose a port from the container without publishing it]: ' \
                '-h=-[Container host name]:hostname:_hosts' \
                '-i[Keep stdin open even if not attached]' \
                '--link=-[Add link to another container]:link:->link' \
                '--lxc-conf=-[Add custom lxc options]:lxc options: ' \
                '-m=-[Memory limit (in bytes)]:limit: ' \
                '--name=-[Container name]:name: ' \
                '*-p=-[Expose a container'"'"'s port to the host]:port:_ports' \
                '--privileged[Give extended privileges to this container]' \
                '--rm[Automatically remove the container when it exits]' \
                '--sig-proxy[Proxy all received signals]' \
                '-t[Allocate a pseudo-tty]' \
                '-u=-[Username or UID]:user:_users' \
                '*-v=-[Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)]:volume: '\
                '--volumes-from=-[Mount volumes from the specified container]:volume: ' \
                '-w=-[Working directory inside the container]:directory:_directories' \
                '(-):images:__docker_images' \
                '(-):command: _command_names -e' \
                '*::arguments: _normal'

            case $state in
                (link)
                    if compset -P '*:'; then
                        _wanted alias expl 'Alias' compadd -E ""
                    else
                        __docker_runningcontainers -qS ":"
                    fi
                    ;;
            esac

            ;;
        (pull|search)
            _arguments ':name:__docker_search'
            ;;
        (push)
            _arguments ':repository:__docker_repositories_with_tags'
            ;;
        (save)
            _arguments \
                ':images:__docker_images'
            ;;
        (wait)
            _arguments ':containers:__docker_runningcontainers'
            ;;
        (help)
            _arguments ':subcommand:__docker_commands'
            ;;
        (*)
            _message 'Unknown sub command'
    esac

}

_docker () {
    # Support for subservices, which allows for `compdef _docker docker-shell=_docker_containers`.
    # Based on /usr/share/zsh/functions/Completion/Unix/_git without support for `ret`.
    if [[ $service != docker ]]; then
        _call_function - _$service
        return
    fi

    local curcontext="$curcontext" state line
    typeset -A opt_args

    _arguments -C \
        '-H=-[tcp://host:port to bind/connect to]:socket: ' \
        '(-): :->command' \
        '(-)*:: :->option-or-argument'

    case $state in
        (command)
            __docker_commands
            ;;
        (option-or-argument)
            curcontext=${curcontext%:*:*}:docker-$words[1]:
            __docker_subcommand
            ;;
    esac
}

_docker "$@"
96
contrib/crashTest.go
Normal file
@@ -0,0 +1,96 @@
// crashTest repeatedly starts a docker daemon, hammers it with
// "docker run" invocations for 20 seconds, then kills the daemon
// and loops, looking for crashes in the daemon restart path.
package main

import (
	"io"
	"log"
	"os"
	"os/exec"
	"time"
)

const DOCKER_PATH = "/home/creack/dotcloud/docker/docker/docker"

func runDaemon() (*exec.Cmd, error) {
	os.Remove("/var/run/docker.pid")
	cmd := exec.Command(DOCKER_PATH, "-d")
	outPipe, err := cmd.StdoutPipe()
	if err != nil {
		return nil, err
	}
	errPipe, err := cmd.StderrPipe()
	if err != nil {
		return nil, err
	}
	if err := cmd.Start(); err != nil {
		return nil, err
	}
	go func() {
		io.Copy(os.Stdout, outPipe)
	}()
	go func() {
		io.Copy(os.Stderr, errPipe)
	}()
	return cmd, nil
}

func crashTest() error {
	if err := exec.Command("/bin/bash", "-c", "while true; do true; done").Start(); err != nil {
		return err
	}

	for {
		daemon, err := runDaemon()
		if err != nil {
			return err
		}
		// time.Sleep(5000 * time.Millisecond)
		var stop bool
		go func() error {
			stop = false
			for i := 0; i < 100 && !stop; i++ {
				func() error {
					cmd := exec.Command(DOCKER_PATH, "run", "base", "echo", "hello", "world")
					log.Printf("%d", i)
					outPipe, err := cmd.StdoutPipe()
					if err != nil {
						return err
					}
					inPipe, err := cmd.StdinPipe()
					if err != nil {
						return err
					}
					if err := cmd.Start(); err != nil {
						return err
					}
					go func() {
						io.Copy(os.Stdout, outPipe)
					}()
					// Expecting error, do not check
					inPipe.Write([]byte("hello world!!!!!\n"))
					go inPipe.Write([]byte("hello world!!!!!\n"))
					go inPipe.Write([]byte("hello world!!!!!\n"))
					inPipe.Close()

					if err := cmd.Wait(); err != nil {
						return err
					}
					outPipe.Close()
					return nil
				}()
			}
			return nil
		}()
		time.Sleep(20 * time.Second)
		stop = true
		if err := daemon.Process.Kill(); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	if err := crashTest(); err != nil {
		log.Println(err)
	}
}
@@ -1,11 +0,0 @@
Desktop Integration
===================

The ./contrib/desktop-integration directory contains examples of typical dockerized
desktop applications.

Examples
========

* Data container: ./data/Dockerfile creates a data image sharing /data volume
* Iceweasel: ./iceweasel/Dockerfile shows a way to dockerize a common multimedia application
@@ -1,38 +0,0 @@
# VERSION:        0.1
# DESCRIPTION:    Create data image sharing /data volume
# AUTHOR:         Daniel Mizyrycki <daniel@dotcloud.com>
# COMMENTS:
#   This image is used as a base for all data containers.
#   /data volume is owned by sysadmin.
# USAGE:
#   # Download data Dockerfile
#   wget http://raw.githubusercontent.com/dotcloud/docker/master/contrib/desktop-integration/data/Dockerfile
#
#   # Build data image
#   docker build -t data .
#
#   # Create a data container. (eg: iceweasel-data)
#   docker run --name iceweasel-data data true
#
#   # List data from it
#   docker run --volumes-from iceweasel-data busybox ls -al /data

docker-version 0.6.5

# Smallest base image, just to launch a container
FROM busybox
MAINTAINER Daniel Mizyrycki <daniel@docker.com>

# Create a regular user
RUN echo 'sysadmin:x:1000:1000::/data:/bin/sh' >> /etc/passwd
RUN echo 'sysadmin:x:1000:' >> /etc/group

# Create directory for that user
RUN mkdir /data
RUN chown sysadmin.sysadmin /data

# Add content to /data. This will keep sysadmin ownership
RUN touch /data/init_volume

# Create /data volume
VOLUME /data
@@ -1,41 +0,0 @@
# VERSION:        0.7
# DESCRIPTION:    Create iceweasel container with its dependencies
# AUTHOR:         Daniel Mizyrycki <daniel@dotcloud.com>
# COMMENTS:
#   This file describes how to build an Iceweasel container with all
#   dependencies installed. It uses native X11 unix socket and alsa
#   sound devices. Tested on Debian 7.2
# USAGE:
#   # Download Iceweasel Dockerfile
#   wget http://raw.githubusercontent.com/dotcloud/docker/master/contrib/desktop-integration/iceweasel/Dockerfile
#
#   # Build iceweasel image
#   docker build -t iceweasel .
#
#   # Run stateful data-on-host iceweasel. For ephemeral, remove -v /data/iceweasel:/data
#   docker run -v /data/iceweasel:/data -v /tmp/.X11-unix:/tmp/.X11-unix \
#       -v /dev/snd:/dev/snd --lxc-conf='lxc.cgroup.devices.allow = c 116:* rwm' \
#       -e DISPLAY=unix$DISPLAY iceweasel
#
#   # To run stateful dockerized data containers
#   docker run --volumes-from iceweasel-data -v /tmp/.X11-unix:/tmp/.X11-unix \
#       -v /dev/snd:/dev/snd --lxc-conf='lxc.cgroup.devices.allow = c 116:* rwm' \
#       -e DISPLAY=unix$DISPLAY iceweasel

docker-version 0.6.5

# Base docker image
FROM debian:wheezy
MAINTAINER Daniel Mizyrycki <daniel@docker.com>

# Install Iceweasel and "sudo"
RUN apt-get update && apt-get install -y iceweasel sudo

# create sysadmin account
RUN useradd -m -d /data -p saIVpsc0EVTwA sysadmin
RUN sed -Ei 's/sudo:x:27:/sudo:x:27:sysadmin/' /etc/group
RUN sed -Ei 's/(\%sudo\s+ALL=\(ALL\:ALL\) )ALL/\1 NOPASSWD:ALL/' /etc/sudoers

# Autorun iceweasel. -no-remote is necessary to create a new container, as
# iceweasel appears to communicate with itself through X11.
CMD ["/usr/bin/sudo", "-u", "sysadmin", "-H", "-E", "/usr/bin/iceweasel", "-no-remote"]
68
contrib/docker-build/README
Normal file
@@ -0,0 +1,68 @@
# docker-build: build your software with docker

## Description

docker-build is a script to build docker images from source. It will be deprecated once the 'build' feature is incorporated into docker itself (See https://github.com/dotcloud/docker/issues/278)

Author: Solomon Hykes <solomon@dotcloud.com>


## Install

docker-build requires:

1) A reasonably recent Python setup (tested on 2.7.2).

2) A running docker daemon at version 0.1.4 or more recent (http://www.docker.io/gettingstarted)


## Usage

First create a valid Changefile, which defines a sequence of changes to apply to a base image.

    $ cat Changefile
    # Start build from a known base image
    from base:ubuntu-12.10
    # Update ubuntu sources
    run echo 'deb http://archive.ubuntu.com/ubuntu quantal main universe multiverse' > /etc/apt/sources.list
    run apt-get update
    # Install system packages
    run DEBIAN_FRONTEND=noninteractive apt-get install -y -q git
    run DEBIAN_FRONTEND=noninteractive apt-get install -y -q curl
    run DEBIAN_FRONTEND=noninteractive apt-get install -y -q golang
    # Insert files from the host (./myscript must be present in the current directory)
    copy myscript /usr/local/bin/myscript


Run docker-build, and pass the contents of your Changefile as standard input.

    $ IMG=$(./docker-build < Changefile)

This will take a while: for each line of the changefile, docker-build will (see the sketch after this list):

1. Create a new container to execute the given command or insert the given file
2. Wait for the container to complete execution
3. Commit the resulting changes as a new image
4. Use the resulting image as the input of the next step
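
Under the hood, each step boils down to a handful of plain docker commands. The following sketch is illustrative only (it is not part of the tool); it shows roughly what a single 'run' step does by hand:

    # Run the command in a throwaway container of the current base image
    CID=$(docker run -d base:ubuntu-12.10 /bin/sh -c 'apt-get update')
    # Block until the command exits
    docker wait $CID
    # Commit the container's filesystem changes as a new image;
    # that image becomes the base for the next step
    IMG=$(docker commit $CID)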

If all the steps succeed, the result will be an image containing the combined results of each build step.
You can trace back those build steps by inspecting the image's history:

    $ docker history $IMG
    ID                  CREATED             CREATED BY
    1e9e2045de86        A few seconds ago   /bin/sh -c cat > /usr/local/bin/myscript; chmod +x /usr/local/bin/git
    77db140aa62a        A few seconds ago   /bin/sh -c DEBIAN_FRONTEND=noninteractive apt-get install -y -q golang
    77db140aa62a        A few seconds ago   /bin/sh -c DEBIAN_FRONTEND=noninteractive apt-get install -y -q curl
    77db140aa62a        A few seconds ago   /bin/sh -c DEBIAN_FRONTEND=noninteractive apt-get install -y -q git
    83e85d155451        A few seconds ago   /bin/sh -c apt-get update
    bfd53b36d9d3        A few seconds ago   /bin/sh -c echo 'deb http://archive.ubuntu.com/ubuntu quantal main universe multiverse' > /etc/apt/sources.list
    base                2 weeks ago         /bin/bash
    27cf78414709        2 weeks ago


Note that your build started from 'base', as instructed by your Changefile. But that base image itself seems to have been built in 2 steps - hence the extra step in the history.


You can use this build technique to create any image you want: a database, a web application, or anything else that can be built by a sequence of unix commands - in other words, anything.
104
contrib/docker-build/docker-build
Executable file
@@ -0,0 +1,104 @@
#!/usr/bin/env python

# docker-build is a script to build docker images from source.
# It will be deprecated once the 'build' feature is incorporated into docker itself.
# (See https://github.com/dotcloud/docker/issues/278)
#
# Author: Solomon Hykes <solomon@dotcloud.com>



# First create a valid Changefile, which defines a sequence of changes to apply to a base image.
#
# $ cat Changefile
# # Start build from a known base image
# from base:ubuntu-12.10
# # Update ubuntu sources
# run echo 'deb http://archive.ubuntu.com/ubuntu quantal main universe multiverse' > /etc/apt/sources.list
# run apt-get update
# # Install system packages
# run DEBIAN_FRONTEND=noninteractive apt-get install -y -q git
# run DEBIAN_FRONTEND=noninteractive apt-get install -y -q curl
# run DEBIAN_FRONTEND=noninteractive apt-get install -y -q golang
# # Insert files from the host (./myscript must be present in the current directory)
# copy myscript /usr/local/bin/myscript
#
#
# Run docker-build, and pass the contents of your Changefile as standard input.
#
# $ IMG=$(./docker-build < Changefile)
#
# This will take a while: for each line of the changefile, docker-build will:
#
# 1. Create a new container to execute the given command or insert the given file
# 2. Wait for the container to complete execution
# 3. Commit the resulting changes as a new image
# 4. Use the resulting image as the input of the next step


import sys
import subprocess
import json
import hashlib

def docker(args, stdin=None):
    print "# docker " + " ".join(args)
    p = subprocess.Popen(["docker"] + list(args), stdin=stdin, stdout=subprocess.PIPE)
    return p.stdout

def image_exists(img):
    return docker(["inspect", img]).read().strip() != ""

def run_and_commit(img_in, cmd, stdin=None):
    run_id = docker(["run"] + (["-i", "-a", "stdin"] if stdin else ["-d"]) + [img_in, "/bin/sh", "-c", cmd], stdin=stdin).read().rstrip()
    print "---> Waiting for " + run_id
    result=int(docker(["wait", run_id]).read().rstrip())
    if result != 0:
        print "!!! '{}' returned non-zero exit code '{}'. Aborting.".format(cmd, result)
        sys.exit(1)
    return docker(["commit", run_id]).read().rstrip()

def insert(base, src, dst):
    print "COPY {} to {} in {}".format(src, dst, base)
    if dst == "":
        raise Exception("Missing destination path")
    stdin = file(src)
    stdin.seek(0)
    return run_and_commit(base, "cat > {0}; chmod +x {0}".format(dst), stdin=stdin)


def main():
    base=""
    steps = []
    try:
        for line in sys.stdin.readlines():
            line = line.strip()
            # Skip comments and empty lines
            if line == "" or line[0] == "#":
                continue
            op, param = line.split(" ", 1)
            if op == "from":
                print "FROM " + param
                base = param
                steps.append(base)
            elif op == "run":
                print "RUN " + param
                result = run_and_commit(base, param)
                steps.append(result)
                base = result
                print "===> " + base
            elif op == "copy":
                src, dst = param.split(" ", 1)
                result = insert(base, src, dst)
                steps.append(result)
                base = result
                print "===> " + base
            else:
                print "Skipping unknown op " + op
    except:
        docker(["rmi"] + steps[1:])
        raise
    print base

if __name__ == "__main__":
    main()
11
contrib/docker-build/example.changefile
Normal file
@@ -0,0 +1,11 @@
# Start build from a known base image
from base:ubuntu-12.10
# Update ubuntu sources
run echo 'deb http://archive.ubuntu.com/ubuntu quantal main universe multiverse' > /etc/apt/sources.list
run apt-get update
# Install system packages
run DEBIAN_FRONTEND=noninteractive apt-get install -y -q git
run DEBIAN_FRONTEND=noninteractive apt-get install -y -q curl
run DEBIAN_FRONTEND=noninteractive apt-get install -y -q golang
# Insert files from the host (./myscript must be present in the current directory)
copy myscript /usr/local/bin/myscript
@@ -1,170 +0,0 @@
package main

import (
	"flag"
	"fmt"
	"github.com/docker/docker/daemon/graphdriver/devmapper"
	"os"
	"path"
	"sort"
	"strconv"
	"strings"
)

func usage() {
	fmt.Fprintf(os.Stderr, "Usage: %s <flags> [status] | [list] | [device id] | [resize new-pool-size] | [snap new-id base-id] | [remove id] | [mount id mountpoint]\n", os.Args[0])
	flag.PrintDefaults()
	os.Exit(1)
}

func byteSizeFromString(arg string) (int64, error) {
	digits := ""
	rest := ""
	last := strings.LastIndexAny(arg, "0123456789")
	if last >= 0 {
		digits = arg[:last+1]
		rest = arg[last+1:]
	}

	val, err := strconv.ParseInt(digits, 10, 64)
	if err != nil {
		return val, err
	}

	rest = strings.ToLower(strings.TrimSpace(rest))

	var multiplier int64 = 1
	switch rest {
	case "":
		multiplier = 1
	case "k", "kb":
		multiplier = 1024
	case "m", "mb":
		multiplier = 1024 * 1024
	case "g", "gb":
		multiplier = 1024 * 1024 * 1024
	case "t", "tb":
		multiplier = 1024 * 1024 * 1024 * 1024
	default:
		return 0, fmt.Errorf("Unknown size unit: %s", rest)
	}

	return val * multiplier, nil
}

func main() {
	root := flag.String("r", "/var/lib/docker", "Docker root dir")
	flDebug := flag.Bool("D", false, "Debug mode")

	flag.Parse()

	if *flDebug {
		os.Setenv("DEBUG", "1")
	}

	if flag.NArg() < 1 {
		usage()
	}

	args := flag.Args()

	home := path.Join(*root, "devicemapper")
	devices, err := devmapper.NewDeviceSet(home, false)
	if err != nil {
		fmt.Println("Can't initialize device mapper: ", err)
		os.Exit(1)
	}

	switch args[0] {
	case "status":
		status := devices.Status()
		fmt.Printf("Pool name: %s\n", status.PoolName)
		fmt.Printf("Data Loopback file: %s\n", status.DataLoopback)
		fmt.Printf("Metadata Loopback file: %s\n", status.MetadataLoopback)
		fmt.Printf("Sector size: %d\n", status.SectorSize)
		fmt.Printf("Data use: %d of %d (%.1f %%)\n", status.Data.Used, status.Data.Total, 100.0*float64(status.Data.Used)/float64(status.Data.Total))
		fmt.Printf("Metadata use: %d of %d (%.1f %%)\n", status.Metadata.Used, status.Metadata.Total, 100.0*float64(status.Metadata.Used)/float64(status.Metadata.Total))
		break
	case "list":
		ids := devices.List()
		sort.Strings(ids)
		for _, id := range ids {
			fmt.Println(id)
		}
		break
	case "device":
		if flag.NArg() < 2 {
			usage()
		}
		status, err := devices.GetDeviceStatus(args[1])
		if err != nil {
			fmt.Println("Can't get device info: ", err)
			os.Exit(1)
		}
		fmt.Printf("Id: %d\n", status.DeviceId)
		fmt.Printf("Size: %d\n", status.Size)
		fmt.Printf("Transaction Id: %d\n", status.TransactionId)
		fmt.Printf("Size in Sectors: %d\n", status.SizeInSectors)
		fmt.Printf("Mapped Sectors: %d\n", status.MappedSectors)
		fmt.Printf("Highest Mapped Sector: %d\n", status.HighestMappedSector)
		break
	case "resize":
		if flag.NArg() < 2 {
			usage()
		}

		size, err := byteSizeFromString(args[1])
		if err != nil {
			fmt.Println("Invalid size: ", err)
			os.Exit(1)
		}

		err = devices.ResizePool(size)
		if err != nil {
			fmt.Println("Error resizing pool: ", err)
			os.Exit(1)
		}

		break
	case "snap":
		if flag.NArg() < 3 {
			usage()
		}

		err := devices.AddDevice(args[1], args[2])
		if err != nil {
			fmt.Println("Can't create snap device: ", err)
			os.Exit(1)
		}
		break
	case "remove":
		if flag.NArg() < 2 {
			usage()
		}

		err := devices.RemoveDevice(args[1])
		if err != nil {
			fmt.Println("Can't remove device: ", err)
			os.Exit(1)
		}
		break
	case "mount":
		if flag.NArg() < 3 {
			usage()
		}

		err := devices.MountDevice(args[1], args[2], false)
		if err != nil {
			fmt.Println("Can't mount device: ", err)
			os.Exit(1)
		}
		break
	default:
		fmt.Printf("Unknown command %s\n", args[0])
		usage()

		os.Exit(1)
	}

	return
}
@@ -1,27 +0,0 @@
#
# This Dockerfile will create an image that allows you to generate upstart and
# systemd scripts (more to come)
#
# docker-version 0.6.2
#

FROM ubuntu:12.10
MAINTAINER Guillaume J. Charmes <guillaume@docker.com>

RUN apt-get update && apt-get install -y wget git mercurial

# Install Go
RUN wget --no-check-certificate https://go.googlecode.com/files/go1.1.2.linux-amd64.tar.gz -O go-1.1.2.tar.gz
RUN tar -xzvf go-1.1.2.tar.gz && mv /go /goroot
RUN mkdir /go

ENV GOROOT /goroot
ENV GOPATH /go
ENV PATH $GOROOT/bin:$PATH

RUN go get github.com/docker/docker && cd /go/src/github.com/docker/docker && git checkout v0.6.3
ADD manager.go /manager/
RUN cd /manager && go build -o /usr/bin/manager

ENTRYPOINT ["/usr/bin/manager"]
@@ -1,4 +0,0 @@
FROM busybox
MAINTAINER Guillaume J. Charmes <guillaume@docker.com>
ADD manager /usr/bin/
ENTRYPOINT ["/usr/bin/manager"]
@@ -1,130 +0,0 @@
package main

import (
	"bytes"
	"encoding/json"
	"flag"
	"fmt"
	"github.com/docker/docker"
	"os"
	"strings"
	"text/template"
)

var templates = map[string]string{

	"upstart": `description "{{.description}}"
author "{{.author}}"
start on filesystem and started lxc-net and started docker
stop on runlevel [!2345]
respawn
exec /home/vagrant/goroot/bin/docker start -a {{.container_id}}
`,

	"systemd": `[Unit]
Description={{.description}}
Author={{.author}}
After=docker.service

[Service]
Restart=always
ExecStart=/usr/bin/docker start -a {{.container_id}}
ExecStop=/usr/bin/docker stop -t 2 {{.container_id}}

[Install]
WantedBy=local.target
`,
}

func main() {
	// Parse command line for custom options
	kind := flag.String("t", "upstart", "Type of manager requested")
	author := flag.String("a", "<none>", "Author of the image")
	description := flag.String("d", "<none>", "Description of the image")
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "\nUsage: manager <container id>\n\n")
		flag.PrintDefaults()
	}
	flag.Parse()

	// We require at least the container ID
	if flag.NArg() != 1 {
		println(flag.NArg())
		flag.Usage()
		return
	}

	// Check that the requested process manager is supported
	if _, exists := templates[*kind]; !exists {
		panic("Unknown script template")
	}

	// Load the requested template
	tpl, err := template.New("processManager").Parse(templates[*kind])
	if err != nil {
		panic(err)
	}

	// Create stdout/stderr buffers
	bufOut := bytes.NewBuffer(nil)
	bufErr := bytes.NewBuffer(nil)

	// Instantiate the Docker CLI
	cli := docker.NewDockerCli(nil, bufOut, bufErr, "unix", "/var/run/docker.sock", false, nil)
	// Retrieve the container info
	if err := cli.CmdInspect(flag.Arg(0)); err != nil {
		// As of docker v0.6.3, CmdInspect always returns nil
		panic(err)
	}

	// If there is nothing in the error buffer, then the Docker daemon is there and the container has been found
	if bufErr.Len() == 0 {
		// Unmarshal the resulting container data
		c := []*docker.Container{{}}
		if err := json.Unmarshal(bufOut.Bytes(), &c); err != nil {
			panic(err)
		}
		// Reset the buffers
		bufOut.Reset()
		bufErr.Reset()
		// Retrieve the info of the linked image
		if err := cli.CmdInspect(c[0].Image); err != nil {
			panic(err)
		}
		// If there is nothing in the error buffer, then the image has been found.
		if bufErr.Len() == 0 {
			// Unmarshal the resulting image data
			img := []*docker.Image{{}}
			if err := json.Unmarshal(bufOut.Bytes(), &img); err != nil {
				panic(err)
			}
			// If no author has been set, use the one from the image
			if *author == "<none>" && img[0].Author != "" {
				*author = strings.Replace(img[0].Author, "\"", "", -1)
			}
			// If no description has been set, use the comment from the image
			if *description == "<none>" && img[0].Comment != "" {
				*description = strings.Replace(img[0].Comment, "\"", "", -1)
			}
		}
	}

	// Old version: write the resulting script to a file
	// f, err := os.OpenFile(kind, os.O_CREATE|os.O_WRONLY, 0755)
	// if err != nil {
	//	panic(err)
	// }
	// defer f.Close()

	// Create a map with needed data
	data := map[string]string{
		"author":       *author,
		"description":  *description,
		"container_id": flag.Arg(0),
	}

	// Process the template and output it on Stdout
	if err := tpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}
@@ -1,53 +0,0 @@
#!/bin/sh
set -e

usage() {
	echo >&2 "usage: $0 [-a author] [-d description] container [manager]"
	echo >&2 "   ie: $0 -a 'John Smith' 4ec9612a37cd systemd"
	echo >&2 "   ie: $0 -d 'Super Cool System' 4ec9612a37cd # defaults to upstart"
	exit 1
}

auth='<none>'
desc='<none>'
have_auth=
have_desc=
while getopts a:d: opt; do
	case "$opt" in
		a)
			auth="$OPTARG"
			have_auth=1
			;;
		d)
			desc="$OPTARG"
			have_desc=1
			;;
	esac
done
shift $(($OPTIND - 1))

[ $# -ge 1 -a $# -le 2 ] || usage

cid="$1"
script="${2:-upstart}"
if [ ! -e "manager/$script" ]; then
	echo >&2 "Error: manager type '$script' is unknown (PRs always welcome!)."
	echo >&2 'The currently supported types are:'
	echo >&2 "  $(cd manager && echo *)"
	exit 1
fi

# TODO https://github.com/docker/docker/issues/734 (docker inspect formatting)
#if command -v docker > /dev/null 2>&1; then
#	image="$(docker inspect -f '{{.Image}}' "$cid")"
#	if [ "$image" ]; then
#		if [ -z "$have_auth" ]; then
#			auth="$(docker inspect -f '{{.Author}}' "$image")"
#		fi
#		if [ -z "$have_desc" ]; then
#			desc="$(docker inspect -f '{{.Comment}}' "$image")"
#		fi
#	fi
#fi

exec "manager/$script" "$cid" "$auth" "$desc"
@@ -1,20 +0,0 @@
#!/bin/sh
set -e

cid="$1"
auth="$2"
desc="$3"

cat <<-EOF
	[Unit]
	Description=$desc
	Author=$auth
	After=docker.service

	[Service]
	ExecStart=/usr/bin/docker start -a $cid
	ExecStop=/usr/bin/docker stop -t 2 $cid

	[Install]
	WantedBy=local.target
EOF
@@ -1,15 +0,0 @@
#!/bin/sh
set -e

cid="$1"
auth="$2"
desc="$3"

cat <<-EOF
	description "$(echo "$desc" | sed 's/"/\\"/g')"
	author "$(echo "$auth" | sed 's/"/\\"/g')"
	start on filesystem and started lxc-net and started docker
	stop on runlevel [!2345]
	respawn
	exec /usr/bin/docker start -a "$cid"
EOF
@@ -1,13 +0,0 @@
# /etc/conf.d/docker: config file for /etc/init.d/docker

# where the docker daemon output gets piped
#DOCKER_LOGFILE="/var/log/docker.log"

# where docker's pid gets stored
#DOCKER_PIDFILE="/run/docker.pid"

# where the docker daemon itself is run from
#DOCKER_BINARY="/usr/bin/docker"

# any other random options you want to pass to docker
DOCKER_OPTS=""
@@ -1,34 +0,0 @@
#!/sbin/runscript
# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# $Header: $

DOCKER_LOGFILE=${DOCKER_LOGFILE:-/var/log/${SVCNAME}.log}
DOCKER_PIDFILE=${DOCKER_PIDFILE:-/run/${SVCNAME}.pid}
DOCKER_BINARY=${DOCKER_BINARY:-/usr/bin/docker}
DOCKER_OPTS=${DOCKER_OPTS:-}

start() {
	checkpath -f -m 0644 -o root:docker "$DOCKER_LOGFILE"

	ulimit -n 1048576
	ulimit -u 1048576

	ebegin "Starting docker daemon"
	start-stop-daemon --start --background \
		--exec "$DOCKER_BINARY" \
		--pidfile "$DOCKER_PIDFILE" \
		--stdout "$DOCKER_LOGFILE" \
		--stderr "$DOCKER_LOGFILE" \
		-- -d -p "$DOCKER_PIDFILE" \
		$DOCKER_OPTS
	eend $?
}

stop() {
	ebegin "Stopping docker daemon"
	start-stop-daemon --stop \
		--exec "$DOCKER_BINARY" \
		--pidfile "$DOCKER_PIDFILE"
	eend $?
}
@@ -1,2 +0,0 @@
Lokesh Mandvekar <lsm5@fedoraproject.org> (@lsm5)
Brandon Philips <brandon.philips@coreos.com> (@philips)
@@ -1,13 +0,0 @@
[Unit]
Description=Docker Application Container Engine
Documentation=http://docs.docker.com
After=network.target docker.socket
Requires=docker.socket

[Service]
ExecStart=/usr/bin/docker -d -H fd://
LimitNOFILE=1048576
LimitNPROC=1048576

[Install]
Also=docker.socket
@@ -1,11 +0,0 @@
[Unit]
Description=Docker Socket for the API

[Socket]
ListenStream=/var/run/docker.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker

[Install]
WantedBy=sockets.target
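For context, the docker.service/docker.socket pair above uses standard systemd socket activation: systemd binds /var/run/docker.sock itself, and `-H fd://` tells the daemon to serve the already-listening socket it inherits as file descriptor 3. A minimal Go sketch of the receiving side of that protocol (an illustration of the mechanism, not Docker's actual code):

```go
package main

import (
	"fmt"
	"net"
	"os"
)

func main() {
	// Under socket activation, systemd sets LISTEN_FDS and passes the
	// first listening socket as fd 3 (SD_LISTEN_FDS_START).
	if os.Getenv("LISTEN_FDS") != "1" {
		fmt.Fprintln(os.Stderr, "expected exactly one socket from systemd")
		os.Exit(1)
	}
	f := os.NewFile(3, "docker.sock")
	l, err := net.FileListener(f)
	if err != nil {
		panic(err)
	}
	defer l.Close()
	for {
		conn, err := l.Accept()
		if err != nil {
			panic(err)
		}
		conn.Close() // a real daemon would hand the connection to its API server
	}
}
```

One benefit of this split is that the daemon can be restarted without the listening socket ever going away, since systemd keeps it open.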
@@ -1,141 +0,0 @@
#!/bin/sh
set -e

### BEGIN INIT INFO
# Provides:           docker
# Required-Start:     $syslog $remote_fs
# Required-Stop:      $syslog $remote_fs
# Should-Start:       cgroupfs-mount cgroup-lite
# Should-Stop:        cgroupfs-mount cgroup-lite
# Default-Start:      2 3 4 5
# Default-Stop:       0 1 6
# Short-Description:  Create lightweight, portable, self-sufficient containers.
# Description:
#  Docker is an open-source project to easily create lightweight, portable,
#  self-sufficient containers from any application. The same container that a
#  developer builds and tests on a laptop can run at scale, in production, on
#  VMs, bare metal, OpenStack clusters, public clouds and more.
### END INIT INFO

export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin

BASE=$(basename $0)

# modify these in /etc/default/$BASE (/etc/default/docker)
DOCKER=/usr/bin/$BASE
# This is the pid file managed by docker itself
DOCKER_PIDFILE=/var/run/$BASE.pid
# This is the pid file created/managed by start-stop-daemon
DOCKER_SSD_PIDFILE=/var/run/$BASE-ssd.pid
DOCKER_LOGFILE=/var/log/$BASE.log
DOCKER_OPTS=
DOCKER_DESC="Docker"

# Get lsb functions
. /lib/lsb/init-functions

if [ -f /etc/default/$BASE ]; then
	. /etc/default/$BASE
fi

# see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it)
if [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; then
	log_failure_msg "$DOCKER_DESC is managed via upstart, try using service $BASE $1"
	exit 1
fi

# Check docker is present
if [ ! -x $DOCKER ]; then
	log_failure_msg "$DOCKER not present or not executable"
	exit 1
fi

fail_unless_root() {
	if [ "$(id -u)" != '0' ]; then
		log_failure_msg "$DOCKER_DESC must be run as root"
		exit 1
	fi
}

cgroupfs_mount() {
	# see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount
	if grep -v '^#' /etc/fstab | grep -q cgroup \
		|| [ ! -e /proc/cgroups ] \
		|| [ ! -d /sys/fs/cgroup ]; then
		return
	fi
	if ! mountpoint -q /sys/fs/cgroup; then
		mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup
	fi
	(
		cd /sys/fs/cgroup
		for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do
			mkdir -p $sys
			if ! mountpoint -q $sys; then
				if ! mount -n -t cgroup -o $sys cgroup $sys; then
					rmdir $sys || true
				fi
			fi
		done
	)
}

case "$1" in
	start)
		fail_unless_root

		cgroupfs_mount

		touch "$DOCKER_LOGFILE"
		chgrp docker "$DOCKER_LOGFILE"

		ulimit -n 1048576
		if [ "$BASH" ]; then
			ulimit -u 1048576
		else
			ulimit -p 1048576
		fi

		log_begin_msg "Starting $DOCKER_DESC: $BASE"
		start-stop-daemon --start --background \
			--no-close \
			--exec "$DOCKER" \
			--pidfile "$DOCKER_SSD_PIDFILE" \
			--make-pidfile \
			-- \
				-d -p "$DOCKER_PIDFILE" \
				$DOCKER_OPTS \
					>> "$DOCKER_LOGFILE" 2>&1
		log_end_msg $?
		;;

	stop)
		fail_unless_root
		log_begin_msg "Stopping $DOCKER_DESC: $BASE"
		start-stop-daemon --stop --pidfile "$DOCKER_SSD_PIDFILE"
		log_end_msg $?
		;;

	restart)
		fail_unless_root
		docker_pid=`cat "$DOCKER_SSD_PIDFILE" 2>/dev/null`
		[ -n "$docker_pid" ] \
			&& ps -p $docker_pid > /dev/null 2>&1 \
			&& $0 stop
		$0 start
		;;

	force-reload)
		fail_unless_root
		$0 restart
		;;

	status)
		status_of_proc -p "$DOCKER_SSD_PIDFILE" "$DOCKER" "$DOCKER_DESC"
		;;

	*)
		echo "Usage: $0 {start|stop|restart|status}"
		exit 1
		;;
esac
@@ -1,13 +0,0 @@
# Docker Upstart and SysVinit configuration file

# Customize location of Docker binary (especially for development testing).
#DOCKER="/usr/local/bin/docker"

# Use DOCKER_OPTS to modify the daemon startup options.
#DOCKER_OPTS="--dns 8.8.8.8 --dns 8.8.4.4"

# If you need Docker to use an HTTP proxy, it can also be specified here.
#export http_proxy="http://127.0.0.1:3128/"

# This is also a handy place to tweak where Docker's temporary files go.
#export TMPDIR="/mnt/bigdrive/docker-tmp"
@@ -1,130 +0,0 @@
#!/bin/sh
#
# /etc/rc.d/init.d/docker
#
# Daemon for docker.com
#
# chkconfig: 2345 95 95
# description: Daemon for docker.com

### BEGIN INIT INFO
# Provides:       docker
# Required-Start: $network cgconfig
# Required-Stop:
# Should-Start:
# Should-Stop:
# Default-Start:  2 3 4 5
# Default-Stop:   0 1 6
# Short-Description: start and stop docker
# Description: Daemon for docker.com
### END INIT INFO

# Source function library.
. /etc/rc.d/init.d/functions

prog="docker"
exec="/usr/bin/$prog"
pidfile="/var/run/$prog.pid"
lockfile="/var/lock/subsys/$prog"
logfile="/var/log/$prog"

[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog

prestart() {
	service cgconfig status > /dev/null

	if [[ $? != 0 ]]; then
		service cgconfig start
	fi

}

start() {
	[ -x $exec ] || exit 5

	if ! [ -f $pidfile ]; then
		prestart
		printf "Starting $prog:\t"
		echo "\n$(date)\n" >> $logfile
		$exec -d $other_args &>> $logfile &
		pid=$!
		touch $lockfile
		# wait up to 10 seconds for the pidfile to exist. see
		# https://github.com/docker/docker/issues/5359
		tries=0
		while [ ! -f $pidfile -a $tries -lt 10 ]; do
			sleep 1
			tries=$((tries + 1))
		done
		success
		echo
	else
		failure
		echo
		printf "$pidfile still exists...\n"
		exit 7
	fi
}

stop() {
	echo -n $"Stopping $prog: "
	killproc -p $pidfile $prog
	retval=$?
	echo
	[ $retval -eq 0 ] && rm -f $lockfile
	return $retval
}

restart() {
	stop
	start
}

reload() {
	restart
}

force_reload() {
	restart
}

rh_status() {
	status -p $pidfile $prog
}

rh_status_q() {
	rh_status >/dev/null 2>&1
}

case "$1" in
	start)
		rh_status_q && exit 0
		$1
		;;
	stop)
		rh_status_q || exit 0
		$1
		;;
	restart)
		$1
		;;
	reload)
		rh_status_q || exit 7
		$1
		;;
	force-reload)
		force_reload
		;;
	status)
		rh_status
		;;
	condrestart|try-restart)
		rh_status_q || exit 0
		restart
		;;
	*)
		echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"
		exit 2
esac

exit $?
@@ -1,7 +0,0 @@
# /etc/sysconfig/docker
#
# Other arguments to pass to the docker daemon process
# These will be parsed by the sysv initscript and appended
# to the arguments list passed to docker -d

other_args=""
@@ -1,41 +0,0 @@
description "Docker daemon"

start on (local-filesystems and net-device-up IFACE!=lo)
stop on runlevel [!2345]
limit nofile 524288 1048576
limit nproc 524288 1048576

respawn

pre-start script
	# see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount
	if grep -v '^#' /etc/fstab | grep -q cgroup \
		|| [ ! -e /proc/cgroups ] \
		|| [ ! -d /sys/fs/cgroup ]; then
		exit 0
	fi
	if ! mountpoint -q /sys/fs/cgroup; then
		mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup
	fi
	(
		cd /sys/fs/cgroup
		for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do
			mkdir -p $sys
			if ! mountpoint -q $sys; then
				if ! mount -n -t cgroup -o $sys cgroup $sys; then
					rmdir $sys || true
				fi
			fi
		done
	)
end script

script
	# modify these in /etc/default/$UPSTART_JOB (/etc/default/docker)
	DOCKER=/usr/bin/$UPSTART_JOB
	DOCKER_OPTS=
	if [ -f /etc/default/$UPSTART_JOB ]; then
		. /etc/default/$UPSTART_JOB
	fi
	exec "$DOCKER" -d $DOCKER_OPTS
end script
55 contrib/install.sh Executable file
@@ -0,0 +1,55 @@
#!/bin/sh
# This script is meant for quick & easy install via 'curl URL-OF-SCRIPT | sh'
# Original version by Jeff Lindsay <progrium@gmail.com>
# Revamped by Jerome Petazzoni <jerome@dotcloud.com>
#
# This script's canonical location is http://get.docker.io/; to update it, run:
# s3cmd put -m text/x-shellscript -P install.sh s3://get.docker.io/index

echo "Ensuring basic dependencies are installed..."
apt-get -qq update
apt-get -qq install lxc wget bsdtar

echo "Looking in /proc/filesystems to see if we have AUFS support..."
if grep -q aufs /proc/filesystems
then
	echo "Found."
else
	echo "Ahem, it looks like the current kernel does not support AUFS."
	echo "Let's see if we can load the AUFS module with modprobe..."
	if modprobe aufs
	then
		echo "Module loaded."
	else
		echo "Ahem, things didn't turn out as expected."
		KPKG=linux-image-extra-$(uname -r)
		echo "Trying to install $KPKG..."
		if apt-get -qq install $KPKG
		then
			echo "Installed."
		else
			echo "Oops, we couldn't install the -extra kernel."
			echo "Are you sure you are running a supported version of Ubuntu?"
			echo "Proceeding anyway, but Docker will probably NOT WORK!"
		fi
	fi
fi

echo "Downloading docker binary and uncompressing into /usr/local/bin..."
curl -s http://get.docker.io/builds/$(uname -s)/$(uname -m)/docker-master.tgz |
tar -C /usr/local/bin --strip-components=1 -zxf- \
	docker-master/docker

if [ -f /etc/init/dockerd.conf ]
then
	echo "Upstart script already exists."
else
	echo "Creating /etc/init/dockerd.conf..."
	echo "exec env LANG=\"en_US.UTF-8\" /usr/local/bin/docker -d" > /etc/init/dockerd.conf
fi

echo "Starting dockerd..."
start dockerd > /dev/null

echo "Done."
echo
@@ -1,82 +0,0 @@
#!/bin/sh

set -e

[ $(id -u) -eq 0 ] || {
	printf >&2 '%s requires root\n' "$0"
	exit 1
}

usage() {
	printf >&2 '%s: [-r release] [-m mirror] [-s]\n' "$0"
	exit 1
}

tmp() {
	TMP=$(mktemp -d ${TMPDIR:-/var/tmp}/alpine-docker-XXXXXXXXXX)
	ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/alpine-docker-rootfs-XXXXXXXXXX)
	trap "rm -rf $TMP $ROOTFS" EXIT TERM INT
}

apkv() {
	curl -sSL $REPO/$ARCH/APKINDEX.tar.gz | tar -Oxz |
		grep '^P:apk-tools-static$' -A1 | tail -n1 | cut -d: -f2
}

getapk() {
	curl -sSL $REPO/$ARCH/apk-tools-static-$(apkv).apk |
		tar -xz -C $TMP sbin/apk.static
}

mkbase() {
	$TMP/sbin/apk.static --repository $REPO --update-cache --allow-untrusted \
		--root $ROOTFS --initdb add alpine-base
}

conf() {
	printf '%s\n' $REPO > $ROOTFS/etc/apk/repositories
}

pack() {
	local id
	id=$(tar --numeric-owner -C $ROOTFS -c . | docker import - alpine:$REL)

	docker tag $id alpine:latest
	docker run -i -t alpine printf 'alpine:%s with id=%s created!\n' $REL $id
}

save() {
	[ $SAVE -eq 1 ] || return

	tar --numeric-owner -C $ROOTFS -c . | xz > rootfs.tar.xz
}

while getopts "hr:m:s" opt; do
	case $opt in
		r)
			REL=$OPTARG
			;;
		m)
			MIRROR=$OPTARG
			;;
		s)
			SAVE=1
			;;
		*)
			usage
			;;
	esac
done

REL=${REL:-edge}
MIRROR=${MIRROR:-http://nl.alpinelinux.org/alpine}
SAVE=${SAVE:-0}
REPO=$MIRROR/$REL/main
ARCH=$(uname -m)

tmp
getapk
mkbase
conf
pack
save
@@ -1,92 +0,0 @@
#
# /etc/pacman.conf
#
# See the pacman.conf(5) manpage for option and repository directives

#
# GENERAL OPTIONS
#
[options]
# The following paths are commented out with their default values listed.
# If you wish to use different paths, uncomment and update the paths.
#RootDir = /
#DBPath = /var/lib/pacman/
#CacheDir = /var/cache/pacman/pkg/
#LogFile = /var/log/pacman.log
#GPGDir = /etc/pacman.d/gnupg/
HoldPkg = pacman glibc
#XferCommand = /usr/bin/curl -C - -f %u > %o
#XferCommand = /usr/bin/wget --passive-ftp -c -O %o %u
#CleanMethod = KeepInstalled
#UseDelta = 0.7
Architecture = auto

# Pacman won't upgrade packages listed in IgnorePkg and members of IgnoreGroup
#IgnorePkg =
#IgnoreGroup =

#NoUpgrade =
#NoExtract =

# Misc options
#UseSyslog
#Color
#TotalDownload
# We cannot check disk space from within a chroot environment
#CheckSpace
#VerbosePkgLists

# By default, pacman accepts packages signed by keys that its local keyring
# trusts (see pacman-key and its man page), as well as unsigned packages.
SigLevel = Required DatabaseOptional
LocalFileSigLevel = Optional
#RemoteFileSigLevel = Required

# NOTE: You must run `pacman-key --init` before first using pacman; the local
# keyring can then be populated with the keys of all official Arch Linux
# packagers with `pacman-key --populate archlinux`.

#
# REPOSITORIES
#   - can be defined here or included from another file
#   - pacman will search repositories in the order defined here
#   - local/custom mirrors can be added here or in separate files
#   - repositories listed first will take precedence when packages
#     have identical names, regardless of version number
#   - URLs will have $repo replaced by the name of the current repo
#   - URLs will have $arch replaced by the name of the architecture
#
# Repository entries are of the format:
#       [repo-name]
#       Server = ServerName
#       Include = IncludePath
#
# The header [repo-name] is crucial - it must be present and
# uncommented to enable the repo.
#

# The testing repositories are disabled by default. To enable, uncomment the
# repo name header and Include lines. You can add preferred servers immediately
# after the header, and they will be used before the default mirrors.

#[testing]
#Include = /etc/pacman.d/mirrorlist

[core]
Include = /etc/pacman.d/mirrorlist

[extra]
Include = /etc/pacman.d/mirrorlist

#[community-testing]
#Include = /etc/pacman.d/mirrorlist

[community]
Include = /etc/pacman.d/mirrorlist

# An example of a custom package repository. See the pacman manpage for
# tips on creating your own repositories.
#[custom]
#SigLevel = Optional TrustAll
#Server = file:///home/custompkgs
@@ -1,65 +0,0 @@
#!/usr/bin/env bash
# Generate a minimal filesystem for archlinux and load it into the local
# docker as "archlinux"
# requires root
set -e

hash pacstrap &>/dev/null || {
	echo "Could not find pacstrap. Run pacman -S arch-install-scripts"
	exit 1
}

hash expect &>/dev/null || {
	echo "Could not find expect. Run pacman -S expect"
	exit 1
}

ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/rootfs-archlinux-XXXXXXXXXX)
chmod 755 $ROOTFS

# packages to ignore for space savings
PKGIGNORE=linux,jfsutils,lvm2,cryptsetup,groff,man-db,man-pages,mdadm,pciutils,pcmciautils,reiserfsprogs,s-nail,xfsprogs

expect <<EOF
	set send_slow {1 .1}
	proc send {ignore arg} {
		sleep .1
		exp_send -s -- \$arg
	}
	set timeout 60

	spawn pacstrap -C ./mkimage-arch-pacman.conf -c -d -G -i $ROOTFS base haveged --ignore $PKGIGNORE
	expect {
		-exact "anyway? \[Y/n\] " { send -- "n\r"; exp_continue }
		-exact "(default=all): " { send -- "\r"; exp_continue }
		-exact "installation? \[Y/n\]" { send -- "y\r"; exp_continue }
	}
EOF

arch-chroot $ROOTFS /bin/sh -c "haveged -w 1024; pacman-key --init; pkill haveged; pacman -Rs --noconfirm haveged; pacman-key --populate archlinux"
arch-chroot $ROOTFS /bin/sh -c "ln -s /usr/share/zoneinfo/UTC /etc/localtime"
echo 'en_US.UTF-8 UTF-8' > $ROOTFS/etc/locale.gen
arch-chroot $ROOTFS locale-gen
arch-chroot $ROOTFS /bin/sh -c 'echo "Server = https://mirrors.kernel.org/archlinux/\$repo/os/\$arch" > /etc/pacman.d/mirrorlist'

# udev doesn't work in containers, rebuild /dev
DEV=$ROOTFS/dev
rm -rf $DEV
mkdir -p $DEV
mknod -m 666 $DEV/null c 1 3
mknod -m 666 $DEV/zero c 1 5
mknod -m 666 $DEV/random c 1 8
mknod -m 666 $DEV/urandom c 1 9
mkdir -m 755 $DEV/pts
mkdir -m 1777 $DEV/shm
mknod -m 666 $DEV/tty c 5 0
mknod -m 600 $DEV/console c 5 1
mknod -m 666 $DEV/tty0 c 4 0
mknod -m 666 $DEV/full c 1 7
mknod -m 600 $DEV/initctl p
mknod -m 666 $DEV/ptmx c 5 2
ln -sf /proc/self/fd $DEV/fd

tar --numeric-owner -C $ROOTFS -c . | docker import - archlinux
docker run -i -t archlinux echo Success.
rm -rf $ROOTFS
@@ -1,11 +1,7 @@
-#!/usr/bin/env bash
+#!/bin/bash
 # Generate a very minimal filesystem based on busybox-static,
 # and load it into the local docker under the name "busybox".
 
-echo >&2
-echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/busybox-static'
-echo >&2
-
 BUSYBOX=$(which busybox)
 [ "$BUSYBOX" ] || {
 	echo "Sorry, I could not locate busybox."
@@ -14,7 +10,7 @@ BUSYBOX=$(which busybox)
 }
 
 set -e
-ROOTFS=${TMPDIR:-/var/tmp}/rootfs-busybox-$$-$RANDOM
+ROOTFS=/tmp/rootfs-busybox-$$-$RANDOM
 mkdir $ROOTFS
 cd $ROOTFS
 
@@ -39,5 +35,5 @@ do
 	cp -a /dev/$X dev
 done
 
-tar --numeric-owner -cf- . | docker import - busybox
+tar -cf- . | docker import - busybox
 docker run -i -u root busybox /bin/echo Success.
@@ -1,75 +0,0 @@
#!/usr/bin/env bash
# Generate a minimal filesystem for CRUX/Linux and load it into the local
# docker as "cruxlinux"
# requires root and the crux iso (http://crux.nu)

set -e

die () {
	echo >&2 "$@"
	exit 1
}

[ "$#" -eq 1 ] || die "1 argument(s) required, $# provided. Usage: ./mkimage-crux.sh /path/to/iso"

ISO=${1}

ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/rootfs-crux-XXXXXXXXXX)
CRUX=$(mktemp -d ${TMPDIR:-/var/tmp}/crux-XXXXXXXXXX)
TMP=$(mktemp -d ${TMPDIR:-/var/tmp}/XXXXXXXXXX)

VERSION=$(basename --suffix=.iso $ISO | sed 's/[^0-9.]*\([0-9.]*\).*/\1/')

# Mount the ISO
mount -o ro,loop $ISO $CRUX

# Extract pkgutils
tar -C $TMP -xf $CRUX/tools/pkgutils#*.pkg.tar.gz

# Put pkgadd in the $PATH
export PATH="$TMP/usr/bin:$PATH"

# Install core packages
mkdir -p $ROOTFS/var/lib/pkg
touch $ROOTFS/var/lib/pkg/db
for pkg in $CRUX/crux/core/*; do
	pkgadd -r $ROOTFS $pkg
done

# Remove agetty and inittab config
if (grep agetty ${ROOTFS}/etc/inittab 2>&1 > /dev/null); then
	echo "Removing agetty from /etc/inittab ..."
	chroot ${ROOTFS} sed -i -e "/agetty/d" /etc/inittab
	chroot ${ROOTFS} sed -i -e "/shutdown/d" /etc/inittab
	chroot ${ROOTFS} sed -i -e "/^$/N;/^\n$/d" /etc/inittab
fi

# Remove kernel source
rm -rf $ROOTFS/usr/src/*

# udev doesn't work in containers, rebuild /dev
DEV=$ROOTFS/dev
rm -rf $DEV
mkdir -p $DEV
mknod -m 666 $DEV/null c 1 3
mknod -m 666 $DEV/zero c 1 5
mknod -m 666 $DEV/random c 1 8
mknod -m 666 $DEV/urandom c 1 9
mkdir -m 755 $DEV/pts
mkdir -m 1777 $DEV/shm
mknod -m 666 $DEV/tty c 5 0
mknod -m 600 $DEV/console c 5 1
mknod -m 666 $DEV/tty0 c 4 0
mknod -m 666 $DEV/full c 1 7
mknod -m 600 $DEV/initctl p
mknod -m 666 $DEV/ptmx c 5 2

IMAGE_ID=$(tar --numeric-owner -C $ROOTFS -c . | docker import - crux:$VERSION)
docker tag $IMAGE_ID crux:latest
docker run -i -t crux echo Success.

# Cleanup
umount $CRUX
rm -rf $ROOTFS
rm -rf $CRUX
rm -rf $TMP
@@ -1,297 +0,0 @@
#!/usr/bin/env bash
set -e

echo >&2
echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/debootstrap'
echo >&2

variant='minbase'
include='iproute,iputils-ping'
arch='amd64' # intentionally undocumented for now
skipDetection=
strictDebootstrap=
justTar=

usage() {
	echo >&2

	echo >&2 "usage: $0 [options] repo suite [mirror]"

	echo >&2
	echo >&2 'options: (not recommended)'
	echo >&2 "   -p set an http_proxy for debootstrap"
	echo >&2 "   -v $variant # change default debootstrap variant"
	echo >&2 "   -i $include # change default package includes"
	echo >&2 "   -d # strict debootstrap (do not apply any docker-specific tweaks)"
	echo >&2 "   -s # skip version detection and tagging (ie, precise also tagged as 12.04)"
	echo >&2 "      # note that this will also skip adding universe and/or security/updates to sources.list"
	echo >&2 "   -t # just create a tarball, especially for dockerbrew (uses repo as tarball name)"

	echo >&2
	echo >&2 "   ie: $0 username/debian squeeze"
	echo >&2 "       $0 username/debian squeeze http://ftp.uk.debian.org/debian/"

	echo >&2
	echo >&2 "   ie: $0 username/ubuntu precise"
	echo >&2 "       $0 username/ubuntu precise http://mirrors.melbourne.co.uk/ubuntu/"

	echo >&2
	echo >&2 "   ie: $0 -t precise.tar.bz2 precise"
	echo >&2 "       $0 -t wheezy.tgz wheezy"
	echo >&2 "       $0 -t wheezy-uk.tar.xz wheezy http://ftp.uk.debian.org/debian/"

	echo >&2
}

# these should match the names found at http://www.debian.org/releases/
debianStable=wheezy
debianUnstable=sid
# this should match the name found at http://releases.ubuntu.com/
ubuntuLatestLTS=trusty
# this should match the name found at http://releases.tanglu.org/
tangluLatest=aequorea

while getopts v:i:a:p:dst name; do
	case "$name" in
		p)
			http_proxy="$OPTARG"
			;;
		v)
			variant="$OPTARG"
			;;
		i)
			include="$OPTARG"
			;;
		a)
			arch="$OPTARG"
			;;
		d)
			strictDebootstrap=1
			;;
		s)
			skipDetection=1
			;;
		t)
			justTar=1
			;;
		?)
			usage
			exit 0
			;;
	esac
done
shift $(($OPTIND - 1))

repo="$1"
suite="$2"
mirror="${3:-}" # stick to the default debootstrap mirror if one is not provided

if [ ! "$repo" ] || [ ! "$suite" ]; then
	usage
	exit 1
fi

# some rudimentary detection for whether we need to "sudo" our docker calls
docker=''
if docker version > /dev/null 2>&1; then
	docker='docker'
elif sudo docker version > /dev/null 2>&1; then
	docker='sudo docker'
elif command -v docker > /dev/null 2>&1; then
	docker='docker'
else
	echo >&2 "warning: either docker isn't installed, or your current user cannot run it;"
	echo >&2 "  this script is not likely to work as expected"
	sleep 3
	docker='docker' # give us a command-not-found later
fi

# make sure we have an absolute path to our final tarball so we can still reference it properly after we change directory
if [ "$justTar" ]; then
	if [ ! -d "$(dirname "$repo")" ]; then
		echo >&2 "error: $(dirname "$repo") does not exist"
		exit 1
	fi
	repo="$(cd "$(dirname "$repo")" && pwd -P)/$(basename "$repo")"
fi

# will be filled in later, if [ -z "$skipDetection" ]
lsbDist=''

target="${TMPDIR:-/var/tmp}/docker-rootfs-debootstrap-$suite-$$-$RANDOM"

cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
returnTo="$(pwd -P)"

if [ "$suite" = 'lucid' ]; then
	# lucid fails and doesn't include gpgv in minbase; "apt-get update" fails
	include+=',gpgv'
fi

set -x

# bootstrap
mkdir -p "$target"
sudo http_proxy=$http_proxy debootstrap --verbose --variant="$variant" --include="$include" --arch="$arch" "$suite" "$target" "$mirror"

cd "$target"

if [ -z "$strictDebootstrap" ]; then
	# prevent init scripts from running during install/update
	# policy-rc.d (for most scripts)
	echo $'#!/bin/sh\nexit 101' | sudo tee usr/sbin/policy-rc.d > /dev/null
	sudo chmod +x usr/sbin/policy-rc.d
	# initctl (for some pesky upstart scripts)
	sudo chroot . dpkg-divert --local --rename --add /sbin/initctl
	sudo ln -sf /bin/true sbin/initctl
	# see https://github.com/docker/docker/issues/446#issuecomment-16953173

	# shrink the image, since apt makes us fat (wheezy: ~157.5MB vs ~120MB)
	sudo chroot . apt-get clean

	if strings usr/bin/dpkg | grep -q unsafe-io; then
		# while we're at it, apt is unnecessarily slow inside containers
		# this forces dpkg not to call sync() after package extraction and speeds up install
		# the benefit is huge on spinning disks, and the penalty is nonexistent on SSD or decent server virtualization
		echo 'force-unsafe-io' | sudo tee etc/dpkg/dpkg.cfg.d/02apt-speedup > /dev/null
		# we have this wrapped up in an "if" because the "force-unsafe-io"
		# option was added in dpkg 1.15.8.6
		# (see http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=584254#82),
		# and ubuntu lucid/10.04 only has 1.15.5.6
	fi

	# we want to effectively run "apt-get clean" after every install to keep images small (see output of "apt-get clean -s" for context)
	{
		aptGetClean='"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true";'
		echo "DPkg::Post-Invoke { ${aptGetClean} };"
		echo "APT::Update::Post-Invoke { ${aptGetClean} };"
		echo 'Dir::Cache::pkgcache ""; Dir::Cache::srcpkgcache "";'
	} | sudo tee etc/apt/apt.conf.d/no-cache > /dev/null

	# and remove the translations, too
	echo 'Acquire::Languages "none";' | sudo tee etc/apt/apt.conf.d/no-languages > /dev/null

	# helpful undo lines for each of the above tweaks (for lack of a better home to keep track of them):
	# rm /usr/sbin/policy-rc.d
	# rm /sbin/initctl; dpkg-divert --rename --remove /sbin/initctl
	# rm /etc/dpkg/dpkg.cfg.d/02apt-speedup
	# rm /etc/apt/apt.conf.d/no-cache
	# rm /etc/apt/apt.conf.d/no-languages

	if [ -z "$skipDetection" ]; then
		# see also rudimentary platform detection in hack/install.sh
		lsbDist=''
		if [ -r etc/lsb-release ]; then
			lsbDist="$(. etc/lsb-release && echo "$DISTRIB_ID")"
		fi
		if [ -z "$lsbDist" ] && [ -r etc/debian_version ]; then
			lsbDist='Debian'
		fi

		case "$lsbDist" in
			Debian)
				# add the updates and security repositories
				if [ "$suite" != "$debianUnstable" -a "$suite" != 'unstable' ]; then
					# ${suite}-updates only applies to non-unstable
					sudo sed -i "p; s/ $suite main$/ ${suite}-updates main/" etc/apt/sources.list

					# same for security updates
					echo "deb http://security.debian.org/ $suite/updates main" | sudo tee -a etc/apt/sources.list > /dev/null
				fi
				;;
			Ubuntu)
				# add the universe, updates, and security repositories
				sudo sed -i "
					s/ $suite main$/ $suite main universe/; p;
					s/ $suite main/ ${suite}-updates main/; p;
					s/ $suite-updates main/ ${suite}-security main/
				" etc/apt/sources.list
				;;
			Tanglu)
				# add the updates repository
				if [ "$suite" = "$tangluLatest" ]; then
					# ${suite}-updates only applies to stable Tanglu versions
					sudo sed -i "p; s/ $suite main$/ ${suite}-updates main/" etc/apt/sources.list
				fi
				;;
			SteamOS)
				# add contrib and non-free
				sudo sed -i "s/ $suite main$/ $suite main contrib non-free/" etc/apt/sources.list
				;;
		esac
	fi

	# make sure our packages lists are as up to date as we can get them
	sudo chroot . apt-get update
	sudo chroot . apt-get dist-upgrade -y
fi

if [ "$justTar" ]; then
	# create the tarball file so it has the right permissions (ie, not root)
	touch "$repo"

	# fill the tarball
	sudo tar --numeric-owner -caf "$repo" .
else
	# create the image (and tag $repo:$suite)
	sudo tar --numeric-owner -c . | $docker import - $repo:$suite

	# test the image
	$docker run -i -t $repo:$suite echo success

	if [ -z "$skipDetection" ]; then
		case "$lsbDist" in
			Debian)
				if [ "$suite" = "$debianStable" -o "$suite" = 'stable' ] && [ -r etc/debian_version ]; then
					# tag latest
					$docker tag $repo:$suite $repo:latest

					if [ -r etc/debian_version ]; then
						# tag the specific debian release version (which is only reasonable to tag on debian stable)
						ver=$(cat etc/debian_version)
						$docker tag $repo:$suite $repo:$ver
					fi
				fi
				;;
			Ubuntu)
				if [ "$suite" = "$ubuntuLatestLTS" ]; then
					# tag latest
					$docker tag $repo:$suite $repo:latest
				fi
				if [ -r etc/lsb-release ]; then
					lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")"
					if [ "$lsbRelease" ]; then
						# tag specific Ubuntu version number, if available (12.04, etc.)
						$docker tag $repo:$suite $repo:$lsbRelease
					fi
				fi
				;;
			Tanglu)
				if [ "$suite" = "$tangluLatest" ]; then
					# tag latest
					$docker tag $repo:$suite $repo:latest
				fi
				if [ -r etc/lsb-release ]; then
					lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")"
					if [ "$lsbRelease" ]; then
						# tag specific Tanglu version number, if available (1.0, 2.0, etc.)
						$docker tag $repo:$suite $repo:$lsbRelease
					fi
				fi
				;;
			SteamOS)
				if [ -r etc/lsb-release ]; then
					lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")"
					if [ "$lsbRelease" ]; then
						# tag specific SteamOS version number, if available (1.0, 2.0, etc.)
						$docker tag $repo:$suite $repo:$lsbRelease
					fi
				fi
				;;
		esac
	fi
fi

# cleanup
cd "$returnTo"
sudo rm -rf "$target"
@@ -1,123 +0,0 @@
#!/usr/bin/env bash
#
# Create a base CentOS Docker image.

# This script is useful on systems with rinse available (e.g.,
# building a CentOS image on Debian). See contrib/mkimage-yum.sh for
# a way to build CentOS images on systems with yum installed.

set -e

echo >&2
echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/rinse'
echo >&2

repo="$1"
distro="$2"
mirror="$3"

if [ ! "$repo" ] || [ ! "$distro" ]; then
	self="$(basename $0)"
	echo >&2 "usage: $self repo distro [mirror]"
	echo >&2
	echo >&2 "   ie: $self username/centos centos-5"
	echo >&2 "       $self username/centos centos-6"
	echo >&2
	echo >&2 "   ie: $self username/slc slc-5"
	echo >&2 "       $self username/slc slc-6"
	echo >&2
	echo >&2 "   ie: $self username/centos centos-5 http://vault.centos.org/5.8/os/x86_64/CentOS/"
	echo >&2 "       $self username/centos centos-6 http://vault.centos.org/6.3/os/x86_64/Packages/"
	echo >&2
	echo >&2 'See /etc/rinse for supported values of "distro" and for examples of'
	echo >&2 '  expected values of "mirror".'
	echo >&2
	echo >&2 'This script is tested to work with the original upstream version of rinse,'
	echo >&2 '  found at http://www.steve.org.uk/Software/rinse/ and also in Debian at'
	echo >&2 '  http://packages.debian.org/wheezy/rinse -- as always, YMMV.'
	echo >&2
	exit 1
fi

target="${TMPDIR:-/var/tmp}/docker-rootfs-rinse-$distro-$$-$RANDOM"

cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
returnTo="$(pwd -P)"

rinseArgs=( --arch amd64 --distribution "$distro" --directory "$target" )
if [ "$mirror" ]; then
	rinseArgs+=( --mirror "$mirror" )
fi

set -x

mkdir -p "$target"

sudo rinse "${rinseArgs[@]}"

cd "$target"

# rinse fails a little at setting up /dev, so we'll just wipe it out and create our own
sudo rm -rf dev
sudo mkdir -m 755 dev
(
	cd dev
	sudo ln -sf /proc/self/fd ./
	sudo mkdir -m 755 pts
	sudo mkdir -m 1777 shm
	sudo mknod -m 600 console c 5 1
	sudo mknod -m 600 initctl p
	sudo mknod -m 666 full c 1 7
	sudo mknod -m 666 null c 1 3
	sudo mknod -m 666 ptmx c 5 2
	sudo mknod -m 666 random c 1 8
	sudo mknod -m 666 tty c 5 0
	sudo mknod -m 666 tty0 c 4 0
	sudo mknod -m 666 urandom c 1 9
	sudo mknod -m 666 zero c 1 5
)

# effectively: febootstrap-minimize --keep-zoneinfo --keep-rpmdb --keep-services "$target"
# locales
sudo rm -rf usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive}
# docs
sudo rm -rf usr/share/{man,doc,info,gnome/help}
# cracklib
sudo rm -rf usr/share/cracklib
# i18n
sudo rm -rf usr/share/i18n
# yum cache
sudo rm -rf var/cache/yum
sudo mkdir -p --mode=0755 var/cache/yum
# sln
sudo rm -rf sbin/sln
# ldconfig
#sudo rm -rf sbin/ldconfig
sudo rm -rf etc/ld.so.cache var/cache/ldconfig
sudo mkdir -p --mode=0755 var/cache/ldconfig

# allow networking init scripts inside the container to work without extra steps
echo 'NETWORKING=yes' | sudo tee etc/sysconfig/network > /dev/null

# to restore locales later:
#  yum reinstall glibc-common

version=
if [ -r etc/redhat-release ]; then
	version="$(sed -E 's/^[^0-9.]*([0-9.]+).*$/\1/' etc/redhat-release)"
elif [ -r etc/SuSE-release ]; then
	version="$(awk '/^VERSION/ { print $3 }' etc/SuSE-release)"
fi

if [ -z "$version" ]; then
	echo >&2 "warning: cannot autodetect OS version, using $distro as tag"
	sleep 20
	version="$distro"
fi

sudo tar --numeric-owner -c . | docker import - $repo:$version

docker run -i -t $repo:$version echo success

cd "$returnTo"
sudo rm -rf "$target"
@@ -1,49 +0,0 @@
#!/usr/bin/env bash
# Generate a very minimal filesystem based on busybox-static,
# and load it into the local docker under the name "docker-ut".

missing_pkg() {
	echo "Sorry, I could not locate $1"
	echo "Try 'apt-get install ${2:-$1}'?"
	exit 1
}

BUSYBOX=$(which busybox)
[ "$BUSYBOX" ] || missing_pkg busybox busybox-static
SOCAT=$(which socat)
[ "$SOCAT" ] || missing_pkg socat

shopt -s extglob
set -ex
ROOTFS=`mktemp -d ${TMPDIR:-/var/tmp}/rootfs-busybox.XXXXXXXXXX`
trap "rm -rf $ROOTFS" INT QUIT TERM
cd $ROOTFS

mkdir bin etc dev dev/pts lib proc sys tmp
touch etc/resolv.conf
cp /etc/nsswitch.conf etc/nsswitch.conf
echo root:x:0:0:root:/:/bin/sh > etc/passwd
echo daemon:x:1:1:daemon:/usr/sbin:/bin/sh >> etc/passwd
echo root:x:0: > etc/group
echo daemon:x:1: >> etc/group
ln -s lib lib64
ln -s bin sbin
cp $BUSYBOX $SOCAT bin
for X in $(busybox --list)
do
	ln -s busybox bin/$X
done
rm bin/init
ln bin/busybox bin/init
cp -P /lib/x86_64-linux-gnu/lib{pthread*,c*(-*),dl*(-*),nsl*(-*),nss_*,util*(-*),wrap,z}.so* lib
cp /lib/x86_64-linux-gnu/ld-linux-x86-64.so.2 lib
cp -P /usr/lib/x86_64-linux-gnu/lib{crypto,ssl}.so* lib
for X in console null ptmx random stdin stdout stderr tty urandom zero
do
	cp -a /dev/$X dev
done

chmod 0755 $ROOTFS # See #486
tar --numeric-owner -cf- . | docker import - docker-ut
docker run -i -u root docker-ut /bin/echo Success.
rm -rf $ROOTFS
@@ -1,98 +0,0 @@
#!/usr/bin/env bash
#
# Create a base CentOS Docker image.
#
# This script is useful on systems with yum installed (e.g., building
# a CentOS image on CentOS). See contrib/mkimage-rinse.sh for a way
# to build CentOS images on other systems.

usage() {
	cat <<EOOPTS
$(basename $0) [OPTIONS] <name>
OPTIONS:
  -y <yumconf>  The path to the yum config to install packages from. The
                default is /etc/yum.conf.
EOOPTS
	exit 1
}

# option defaults
yum_config=/etc/yum.conf
while getopts ":y:h" opt; do
	case $opt in
		y)
			yum_config=$OPTARG
			;;
		h)
			usage
			;;
		\?)
			echo "Invalid option: -$OPTARG"
			usage
			;;
	esac
done
shift $((OPTIND - 1))
name=$1

if [[ -z $name ]]; then
	usage
fi

#--------------------

target=$(mktemp -d --tmpdir $(basename $0).XXXXXX)

set -x

mkdir -m 755 "$target"/dev
mknod -m 600 "$target"/dev/console c 5 1
mknod -m 600 "$target"/dev/initctl p
mknod -m 666 "$target"/dev/full c 1 7
mknod -m 666 "$target"/dev/null c 1 3
mknod -m 666 "$target"/dev/ptmx c 5 2
mknod -m 666 "$target"/dev/random c 1 8
mknod -m 666 "$target"/dev/tty c 5 0
mknod -m 666 "$target"/dev/tty0 c 4 0
mknod -m 666 "$target"/dev/urandom c 1 9
mknod -m 666 "$target"/dev/zero c 1 5

yum -c "$yum_config" --installroot="$target" --setopt=tsflags=nodocs \
	--setopt=group_package_types=mandatory -y groupinstall Core
yum -c "$yum_config" --installroot="$target" -y clean all

cat > "$target"/etc/sysconfig/network <<EOF
NETWORKING=yes
HOSTNAME=localhost.localdomain
EOF

# effectively: febootstrap-minimize --keep-zoneinfo --keep-rpmdb
# --keep-services "$target". Stolen from mkimage-rinse.sh
# locales
rm -rf "$target"/usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive}
# docs
rm -rf "$target"/usr/share/{man,doc,info,gnome/help}
# cracklib
rm -rf "$target"/usr/share/cracklib
# i18n
rm -rf "$target"/usr/share/i18n
# sln
rm -rf "$target"/sbin/sln
# ldconfig
rm -rf "$target"/etc/ld.so.cache
rm -rf "$target"/var/cache/ldconfig/*

version=
if [ -r "$target"/etc/redhat-release ]; then
	version="$(sed 's/^[^0-9\]*\([0-9.]\+\).*$/\1/' "$target"/etc/redhat-release)"
fi

if [ -z "$version" ]; then
	echo >&2 "warning: cannot autodetect OS version, using '$name' as tag"
	version=$name
fi

tar --numeric-owner -c -C "$target" . | docker import - $name:$version
docker run -i -t $name:$version echo success

rm -rf "$target"
@@ -1,107 +0,0 @@
#!/usr/bin/env bash
set -e

mkimg="$(basename "$0")"

usage() {
	echo >&2 "usage: $mkimg [-d dir] [-t tag] script [script-args]"
	echo >&2 "   ie: $mkimg -t someuser/debian debootstrap --variant=minbase jessie"
	echo >&2 "       $mkimg -t someuser/ubuntu debootstrap --include=ubuntu-minimal --components main,universe trusty"
	echo >&2 "       $mkimg -t someuser/busybox busybox-static"
	echo >&2 "       $mkimg -t someuser/centos:5 rinse --distribution centos-5"
	echo >&2 "       $mkimg -t someuser/mageia:4 mageia-urpmi --version=4"
	echo >&2 "       $mkimg -t someuser/mageia:4 mageia-urpmi --version=4 --mirror=http://somemirror/"
	exit 1
}

scriptDir="$(dirname "$(readlink -f "$BASH_SOURCE")")/mkimage"

optTemp=$(getopt --options '+d:t:h' --longoptions 'dir:,tag:,help' --name "$mkimg" -- "$@")
eval set -- "$optTemp"
unset optTemp

dir=
tag=
while true; do
	case "$1" in
		-d|--dir) dir="$2" ; shift 2 ;;
		-t|--tag) tag="$2" ; shift 2 ;;
		-h|--help) usage ;;
		--) shift ; break ;;
	esac
done

script="$1"
[ "$script" ] || usage
shift

if [ ! -x "$scriptDir/$script" ]; then
	echo >&2 "error: $script does not exist or is not executable"
	echo >&2 "  see $scriptDir for possible scripts"
	exit 1
fi

# don't mistake common scripts like .febootstrap-minimize as image-creators
if [[ "$script" == .* ]]; then
	echo >&2 "error: $script is a script helper, not a script"
	echo >&2 "  see $scriptDir for possible scripts"
	exit 1
fi

delDir=
if [ -z "$dir" ]; then
	dir="$(mktemp -d ${TMPDIR:-/var/tmp}/docker-mkimage.XXXXXXXXXX)"
	delDir=1
fi

rootfsDir="$dir/rootfs"
( set -x; mkdir -p "$rootfsDir" )

# pass all remaining arguments to $script
"$scriptDir/$script" "$rootfsDir" "$@"

# Docker mounts tmpfs at /dev and procfs at /proc so we can remove them
rm -rf "$rootfsDir/dev" "$rootfsDir/proc"
mkdir -p "$rootfsDir/dev" "$rootfsDir/proc"

# make sure /etc/resolv.conf has something useful in it
mkdir -p "$rootfsDir/etc"
cat > "$rootfsDir/etc/resolv.conf" <<'EOF'
nameserver 8.8.8.8
nameserver 8.8.4.4
EOF

tarFile="$dir/rootfs.tar.xz"
touch "$tarFile"

(
	set -x
	tar --numeric-owner -caf "$tarFile" -C "$rootfsDir" --transform='s,^./,,' .
)

echo >&2 "+ cat > '$dir/Dockerfile'"
cat > "$dir/Dockerfile" <<'EOF'
FROM scratch
ADD rootfs.tar.xz /
EOF

# if our generated image has a decent shell, let's set a default command
for shell in /bin/bash /usr/bin/fish /usr/bin/zsh /bin/sh; do
	if [ -x "$rootfsDir/$shell" ]; then
		( set -x; echo 'CMD ["'"$shell"'"]' >> "$dir/Dockerfile" )
		break
	fi
done

( set -x; rm -rf "$rootfsDir" )

if [ "$tag" ]; then
	( set -x; docker build -t "$tag" "$dir" )
elif [ "$delDir" ]; then
	# if we didn't specify a tag and we're going to delete our dir, let's just build an untagged image so that we did _something_
	( set -x; docker build "$dir" )
fi

if [ "$delDir" ]; then
	( set -x; rm -rf "$dir" )
fi
@@ -1,28 +0,0 @@
#!/usr/bin/env bash
set -e

rootfsDir="$1"
shift

(
	cd "$rootfsDir"

	# effectively: febootstrap-minimize --keep-zoneinfo --keep-rpmdb --keep-services "$target"
	# locales
	rm -rf usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive}
	# docs
	rm -rf usr/share/{man,doc,info,gnome/help}
	# cracklib
	rm -rf usr/share/cracklib
	# i18n
	rm -rf usr/share/i18n
	# yum cache
	rm -rf var/cache/yum
	mkdir -p --mode=0755 var/cache/yum
	# sln
	rm -rf sbin/sln
	# ldconfig
	#rm -rf sbin/ldconfig
	rm -rf etc/ld.so.cache var/cache/ldconfig
	mkdir -p --mode=0755 var/cache/ldconfig
)
@@ -1,34 +0,0 @@
#!/usr/bin/env bash
set -e

rootfsDir="$1"
shift

busybox="$(which busybox 2>/dev/null || true)"
if [ -z "$busybox" ]; then
	echo >&2 'error: busybox: not found'
	echo >&2 '  install it with your distribution "busybox-static" package'
	exit 1
fi
if ! ldd "$busybox" 2>&1 | grep -q 'not a dynamic executable'; then
	echo >&2 "error: '$busybox' appears to be a dynamic executable"
	echo >&2 '  you should install your distribution "busybox-static" package instead'
	exit 1
fi

mkdir -p "$rootfsDir/bin"
rm -f "$rootfsDir/bin/busybox" # just in case
cp "$busybox" "$rootfsDir/bin/busybox"

(
	cd "$rootfsDir"

	IFS=$'\n'
	modules=( $(bin/busybox --list-modules) )
	unset IFS

	for module in "${modules[@]}"; do
		mkdir -p "$(dirname "$module")"
		ln -sf /bin/busybox "$module"
	done
)