Mirror of https://github.com/moby/moby.git (synced 2026-01-12 11:11:44 +00:00)

Compare commits (19 commits)
c97a1aada6
803a8d86e5
5fd1ff014a
3c9ed5cdd6
59b6a93504
72d7c3847a
6e7b8efa92
fa401da0ff
26ec7b2e77
03a9e41245
55869531f5
9193585d66
38b8373434
03b5f8a585
bc260f0225
45dcd1125b
d2e063d9e1
567a484b66
5d4b886ad6
@@ -1,2 +0,0 @@
-bundles
-.gopath
.gitignore (vendored): 22 lines changed
@@ -1,33 +1,17 @@
# Docker project generated files to ignore
# if you want to ignore files created by your editor/tools,
# please consider a global .gitignore https://help.github.com/articles/ignoring-files
.vagrant*
bin
docker/docker
*.exe
.*.swp
a.out
*.orig
build_src
command-line-arguments.test
.flymake*
docker.test
auth/auth.test
.idea
.DS_Store
docs/_build
docs/_static
docs/_templates
.gopath/
.dotcloud
*.test
bundles/
.hg/
.git/
vendor/pkg/
pyenv
Vagrantfile
docs/AWS_S3_BUCKET
docs/GIT_BRANCH
docs/VERSION
docs/GITCOMMIT
docs/changed-files
autogen/
.bashrc

.mailmap: 145 lines changed
@@ -1,144 +1,19 @@
# Generate AUTHORS: hack/generate-authors.sh

# Tip for finding duplicates (besides scanning the output of AUTHORS for name
# duplicates that aren't also email duplicates): scan the output of:
#   git log --format='%aE - %aN' | sort -uf
#
# For explanation on this file format: man git-shortlog

Patrick Stapleton <github@gdi2290.com>
Shishir Mahajan <shishir.mahajan@redhat.com> <smahajan@redhat.com>
Erwin van der Koogh <info@erronis.nl>
Ahmed Kamal <email.ahmedkamal@googlemail.com>
Tejesh Mehta <tejesh.mehta@gmail.com> <tj@init.me>
Cristian Staretu <cristian.staretu@gmail.com>
Cristian Staretu <cristian.staretu@gmail.com> <unclejacksons@gmail.com>
Cristian Staretu <cristian.staretu@gmail.com> <unclejack@users.noreply.github.com>
Marcus Linke <marcus.linke@gmx.de>
Aleksandrs Fadins <aleks@s-ko.net>
Christopher Latham <sudosurootdev@gmail.com>
Hu Keping <hukeping@huawei.com>
Wayne Chang <wayne@neverfear.org>
Chen Chao <cc272309126@gmail.com>
Daehyeok Mun <daehyeok@gmail.com>
<daehyeok@gmail.com> <daehyeok@daehyeokui-MacBook-Air.local>
<jt@yadutaf.fr> <admin@jtlebi.fr>
<jeff@docker.com> <jefferya@programmerq.net>
<charles.hooper@dotcloud.com> <chooper@plumata.com>
# Generate AUTHORS: git log --all --format='%aN <%aE>' | sort -uf | grep -v vagrant-ubuntu-12
<charles.hooper@dotcloud.com> <chooper@plumata.com>
<daniel.mizyrycki@dotcloud.com> <daniel@dotcloud.com>
<daniel.mizyrycki@dotcloud.com> <mzdaniel@glidelink.net>
Guillaume J. Charmes <guillaume.charmes@docker.com> <charmes.guillaume@gmail.com>
<guillaume.charmes@docker.com> <guillaume@dotcloud.com>
<guillaume.charmes@docker.com> <guillaume@docker.com>
<guillaume.charmes@docker.com> <guillaume.charmes@dotcloud.com>
<guillaume.charmes@docker.com> <guillaume@charmes.net>
Guillaume J. Charmes <guillaume.charmes@dotcloud.com> creack <charmes.guillaume@gmail.com>
<guillaume.charmes@dotcloud.com> <guillaume@dotcloud.com>
<kencochrane@gmail.com> <KenCochrane@gmail.com>
Thatcher Peskens <thatcher@docker.com>
Thatcher Peskens <thatcher@docker.com> <thatcher@dotcloud.com>
Thatcher Peskens <thatcher@docker.com> dhrp <thatcher@gmx.net>
<sridharr@activestate.com> <github@srid.name>
Thatcher Peskens <thatcher@dotcloud.com> dhrp <thatcher@dotcloud.com>
Thatcher Peskens <thatcher@dotcloud.com> dhrp <thatcher@gmx.net>
Jérôme Petazzoni <jerome.petazzoni@dotcloud.com> jpetazzo <jerome.petazzoni@dotcloud.com>
Jérôme Petazzoni <jerome.petazzoni@dotcloud.com> <jp@enix.org>
Joffrey F <joffrey@docker.com>
Joffrey F <joffrey@docker.com> <joffrey@dotcloud.com>
Joffrey F <joffrey@docker.com> <f.joffrey@gmail.com>
Joffrey F <joffrey@dotcloud.com>
<joffrey@dotcloud.com> <f.joffrey@gmail.com>
Tim Terhorst <mynamewastaken+git@gmail.com>
Andy Smith <github@anarkystic.com>
<kalessin@kalessin.fr> <louis@dotcloud.com>
<victor.vieux@docker.com> <victor.vieux@dotcloud.com>
<victor.vieux@docker.com> <victor@dotcloud.com>
<victor.vieux@docker.com> <dev@vvieux.com>
<victor.vieux@docker.com> <victor@docker.com>
<victor.vieux@docker.com> <vieux@docker.com>
<victor.vieux@docker.com> <victorvieux@gmail.com>
<victor.vieux@dotcloud.com> <victor@dotcloud.com>
<dominik@honnef.co> <dominikh@fork-bomb.org>
<ehanchrow@ine.com> <eric.hanchrow@gmail.com>
Walter Stanish <walter@pratyeka.org>
<daniel@gasienica.ch> <dgasienica@zynga.com>
Roberto Hashioka <roberto_hashioka@hotmail.com>
Konstantin Pelykh <kpelykh@zettaset.com>
David Sissitka <me@dsissitka.com>
Nolan Darilek <nolan@thewordnerd.info>
<mastahyeti@gmail.com> <mastahyeti@users.noreply.github.com>
Benoit Chesneau <bchesneau@gmail.com>
Jordan Arentsen <blissdev@gmail.com>
Daniel Garcia <daniel@danielgarcia.info>
Miguel Angel Fernández <elmendalerenda@gmail.com>
Bhiraj Butala <abhiraj.butala@gmail.com>
Faiz Khan <faizkhan00@gmail.com>
Victor Lyuboslavsky <victor@victoreda.com>
Jean-Baptiste Barth <jeanbaptiste.barth@gmail.com>
Matthew Mueller <mattmuelle@gmail.com>
<mosoni@ebay.com> <mohitsoni1989@gmail.com>
Shih-Yuan Lee <fourdollars@gmail.com>
Daniel Mizyrycki <daniel.mizyrycki@dotcloud.com> root <root@vagrant-ubuntu-12.10.vagrantup.com>
Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
<proppy@google.com> <proppy@aminche.com>
<michael@docker.com> <michael@crosbymichael.com>
<michael@docker.com> <crosby.michael@gmail.com>
<michael@docker.com> <crosbymichael@gmail.com>
<github@developersupport.net> <github@metaliveblog.com>
<brandon@ifup.org> <brandon@ifup.co>
<dano@spotify.com> <daniel.norberg@gmail.com>
<danny@codeaholics.org> <Danny.Yates@mailonline.co.uk>
<gurjeet@singh.im> <singh.gurjeet@gmail.com>
<shawn@churchofgit.com> <shawnlandden@gmail.com>
<sjoerd-github@linuxonly.nl> <sjoerd@byte.nl>
<solomon@docker.com> <solomon.hykes@dotcloud.com>
<solomon@docker.com> <solomon@dotcloud.com>
<solomon@docker.com> <s@docker.com>
Sven Dowideit <SvenDowideit@home.org.au>
Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@fosiki.com>
Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@docker.com>
Sven Dowideit <SvenDowideit@home.org.au> <¨SvenDowideit@home.org.au¨>
Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@users.noreply.github.com>
Sven Dowideit <SvenDowideit@home.org.au> <sven@t440s.home.gateway>
<alexl@redhat.com> <alexander.larsson@gmail.com>
Alexandr Morozov <lk4d4math@gmail.com>
<git.nivoc@neverbox.com> <kuehnle@online.de>
O.S. Tezer <ostezer@gmail.com>
<ostezer@gmail.com> <ostezer@users.noreply.github.com>
Roberto G. Hashioka <roberto.hashioka@docker.com> <roberto_hashioka@hotmail.com>
<justin.p.simonelis@gmail.com> <justin.simonelis@PTS-JSIMON2.toronto.exclamation.com>
<taim@bosboot.org> <maztaim@users.noreply.github.com>
<viktor.vojnovski@amadeus.com> <vojnovski@gmail.com>
<vbatts@redhat.com> <vbatts@hashbangbash.com>
<altsysrq@gmail.com> <iamironbob@gmail.com>
Sridhar Ratnakumar <sridharr@activestate.com>
Sridhar Ratnakumar <sridharr@activestate.com> <github@srid.name>
Liang-Chi Hsieh <viirya@gmail.com>
Aleksa Sarai <cyphar@cyphar.com>
Will Weaver <monkey@buildingbananas.com>
Timothy Hobbs <timothyhobbs@seznam.cz>
Nathan LeClaire <nathan.leclaire@docker.com> <nathan.leclaire@gmail.com>
Nathan LeClaire <nathan.leclaire@docker.com> <nathanleclaire@gmail.com>
<github@hollensbe.org> <erik+github@hollensbe.org>
<github@albersweb.de> <albers@users.noreply.github.com>
<lsm5@fedoraproject.org> <lsm5@redhat.com>
<marc@marc-abramowitz.com> <msabramo@gmail.com>
Matthew Heon <mheon@redhat.com> <mheon@mheonlaptop.redhat.com>
<bernat@luffy.cx> <vincent@bernat.im>
<p@pwaller.net> <peter@scraperwiki.com>
<andrew.weiss@outlook.com> <andrew.weiss@microsoft.com>
Francisco Carriedo <fcarriedo@gmail.com>
<julienbordellier@gmail.com> <git@julienbordellier.com>
<ahmetb@microsoft.com> <ahmetalpbalkan@gmail.com>
<lk4d4@docker.com> <lk4d4math@gmail.com>
<arnaud.porterie@docker.com> <icecrime@gmail.com>
<baloo@gandi.net> <superbaloo+registrations.github@superbaloo.net>
Brian Goff <cpuguy83@gmail.com>
<cpuguy83@gmail.com> <bgoff@cpuguy83-mbp.home>
<ewindisch@docker.com> <eric@windisch.us>
<frank.rosquin+github@gmail.com> <frank.rosquin@gmail.com>
Hollie Teal <hollie@docker.com>
<hollie@docker.com> <hollie.teal@docker.com>
<hollie@docker.com> <hollietealok@users.noreply.github.com>
<huu@prismskylabs.com> <whoshuu@gmail.com>
Jessica Frazelle <jess@docker.com> Jessie Frazelle <jfrazelle@users.noreply.github.com>
<jess@docker.com> <jfrazelle@users.noreply.github.com>
<konrad.wilhelm.kleine@gmail.com> <kwk@users.noreply.github.com>
<tintypemolly@gmail.com> <tintypemolly@Ohui-MacBook-Pro.local>
<estesp@linux.vnet.ibm.com> <estesp@gmail.com>
<github@gone.nl> <thaJeztah@users.noreply.github.com>
Thomas LEVEIL <thomasleveil@gmail.com> Thomas LÉVEIL <thomasleveil@users.noreply.github.com>
<oi@truffles.me.uk> <timruffles@googlemail.com>
<Vincent.Bernat@exoscale.ch> <bernat@luffy.cx>

AUTHORS: 744 lines changed
@@ -1,773 +1,45 @@
# This file lists all individuals having contributed content to the repository.
# For how it is generated, see `hack/generate-authors.sh`.

Aanand Prasad <aanand.prasad@gmail.com>
Aaron Feng <aaron.feng@gmail.com>
Aaron Huslage <huslage@gmail.com>
Abel Muiño <amuino@gmail.com>
Abhinav Ajgaonkar <abhinav316@gmail.com>
Abin Shahab <ashahab@altiscale.com>
Adam Miller <admiller@redhat.com>
Adam Singer <financeCoding@gmail.com>
Aditya <aditya@netroy.in>
Adrian Mouat <adrian.mouat@gmail.com>
Adrien Folie <folie.adrien@gmail.com>
Ahmed Kamal <email.ahmedkamal@googlemail.com>
Ahmet Alp Balkan <ahmetb@microsoft.com>
Aidan Hobson Sayers <aidanhs@cantab.net>
AJ Bowen <aj@gandi.net>
Al Tobey <al@ooyala.com>
alambike <alambike@gmail.com>
Alan Thompson <cloojure@gmail.com>
Albert Callarisa <shark234@gmail.com>
Albert Zhang <zhgwenming@gmail.com>
Aleksa Sarai <cyphar@cyphar.com>
Aleksandrs Fadins <aleks@s-ko.net>
Alex Gaynor <alex.gaynor@gmail.com>
Alex Warhawk <ax.warhawk@gmail.com>
Alexander Boyd <alex@opengroove.org>
Alexander Larsson <alexl@redhat.com>
Alexander Morozov <lk4d4@docker.com>
Alexander Shopov <ash@kambanaria.org>
Alexandr Morozov <lk4d4@docker.com>
Alexey Kotlyarov <alexey@infoxchange.net.au>
Alexey Shamrin <shamrin@gmail.com>
Alexis THOMAS <fr.alexisthomas@gmail.com>
almoehi <almoehi@users.noreply.github.com>
amangoel <amangoel@gmail.com>
Amit Bakshi <ambakshi@gmail.com>
Anand Patil <anand.prabhakar.patil@gmail.com>
AnandkumarPatel <anandkumarpatel@gmail.com>
Andre Dublin <81dublin@gmail.com>
Andrea Luzzardi <aluzzardi@gmail.com>
Andrea Turli <andrea.turli@gmail.com>
Andreas Köhler <andi5.py@gmx.net>
Andreas Savvides <andreas@editd.com>
Andreas Tiefenthaler <at@an-ti.eu>
Andrew C. Bodine <acbodine@us.ibm.com>
Andrew Duckworth <grillopress@gmail.com>
Andrew France <andrew@avito.co.uk>
Andrew Macgregor <andrew.macgregor@agworld.com.au>
Andrew Munsell <andrew@wizardapps.net>
Andrew Weiss <andrew.weiss@outlook.com>
Andrew Williams <williams.andrew@gmail.com>
Andrews Medina <andrewsmedina@gmail.com>
Andrey Petrov <andrey.petrov@shazow.net>
Andrey Stolbovsky <andrey.stolbovsky@gmail.com>
Andy Chambers <anchambers@paypal.com>
andy diller <dillera@gmail.com>
Andy Goldstein <agoldste@redhat.com>
Andy Kipp <andy@rstudio.com>
Andy Rothfusz <github@developersupport.net>
Andy Rothfusz <github@metaliveblog.com>
Andy Smith <github@anarkystic.com>
Andy Wilson <wilson.andrew.j+github@gmail.com>
Ankush Agarwal <ankushagarwal11@gmail.com>
Anthony Baire <Anthony.Baire@irisa.fr>
Anthony Bishopric <git@anthonybishopric.com>
Anton Löfgren <anton.lofgren@gmail.com>
Anton Nikitin <anton.k.nikitin@gmail.com>
Antony Messerli <amesserl@rackspace.com>
apocas <petermdias@gmail.com>
ArikaChen <eaglesora@gmail.com>
Arnaud Porterie <arnaud.porterie@docker.com>
Arthur Gautier <baloo@gandi.net>
Asbjørn Enge <asbjorn@hanafjedle.net>
averagehuman <averagehuman@users.noreply.github.com>
Avi Miller <avi.miller@oracle.com>
Barnaby Gray <barnaby@pickle.me.uk>
Barry Allard <barry.allard@gmail.com>
Bartłomiej Piotrowski <b@bpiotrowski.pl>
bdevloed <boris.de.vloed@gmail.com>
Ben Firshman <ben@firshman.co.uk>
Ben Sargent <ben@brokendigits.com>
Ben Toews <mastahyeti@gmail.com>
Ben Wiklund <ben@daisyowl.com>
Benjamin Atkin <ben@benatkin.com>
Benoit Chesneau <bchesneau@gmail.com>
Bernerd Schaefer <bj.schaefer@gmail.com>
Bert Goethals <bert@bertg.be>
Bhiraj Butala <abhiraj.butala@gmail.com>
bin liu <liubin0329@users.noreply.github.com>
Blake Geno <blakegeno@gmail.com>
Bouke Haarsma <bouke@webatoom.nl>
Boyd Hemphill <boyd@feedmagnet.com>
Brandon Liu <bdon@bdon.org>
Brandon Philips <brandon@ifup.org>
Brandon Rhodes <brandon@rhodesmill.org>
Brett Kochendorfer <brett.kochendorfer@gmail.com>
Brian (bex) Exelbierd <bexelbie@redhat.com>
Brian Dorsey <brian@dorseys.org>
Brian Flad <bflad417@gmail.com>
Brian Goff <cpuguy83@gmail.com>
Brian McCallister <brianm@skife.org>
Brian Olsen <brian@maven-group.org>
Brian Shumate <brian@couchbase.com>
Brice Jaglin <bjaglin@teads.tv>
Briehan Lombaard <briehan.lombaard@gmail.com>
Bruno Bigras <bigras.bruno@gmail.com>
Bruno Binet <bruno.binet@gmail.com>
Bruno Renié <brutasse@gmail.com>
Bryan Bess <squarejaw@bsbess.com>
Bryan Matsuo <bryan.matsuo@gmail.com>
Bryan Murphy <bmurphy1976@gmail.com>
Burke Libbey <burke@libbey.me>
Byung Kang <byung.kang.ctr@amrdec.army.mil>
Caleb Spare <cespare@gmail.com>
Calen Pennington <cale@edx.org>
Cameron Boehmer <cameron.boehmer@gmail.com>
Carl X. Su <bcbcarl@gmail.com>
Charles Hooper <charles.hooper@dotcloud.com>
Charles Lindsay <chaz@chazomatic.us>
Charles Merriam <charles.merriam@gmail.com>
Charlie Lewis <charliel@lab41.org>
Chen Chao <cc272309126@gmail.com>
Chewey <prosto-chewey@users.noreply.github.com>
Chia-liang Kao <clkao@clkao.org>
Chris Alfonso <calfonso@redhat.com>
Chris Armstrong <chris@opdemand.com>
Chris Snow <chsnow123@gmail.com>
Chris St. Pierre <chris.a.st.pierre@gmail.com>
chrismckinnel <chris.mckinnel@tangentlabs.co.uk>
Christian Berendt <berendt@b1-systems.de>
Christian Stefanescu <st.chris@gmail.com>
ChristoperBiscardi <biscarch@sketcht.com>
Christophe Troestler <christophe.Troestler@umons.ac.be>
Christopher Currie <codemonkey+github@gmail.com>
Christopher Latham <sudosurootdev@gmail.com>
Christopher Rigor <crigor@gmail.com>
Chun Chen <chenchun.feed@gmail.com>
Ciro S. Costa <ciro.costa@usp.br>
Clayton Coleman <ccoleman@redhat.com>
Colin Dunklau <colin.dunklau@gmail.com>
Colin Rice <colin@daedrum.net>
Colin Walters <walters@verbum.org>
Cory Forsyth <cory.forsyth@gmail.com>
cressie176 <github@stephen-cresswell.net>
Cristian Staretu <cristian.staretu@gmail.com>
Cruceru Calin-Cristian <crucerucalincristian@gmail.com>
Daan van Berkel <daan.v.berkel.1980@gmail.com>
Daehyeok Mun <daehyeok@gmail.com>
Dafydd Crosby <dtcrsby@gmail.com>
Dan Buch <d.buch@modcloth.com>
Dan Cotora <dan@bluevision.ro>
Dan Griffin <dgriffin@peer1.com>
Dan Hirsch <thequux@upstandinghackers.com>
Dan Keder <dan.keder@gmail.com>
Dan McPherson <dmcphers@redhat.com>
Dan Stine <sw@stinemail.com>
Dan Walsh <dwalsh@redhat.com>
Dan Williams <me@deedubs.com>
Daniel Exner <dex@dragonslave.de>
Daniel Farrell <dfarrell@redhat.com>
Daniel Garcia <daniel@danielgarcia.info>
Daniel Gasienica <daniel@gasienica.ch>
Daniel Menet <membership@sontags.ch>
Daniel Mizyrycki <daniel.mizyrycki@dotcloud.com>
Daniel Norberg <dano@spotify.com>
Daniel Nordberg <dnordberg@gmail.com>
Daniel Robinson <gottagetmac@gmail.com>
Daniel Von Fange <daniel@leancoder.com>
Daniel YC Lin <dlin.tw@gmail.com>
Daniel, Dao Quang Minh <dqminh89@gmail.com>
Danny Berger <dpb587@gmail.com>
Danny Yates <danny@codeaholics.org>
Darren Coxall <darren@darrencoxall.com>
Darren Shepherd <darren.s.shepherd@gmail.com>
David Anderson <dave@natulte.net>
David Calavera <david.calavera@gmail.com>
David Corking <dmc-source@dcorking.com>
David Gageot <david@gageot.net>
David Gebler <davidgebler@gmail.com>
David Mat <david@davidmat.com>
David Mcanulty <github@hellspark.com>
David Pelaez <pelaez89@gmail.com>
David Röthlisberger <david@rothlis.net>
David Sissitka <me@dsissitka.com>
Davide Ceretti <davide.ceretti@hogarthww.com>
Dawn Chen <dawnchen@google.com>
decadent <decadent@users.noreply.github.com>
Deni Bertovic <deni@kset.org>
Derek <crq@kernel.org>
Derek <crquan@gmail.com>
Derek McGowan <derek@mcgstyle.net>
Deric Crago <deric.crago@gmail.com>
Deshi Xiao <dxiao@redhat.com>
Dinesh Subhraveti <dineshs@altiscale.com>
Djibril Koné <kone.djibril@gmail.com>
dkumor <daniel@dkumor.com>
Dmitry Demeshchuk <demeshchuk@gmail.com>
Dmitry V. Krivenok <krivenok.dmitry@gmail.com>
Dolph Mathews <dolph.mathews@gmail.com>
Dominik Honnef <dominik@honnef.co>
Don Kjer <don.kjer@gmail.com>
Don Spaulding <donspauldingii@gmail.com>
Doug Davis <dug@us.ibm.com>
doug tangren <d.tangren@gmail.com>
Dr Nic Williams <drnicwilliams@gmail.com>
dragon788 <dragon788@users.noreply.github.com>
Dražen Lučanin <kermit666@gmail.com>
Dustin Sallings <dustin@spy.net>
Edmund Wagner <edmund-wagner@web.de>
Eiichi Tsukata <devel@etsukata.com>
Eike Herzbach <eike@herzbach.net>
Eivind Uggedal <eivind@uggedal.com>
Elias Probst <mail@eliasprobst.eu>
Emil Hernvall <emil@quench.at>
Emily Maier <emily@emilymaier.net>
Emily Rose <emily@contactvibe.com>
Eric Hanchrow <ehanchrow@ine.com>
Eric Lee <thenorthsecedes@gmail.com>
Eric Myhre <hash@exultant.us>
Eric Paris <eparis@redhat.com>
Eric Windisch <ewindisch@docker.com>
Erik Dubbelboer <erik@dubbelboer.com>
Erik Hollensbe <github@hollensbe.org>
Erik Inge Bolsø <knan@redpill-linpro.com>
Erik Kristensen <erik@erikkristensen.com>
Erno Hopearuoho <erno.hopearuoho@gmail.com>
Erwin van der Koogh <info@erronis.nl>
Eugene Yakubovich <eugene.yakubovich@coreos.com>
eugenkrizo <eugen.krizo@gmail.com>
Evan Carmi <carmi@users.noreply.github.com>
Evan Hazlett <ejhazlett@gmail.com>
Evan Krall <krall@yelp.com>
Evan Phoenix <evan@fallingsnow.net>
Evan Wies <evan@neomantra.net>
Eystein Måløy Stenberg <eystein.maloy.stenberg@cfengine.com>
ezbercih <cem.ezberci@gmail.com>
Fabio Falci <fabiofalci@gmail.com>
Fabio Rehm <fgrehm@gmail.com>
Fabrizio Regini <freegenie@gmail.com>
Faiz Khan <faizkhan00@gmail.com>
Fareed Dudhia <fareeddudhia@googlemail.com>
Felix Rabe <felix@rabe.io>
Fernando <fermayo@gmail.com>
Filipe Brandenburger <filbranden@google.com>
Flavio Castelli <fcastelli@suse.com>
FLGMwt <ryan.stelly@live.com>
Francisco Carriedo <fcarriedo@gmail.com>
Francisco Souza <f@souza.cc>
Frank Macreery <frank@macreery.com>
Frank Rosquin <frank.rosquin+github@gmail.com>
Fred Lifton <fred.lifton@docker.com>
Frederick F. Kautz IV <fkautz@alumni.cmu.edu>
Frederik Loeffert <frederik@zitrusmedia.de>
Freek Kalter <freek@kalteronline.org>
Gabe Rosenhouse <gabe@missionst.com>
Gabor Nagy <mail@aigeruth.hu>
Gabriel Monroy <gabriel@opdemand.com>
Galen Sampson <galen.sampson@gmail.com>
Gareth Rushgrove <gareth@morethanseven.net>
gautam, prasanna <prasannagautam@gmail.com>
Geoffrey Bachelet <grosfrais@gmail.com>
George Xie <georgexsh@gmail.com>
Gereon Frey <gereon.frey@dynport.de>
German DZ <germ@ndz.com.ar>
Gert van Valkenhoef <g.h.m.van.valkenhoef@rug.nl>
Giuseppe Mazzotta <gdm85@users.noreply.github.com>
Gleb Fotengauer-Malinovskiy <glebfm@altlinux.org>
Gleb M Borisov <borisov.gleb@gmail.com>
Glyn Normington <gnormington@gopivotal.com>
Goffert van Gool <goffert@phusion.nl>
golubbe <ben.golub@dotcloud.com>
Graydon Hoare <graydon@pobox.com>
Greg Thornton <xdissent@me.com>
grunny <mwgrunny@gmail.com>
Guilherme Salgado <gsalgado@gmail.com>
Guillaume Dufour <gdufour.prestataire@voyages-sncf.com>
Guillaume J. Charmes <guillaume.charmes@docker.com>
Gurjeet Singh <gurjeet@singh.im>
Guruprasad <lgp171188@gmail.com>
Hans Rødtang <hansrodtang@gmail.com>
Harald Albers <github@albersweb.de>
Harley Laue <losinggeneration@gmail.com>
Hector Castro <hectcastro@gmail.com>
Henning Sprang <henning.sprang@gmail.com>
Hobofan <goisser94@gmail.com>
Hollie Teal <hollie@docker.com>
Hu Keping <hukeping@huawei.com>
Hu Tao <hutao@cn.fujitsu.com>
Huayi Zhang <irachex@gmail.com>
Hugo Duncan <hugo@hugoduncan.org>
Guillaume J. Charmes <guillaume.charmes@dotcloud.com>
Hunter Blanks <hunter@twilio.com>
Huu Nguyen <huu@prismskylabs.com>
hyeongkyu.lee <hyeongkyu.lee@navercorp.com>
Ian Babrou <ibobrik@gmail.com>
Ian Bishop <ianbishop@pace7.com>
Ian Bull <irbull@gmail.com>
Ian Main <imain@redhat.com>
Ian Truslove <ian.truslove@gmail.com>
Igor Dolzhikov <bluesriverz@gmail.com>
ILYA Khlopotov <ilya.khlopotov@gmail.com>
inglesp <peter.inglesby@gmail.com>
Isaac Dupree <antispam@idupree.com>
Isabel Jimenez <contact.isabeljimenez@gmail.com>
Isao Jonas <isao.jonas@gmail.com>
Ivan Fraixedes <ifcdev@gmail.com>
Jack Danger Canty <jackdanger@squareup.com>
Jacob Atzen <jacob@jacobatzen.dk>
Jacob Edelman <edelman.jd@gmail.com>
Jake Moshenko <jake@devtable.com>
jakedt <jake@devtable.com>
James Allen <jamesallen0108@gmail.com>
James Carr <james.r.carr@gmail.com>
James DeFelice <james.defelice@ishisystems.com>
James Harrison Fisher <jameshfisher@gmail.com>
James Kyle <james@jameskyle.org>
James Mills <prologic@shortcircuit.net.au>
James Turnbull <james@lovedthanlost.net>
Jan Keromnes <janx@linux.com>
Jan Pazdziora <jpazdziora@redhat.com>
Jan Toebes <jan@toebes.info>
Jaroslaw Zabiello <hipertracker@gmail.com>
jaseg <jaseg@jaseg.net>
Jason Giedymin <jasong@apache.org>
Jason Hall <imjasonh@gmail.com>
Jason Livesay <ithkuil@gmail.com>
Jason McVetta <jason.mcvetta@gmail.com>
Jason Plum <jplum@devonit.com>
Jean-Baptiste Barth <jeanbaptiste.barth@gmail.com>
Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
Jean-Paul Calderone <exarkun@twistedmatrix.com>
Jean-Tiare Le Bigot <jt@yadutaf.fr>
Jeff Anderson <jeff@docker.com>
Jeff Lindsay <progrium@gmail.com>
Jeff Welch <whatthejeff@gmail.com>
Jeffrey Bolle <jeffreybolle@gmail.com>
Jeremy Grosser <jeremy@synack.me>
Jesse Dubay <jesse@thefortytwo.net>
Jessica Frazelle <jess@docker.com>
Jezeniel Zapanta <jpzapanta22@gmail.com>
Jilles Oldenbeuving <ojilles@gmail.com>
Jim Alateras <jima@comware.com.au>
Jim Perrin <jperrin@centos.org>
Jimmy Cuadra <jimmy@jimmycuadra.com>
Jiří Župka <jzupka@redhat.com>
Joe Beda <joe.github@bedafamily.com>
Joe Ferguson <joe@infosiftr.com>
Joe Shaw <joe@joeshaw.org>
Joe Van Dyk <joe@tanga.com>
Joel Friedly <joelfriedly@gmail.com>
Joel Handwell <joelhandwell@gmail.com>
Joffrey F <joffrey@docker.com>
Johan Euphrosine <proppy@google.com>
Johan Rydberg <johan.rydberg@gmail.com>
Johannes 'fish' Ziemke <github@freigeist.org>
Joffrey F <joffrey@dotcloud.com>
John Costa <john.costa@gmail.com>
John Feminella <jxf@jxf.me>
John Gardiner Myers <jgmyers@proofpoint.com>
John Gossman <johngos@microsoft.com>
John OBrien III <jobrieniii@yahoo.com>
John Warwick <jwarwick@gmail.com>
Jon Wedaman <jweede@gmail.com>
Jonas Pfenniger <jonas@pfenniger.name>
Jonathan A. Sternberg <jonathansternberg@gmail.com>
Jonathan Boulle <jonathanboulle@gmail.com>
Jonathan Camp <jonathan@irondojo.com>
Jonathan McCrohan <jmccrohan@gmail.com>
Jonathan Mueller <j.mueller@apoveda.ch>
Jonathan Pares <jonathanpa@users.noreply.github.com>
Jonathan Rudenberg <jonathan@titanous.com>
Joost Cassee <joost@cassee.net>
Jordan Arentsen <blissdev@gmail.com>
Jordan Sissel <jls@semicomplete.com>
Joseph Anthony Pasquale Holsten <joseph@josephholsten.com>
Joseph Hager <ajhager@gmail.com>
Josh <jokajak@gmail.com>
Josh Hawn <josh.hawn@docker.com>
Josh Poimboeuf <jpoimboe@redhat.com>
Josiah Kiehl <jkiehl@riotgames.com>
JP <jpellerin@leapfrogonline.com>
Julian Taylor <jtaylor.debian@googlemail.com>
Julien Barbier <write0@gmail.com>
Julien Bordellier <julienbordellier@gmail.com>
Julien Dubois <julien.dubois@gmail.com>
Justin Force <justin.force@gmail.com>
Justin Plock <jplock@users.noreply.github.com>
Justin Simonelis <justin.p.simonelis@gmail.com>
Jyrki Puttonen <jyrkiput@gmail.com>
Jérôme Petazzoni <jerome.petazzoni@dotcloud.com>
Jörg Thalheim <joerg@higgsboson.tk>
Kamil Domanski <kamil@domanski.co>
Karan Lyons <karan@karanlyons.com>
Karl Grzeszczak <karlgrz@gmail.com>
Kato Kazuyoshi <kato.kazuyoshi@gmail.com>
Kawsar Saiyeed <kawsar.saiyeed@projiris.com>
Keli Hu <dev@keli.hu>
Ken Cochrane <kencochrane@gmail.com>
Ken ICHIKAWA <ichikawa.ken@jp.fujitsu.com>
Kevin "qwazerty" Houdebert <kevin.houdebert@gmail.com>
Kevin Clark <kevin.clark@gmail.com>
Kevin J. Lynagh <kevin@keminglabs.com>
Kevin Menard <kevin@nirvdrum.com>
Kevin Wallace <kevin@pentabarf.net>
Keyvan Fatehi <keyvanfatehi@gmail.com>
kies <lleelm@gmail.com>
Kim BKC Carlbacker <kim.carlbacker@gmail.com>
Kimbro Staken <kstaken@kstaken.com>
Kiran Gangadharan <kiran.daredevil@gmail.com>
knappe <tyler.knappe@gmail.com>
Kohei Tsuruta <coheyxyz@gmail.com>
Konrad Kleine <konrad.wilhelm.kleine@gmail.com>
Konstantin Pelykh <kpelykh@zettaset.com>
Krasimir Georgiev <support@vip-consult.co.uk>
krrg <krrgithub@gmail.com>
Kyle Conroy <kyle.j.conroy@gmail.com>
kyu <leehk1227@gmail.com>
Lachlan Coote <lcoote@vmware.com>
Lajos Papp <lajos.papp@sequenceiq.com>
Lakshan Perera <lakshan@laktek.com>
lalyos <lalyos@yahoo.com>
Lance Chen <cyen0312@gmail.com>
Lars R. Damerow <lars@pixar.com>
Laurie Voss <github@seldo.com>
leeplay <hyeongkyu.lee@navercorp.com>
Lei Jitang <leijitang@huawei.com>
Len Weincier <len@cloudafrica.net>
Leszek Kowalski <github@leszekkowalski.pl>
Levi Gross <levi@levigross.com>
Lewis Marshall <lewis@lmars.net>
Lewis Peckover <lew+github@lew.io>
Liang-Chi Hsieh <viirya@gmail.com>
limsy <seongyeol37@gmail.com>
Lokesh Mandvekar <lsm5@fedoraproject.org>
Lorenz Leutgeb <lorenz.leutgeb@gmail.com>
Louis Opter <kalessin@kalessin.fr>
lukaspustina <lukas.pustina@centerdevice.com>
lukemarsden <luke@digital-crocus.com>
Lénaïc Huard <lhuard@amadeus.com>
Madhu Venugopal <madhu@socketplane.io>
Mahesh Tiyyagura <tmahesh@gmail.com>
Malte Janduda <mail@janduda.net>
Manfred Zabarauskas <manfredas@zabarauskas.com>
Manuel Meurer <manuel@krautcomputing.com>
Manuel Woelker <github@manuel.woelker.org>
Marc Abramowitz <marc@marc-abramowitz.com>
Marc Kuo <kuomarc2@gmail.com>
Marc Tamsky <mtamsky@gmail.com>
Marco Hennings <marco.hennings@freiheit.com>
Marcus Farkas <toothlessgear@finitebox.com>
Marcus Linke <marcus.linke@gmx.de>
Marcus Ramberg <marcus@nordaaker.com>
Marek Goldmann <marek.goldmann@gmail.com>
Marianna <mtesselh@gmail.com>
Marius Voila <marius.voila@gmail.com>
Mark Allen <mrallen1@yahoo.com>
Mark McGranaghan <mmcgrana@gmail.com>
Marko Mikulicic <mmikulicic@gmail.com>
Marko Tibold <marko@tibold.nl>
Markus Fix <lispmeister@gmail.com>
Martijn van Oosterhout <kleptog@svana.org>
Martin Honermeyer <maze@strahlungsfrei.de>
Martin Redmond <martin@tinychat.com>
Mary Anthony <moxieandmore@gmail.com>
Mason Malone <mason.malone@gmail.com>
Mateusz Sulima <sulima.mateusz@gmail.com>
Mathias Monnerville <mathias@monnerville.com>
Mathieu Le Marec - Pasquet <kiorky@cryptelium.net>
Matt Apperson <me@mattapperson.com>
Matt Bachmann <bachmann.matt@gmail.com>
Matt Haggard <haggardii@gmail.com>
Matthew Heon <mheon@redhat.com>
Matthew Mueller <mattmuelle@gmail.com>
Matthew Riley <mattdr@google.com>
Matthias Klumpp <matthias@tenstral.net>
Matthias Kühnle <git.nivoc@neverbox.com>
mattymo <raytrac3r@gmail.com>
mattyw <mattyw@me.com>
Max Shytikov <mshytikov@gmail.com>
Maxim Treskin <zerthurd@gmail.com>
Maxime Petazzoni <max@signalfuse.com>
meejah <meejah@meejah.ca>
Mengdi Gao <usrgdd@gmail.com>
Mert Yazıcıoğlu <merty@users.noreply.github.com>
Michael Brown <michael@netdirect.ca>
Michael Crosby <michael@docker.com>
Michael Gorsuch <gorsuch@github.com>
Michael Hudson-Doyle <michael.hudson@linaro.org>
Michael Neale <michael.neale@gmail.com>
Michael Prokop <github@michael-prokop.at>
Michael Scharf <github@scharf.gr>
Michael Stapelberg <michael+gh@stapelberg.de>
Michael Steinert <mike.steinert@gmail.com>
Michael Thies <michaelthies78@gmail.com>
Michal Jemala <michal.jemala@gmail.com>
Michal Minar <miminar@redhat.com>
Michaël Pailloncy <mpapo.dev@gmail.com>
Michiel@unhosted <michiel@unhosted.org>
Miguel Angel Fernández <elmendalerenda@gmail.com>
Mike Chelen <michael.chelen@gmail.com>
Mike Gaffney <mike@uberu.com>
Mike MacCana <mike.maccana@gmail.com>
Mike Naberezny <mike@naberezny.com>
Mike Snitzer <snitzer@redhat.com>
Mikhail Sobolev <mss@mawhrin.net>
Mohit Soni <mosoni@ebay.com>
Morgante Pell <morgante.pell@morgante.net>
Morten Siebuhr <sbhr@sbhr.dk>
Mrunal Patel <mrunalp@gmail.com>
mschurenko <matt.schurenko@gmail.com>
Mustafa Akın <mustafa91@gmail.com>
Médi-Rémi Hashim <medimatrix@users.noreply.github.com>
Nan Monnand Deng <monnand@gmail.com>
Naoki Orii <norii@cs.cmu.edu>
Nate Eagleson <nate@nateeag.com>
Nate Jones <nate@endot.org>
Nathan Hsieh <hsieh.nathan@gmail.com>
Nathan Kleyn <nathan@nathankleyn.com>
Nathan LeClaire <nathan.leclaire@docker.com>
Neal McBurnett <neal@mcburnett.org>
Nelson Chen <crazysim@gmail.com>
Niall O'Higgins <niallo@unworkable.org>
Nicholas E. Rabenau <nerab@gmx.at>
Nick Payne <nick@kurai.co.uk>
Nick Stenning <nick.stenning@digital.cabinet-office.gov.uk>
Nick Stinemates <nick@stinemates.org>
Nicolas De loof <nicolas.deloof@gmail.com>
Nicolas Dudebout <nicolas.dudebout@gatech.edu>
Nicolas Goy <kuon@goyman.com>
Nicolas Kaiser <nikai@nikai.net>
NikolaMandic <mn080202@gmail.com>
noducks <onemannoducks@gmail.com>
Nolan Darilek <nolan@thewordnerd.info>
nzwsch <hi@nzwsch.com>
O.S. Tezer <ostezer@gmail.com>
OddBloke <daniel@daniel-watkins.co.uk>
odk- <github@odkurzacz.org>
Oguz Bilgic <fisyonet@gmail.com>
Oh Jinkyun <tintypemolly@gmail.com>
Ole Reifschneider <mail@ole-reifschneider.de>
Olivier Gambier <dmp42@users.noreply.github.com>
pandrew <letters@paulnotcom.se>
panticz <mail@konczalski.de>
Pascal Borreli <pascal@borreli.com>
Pascal Hartig <phartig@rdrei.net>
Patrick Hemmer <patrick.hemmer@gmail.com>
Patrick Stapleton <github@gdi2290.com>
pattichen <craftsbear@gmail.com>
Paul <paul9869@gmail.com>
Paul Annesley <paul@annesley.cc>
Paul Bowsher <pbowsher@globalpersonals.co.uk>
Paul Hammond <paul@paulhammond.org>
Paul Jimenez <pj@place.org>
Paul Lietar <paul@lietar.net>
Paul Morie <pmorie@gmail.com>
Paul Nasrat <pnasrat@gmail.com>
Paul Weaver <pauweave@cisco.com>
Pavlos Ratis <dastergon@gentoo.org>
Peter Bourgon <peter@bourgon.org>
Peter Braden <peterbraden@peterbraden.co.uk>
Peter Ericson <pdericson@gmail.com>
Peter Salvatore <peter@psftw.com>
Peter Waller <p@pwaller.net>
Phil <underscorephil@gmail.com>
Phil Estes <estesp@linux.vnet.ibm.com>
Phil Spitler <pspitler@gmail.com>
Philipp Weissensteiner <mail@philippweissensteiner.com>
Phillip Alexander <git@phillipalexander.io>
Piergiuliano Bossi <pgbossi@gmail.com>
Pierre <py@poujade.org>
Pierre Wacrenier <pierre.wacrenier@gmail.com>
Pierre-Alain RIVIERE <pariviere@ippon.fr>
Piotr Bogdan <ppbogdan@gmail.com>
pixelistik <pixelistik@users.noreply.github.com>
Porjo <porjo38@yahoo.com.au>
Prasanna Gautam <prasannagautam@gmail.com>
Przemek Hejman <przemyslaw.hejman@gmail.com>
pysqz <randomq@126.com>
Qiang Huang <h.huangqiang@huawei.com>
Quentin Brossard <qbrossard@gmail.com>
r0n22 <cameron.regan@gmail.com>
Rafal Jeczalik <rjeczalik@gmail.com>
Rafe Colton <rafael.colton@gmail.com>
Rajat Pandit <rp@rajatpandit.com>
Rajdeep Dua <dua_rajdeep@yahoo.com>
Ralph Bean <rbean@redhat.com>
Ramkumar Ramachandra <artagnon@gmail.com>
Ramon van Alteren <ramon@vanalteren.nl>
Recursive Madman <recursive.madman@gmx.de>
Remi Rampin <remirampin@gmail.com>
Renato Riccieri Santos Zannon <renato.riccieri@gmail.com>
rgstephens <greg@udon.org>
Rhys Hiltner <rhys@twitch.tv>
Richard Harvey <richard@squarecows.com>
Richard Metzler <richard@paadee.com>
Richo Healey <richo@psych0tik.net>
Rick Bradley <rick@users.noreply.github.com>
Rick van de Loo <rickvandeloo@gmail.com>
Robert Bachmann <rb@robertbachmann.at>
Robert Bittle <guywithnose@gmail.com>
Robert Obryk <robryk@gmail.com>
Roberto G. Hashioka <roberto.hashioka@docker.com>
Robin Speekenbrink <robin@kingsquare.nl>
robpc <rpcann@gmail.com>
Rodrigo Vaz <rodrigo.vaz@gmail.com>
Roel Van Nyen <roel.vannyen@gmail.com>
Roger Peppe <rogpeppe@gmail.com>
Rohit Jnagal <jnagal@google.com>
Roland Huß <roland@jolokia.org>
Roland Moriz <rmoriz@users.noreply.github.com>
Ron Smits <ron.smits@gmail.com>
Rovanion Luckey <rovanion.luckey@gmail.com>
Rudolph Gottesheim <r.gottesheim@loot.at>
Ryan Anderson <anderson.ryanc@gmail.com>
Ryan Aslett <github@mixologic.com>
Ryan Detzel <ryan.detzel@gmail.com>
Ryan Fowler <rwfowler@gmail.com>
Ryan O'Donnell <odonnellryanc@gmail.com>
Ryan Seto <ryanseto@yak.net>
Ryan Thomas <rthomas@atlassian.com>
Rémy Greinhofer <remy.greinhofer@livelovely.com>
Sam Alba <sam.alba@gmail.com>
Sam Bailey <cyprix@cyprix.com.au>
Sam J Sharpe <sam.sharpe@digital.cabinet-office.gov.uk>
Sam Reis <sreis@atlassian.com>
Sam Rijs <srijs@airpost.net>
Sami Wagiaalla <swagiaal@redhat.com>
Samuel Andaya <samuel@andaya.net>
Samuel PHAN <samuel-phan@users.noreply.github.com>
Satnam Singh <satnam@raintown.org>
satoru <satorulogic@gmail.com>
Satoshi Amemiya <satoshi_amemiya@voyagegroup.com>
Scott Bessler <scottbessler@gmail.com>
Scott Collier <emailscottcollier@gmail.com>
Scott Johnston <scott@docker.com>
Scott Stamp <scottstamp851@gmail.com>
Scott Walls <sawalls@umich.edu>
Sean Cronin <seancron@gmail.com>
Sean P. Kane <skane@newrelic.com>
Sebastiaan van Stijn <github@gone.nl>
Senthil Kumar Selvaraj <senthil.thecoder@gmail.com>
SeongJae Park <sj38.park@gmail.com>
Shane Canon <scanon@lbl.gov>
shaunol <shaunol@gmail.com>
Shawn Landden <shawn@churchofgit.com>
Shawn Siefkas <shawn.siefkas@meredith.com>
Shih-Yuan Lee <fourdollars@gmail.com>
Shishir Mahajan <shishir.mahajan@redhat.com>
shuai-z <zs.broccoli@gmail.com>
Silas Sewell <silas@sewell.org>
Simon Taranto <simon.taranto@gmail.com>
Sindhu S <sindhus@live.in>
Sjoerd Langkemper <sjoerd-github@linuxonly.nl>
Solomon Hykes <solomon@docker.com>
Song Gao <song@gao.io>
Soulou <leo@unbekandt.eu>
soulshake <amy@gandi.net>
Sridatta Thatipamala <sthatipamala@gmail.com>
Solomon Hykes <solomon@dotcloud.com>
Sridhar Ratnakumar <sridharr@activestate.com>
Srini Brahmaroutu <sbrahma@us.ibm.com>
Srini Brahmaroutu <srbrahma@us.ibm.com>
Steeve Morin <steeve.morin@gmail.com>
Stefan Praszalowicz <stefan@greplin.com>
Stephen Crosby <stevecrozz@gmail.com>
Steven Burgess <steven.a.burgess@hotmail.com>
Steven Merrill <steven.merrill@gmail.com>
Sven Dowideit <SvenDowideit@home.org.au>
Sylvain Bellemare <sylvain.bellemare@ezeep.com>
Sébastien <sebastien@yoozio.com>
Sébastien Luttringer <seblu@seblu.net>
Sébastien Stormacq <sebsto@users.noreply.github.com>
tang0th <tang0th@gmx.com>
Tatsuki Sugiura <sugi@nemui.org>
Tatsushi Inagaki <e29253@jp.ibm.com>
Ted M. Young <tedyoung@gmail.com>
Tehmasp Chaudhri <tehmasp@gmail.com>
Tejesh Mehta <tejesh.mehta@gmail.com>
Thatcher Peskens <thatcher@docker.com>
Thermionix <bond711@gmail.com>
Thijs Terlouw <thijsterlouw@gmail.com>
Thomas Bikeev <thomas.bikeev@mac.com>
Thomas Frössman <thomasf@jossystem.se>
Thomas Hansen <thomas.hansen@gmail.com>
Thomas LEVEIL <thomasleveil@gmail.com>
Thomas Orozco <thomas@orozco.fr>
Thomas Schroeter <thomas@cliqz.com>
Tianon Gravi <admwiggin@gmail.com>
Tibor Vass <teabee89@gmail.com>
Tim Bosse <taim@bosboot.org>
Tim Hockin <thockin@google.com>
Tim Ruffles <oi@truffles.me.uk>
Tim Smith <timbot@google.com>
Thatcher Peskens <thatcher@dotcloud.com>
Tim Terhorst <mynamewastaken+git@gmail.com>
Timothy Hobbs <timothyhobbs@seznam.cz>
tjwebb123 <tjwebb123@users.noreply.github.com>
tobe <tobegit3hub@gmail.com>
Tobias Bieniek <Tobias.Bieniek@gmx.de>
Tobias Gesellchen <tobias@gesellix.de>
Tobias Schmidt <ts@soundcloud.com>
Tobias Schwab <tobias.schwab@dynport.de>
Todd Lunter <tlunter@gmail.com>
Tom Fotherby <tom+github@peopleperhour.com>
Tom Hulihan <hulihan.tom159@gmail.com>
Tom Maaswinkel <tom.maaswinkel@12wiki.eu>
Tomas Tomecek <ttomecek@redhat.com>
Tomasz Lipinski <tlipinski@users.noreply.github.com>
Tomasz Nurkiewicz <nurkiewicz@gmail.com>
Tommaso Visconti <tommaso.visconti@gmail.com>
Tonis Tiigi <tonistiigi@gmail.com>
Tony Daws <tony@daws.ca>
Torstein Husebø <torstein@huseboe.net>
tpng <benny.tpng@gmail.com>
Travis Cline <travis.cline@gmail.com>
Trent Ogren <tedwardo2@gmail.com>
Tyler Brock <tyler.brock@gmail.com>
Tzu-Jung Lee <roylee17@gmail.com>
Ulysse Carion <ulyssecarion@gmail.com>
unknown <sebastiaan@ws-key-sebas3.dpi1.dpi>
Vaidas Jablonskis <jablonskis@gmail.com>
vgeta <gopikannan.venugopalsamy@gmail.com>
Victor Coisne <victor.coisne@dotcloud.com>
Victor Lyuboslavsky <victor@victoreda.com>
Victor Marmol <vmarmol@google.com>
Victor Vieux <victor.vieux@docker.com>
Viktor Vojnovski <viktor.vojnovski@amadeus.com>
Vincent Batts <vbatts@redhat.com>
Vincent Bernat <bernat@luffy.cx>
Vincent Bernat <Vincent.Bernat@exoscale.ch>
Vincent Giersch <vincent.giersch@ovh.net>
Vincent Mayers <vincent.mayers@inbloom.org>
Vincent Woo <me@vincentwoo.com>
Vinod Kulkarni <vinod.kulkarni@gmail.com>
Vishal Doshi <vishal.doshi@gmail.com>
Vishnu Kannan <vishnuk@google.com>
Vitor Monteiro <vmrmonteiro@gmail.com>
Troy Howard <thoward37@gmail.com>
unclejack <unclejacksons@gmail.com>
Victor Vieux <victor.vieux@dotcloud.com>
Vivek Agarwal <me@vivek.im>
Vivek Dasgupta <vdasgupt@redhat.com>
Vivek Goyal <vgoyal@redhat.com>
Vladimir Bulyga <xx@ccxx.cc>
Vladimir Kirillov <proger@wilab.org.ua>
Vladimir Rutsky <altsysrq@gmail.com>
Vojtech Vitek (V-Teq) <vvitek@redhat.com>
waitingkuo <waitingkuo0527@gmail.com>
Walter Leibbrandt <github@wrl.co.za>
Walter Stanish <walter@pratyeka.org>
Ward Vandewege <ward@jhvc.com>
WarheadsSE <max@warheads.net>
Wayne Chang <wayne@neverfear.org>
Wes Morgan <cap10morgan@gmail.com>
Will Dietz <w@wdtz.org>
Will Rouesnel <w.rouesnel@gmail.com>
Will Weaver <monkey@buildingbananas.com>
William Delanoue <william.delanoue@gmail.com>
William Henry <whenry@redhat.com>
William Riancho <wr.wllm@gmail.com>
William Thurston <thurstw@amazon.com>
Xiuming Chen <cc@cxm.cc>
xuzhaokui <cynicholas@gmail.com>
Yang Bai <hamo.by@gmail.com>
Yasunori Mahata <nori@mahata.net>
Yohei Ueda <yohei@jp.ibm.com>
Yurii Rashkovskii <yrashk@gmail.com>
Zac Dover <zdover@redhat.com>
Zach Borboa <zachborboa@gmail.com>
Zain Memon <zain@inzain.net>
Zaiste! <oh@zaiste.net>
Zane DeGraffenried <zane.deg@gmail.com>
Zilin Du <zilin.du@gmail.com>
zimbatm <zimbatm@zimbatm.com>
Zoltan Tombol <zoltan.tombol@gmail.com>
zqh <zqhxuyuan@gmail.com>
Álex González <agonzalezro@gmail.com>
Álvaro Lázaro <alvaro.lazaro.g@gmail.com>
尹吉峰 <jifeng.yin@gmail.com>

CHANGELOG.md: 1702 lines changed (file diff suppressed because it is too large)
CONTRIBUTING.md: 422 lines changed
@@ -1,387 +1,93 @@
# Contributing to Docker

Want to hack on Docker? Awesome! We have a contributor's guide that explains
[setting up a Docker development environment and the contribution
process](https://docs.docker.com/project/who-written-for/).
Want to hack on Docker? Awesome! There are instructions to get you
started on the website: http://docker.io/gettingstarted.html

They are probably not perfect, please let us know if anything feels
wrong or incomplete.

This page contains information about reporting issues as well as some tips and
guidelines useful to experienced open source contributors. Finally, make sure
you read our [community guidelines](#docker-community-guidelines) before you
start participating.
## Contribution guidelines

## Topics
### Pull requests are always welcome

* [Reporting Security Issues](#reporting-security-issues)
* [Design and Cleanup Proposals](#design-and-cleanup-proposals)
* [Reporting Issues](#reporting-other-issues)
* [Quick Contribution Tips and Guidelines](#quick-contribution-tips-and-guidelines)
* [Community Guidelines](#docker-community-guidelines)
We are always thrilled to receive pull requests, and do our best to
process them as fast as possible. Not sure if that typo is worth a pull
request? Do it! We will appreciate it.

## Reporting security issues
If your pull request is not accepted on the first try, don't be
discouraged! If there's a problem with the implementation, hopefully you
received feedback on what to improve.

The Docker maintainers take security seriously. If you discover a security
issue, please bring it to their attention right away!
We're trying very hard to keep Docker lean and focused. We don't want it
to do everything for everybody. This means that we might decide against
incorporating a new feature. However, there might be a way to implement
that feature *on top of* docker.

Please **DO NOT** file a public issue, instead send your report privately to
[security@docker.com](mailto:security@docker.com).
### Discuss your design on the mailing list

Security reports are greatly appreciated and we will publicly thank you for it.
We also like to send gifts; if you're into Docker schwag, make sure to let
us know. We currently do not offer a paid security bounty program, but are not
ruling it out in the future.
We recommend discussing your plans [on the mailing
list](https://groups.google.com/forum/?fromgroups#!forum/docker-club)
before starting to code - especially for more ambitious contributions.
This gives other contributors a chance to point you in the right
direction, give feedback on your design, and maybe point out if someone
else is working on the same thing.

### Create issues...

## Reporting other issues
Any significant improvement should be documented as [a github
issue](https://github.com/dotcloud/docker/issues) before anybody
starts working on it.

A great way to contribute to the project is to send a detailed report when you
encounter an issue. We always appreciate a well-written, thorough bug report,
and will thank you for it!

Check that [our issue database](https://github.com/docker/docker/issues)
doesn't already include that problem or suggestion before submitting an issue.
If you find a match, add a quick "+1" or "I have this problem too." Doing this
helps prioritize the most common problems and requests.

When reporting issues, please include your host OS (Ubuntu 12.04, Fedora 19,
etc.). Please include:

* The output of `uname -a`.
* The output of `docker version`.
* The output of `docker -D info`.

Please also include the steps required to reproduce the problem if possible and
applicable. This information will help us review and fix your issue faster.

**Issue Report Template**:

```
Description of problem:


`docker version`:


`docker info`:


`uname -a`:


Environment details (AWS, VirtualBox, physical, etc.):


How reproducible:


Steps to Reproduce:
1.
2.
3.


Actual Results:


Expected Results:


Additional info:


```

## Quick contribution tips and guidelines

This section gives the experienced contributor some tips and guidelines.

### Pull requests are always welcome

Not sure if that typo is worth a pull request? Found a bug and know how to fix
it? Do it! We will appreciate it. Any significant improvement should be
documented as [a GitHub issue](https://github.com/docker/docker/issues) before
anybody starts working on it.

We are always thrilled to receive pull requests. We do our best to process them
quickly. If your pull request is not accepted on the first try,
don't get discouraged! Our contributor's guide explains [the review process we
use for simple changes](https://docs.docker.com/project/make-a-contribution/).

### Design and cleanup proposals

You can propose new designs for existing Docker features. You can also design
entirely new features. We really appreciate contributors who want to refactor or
otherwise clean up our project. For information on making these types of
contributions, see [the advanced contribution
section](https://docs.docker.com/project/advanced-contributing/) in the
contributors guide.

We try hard to keep Docker lean and focused. Docker can't do everything for
everybody. This means that we might decide against incorporating a new feature.
However, there might be a way to implement that feature *on top of* Docker.

### Talking to other Docker users and contributors

<table class="tg">
  <col width="45%">
  <col width="65%">
  <tr>
    <td>Internet Relay Chat (IRC)</td>
    <td>
      <p>
        IRC is a direct line to our most knowledgeable Docker users; we have
        both the <code>#docker</code> and <code>#docker-dev</code> group on
        <strong>irc.freenode.net</strong>.
        IRC is a rich chat protocol but it can overwhelm new users. You can search
        <a href="https://botbot.me/freenode/docker/#" target="_blank">our chat archives</a>.
      </p>
      Read our <a href="https://docs.docker.com/project/get-help/#irc-quickstart" target="_blank">IRC quickstart guide</a> for an easy way to get started.
    </td>
  </tr>
  <tr>
    <td>Google Groups</td>
    <td>
      There are two groups.
      <a href="https://groups.google.com/forum/#!forum/docker-user" target="_blank">Docker-user</a>
      is for people using Docker containers.
      The <a href="https://groups.google.com/forum/#!forum/docker-dev" target="_blank">docker-dev</a>
      group is for contributors and other people contributing to the Docker
      project.
    </td>
  </tr>
  <tr>
    <td>Twitter</td>
    <td>
      You can follow <a href="https://twitter.com/docker/" target="_blank">Docker's Twitter feed</a>
      to get updates on our products. You can also tweet us questions or just
      share blogs or stories.
    </td>
  </tr>
  <tr>
    <td>Stack Overflow</td>
    <td>
      Stack Overflow has over 7000 Docker questions listed. We regularly
      monitor <a href="http://stackoverflow.com/search?tab=newest&q=docker" target="_blank">Docker questions</a>
      and so do many other knowledgeable Docker users.
    </td>
  </tr>
</table>
### ...but check for existing issues first!

Please take a moment to check that an issue doesn't already exist
documenting your bug report or improvement proposal. If it does, it
never hurts to add a quick "+1" or "I have this problem too". This will
help prioritize the most common problems and requests.

### Conventions

Fork the repository and make changes on your fork in a feature branch:
Fork the repo and make changes on your fork in a feature branch:

- If it's a bug fix branch, name it XXXX-something where XXXX is the number of
  the issue.
- If it's a feature branch, create an enhancement issue to announce
  your intentions, and name it XXXX-something where XXXX is the number of the
  issue.
- If it's a bugfix branch, name it XXX-something where XXX is the number of the
  issue.
- If it's a feature branch, create an enhancement issue to announce your
  intentions, and name it XXX-something where XXX is the number of the issue
  (see the sketch after this list).

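For illustration only, creating such a branch might look like the following; the issue number, branch name, and `upstream` remote are hypothetical, not taken from this repository:

```
git fetch upstream
git checkout -b 9876-fix-pull-timeout upstream/master
```
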
Submit unit tests for your changes. Go has a great test framework built in; use
it! Take a look at existing tests for inspiration. [Run the full test
suite](https://docs.docker.com/project/test-and-docs/) on your branch before
submitting a pull request.
Submit unit tests for your changes. Go has a great test framework built in; use
it! Take a look at existing tests for inspiration. Run the full test suite on
your branch before submitting a pull request.

Update the documentation when creating or modifying features. Test your
documentation changes for clarity, concision, and correctness, as well as a
clean documentation build. See our contributors guide for [our style
guide](https://docs.docker.com/project/doc-style) and instructions on [building
the documentation](https://docs.docker.com/project/test-and-docs/#build-and-test-the-documentation).
Make sure you include relevant updates or additions to documentation when
creating or modifying features.

Write clean code. Universally formatted code promotes ease of writing, reading,
and maintenance. Always run `gofmt -s -w file.go` on each changed file before
committing your changes. Most editors have plug-ins that do this automatically.
and maintenance. Always run `go fmt` before committing your changes. Most
editors have plugins that do this automatically, and there's also a git
pre-commit hook:

Pull request descriptions should be as clear as possible and include a reference
to all the issues that they address.
```
curl -o .git/hooks/pre-commit https://raw.github.com/edsrzf/gofmt-git-hook/master/fmt-check && chmod +x .git/hooks/pre-commit
```

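If you would rather not fetch a script over the network, a minimal hand-written hook in the same spirit might look like the sketch below. It is an assumption-laden illustration, not the project's official hook; it only checks staged `.go` files with the `gofmt -s` flags mentioned above:

```
#!/bin/sh
# .git/hooks/pre-commit (sketch): refuse to commit Go files that are not
# gofmt-clean. Filenames containing spaces are not handled here.
files=$(git diff --cached --name-only --diff-filter=ACM -- '*.go')
[ -z "$files" ] && exit 0
unformatted=$(gofmt -s -l $files)
if [ -n "$unformatted" ]; then
    echo "Run gofmt -s -w on:" $unformatted >&2
    exit 1
fi
```
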
Commit messages must start with a capitalized and short summary (max. 50 chars)
written in the imperative, followed by an optional, more detailed explanatory
text which is separated from the summary by an empty line.

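For illustration, a message in that shape; the summary and body here are invented, not taken from a real commit:

```
Fix pull timeout against slow registries

The client gave up after a fixed five-second deadline, which is too
short for large layers. Retry with exponential backoff instead.
```
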
Pull requests descriptions should be as clear as possible and include a
|
||||
reference to all the issues that they address.
|
||||
|
||||

Code review comments may be added to your pull request. Discuss, then make the
suggested modifications and push additional commits to your feature branch. Post
a comment after pushing. New commits show up in the pull request automatically,
but the reviewers are notified only when you comment.

Pull requests must be cleanly rebased on top of master without multiple branches
mixed into the PR.

**Git tip**: If your PR no longer merges cleanly, use `rebase master` in your
feature branch to update your pull request rather than `merge master`.
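
A minimal sketch of that tip, assuming your fork is the `origin` remote, the
main repository is configured as `upstream`, and reusing the hypothetical
branch name from above (all three names are assumptions):

```
git checkout 1234-fix-build-error
git fetch upstream
git rebase upstream/master
git push -f origin 1234-fix-build-error
```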

Before you make a pull request, squash your commits into logical units of work
using `git rebase -i` and `git push -f`. A logical unit of work is a consistent
set of patches that should be reviewed together: for example, upgrading the
version of a vendored dependency and taking advantage of its now available new
feature constitute two separate units of work. Implementing a new function and
calling it in another file constitute a single logical unit of work. The vast
majority of submissions should have a single commit, so if in doubt: squash
down to one.
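
For example, squashing the last three commits on your branch into one might
look like this (the commit count and branch name are hypothetical):

```
git rebase -i HEAD~3
# in the editor, keep "pick" on the first commit and mark the rest "squash"
git push -f origin 1234-fix-build-error
```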

After every commit, [make sure the test
suite passes](https://docs.docker.com/project/test-and-docs/). Include
documentation changes in the same pull request so that a revert would remove
all traces of the feature or fix.

Include an issue reference like `Closes #XXXX` or `Fixes #XXXX` in commits that
close an issue. Including references automatically closes the issue on a merge.

Please do not add yourself to the `AUTHORS` file, as it is regenerated regularly
from the Git history.

### Merge approval

Docker maintainers use LGTM (Looks Good To Me) in comments on the code review to
indicate acceptance.

A change requires LGTMs from an absolute majority of the maintainers of each
component affected. For example, if a change affects `docs/` and `registry/`, it
needs an absolute majority from the maintainers of `docs/` AND, separately, an
absolute majority of the maintainers of `registry/`.

For more details, see the [MAINTAINERS](MAINTAINERS) page.

### Sign your work

The sign-off is a simple line at the end of the explanation for the patch. Your
signature certifies that you wrote the patch or otherwise have the right to pass
it on as an open-source patch. The rules are pretty simple: if you can certify
the below (from [developercertificate.org](http://developercertificate.org/)):

```
Developer Certificate of Origin
Version 1.1

Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA

Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.

Developer's Certificate of Origin 1.1

By making a contribution to this project, I certify that:

(a) The contribution was created in whole or in part by me and I
    have the right to submit it under the open source license
    indicated in the file; or

(b) The contribution is based upon previous work that, to the best
    of my knowledge, is covered under an appropriate open source
    license and I have the right under that license to submit that
    work with modifications, whether created in whole or in part
    by me, under the same open source license (unless I am
    permitted to submit under a different license), as indicated
    in the file; or

(c) The contribution was provided directly to me by some other
    person who certified (a), (b) or (c) and I have not modified
    it.

(d) I understand and agree that this project and the contribution
    are public and that a record of the contribution (including all
    personal information I submit with it, including my sign-off) is
    maintained indefinitely and may be redistributed consistent with
    this project or the open source license(s) involved.
```

Then you just add a line to every git commit message:

    Signed-off-by: Joe Smith <joe.smith@email.com>

Use your real name (sorry, no pseudonyms or anonymous contributions.)

If you set your `user.name` and `user.email` git configs, you can sign your
commit automatically with `git commit -s`.

Note that the old-style `Docker-DCO-1.1-Signed-off-by: ...` format is still
accepted, so there is no need to update outstanding pull requests to the new
format right away, but please do adjust your processes for future contributions.
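
A minimal sketch of that setup (the name and address are placeholders):

```
git config user.name "Joe Smith"
git config user.email joe.smith@email.com
git commit -s -m "Commit message"
```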

### How can I become a maintainer?

* Step 1: Learn the component inside out
* Step 2: Make yourself useful by contributing code, bug fixes, support, etc.
* Step 3: Volunteer on the IRC channel (#docker at Freenode)
* Step 4: Propose yourself at a scheduled docker meeting in #docker-dev

Don't forget: being a maintainer is a time investment. Make sure you
will have time to make yourself available. You don't have to be a
maintainer to make a difference on the project!

### IRC meetings

There are two monthly meetings taking place on #docker-dev IRC to accommodate all
timezones. Anybody can propose a topic for discussion prior to the meeting.

If you feel the conversation is going off-topic, feel free to point it out.

For the exact dates and times, have a look at [the irc-minutes
repo](https://github.com/docker/irc-minutes). The minutes also contain all the
notes from previous meetings.

## Docker community guidelines

We want to keep the Docker community awesome, growing and collaborative. We need
your help to keep it that way. To help with this we've come up with some general
guidelines for the community as a whole:

* Be nice: Be courteous, respectful and polite to fellow community members:
  no regional, racial, gender, or other abuse will be tolerated. We like
  nice people way better than mean ones!

* Encourage diversity and participation: Make everyone in our community feel
  welcome, regardless of their background and the extent of their
  contributions, and do everything possible to encourage participation in
  our community.

* Keep it legal: Basically, don't get us in trouble. Share only content that
  you own, do not share private or sensitive information, and don't break
  the law.

* Stay on topic: Make sure that you are posting to the correct channel and
  avoid off-topic discussions. Remember when you update an issue or respond
  to an email you are potentially sending to a large number of people. Please
  consider this before you update. Also remember that nobody likes spam.

### Guideline violations — 3 strikes method

The point of this section is not to find opportunities to punish people, but we
do need a fair way to deal with people who are making our community suck.

1. First occurrence: We'll give you a friendly, but public reminder that the
   behavior is inappropriate according to our guidelines.

2. Second occurrence: We will send you a private message with a warning that
   any additional violations will result in removal from the community.

3. Third occurrence: Depending on the violation, we may need to delete or ban
   your account.

**Notes:**

* Obvious spammers are banned on first occurrence. If we don't do this, we'll
  have spam all over the place.

* Violations are forgiven after 6 months of good behavior, and we won't hold a
  grudge.

* People who commit minor infractions will get some education, rather than
  hammering them in the 3 strikes process.

* The rules apply equally to everyone in the community, no matter how much
  you've contributed.

* Extreme violations of a threatening, abusive, destructive or illegal nature
  will be addressed immediately and are not subject to 3 strikes or forgiveness.

* Contact abuse@docker.com to report abuse or appeal violations. In the case of
  appeals, we know that mistakes happen, and we'll work with you to come up with a
  fair solution if there has been a misunderstanding.

171 Dockerfile
@@ -1,171 +0,0 @@
# This file describes the standard way to build Docker, using docker
#
# Usage:
#
# # Assemble the full dev environment. This is slow the first time.
# docker build -t docker .
#
# # Mount your source in an interactive container for quick testing:
# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash
#
# # Run the test suite:
# docker run --privileged docker hack/make.sh test
#
# # Publish a release:
# docker run --privileged \
#  -e AWS_S3_BUCKET=baz \
#  -e AWS_ACCESS_KEY=foo \
#  -e AWS_SECRET_KEY=bar \
#  -e GPG_PASSPHRASE=gloubiboulga \
#  docker hack/release.sh
#
# Note: Apparmor used to mess with privileged mode, but this is no longer
# the case. Therefore, you don't have to disable it anymore.
#

FROM ubuntu:14.04
MAINTAINER Tianon Gravi <admwiggin@gmail.com> (@tianon)

# Packaged dependencies
RUN apt-get update && apt-get install -y \
	apparmor \
	aufs-tools \
	automake \
	btrfs-tools \
	build-essential \
	curl \
	dpkg-sig \
	git \
	iptables \
	libapparmor-dev \
	libcap-dev \
	libsqlite3-dev \
	mercurial \
	parallel \
	python-mock \
	python-pip \
	python-websocket \
	reprepro \
	ruby1.9.1 \
	ruby1.9.1-dev \
	s3cmd=1.1.0* \
	--no-install-recommends

# Get lvm2 source for compiling statically
RUN git clone -b v2_02_103 https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2
# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags

# Compile and install lvm2
RUN cd /usr/local/lvm2 \
	&& ./configure --enable-static_link \
	&& make device-mapper \
	&& make install_device-mapper
# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL

# Install lxc
ENV LXC_VERSION 1.0.7
RUN mkdir -p /usr/src/lxc \
	&& curl -sSL https://linuxcontainers.org/downloads/lxc/lxc-${LXC_VERSION}.tar.gz | tar -v -C /usr/src/lxc/ -xz --strip-components=1
RUN cd /usr/src/lxc \
	&& ./configure \
	&& make \
	&& make install \
	&& ldconfig

# Install Go
ENV GO_VERSION 1.4.2
RUN curl -sSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/local -xz \
	&& mkdir -p /go/bin
ENV PATH /go/bin:/usr/local/go/bin:$PATH
ENV GOPATH /go:/go/src/github.com/docker/docker/vendor
RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1

# Compile Go for cross compilation
ENV DOCKER_CROSSPLATFORMS \
	linux/386 linux/arm \
	darwin/amd64 darwin/386 \
	freebsd/amd64 freebsd/386 freebsd/arm \
	windows/amd64 windows/386

# (set an explicit GOARM of 5 for maximum compatibility)
ENV GOARM 5
RUN cd /usr/local/go/src \
	&& set -x \
	&& for platform in $DOCKER_CROSSPLATFORMS; do \
		GOOS=${platform%/*} \
		GOARCH=${platform##*/} \
		./make.bash --no-clean 2>&1; \
	done

# We still support compiling with older Go, so need to grab older "gofmt"
ENV GOFMT_VERSION 1.3.3
RUN curl -sSL https://storage.googleapis.com/golang/go${GOFMT_VERSION}.$(go env GOOS)-$(go env GOARCH).tar.gz | tar -C /go/bin -xz --strip-components=2 go/bin/gofmt

# Grab Go's cover tool for dead-simple code coverage testing
RUN go get golang.org/x/tools/cmd/cover

# TODO replace FPM with some very minimal debhelper stuff
RUN gem install --no-rdoc --no-ri fpm --version 1.3.2

# Install registry
ENV REGISTRY_COMMIT d957768537c5af40e4f4cd96871f7b2bde9e2923
RUN set -x \
	&& git clone https://github.com/docker/distribution.git /go/src/github.com/docker/distribution \
	&& (cd /go/src/github.com/docker/distribution && git checkout -q $REGISTRY_COMMIT) \
	&& GOPATH=/go/src/github.com/docker/distribution/Godeps/_workspace:/go \
		go build -o /go/bin/registry-v2 github.com/docker/distribution/cmd/registry

# Get the "docker-py" source so we can run their integration tests
ENV DOCKER_PY_COMMIT 91985b239764fe54714fa0a93d52aa362357d251
RUN git clone https://github.com/docker/docker-py.git /docker-py \
	&& cd /docker-py \
	&& git checkout -q $DOCKER_PY_COMMIT

# Setup s3cmd config
RUN { \
		echo '[default]'; \
		echo 'access_key=$AWS_ACCESS_KEY'; \
		echo 'secret_key=$AWS_SECRET_KEY'; \
	} > ~/.s3cfg

# Set user.email so crosbymichael's in-container merge commits go smoothly
RUN git config --global user.email 'docker-dummy@example.com'

# Add an unprivileged user to be used for tests which need it
RUN groupadd -r docker
RUN useradd --create-home --gid docker unprivilegeduser

VOLUME /var/lib/docker
WORKDIR /go/src/github.com/docker/docker
ENV DOCKER_BUILDTAGS apparmor selinux btrfs_noversion

# Let us use a .bashrc file
RUN ln -sfv $PWD/.bashrc ~/.bashrc

# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
COPY contrib/download-frozen-image.sh /go/src/github.com/docker/docker/contrib/
RUN ./contrib/download-frozen-image.sh /docker-frozen-images \
	busybox:latest@4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125 \
	hello-world:frozen@e45a5af57b00862e5ef5782a9925979a02ba2b12dff832fd0991335f4a11e5c5
# see also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is)

# Install man page generator
COPY vendor /go/src/github.com/docker/docker/vendor
# (copy vendor/ because go-md2man needs golang.org/x/net)
RUN set -x \
	&& git clone -b v1.0.1 https://github.com/cpuguy83/go-md2man.git /go/src/github.com/cpuguy83/go-md2man \
	&& git clone -b v1.2 https://github.com/russross/blackfriday.git /go/src/github.com/russross/blackfriday \
	&& go install -v github.com/cpuguy83/go-md2man

# install toml validator
ENV TOMLV_COMMIT 9baf8a8a9f2ed20a8e54160840c492f937eeaf9a
RUN set -x \
	&& git clone https://github.com/BurntSushi/toml.git /go/src/github.com/BurntSushi/toml \
	&& (cd /go/src/github.com/BurntSushi/toml && git checkout -q $TOMLV_COMMIT) \
	&& go install -v github.com/BurntSushi/toml/cmd/tomlv

# Wrap all commands in the "docker-in-docker" script to allow nested containers
ENTRYPOINT ["hack/dind"]

# Upload docker source
COPY . /go/src/github.com/docker/docker

34 Dockerfile.simple
@@ -1,34 +0,0 @@
# docker build -t docker:simple -f Dockerfile.simple .
# docker run --rm docker:simple hack/make.sh dynbinary
# docker run --rm --privileged docker:simple hack/dind hack/make.sh test-unit
# docker run --rm --privileged -v /var/lib/docker docker:simple hack/dind hack/make.sh dynbinary test-integration-cli

# This represents the bare minimum required to build and test Docker.

FROM debian:jessie

# compile and runtime deps
# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#build-dependencies
# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#runtime-dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
	btrfs-tools \
	curl \
	gcc \
	git \
	golang \
	libdevmapper-dev \
	libsqlite3-dev \
	\
	ca-certificates \
	e2fsprogs \
	iptables \
	procps \
	xz-utils \
	\
	aufs-tools \
	lxc \
	&& rm -rf /var/lib/apt/lists/*

ENV AUTO_GOPATH 1
WORKDIR /usr/src/docker
COPY . /usr/src/docker

13 LICENSE
@@ -176,7 +176,18 @@

END OF TERMS AND CONDITIONS

Copyright 2013-2015 Docker, Inc.

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

641 MAINTAINERS
@@ -1,641 +0,0 @@
# Docker maintainers file
#
# This file describes who runs the Docker project and how.
# This is a living document - if you see something out of date or missing,
# speak up!
#
# It is structured to be consumable by both humans and programs.
# To extract its contents programmatically, use any TOML-compliant
# parser.
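#
# As an illustrative check (an editorial example, not part of the original
# file), the `tomlv` validator that this repository's Dockerfile installs can
# parse and validate this file from a shell:
#
#	tomlv MAINTAINERS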

[Rules]

[Rules.maintainers]

title = "What is a maintainer?"

text = """
There are different types of maintainers, with different responsibilities, but
all maintainers have 3 things in common:

1) They share responsibility in the project's success.
2) They have made a long-term, recurring time investment to improve the project.
3) They spend that time doing whatever needs to be done, not necessarily what
is the most interesting or fun.

Maintainers are often under-appreciated, because their work is harder to appreciate.
It's easy to appreciate a really cool and technically advanced feature. It's harder
to appreciate the absence of bugs, the slow but steady improvement in stability,
or the reliability of a release process. But those things distinguish a good
project from a great one.
"""

[Rules.bdfl]

title = "The Benevolent dictator for life (BDFL)"

text = """
Docker follows the timeless, highly efficient and totally unfair system
known as [Benevolent dictator for
life](http://en.wikipedia.org/wiki/Benevolent_Dictator_for_Life), with
yours truly, Solomon Hykes, in the role of BDFL. This means that all
decisions are made, by default, by Solomon. Since making every decision
myself would be highly un-scalable, in practice decisions are spread
across multiple maintainers.

Ideally, the BDFL role is like the Queen of England: awesome crown, but not
an actual operational role day-to-day. The real job of a BDFL is to NEVER GO AWAY.
Every other rule can change, perhaps drastically so, but the BDFL will always
be there, preserving the philosophy and principles of the project, and keeping
ultimate authority over its fate. This gives us great flexibility in experimenting
with various governance models, knowing that we can always press the "reset" button
without fear of fragmentation or deadlock. See the US congress for a counter-example.

BDFL daily routine:

* Is the project governance stuck in a deadlock or irreversibly fragmented?
	* If yes: refactor the project governance
* Are there issues or conflicts escalated by core?
	* If yes: resolve them
* Go back to polishing that crown.
"""

[Rules.decisions]

title = "How are decisions made?"

text = """
Short answer: EVERYTHING IS A PULL REQUEST.

Docker is an open-source project with an open design philosophy. This
means that the repository is the source of truth for EVERY aspect of the
project, including its philosophy, design, road map, and APIs. *If it's
part of the project, it's in the repo. If it's in the repo, it's part of
the project.*

As a result, all decisions can be expressed as changes to the
repository. An implementation change is a change to the source code. An
API change is a change to the API specification. A philosophy change is
a change to the philosophy manifesto, and so on.

All decisions affecting Docker, big and small, follow the same 3 steps:

* Step 1: Open a pull request. Anyone can do this.

* Step 2: Discuss the pull request. Anyone can do this.

* Step 3: Merge or refuse the pull request. Who does this depends on the nature
of the pull request and which areas of the project it affects. See *review flow*
for details.

Because Docker is such a large and active project, it's important for everyone to know
who is responsible for deciding what. That is determined by a precise set of rules.

* For every *decision* in the project, the rules should designate, in a deterministic way,
who should *decide*.

* For every *problem* in the project, the rules should designate, in a deterministic way,
who should be responsible for *fixing* it.

* For every *question* in the project, the rules should designate, in a deterministic way,
who should be expected to have the *answer*.
"""

[Rules.review]

title = "Review flow"

text = """
Pull requests should be processed according to the following flow:

* For each subsystem affected by the change, the maintainers of the subsystem must approve or refuse it.
It is the responsibility of the subsystem maintainers to process patches affecting them in a timely
manner.

* If the change affects areas of the code which are not part of a subsystem,
or if subsystem maintainers are unable to reach a timely decision, it must be approved by
the core maintainers.

* If the change affects the UI or public APIs, or if it represents a major change in architecture,
the architects must approve or refuse it.

* If the change affects the operations of the project, it must be approved or rejected by
the relevant operators.

* If the change affects the governance, philosophy, goals or principles of the project,
it must be approved by BDFL.

* A pull request can be in 1 of 5 distinct states, for each of which there is a corresponding label
that needs to be applied. `Rules.review.states` contains the list of states with possible targets
for each.
"""

# Triage
[Rules.review.states.0-triage]

# Maintainers are expected to triage new incoming pull requests by removing
# the `0-triage` label and adding the correct labels (e.g. `1-design-review`),
# potentially skipping some steps depending on the kind of pull request.
# Use common sense for judging.
#
# Checking for DCO should be done at this stage.
#
# If an owner, responsible for closing or merging, can be assigned to the PR,
# the better.

close = "e.g. unresponsive contributor without DCO"
3-docs-review = "non-proposal documentation-only change"
2-code-review = "e.g. trivial bugfix"
1-design-review = "general case"

# Design review
[Rules.review.states.1-design-review]

# Maintainers are expected to comment on the design of the pull request.
# Review of documentation is expected only in the context of design validation,
# not for stylistic changes.
#
# Ideally, documentation should reflect the expected behavior of the code.
# No code review should take place in this step.
#
# Once design is approved, a maintainer should make sure to remove this label
# and add the next one.

close = "design rejected"
3-docs-review = "proposals with only documentation changes"
2-code-review = "general case"

# Code review
[Rules.review.states.2-code-review]

# Maintainers are expected to review the code and ensure that it is good
# quality and in accordance with the documentation in the PR.
#
# If documentation is absent but expected, maintainers should ask for documentation.
#
# All tests should pass.
#
# Once code is approved according to the rules of the subsystem, a maintainer
# should make sure to remove this label and add the next one.

close = ""
1-design-review = "raises design concerns"
4-merge = "trivial change not impacting documentation"
3-docs-review = "general case"

# Docs review
[Rules.review.states.3-docs-review]

# Maintainers are expected to review the documentation in its bigger context,
# ensuring consistency, completeness, validity, and breadth of coverage across
# all extant and new documentation.
#
# They should ask for any editorial change that makes the documentation more
# consistent and easier to understand.
#
# Once documentation is approved (see below), a maintainer should make sure to remove this
# label and add the next one.

close = ""
2-code-review = "requires more code changes"
1-design-review = "raises design concerns"
4-merge = "general case"

# Docs approval
[Rules.review.docs-approval]
# Changes and additions to docs must be reviewed and approved (LGTM'd) by a minimum of two docs sub-project maintainers.
# If the docs change originates with a docs maintainer, only one additional LGTM is required (since we assume a docs maintainer approves of their own PR).

# Merge
[Rules.review.states.4-merge]

# Maintainers are expected to merge this pull request as soon as possible.
# They can ask for a rebase, or carry the pull request themselves.
# These should be the easy PRs to merge.

close = "carry PR"
merge = ""

[Rules.DCO]

title = "Helping contributors with the DCO"

text = """
The [DCO or `Sign your work`](
https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work)
requirement is not intended as a roadblock or speed bump.

Some Docker contributors are not as familiar with `git`, or have used a web based
editor, and thus asking them to `git commit --amend -s` is not the best way forward.

In this case, maintainers can update the commits based on clause (c) of the DCO. The
most trivial way for a contributor to allow the maintainer to do this, is to add
a DCO signature in a Pull Request's comment, or a maintainer can simply note that
the change is sufficiently trivial that it does not substantively change the existing
contribution - i.e., a spelling change.

When you add someone's DCO, please also add your own to keep a log.
"""

[Rules.holiday]

title = "I'm a maintainer, and I'm going on holiday"

text = """
Please let your co-maintainers and other contributors know by raising a pull
request that comments out your `MAINTAINERS` file entry using a `#`.
"""

[Rules."no direct push"]

title = "I'm a maintainer. Should I make pull requests too?"

text = """
Yes. Nobody should ever push to master directly. All changes should be
made through a pull request.
"""

[Rules.meta]

title = "How is this process changed?"

text = "Just like everything else: by making a pull request :)"

# Current project organization
[Org]

bdfl = "shykes"

# The chief architect is responsible for the overall integrity of the technical architecture
# across all subsystems, and the consistency of APIs and UI.
#
# Changes to UI, public APIs and overall architecture (for example a plugin system) must
# be approved by the chief architect.
"Chief Architect" = "shykes"

# The Chief Operator is responsible for the day-to-day operations of the project including:
# - facilitating communications amongst all the contributors;
# - tracking release schedules;
# - managing the relationship with downstream distributions and upstream dependencies;
# - helping new contributors to get involved and become successful contributors and maintainers
#
# The role is also responsible for managing and measuring the success of the overall project
# and ensuring it is governed properly working in concert with the Docker Governance Advisory Board (DGAB).
"Chief Operator" = "spf13"

[Org.Operators]

# The operators make sure the trains run on time. They are responsible for overall operations
# of the project. This includes facilitating communication between all the participants; helping
# newcomers get involved and become successful contributors and maintainers; tracking the schedule
# of releases; managing the relationship with downstream distributions and upstream dependencies;
# defining measures of success for the project and measuring progress; and devising and implementing
# tools and processes which make contributors and maintainers happier and more efficient.

[Org.Operators.security]

people = [
	"erw"
]

[Org.Operators."monthly meetings"]

people = [
	"sven",
	"tianon"
]

[Org.Operators.infrastructure]

people = [
	"jfrazelle",
	"crosbymichael"
]

# The chief maintainer is responsible for all aspects of quality for the project including
# code reviews, usability, stability, security, performance, etc.
# The most important function of the chief maintainer is to lead by example. On the first
# day of a new maintainer, the best advice should be "follow the C.M.'s example and you'll
# be fine".
"Chief Maintainer" = "crosbymichael"

[Org."Core maintainers"]

# The Core maintainers are the ghostbusters of the project: when there's a problem others
# can't solve, they show up and fix it with bizarre devices and weaponry.
# They have final say on technical implementation and coding style.
# They are ultimately responsible for quality in all its forms: usability polish,
# bugfixes, performance, stability, etc. When ownership can cleanly be passed to
# a subsystem, they are responsible for doing so and holding the
# subsystem maintainers accountable. If ownership is unclear, they are the de facto owners.

# For each release (including minor releases), a "release captain" is assigned from the
# pool of core maintainers. Rotation is encouraged across all maintainers, to ensure
# the release process is clear and up-to-date.
#
# It is common for core maintainers to "branch out" to join or start a subsystem.

people = [
	"unclejack",
	"crosbymichael",
	"erikh",
	"estesp",
	"icecrime",
	"jfrazelle",
	"lk4d4",
	"tibor",
	"vbatts",
	"vieux",
	"vishh"
]

[Org.Subsystems]

# As the project grows, it gets separated into well-defined subsystems. Each subsystem
# has a dedicated group of maintainers, which are dedicated to that subsystem and responsible
# for its quality.
# This "cellular division" is the primary mechanism for scaling maintenance of the project as it grows.
#
# The maintainers of each subsystem are responsible for:
#
# 1. Exposing a clear road map for improving their subsystem.
# 2. Delivering prompt feedback and decisions on pull requests affecting their subsystem.
# 3. Being available to anyone with questions, bug reports, criticism etc.
#    on their component. This includes IRC, GitHub requests and the mailing
#    list.
# 4. Making sure their subsystem respects the philosophy, design and
#    road map of the project.
#
# #### How to review patches to your subsystem
#
# Accepting pull requests:
#
# - If the pull request appears to be ready to merge, give it a `LGTM`, which
#   stands for "Looks Good To Me".
# - If the pull request has some small problems that need to be changed, make
#   a comment addressing the issues.
# - If the changes needed to a PR are small, you can add a "LGTM once the
#   following comments are addressed..."; this will reduce needless back and
#   forth.
# - If the PR only needs a few changes before being merged, any MAINTAINER can
#   make a replacement PR that incorporates the existing commits and fixes the
#   problems before a fast track merge.
#
# Closing pull requests:
#
# - If a PR appears to be abandoned, after having attempted to contact the
#   original contributor, then a replacement PR may be made. Once the
#   replacement PR is made, any contributor may close the original one.
# - If you are not sure if the pull request implements a good feature or you
#   do not understand the purpose of the PR, ask the contributor to provide
#   more documentation. If the contributor is not able to adequately explain
#   the purpose of the PR, the PR may be closed by any MAINTAINER.
# - If a MAINTAINER feels that the pull request is sufficiently architecturally
#   flawed, or if the pull request needs significantly more design discussion
#   before being considered, the MAINTAINER should close the pull request with
#   a short explanation of what discussion still needs to be had. It is
#   important not to leave such pull requests open, as this will waste both the
#   MAINTAINER's time and the contributor's time. It is not good to string a
#   contributor on for weeks or months, having them make many changes to a PR
#   that will eventually be rejected.

[Org.Subsystems.Documentation]

people = [
	"fredlf",
	"james",
	"sven",
	"spf13",
	"mary"
]

[Org.Subsystems.libcontainer]

people = [
	"crosbymichael",
	"vmarmol",
	"mpatel",
	"jnagal",
	"lk4d4"
]

[Org.Subsystems.registry]

people = [
	"dmp42",
	"vbatts",
	"joffrey",
	"samalba",
	"sday",
	"jlhawn",
	"dmcg"
]

[Org.Subsystems."build tools"]

people = [
	"shykes",
	"tianon"
]

[Org.Subsystem."remote api"]

people = [
	"vieux"
]

[Org.Subsystem.swarm]

people = [
	"aluzzardi",
	"vieux"
]

[Org.Subsystem.machine]

people = [
	"bfirsh",
	"ehazlett"
]

[Org.Subsystem.compose]

people = [
	"aanand"
]

[Org.Subsystem.builder]

people = [
	"erikh",
	"tibor",
	"duglin"
]

[people]

# A reference list of all people associated with the project.
# All other sections should refer to people by their canonical key
# in the people section.

# ADD YOURSELF HERE IN ALPHABETICAL ORDER

[people.aanand]
Name = "Aanand Prasad"
Email = "aanand@docker.com"
GitHub = "aanand"

[people.aluzzardi]
Name = "Andrea Luzzardi"
Email = "aluzzardi@docker.com"
GitHub = "aluzzardi"

[people.bfirsh]
Name = "Ben Firshman"
Email = "ben@firshman.co.uk"
GitHub = "bfirsh"

[people.crosbymichael]
Name = "Michael Crosby"
Email = "crosbymichael@gmail.com"
GitHub = "crosbymichael"

[people.duglin]
Name = "Doug Davis"
Email = "dug@us.ibm.com"
GitHub = "duglin"

[people.dmcg]
Name = "Derek McGowan"
Email = "derek@docker.com"
GitHub = "dmcgowan"

[people.dmp42]
Name = "Olivier Gambier"
Email = "olivier@docker.com"
GitHub = "dmp42"

[people.ehazlett]
Name = "Evan Hazlett"
Email = "ejhazlett@gmail.com"
GitHub = "ehazlett"

[people.erikh]
Name = "Erik Hollensbe"
Email = "erik@docker.com"
GitHub = "erikh"

[people.erw]
Name = "Eric Windisch"
Email = "eric@windisch.us"
GitHub = "ewindisch"

[people.estesp]
Name = "Phil Estes"
Email = "estesp@linux.vnet.ibm.com"
GitHub = "estesp"

[people.fredlf]
Name = "Fred Lifton"
Email = "fred.lifton@docker.com"
GitHub = "fredlf"

[people.icecrime]
Name = "Arnaud Porterie"
Email = "arnaud@docker.com"
GitHub = "icecrime"

[people.jfrazelle]
Name = "Jessie Frazelle"
Email = "jess@docker.com"
GitHub = "jfrazelle"

[people.jlhawn]
Name = "Josh Hawn"
Email = "josh.hawn@docker.com"
GitHub = "jlhawn"

[people.joffrey]
Name = "Joffrey Fuhrer"
Email = "joffrey@docker.com"
GitHub = "shin-"

[people.lk4d4]
Name = "Alexander Morozov"
Email = "lk4d4@docker.com"
GitHub = "lk4d4"

[people.mary]
Name = "Mary Anthony"
Email = "mary.anthony@docker.com"
GitHub = "moxiegirl"

[people.sday]
Name = "Stephen Day"
Email = "stephen.day@docker.com"
GitHub = "stevvooe"

[people.shykes]
Name = "Solomon Hykes"
Email = "solomon@docker.com"
GitHub = "shykes"

[people.spf13]
Name = "Steve Francia"
Email = "steve.francia@gmail.com"
GitHub = "spf13"

[people.sven]
Name = "Sven Dowideit"
Email = "SvenDowideit@home.org.au"
GitHub = "SvenDowideit"

[people.tianon]
Name = "Tianon Gravi"
Email = "admwiggin@gmail.com"
GitHub = "tianon"

[people.tibor]
Name = "Tibor Vass"
Email = "tibor@docker.com"
GitHub = "tiborvass"

[people.vbatts]
Name = "Vincent Batts"
Email = "vbatts@redhat.com"
GitHub = "vbatts"

[people.vieux]
Name = "Victor Vieux"
Email = "vieux@docker.com"
GitHub = "vieux"

[people.vmarmol]
Name = "Victor Marmol"
Email = "vmarmol@google.com"
GitHub = "vmarmol"

[people.jnagal]
Name = "Rohit Jnagal"
Email = "jnagal@google.com"
GitHub = "rjnagal"

[people.mpatel]
Name = "Mrunal Patel"
Email = "mpatel@redhat.com"
GitHub = "mrunalp"

[people.unclejack]
Name = "Cristian Staretu"
Email = "cristian.staretu@gmail.com"
GitHub = "unclejack"

[people.vishh]
Name = "Vishnu Kannan"
Email = "vishnuk@google.com"
GitHub = "vishh"

135 Makefile
@@ -1,97 +1,78 @@
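
# Illustrative usage (the two targets referenced below exist in this Makefile;
# this comment block itself is an editorial example, not part of the original
# file):
#
#	make build   # build the "docker" development image
#	make test    # run the full test suite inside that image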

.PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration test-integration-cli test-docker-py validate
DOCKER_PACKAGE := github.com/dotcloud/docker
RELEASE_VERSION := $(shell git tag | grep -E "v[0-9\.]+$$" | sort -nr | head -n 1)
SRCRELEASE := docker-$(RELEASE_VERSION)
BINRELEASE := docker-$(RELEASE_VERSION).tgz

# env vars passed through directly to Docker's build scripts
# to allow things like `make DOCKER_CLIENTONLY=1 binary` easily
# `docs/sources/contributing/devenvironment.md` and `project/PACKAGERS.md` have some limited documentation of some of these
DOCKER_ENVS := \
	-e BUILDFLAGS \
	-e DOCKER_CLIENTONLY \
	-e DOCKER_EXECDRIVER \
	-e DOCKER_GRAPHDRIVER \
	-e TESTDIRS \
	-e TESTFLAGS \
	-e TIMEOUT
# note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds
GIT_ROOT := $(shell git rev-parse --show-toplevel)
BUILD_DIR := $(CURDIR)/.gopath

# to allow `make BIND_DIR=. shell` or `make BIND_DIR= test`
# (default to no bind mount if DOCKER_HOST is set)
# note: BINDDIR is supported for backwards-compatibility here
BIND_DIR := $(if $(BINDDIR),$(BINDDIR),$(if $(DOCKER_HOST),,bundles))
DOCKER_MOUNT := $(if $(BIND_DIR),-v "$(CURDIR)/$(BIND_DIR):/go/src/github.com/docker/docker/$(BIND_DIR)")
GOPATH ?= $(BUILD_DIR)
export GOPATH

# to allow `make DOCSDIR=docs docs-shell` (to create a bind mount in docs)
DOCS_MOUNT := $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR))
GO_OPTIONS ?=
ifeq ($(VERBOSE), 1)
GO_OPTIONS += -v
endif

# to allow `make DOCSPORT=9000 docs`
DOCSPORT := 8000
GIT_COMMIT = $(shell git rev-parse --short HEAD)
GIT_STATUS = $(shell test -n "`git status --porcelain`" && echo "+CHANGES")

GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH))
DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH),:$(GIT_BRANCH))
BUILD_OPTIONS = -ldflags "-X main.GIT_COMMIT $(GIT_COMMIT)$(GIT_STATUS)"

DOCKER_RUN_DOCKER := docker run --rm -it --privileged $(DOCKER_ENVS) $(DOCKER_MOUNT) "$(DOCKER_IMAGE)"
SRC_DIR := $(GOPATH)/src

DOCKER_RUN_DOCS := docker run --rm -it $(DOCS_MOUNT) -e AWS_S3_BUCKET -e NOCACHE
DOCKER_DIR := $(SRC_DIR)/$(DOCKER_PACKAGE)
DOCKER_MAIN := $(DOCKER_DIR)/docker

# for some docs workarounds (see below in "docs-build" target)
GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null)
DOCKER_BIN_RELATIVE := bin/docker
DOCKER_BIN := $(CURDIR)/$(DOCKER_BIN_RELATIVE)

default: binary
.PHONY: all clean test hack release srcrelease $(BINRELEASE) $(SRCRELEASE) $(DOCKER_BIN) $(DOCKER_DIR)

all: build
	$(DOCKER_RUN_DOCKER) hack/make.sh
all: $(DOCKER_BIN)

binary: build
	$(DOCKER_RUN_DOCKER) hack/make.sh binary
$(DOCKER_BIN): $(DOCKER_DIR)
	@mkdir -p $(dir $@)
	@(cd $(DOCKER_MAIN); go build $(GO_OPTIONS) $(BUILD_OPTIONS) -o $@)
	@echo $(DOCKER_BIN_RELATIVE) is created.

cross: build
	$(DOCKER_RUN_DOCKER) hack/make.sh binary cross
$(DOCKER_DIR):
	@mkdir -p $(dir $@)
	@rm -f $@
	@ln -sf $(CURDIR)/ $@
	@(cd $(DOCKER_MAIN); go get $(GO_OPTIONS))

docs: docs-build
	$(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" mkdocs serve
whichrelease:
	echo $(RELEASE_VERSION)

docs-shell: docs-build
	$(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" bash
release: $(BINRELEASE)
srcrelease: $(SRCRELEASE)
deps: $(DOCKER_DIR)

docs-release: docs-build
	$(DOCKER_RUN_DOCS) -e OPTIONS -e BUILD_ROOT -e DISTRIBUTION_ID \
		-v $(CURDIR)/docs/awsconfig:/docs/awsconfig \
		"$(DOCKER_DOCS_IMAGE)" ./release.sh
# A clean checkout of $RELEASE_VERSION, with vendored dependencies
$(SRCRELEASE):
	rm -fr $(SRCRELEASE)
	git clone $(GIT_ROOT) $(SRCRELEASE)
	cd $(SRCRELEASE); git checkout -q $(RELEASE_VERSION)

docs-test: docs-build
	$(DOCKER_RUN_DOCS) "$(DOCKER_DOCS_IMAGE)" ./test.sh
# A binary release ready to be uploaded to a mirror
$(BINRELEASE): $(SRCRELEASE)
	rm -f $(BINRELEASE)
	cd $(SRCRELEASE); make; cp -R bin docker-$(RELEASE_VERSION); tar -f ../$(BINRELEASE) -zv -c docker-$(RELEASE_VERSION)

test: build
	$(DOCKER_RUN_DOCKER) hack/make.sh binary cross test-unit test-integration test-integration-cli test-docker-py
clean:
	@rm -rf $(dir $(DOCKER_BIN))
ifeq ($(GOPATH), $(BUILD_DIR))
	@rm -rf $(BUILD_DIR)
else ifneq ($(DOCKER_DIR), $(realpath $(DOCKER_DIR)))
	@rm -f $(DOCKER_DIR)
endif

test-unit: build
	$(DOCKER_RUN_DOCKER) hack/make.sh test-unit
test: all
	@(cd $(DOCKER_DIR); sudo -E go test $(GO_OPTIONS))

test-integration: build
	$(DOCKER_RUN_DOCKER) hack/make.sh test-integration
fmt:
	@gofmt -s -l -w .

test-integration-cli: build
	$(DOCKER_RUN_DOCKER) hack/make.sh binary test-integration-cli

test-docker-py: build
	$(DOCKER_RUN_DOCKER) hack/make.sh binary test-docker-py

validate: build
	$(DOCKER_RUN_DOCKER) hack/make.sh validate-gofmt validate-dco validate-toml

shell: build
	$(DOCKER_RUN_DOCKER) bash

build: bundles
	docker build -t "$(DOCKER_IMAGE)" .

docs-build:
	cp ./VERSION docs/VERSION
	echo "$(GIT_BRANCH)" > docs/GIT_BRANCH
	# echo "$(AWS_S3_BUCKET)" > docs/AWS_S3_BUCKET
	echo "$(GITCOMMIT)" > docs/GITCOMMIT
	docker pull docs/base
	docker build -t "$(DOCKER_DOCS_IMAGE)" docs

bundles:
	mkdir bundles
hack:
	cd $(CURDIR)/buildbot && vagrant up

19 NOTICE
@@ -1,19 +1,6 @@
Docker
Copyright 2012-2015 Docker, Inc.
Copyright 2012-2013 dotCloud, inc.

This product includes software developed at Docker, Inc. (http://www.docker.com).
This product includes software developed at dotCloud, inc. (http://www.dotcloud.com).

This product contains software (https://github.com/kr/pty) developed
by Keith Rarick, licensed under the MIT License.

The following is courtesy of our legal counsel:

Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.

For more information, please see http://www.bis.doc.gov

See also http://www.apache.org/dev/crypto.html and/or seek legal counsel.

477 README.md
@@ -1,246 +1,317 @@
Docker: the Linux container engine
==================================

Docker is an open source project to pack, ship and run any application
as a lightweight container. Docker complements LXC with a high-level API
which operates at the process level. It runs unix processes with strong
guarantees of isolation and repeatability across servers.

Docker containers are both *hardware-agnostic* and *platform-agnostic*.
This means they can run anywhere, from your laptop to the largest
EC2 compute instance and everything in between - and they don't require
you to use a particular language, framework or packaging system. That
makes them great building blocks for deploying and scaling web apps,
databases, and backend services without depending on a particular stack
or provider. Docker is a great building block for automating distributed
systems: large-scale web deployments, database clusters, continuous
deployment systems, private PaaS, service-oriented architectures, etc.

* *Heterogeneous payloads*: any combination of binaries, libraries, configuration files, scripts, virtualenvs, jars, gems, tarballs, you name it. No more juggling between domain-specific tools. Docker can deploy and run them all.

* *Any server*: docker can run on any x64 machine with a modern linux kernel - whether it's a laptop, a bare metal server or a VM. This makes it perfect for multi-cloud deployments.

* *Isolation*: docker isolates processes from each other and from the underlying host, using lightweight containers.

* *Repeatability*: because containers are isolated in their own filesystem, they behave the same regardless of where, when, and alongside what they run.

Docker began as an open-source implementation of the deployment engine which
powers [dotCloud](http://dotcloud.com), a popular Platform-as-a-Service.
It benefits directly from the experience accumulated over several years
of large-scale operation and support of hundreds of thousands of
applications and databases.

![Docker L](docs/static_files/docker-logo-compressed.png "Docker")

## Security Disclosure

Security is very important to us. If you have any issue regarding security,
please disclose the information responsibly by sending an email to
security@docker.com and not by creating a github issue.

## Better than VMs

A common method for distributing applications and sandboxing their
execution is to use virtual machines, or VMs. Typical VM formats are
VMWare's vmdk, Oracle Virtualbox's vdi, and Amazon EC2's ami. In theory
these formats should allow every developer to automatically package
their application into a "machine" for easy distribution and deployment.
In practice, that almost never happens, for a few reasons:

* *Size*: VMs are very large which makes them impractical to store
  and transfer.
* *Performance*: running VMs consumes significant CPU and memory,
  which makes them impractical in many scenarios, for example local
  development of multi-tier applications, and large-scale deployment
  of cpu and memory-intensive applications on large numbers of
  machines.
* *Portability*: competing VM environments don't play well with each
  other. Although conversion tools do exist, they are limited and
  add even more overhead.
* *Hardware-centric*: VMs were designed with machine operators in
  mind, not software developers. As a result, they offer very
  limited tooling for what developers need most: building, testing
  and running their software. For example, VMs offer no facilities
  for application versioning, monitoring, configuration, logging or
  service discovery.

By contrast, Docker relies on a different sandboxing method known as
*containerization*. Unlike traditional virtualization, containerization
takes place at the kernel level. Most modern operating system kernels
now support the primitives necessary for containerization, including
Linux with [openvz](http://openvz.org),
[vserver](http://linux-vserver.org) and more recently
[lxc](http://lxc.sourceforge.net), Solaris with
[zones](http://docs.oracle.com/cd/E26502_01/html/E29024/preface-1.html#scrolltoc),
and FreeBSD with
[Jails](http://www.freebsd.org/doc/handbook/jails.html).

Docker builds on top of these low-level primitives to offer developers a
portable format and runtime environment that solves all four problems.
Docker containers are small (and their transfer can be optimized with
layers), they have basically zero memory and cpu overhead, they are
completely portable, and are designed from the ground up with an
application-centric design.

Perhaps best of all, because Docker operates at the OS level, it can still be
run inside a VM!

## Plays well with others

Docker does not require you to buy into a particular programming
language, framework, packaging system, or configuration language.

Is your application a Unix process? Does it use files, tcp connections,
environment variables, standard Unix streams and command-line arguments
as inputs and outputs? Then Docker can run it.

Can your application's build be expressed as a sequence of such
commands? Then Docker can build it.
## Escape dependency hell

A common problem for developers is the difficulty of managing all
their application's dependencies in a simple and automated way.

This is usually difficult for several reasons:

* *Cross-platform dependencies*. Modern applications often depend on
  a combination of system libraries and binaries, language-specific
  packages, framework-specific modules, internal components
  developed for another project, etc. These dependencies live in
  different "worlds" and require different tools - these tools
  typically don't work well with each other, requiring awkward
  custom integrations.

* *Conflicting dependencies*. Different applications may depend on
  different versions of the same dependency. Packaging tools handle
  these situations with various degrees of ease - but they all
  handle them in different and incompatible ways, which again forces
  the developer to do extra work.

* *Custom dependencies*. A developer may need to prepare a custom
  version of their application's dependency. Some packaging systems
  can handle custom versions of a dependency, others can't - and all
  of them handle it differently.

Docker solves the problem of dependency hell by giving the developer a simple
way to express *all* their application's dependencies in one place, while
streamlining the process of assembling them. If this makes you think of
[XKCD 927](http://xkcd.com/927/), don't worry. Docker doesn't
*replace* your favorite packaging systems. It simply orchestrates
their use in a simple and repeatable way. How does it do that? With
layers.

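To make the layering idea concrete, here is a minimal sketch of two dependency
layers assembled by hand with `docker run` and `docker commit` (both commands
are demonstrated in the usage examples below; the repository names and the
tarball URL are hypothetical):

```bash
# Layer 1: install a system-level dependency, then snapshot the result
C1=$(docker run -d ubuntu:12.04 apt-get install -y curl)
docker commit -m "Installed curl" $C1 $USER/base-with-curl

# Layer 2: build on top of layer 1; running it *after* layer 1 is what
# encodes the dependency on curl being present (app.tar.gz is a placeholder)
C2=$(docker run -d $USER/base-with-curl /bin/sh -c "curl -sSL http://example.com/app.tar.gz | tar -xz")
docker commit -m "Unpacked app" $C2 $USER/app
```
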
Notable features
-----------------

* Filesystem isolation: each process container runs in a completely separate root filesystem.

* Resource isolation: system resources like cpu and memory can be allocated differently to each process container, using cgroups.

* Network isolation: each process container runs in its own network namespace, with a virtual interface and IP address of its own.

* Copy-on-write: root filesystems are created using copy-on-write, which makes deployment extremely fast, memory-cheap and disk-cheap.

* Logging: the standard streams (stdout/stderr/stdin) of each process container are collected and logged for real-time or batch retrieval.

* Change management: changes to a container's filesystem can be committed into a new image and re-used to create more containers. No templating or manual configuration required.

* Interactive shell: docker can allocate a pseudo-tty and attach to the standard input of any container, for example to run a throwaway interactive shell.

* Repeatability: because containers are isolated in their own filesystem, they behave the same regardless of where, when, and alongside what they run.

Docker defines a build as running a sequence of Unix commands, one
after the other, in the same container. Build commands modify the
contents of the container (usually by installing new files on the
filesystem), the next command modifies it some more, etc. Since each
build command inherits the result of the previous commands, the
*order* in which the commands are executed expresses *dependencies*.

Here's a typical Docker build process:

```
FROM ubuntu:12.04
RUN apt-get update && apt-get install -y python python-pip curl
RUN curl -sSL https://github.com/shykes/helloflask/archive/master.tar.gz | tar -xzv
RUN cd helloflask-master && pip install -r requirements.txt
```

Note that Docker doesn't care *how* dependencies are built - as long
as they can be built by running a Unix command in a container.

Install instructions
====================

Quick install on Ubuntu 12.04 and 12.10
---------------------------------------

```bash
curl get.docker.io | sh -x
```

Binary installs
----------------

Docker supports the following binary installation methods.
Note that some methods are community contributions and not yet officially supported.

Getting started
===============

Docker can be installed on your local machine as well as servers - both
bare metal and virtualized. It is available as a binary on most modern
Linux systems, or as a VM on Windows, Mac and other systems.

* [Ubuntu 12.04 and 12.10 (officially supported)](http://docs.docker.io/en/latest/installation/ubuntulinux/)
* [Arch Linux](http://docs.docker.io/en/latest/installation/archlinux/)
* [MacOS X (with Vagrant)](http://docs.docker.io/en/latest/installation/macos/)
* [Windows (with Vagrant)](http://docs.docker.io/en/latest/installation/windows/)
* [Amazon EC2 (with Vagrant)](http://docs.docker.io/en/latest/installation/amazon/)

For up-to-date install instructions, see the [Docs](http://docs.docker.com).

We also offer an [interactive tutorial](http://www.docker.com/tryit/)
for quickly learning the basics of using Docker.

Installing from source
----------------------

1. Make sure you have a [Go language](http://golang.org/doc/install) compiler and [git](http://git-scm.com) installed.

2. Checkout the source code

```bash
git clone http://github.com/dotcloud/docker
```

3. Build the docker binary

```bash
cd docker
make VERBOSE=1
sudo cp ./bin/docker /usr/local/bin/docker
```

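As a quick sanity check, you can ask the freshly built binary for its version
(a sketch; the client should print its own version even if the daemon is not
running yet):

```bash
docker version
```
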
Usage examples
==============

Docker can be used to run short-lived commands, long-running daemons
(app servers, databases etc.), interactive shell sessions, etc.

You can find a [list of real-world
examples](http://docs.docker.com/examples/) in the
documentation.

First run the docker daemon
---------------------------

All the examples assume your machine is running the docker daemon. To run the docker daemon in the background, simply type:

```bash
# On a production system you want this running in an init script
sudo docker -d &
```

Now you can run docker in client mode: all commands will be forwarded to the docker daemon, so the client can run from any account.

```bash
# Now you can run docker commands from any account.
docker help
```

Throwaway shell in a base ubuntu image
--------------------------------------

```bash
docker pull ubuntu:12.10

# Run an interactive shell, allocate a tty, attach stdin and stdout
# To detach the tty without exiting the shell, use the escape sequence Ctrl-p + Ctrl-q
docker run -i -t ubuntu:12.10 /bin/bash
```

Starting a long-running worker process
--------------------------------------

```bash
# Start a very useful long-running process
JOB=$(docker run -d ubuntu /bin/sh -c "while true; do echo Hello world; sleep 1; done")

# Collect the output of the job so far
docker logs $JOB

# Kill the job
docker kill $JOB
```

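While the job is running, it also appears in the container list:

```bash
# List running containers; the long-running job above shows up here
docker ps
```
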
Running an irc bouncer
----------------------

```bash
BOUNCER_ID=$(docker run -d -p 6667 -u irc shykes/znc $USER $PASSWORD)
echo "Configure your irc client to connect to port $(docker port $BOUNCER_ID 6667) of this machine"
```

Running Redis
-------------

```bash
REDIS_ID=$(docker run -d -p 6379 shykes/redis redis-server)
echo "Configure your redis client to connect to port $(docker port $REDIS_ID 6379) of this machine"
```

Share your own image!
---------------------

```bash
CONTAINER=$(docker run -d ubuntu:12.10 apt-get install -y curl)
docker commit -m "Installed curl" $CONTAINER $USER/betterbase
docker push $USER/betterbase
```

A list of publicly available images is [available here](https://github.com/dotcloud/docker/wiki/Public-docker-images).

Expose a service on a TCP port
------------------------------

```bash
# Expose port 4444 of this container, and tell netcat to listen on it
JOB=$(docker run -d -p 4444 base /bin/nc -l -p 4444)

# Which public port is NATed to my container?
PORT=$(docker port $JOB 4444)

# Connect to the public port via the host's public address
# Please note that because of how routing works, connecting to localhost or 127.0.0.1:$PORT will not work.
IP=$(ifconfig eth0 | perl -n -e 'if (m/inet addr:([\d\.]+)/g) { print $1 }')
echo hello world | nc $IP $PORT

# Verify that the network connection worked
echo "Daemon received: $(docker logs $JOB)"
```

Under the hood
--------------

Under the hood, Docker is built on the following components:

* The [cgroup](http://blog.dotcloud.com/kernel-secrets-from-the-paas-garage-part-24-c) and [namespacing](http://blog.dotcloud.com/under-the-hood-linux-kernels-on-dotcloud-part) capabilities of the Linux kernel;

* [AUFS](http://aufs.sourceforge.net/aufs.html), a powerful union filesystem with copy-on-write capabilities;

* The [Go](http://golang.org) programming language;

* [lxc](http://lxc.sourceforge.net/), a set of convenience scripts to simplify the creation of linux containers;

* The [Docker Image Specification](https://github.com/docker/docker/blob/master/image/spec/v1.md);

* The [Libcontainer Specification](https://github.com/docker/libcontainer/blob/master/SPEC.md).

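As a rough illustration of the copy-on-write idea, here is a toy AUFS union
mount done by hand (a sketch with illustrative paths; it assumes the aufs
kernel module is available):

```bash
# /tmp/ro plays the role of a read-only image layer,
# /tmp/rw the writable top layer of a container.
mkdir -p /tmp/ro /tmp/rw /tmp/union
sudo mount -t aufs -o br=/tmp/rw=rw:/tmp/ro=ro none /tmp/union

# Writes into /tmp/union land in /tmp/rw; /tmp/ro is never modified.
```
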
Contributing to Docker
======================

[GoDoc](https://godoc.org/github.com/docker/docker)
[Jenkins Build Status](https://jenkins.dockerproject.com/job/Docker%20Master/)

Want to hack on Docker? Awesome! We have [instructions to help you get
started contributing code or documentation](https://docs.docker.com/project/who-written-for/).

These instructions are probably not perfect, please let us know if anything
feels wrong or incomplete. Better yet, submit a PR and improve them yourself.

Getting the development builds
==============================

Want to run Docker from a master build? You can download
master builds at [master.dockerproject.com](https://master.dockerproject.com).
They are updated with each commit merged into the master branch.

Don't know how to use that super cool new feature in the master build? Check
out the master docs at
[docs.master.dockerproject.com](http://docs.master.dockerproject.com).

How the project is run
======================

Docker is a very, very active project. If you want to learn more about how it is run,
or want to get more involved, the best place to start is [the project directory](https://github.com/docker/docker/tree/master/project).

We are always open to suggestions on process improvements, and are always looking for more maintainers.

### Legal

*Brought to you courtesy of our legal counsel. For more context,
please see the "NOTICE" document in this repo.*

Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.

For more information, please see http://www.bis.doc.gov

Licensing
=========
Docker is licensed under the Apache License, Version 2.0. See
[LICENSE](https://github.com/docker/docker/blob/master/LICENSE) for the full
license text.

Note
----

We also keep the documentation in this repository. The website documentation is generated with Sphinx from these sources.
Please find it under docs/sources/ and read more about it at https://github.com/dotcloud/docker/master/docs/README.md

Please feel free to fix / update the documentation and send us pull requests. More tutorials are also welcome.

Setting up a dev environment
----------------------------

Instructions that have been verified to work on Ubuntu 12.10:

```bash
sudo apt-get -y install lxc wget bsdtar curl golang git

export GOPATH=~/go/
export PATH=$GOPATH/bin:$PATH

mkdir -p $GOPATH/src/github.com/dotcloud
cd $GOPATH/src/github.com/dotcloud
git clone git@github.com:dotcloud/docker.git
cd docker

go get -v github.com/dotcloud/docker/...
go install -v github.com/dotcloud/docker/...
```

Then run the docker daemon:

```bash
sudo $GOPATH/bin/docker -d
```

Run the `go install` command (above) to recompile docker.

What is a Standard Container?
=============================

Docker defines a unit of software delivery called a Standard Container. The goal of a Standard Container is to encapsulate a software component and all its dependencies in
a format that is self-describing and portable, so that any compliant runtime can run it without extra dependencies, regardless of the underlying machine and the contents of the container.

The spec for Standard Containers is currently a work in progress, but it is very straightforward. It mostly defines 1) an image format, 2) a set of standard operations, and 3) an execution environment.

A great analogy for this is the shipping container. Just like Standard Containers are a fundamental unit of software delivery, shipping containers (http://bricks.argz.com/ins/7823-1/12) are a fundamental unit of physical delivery.

### 1. STANDARD OPERATIONS

Just like shipping containers, Standard Containers define a set of STANDARD OPERATIONS. Shipping containers can be lifted, stacked, locked, loaded, unloaded and labelled. Similarly, standard containers can be started, stopped, copied, snapshotted, downloaded, uploaded and tagged.

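In the docker CLI these operations surface as ordinary subcommands; here is a
sketch using commands that appear in this README (variables taken from the
usage examples above):

```bash
docker run -d ubuntu /bin/true    # start
docker kill $JOB                  # stop
docker commit $CONTAINER repo     # snapshot
docker pull ubuntu:12.10          # download
docker push $USER/betterbase      # upload
```
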
### 2. CONTENT-AGNOSTIC

Just like shipping containers, Standard Containers are CONTENT-AGNOSTIC: all standard operations have the same effect regardless of the contents. A shipping container will be stacked in exactly the same way whether it contains Vietnamese powder coffee or spare Maserati parts. Similarly, Standard Containers are started or uploaded in the same way whether they contain a postgres database, a php application with its dependencies and application server, or Java build artifacts.

### 3. INFRASTRUCTURE-AGNOSTIC

Both types of containers are INFRASTRUCTURE-AGNOSTIC: they can be transported to thousands of facilities around the world, and manipulated by a wide variety of equipment. A shipping container can be packed in a factory in Ukraine, transported by truck to the nearest routing center, stacked onto a train, loaded into a German boat by an Australian-built crane, stored in a warehouse at a US facility, etc. Similarly, a standard container can be bundled on my laptop, uploaded to S3, downloaded, run and snapshotted by a build server at Equinix in Virginia, uploaded to 10 staging servers in a home-made Openstack cluster, then sent to 30 production instances across 3 EC2 regions.

### 4. DESIGNED FOR AUTOMATION

Because they offer the same standard operations regardless of content and infrastructure, Standard Containers, just like their physical counterpart, are extremely well-suited for automation. In fact, you could say automation is their secret weapon.

Many things that once required time-consuming and error-prone human effort can now be programmed. Before shipping containers, a bag of powder coffee was hauled, dragged, dropped, rolled and stacked by 10 different people in 10 different locations by the time it reached its destination. 1 out of 50 disappeared. 1 out of 20 was damaged. The process was slow, inefficient and cost a fortune - and was entirely different depending on the facility and the type of goods.

Similarly, before Standard Containers, by the time a software component ran in production, it had been individually built, configured, bundled, documented, patched, vendored, templated, tweaked and instrumented by 10 different people on 10 different computers. Builds failed, libraries conflicted, mirrors crashed, post-it notes were lost, logs were misplaced, cluster updates were half-broken. The process was slow, inefficient and cost a fortune - and was entirely different depending on the language and infrastructure provider.

### 5. INDUSTRIAL-GRADE DELIVERY

There are 17 million shipping containers in existence, packed with every physical good imaginable. Every single one of them can be loaded on the same boats, by the same cranes, in the same facilities, and sent anywhere in the World with incredible efficiency. It is embarrassing to think that a 30 ton shipment of coffee can safely travel half-way across the World in *less time* than it takes a software team to deliver its code from one datacenter to another sitting 10 miles away.

With Standard Containers we can put an end to that embarrassment, by making INDUSTRIAL-GRADE DELIVERY of software a reality.

Standard Container Specification
--------------------------------

(TODO)

### Image format

### Standard operations

* Copy
* Run
* Stop
* Wait
* Commit
* Attach standard streams
* List filesystem changes
* ...

### Execution environment

#### Root filesystem

#### Environment variables

#### Process arguments

#### Networking

#### Process namespacing

#### Resource limits

#### Process monitoring

#### Logging

#### Signals

#### Pseudo-terminal allocation

#### Security

Other Docker Related Projects
=============================

There are a number of projects under development that are based on Docker's
core technology. These projects expand the tooling built around the
Docker platform to broaden its application and utility.

* [Docker Registry](https://github.com/docker/distribution): Registry
  server for Docker (hosting/delivery of repositories and images)
* [Docker Machine](https://github.com/docker/machine): Machine management
  for a container-centric world
* [Docker Swarm](https://github.com/docker/swarm): A Docker-native clustering
  system
* [Docker Compose](https://github.com/docker/compose) (formerly Fig):
  Define and run multi-container apps

If you know of another project underway that should be listed here, please help
us keep this list up-to-date by submitting a PR.

71  SPECS/data-volumes.md  Normal file
@@ -0,0 +1,71 @@

## Spec for data volumes

Spec owner: Solomon Hykes <solomon@dotcloud.com>

Data volumes (issue #111) are a much-requested feature which triggers much discussion and debate. Below is the current authoritative spec for implementing data volumes.
This spec will be deprecated once the feature is fully implemented.

Discussion, requests, trolls, demands, offerings, threats and other forms of supplications concerning this spec should be addressed to Solomon here: https://github.com/dotcloud/docker/issues/111

### 1. Creating data volumes

At container creation, parts of a container's filesystem can be mounted as separate data volumes. Volumes are defined with the -v flag.

For example:

```bash
$ docker run -v /var/lib/postgres -v /var/log postgres /usr/bin/postgres
```

In this example, a new container is created from the 'postgres' image. At the same time, docker creates 2 new data volumes: one will be mapped to the container at /var/lib/postgres, the other at /var/log.

2 important notes:

1) Volumes don't have top-level names. At no point does the user provide a name, nor is a name given to them. Volumes are identified by the path at which they are mounted inside their container.

2) The user doesn't choose the source of the volume. Docker only mounts volumes it created itself, in the same way that it only runs containers that it created itself. That is by design.

### 2. Sharing data volumes

Instead of creating its own volumes, a container can share another container's volumes. For example:

```bash
$ docker run --volumes-from $OTHER_CONTAINER_ID postgres /usr/local/bin/postgres-backup
```

In this example, a new container is created from the 'postgres' image. At the same time, docker will *re-use* the 2 data volumes created in the previous example. One volume will be mounted on the /var/lib/postgres of *both* containers, and the other will be mounted on the /var/log of both containers.

### 3. Under the hood

Docker stores volumes in /var/lib/docker/volumes. Each volume receives a globally unique ID at creation, and is stored at /var/lib/docker/volumes/ID.

At creation, volumes are attached to a single container - the source of truth for this mapping will be the container's configuration.

Mounting a volume consists of calling "mount --bind" from the volume's directory to the appropriate sub-directory of the container mountpoint. This may be done by Docker itself, or farmed out to lxc (which supports mount-binding) if possible.

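A hand-rolled sketch of that mount, with illustrative paths (the volume ID
and the container mountpoint layout below are hypothetical; the real layout
is an internal detail of Docker):

```bash
VOLUME_ID=4d2f0abc1234                        # hypothetical volume ID
CONTAINER_ROOT=/path/to/container/rootfs      # illustrative container mountpoint
mount --bind /var/lib/docker/volumes/$VOLUME_ID $CONTAINER_ROOT/var/lib/postgres
```
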
### 4. Backups, transfers and other volume operations

Volumes sometimes need to be backed up, transferred between hosts, synchronized, etc. These operations typically are application-specific or site-specific, e.g. rsync vs. S3 upload vs. replication vs...

Rather than attempting to implement all these scenarios directly, Docker will allow for custom implementations using an extension mechanism.

### 5. Custom volume handlers

Docker allows for arbitrary code to be executed against a container's volumes, to implement any custom action: backup, transfer, synchronization across hosts, etc.

Here's an example:

```bash
$ DB=$(docker run -d -v /var/lib/postgres -v /var/log postgres /usr/bin/postgres)

$ BACKUP_JOB=$(docker run -d --volumes-from $DB shykes/backuper /usr/local/bin/backup-postgres --s3creds=$S3CREDS)

$ docker wait $BACKUP_JOB
```

Congratulations, you just implemented a custom volume handler, using Docker's built-in ability to 1) execute arbitrary code and 2) share volumes between containers.

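A natural follow-up is to check what the handler did, using commands from the
usage examples earlier in this document (`docker wait` prints the container's
exit code):

```bash
$ docker logs $BACKUP_JOB   # inspect the backup job's output
$ docker wait $BACKUP_JOB   # prints the job's exit code
```
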
82  Vagrantfile  vendored  Normal file
@@ -0,0 +1,82 @@

# -*- mode: ruby -*-
# vi: set ft=ruby :

def v10(config)
  config.vm.box = 'precise64'
  config.vm.box_url = 'http://files.vagrantup.com/precise64.box'

  # Install ubuntu packaging dependencies and create ubuntu packages
  config.vm.provision :shell, :inline => "echo 'deb http://ppa.launchpad.net/dotcloud/lxc-docker/ubuntu precise main' >>/etc/apt/sources.list"
  config.vm.provision :shell, :inline => 'export DEBIAN_FRONTEND=noninteractive; apt-get -qq update; apt-get install -qq -y --force-yes lxc-docker'
end

Vagrant::VERSION < "1.1.0" and Vagrant::Config.run do |config|
  v10(config)
end

Vagrant::VERSION >= "1.1.0" and Vagrant.configure("1") do |config|
  v10(config)
end

Vagrant::VERSION >= "1.1.0" and Vagrant.configure("2") do |config|
  config.vm.provider :aws do |aws|
    config.vm.box = "dummy"
    config.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
    aws.access_key_id = ENV["AWS_ACCESS_KEY_ID"]
    aws.secret_access_key = ENV["AWS_SECRET_ACCESS_KEY"]
    aws.keypair_name = ENV["AWS_KEYPAIR_NAME"]
    aws.ssh_private_key_path = ENV["AWS_SSH_PRIVKEY"]
    aws.region = "us-east-1"
    aws.ami = "ami-d0f89fb9"
    aws.ssh_username = "ubuntu"
    aws.instance_type = "t1.micro"
  end

  config.vm.provider :rackspace do |rs|
    config.vm.box = "dummy"
    config.vm.box_url = "https://github.com/mitchellh/vagrant-rackspace/raw/master/dummy.box"
    config.ssh.private_key_path = ENV["RS_PRIVATE_KEY"]
    rs.username = ENV["RS_USERNAME"]
    rs.api_key = ENV["RS_API_KEY"]
    rs.public_key_path = ENV["RS_PUBLIC_KEY"]
    rs.flavor = /512MB/
    rs.image = /Ubuntu/
  end

  config.vm.provider :virtualbox do |vb|
    config.vm.box = 'precise64'
    config.vm.box_url = 'http://files.vagrantup.com/precise64.box'
  end
end

Vagrant::VERSION >= "1.2.0" and Vagrant.configure("2") do |config|
  config.vm.provider :aws do |aws, override|
    config.vm.box = "dummy"
    config.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
    aws.access_key_id = ENV["AWS_ACCESS_KEY_ID"]
    aws.secret_access_key = ENV["AWS_SECRET_ACCESS_KEY"]
    aws.keypair_name = ENV["AWS_KEYPAIR_NAME"]
    override.ssh.private_key_path = ENV["AWS_SSH_PRIVKEY"]
    override.ssh.username = "ubuntu"
    aws.region = "us-east-1"
    aws.ami = "ami-d0f89fb9"
    aws.instance_type = "t1.micro"
  end

  config.vm.provider :rackspace do |rs|
    config.vm.box = "dummy"
    config.vm.box_url = "https://github.com/mitchellh/vagrant-rackspace/raw/master/dummy.box"
    config.ssh.private_key_path = ENV["RS_PRIVATE_KEY"]
    rs.username = ENV["RS_USERNAME"]
    rs.api_key = ENV["RS_API_KEY"]
    rs.public_key_path = ENV["RS_PUBLIC_KEY"]
    rs.flavor = /512MB/
    rs.image = /Ubuntu/
  end

  config.vm.provider :virtualbox do |vb|
    config.vm.box = 'precise64'
    config.vm.box_url = 'http://files.vagrantup.com/precise64.box'
  end
end

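With this Vagrantfile in place, bringing up a Docker host is the usual
Vagrant workflow (the AWS and Rackspace providers additionally require their
respective Vagrant plugins and the environment variables referenced above):

```bash
vagrant up                   # VirtualBox, the default provider
vagrant up --provider=aws    # AWS, via the vagrant-aws plugin
```
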
@@ -1,5 +0,0 @@
This directory contains code pertaining to the Docker API:

- Used by the docker client when communicating with the docker daemon

- Used by third party tools wishing to interface with the docker daemon

@@ -1,19 +0,0 @@
package api

import (
	"testing"
)

func TestJsonContentType(t *testing.T) {
	if !MatchesContentType("application/json", "application/json") {
		t.Fail()
	}

	if !MatchesContentType("application/json; charset=utf-8", "application/json") {
		t.Fail()
	}

	if MatchesContentType("dockerapplication/json", "application/json") {
		t.Fail()
	}
}

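For reference, a test file like this runs with the standard Go tooling, e.g.
from the repository root:

```bash
go test -run TestJsonContentType ./api
```
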
@@ -1,185 +0,0 @@
package client

import (
	"crypto/tls"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net"
	"net/http"
	"os"
	"reflect"
	"strings"
	"text/template"
	"time"

	"github.com/docker/docker/pkg/homedir"
	flag "github.com/docker/docker/pkg/mflag"
	"github.com/docker/docker/pkg/term"
	"github.com/docker/docker/registry"
)

type DockerCli struct {
	proto      string
	addr       string
	configFile *registry.ConfigFile
	in         io.ReadCloser
	out        io.Writer
	err        io.Writer
	keyFile    string
	tlsConfig  *tls.Config
	scheme     string
	// inFd holds file descriptor of the client's STDIN, if it's a valid file
	inFd uintptr
	// outFd holds file descriptor of the client's STDOUT, if it's a valid file
	outFd uintptr
	// isTerminalIn describes if client's STDIN is a TTY
	isTerminalIn bool
	// isTerminalOut describes if client's STDOUT is a TTY
	isTerminalOut bool
	transport     *http.Transport
}

var funcMap = template.FuncMap{
	"json": func(v interface{}) string {
		a, _ := json.Marshal(v)
		return string(a)
	},
}

// getMethod resolves a subcommand name such as "run" to the matching
// CmdRun method via reflection.
func (cli *DockerCli) getMethod(args ...string) (func(...string) error, bool) {
	camelArgs := make([]string, len(args))
	for i, s := range args {
		if len(s) == 0 {
			return nil, false
		}
		camelArgs[i] = strings.ToUpper(s[:1]) + strings.ToLower(s[1:])
	}
	methodName := "Cmd" + strings.Join(camelArgs, "")
	method := reflect.ValueOf(cli).MethodByName(methodName)
	if !method.IsValid() {
		return nil, false
	}
	return method.Interface().(func(...string) error), true
}

// Cmd executes the specified command
func (cli *DockerCli) Cmd(args ...string) error {
	if len(args) > 1 {
		method, exists := cli.getMethod(args[:2]...)
		if exists {
			return method(args[2:]...)
		}
	}
	if len(args) > 0 {
		method, exists := cli.getMethod(args[0])
		if !exists {
			fmt.Fprintf(cli.err, "docker: '%s' is not a docker command. See 'docker --help'.\n", args[0])
			os.Exit(1)
		}
		return method(args[1:]...)
	}
	return cli.CmdHelp()
}

// Subcmd returns a FlagSet for the named subcommand, wired to print a
// usage message and exit when help is requested.
func (cli *DockerCli) Subcmd(name, signature, description string, exitOnError bool) *flag.FlagSet {
	var errorHandling flag.ErrorHandling
	if exitOnError {
		errorHandling = flag.ExitOnError
	} else {
		errorHandling = flag.ContinueOnError
	}
	flags := flag.NewFlagSet(name, errorHandling)
	flags.Usage = func() {
		options := ""
		if signature != "" {
			signature = " " + signature
		}
		if flags.FlagCountUndeprecated() > 0 {
			options = " [OPTIONS]"
		}
		fmt.Fprintf(cli.out, "\nUsage: docker %s%s%s\n\n%s\n\n", name, options, signature, description)
		flags.SetOutput(cli.out)
		flags.PrintDefaults()
		os.Exit(0)
	}
	return flags
}

// LoadConfigFile reads the registry auth configuration from the user's
// home directory, warning (but not failing) on error.
func (cli *DockerCli) LoadConfigFile() (err error) {
	cli.configFile, err = registry.LoadConfig(homedir.Get())
	if err != nil {
		fmt.Fprintf(cli.err, "WARNING: %s\n", err)
	}
	return err
}

func (cli *DockerCli) CheckTtyInput(attachStdin, ttyMode bool) error {
	// In order to attach to a container tty, input stream for the client must
	// be a tty itself: redirecting or piping the client standard input is
	// incompatible with `docker run -t`, `docker exec -t` or `docker attach`.
	if ttyMode && attachStdin && !cli.isTerminalIn {
		return errors.New("cannot enable tty mode on non tty input")
	}
	return nil
}

// NewDockerCli wires up a DockerCli for the given streams, key file and
// daemon address.
func NewDockerCli(in io.ReadCloser, out, err io.Writer, keyFile string, proto, addr string, tlsConfig *tls.Config) *DockerCli {
	var (
		inFd          uintptr
		outFd         uintptr
		isTerminalIn  = false
		isTerminalOut = false
		scheme        = "http"
	)

	if tlsConfig != nil {
		scheme = "https"
	}
	if in != nil {
		inFd, isTerminalIn = term.GetFdInfo(in)
	}

	if out != nil {
		outFd, isTerminalOut = term.GetFdInfo(out)
	}

	if err == nil {
		err = out
	}

	// The transport is created here for reuse during the client session
	tr := &http.Transport{
		TLSClientConfig: tlsConfig,
	}

	// Why 32? See issue 8035
	timeout := 32 * time.Second
	if proto == "unix" {
		// no need for compression on local communications
		tr.DisableCompression = true
		tr.Dial = func(_, _ string) (net.Conn, error) {
			return net.DialTimeout(proto, addr, timeout)
		}
	} else {
		tr.Proxy = http.ProxyFromEnvironment
		tr.Dial = (&net.Dialer{Timeout: timeout}).Dial
	}

	return &DockerCli{
		proto:         proto,
		addr:          addr,
		in:            in,
		out:           out,
		err:           err,
		keyFile:       keyFile,
		inFd:          inFd,
		outFd:         outFd,
		isTerminalIn:  isTerminalIn,
		isTerminalOut: isTerminalOut,
		tlsConfig:     tlsConfig,
		scheme:        scheme,
		transport:     tr,
	}
}

File diff suppressed because it is too large

@@ -1,250 +0,0 @@
package client

import (
	"crypto/tls"
	"errors"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/http/httputil"
	"os"
	"runtime"
	"strings"
	"time"

	log "github.com/Sirupsen/logrus"
	"github.com/docker/docker/api"
	"github.com/docker/docker/autogen/dockerversion"
	"github.com/docker/docker/pkg/promise"
	"github.com/docker/docker/pkg/stdcopy"
	"github.com/docker/docker/pkg/term"
)

type tlsClientCon struct {
	*tls.Conn
	rawConn net.Conn
}

func (c *tlsClientCon) CloseWrite() error {
	// Go standard tls.Conn doesn't provide the CloseWrite() method so we do it
	// on its underlying connection.
	if cwc, ok := c.rawConn.(interface {
		CloseWrite() error
	}); ok {
		return cwc.CloseWrite()
	}
	return nil
}

func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) {
	return tlsDialWithDialer(new(net.Dialer), network, addr, config)
}

// We need to copy Go's implementation of tls.Dial (pkg/crypto/tls/tls.go) in
// order to return our custom tlsClientCon struct which holds both the tls.Conn
// object _and_ its underlying raw connection. The rationale for this is that
// we need to be able to close the write end of the connection when attaching,
// which tls.Conn does not provide.
func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) {
	// We want the Timeout and Deadline values from dialer to cover the
	// whole process: TCP connection and TLS handshake. This means that we
	// also need to start our own timers now.
	timeout := dialer.Timeout

	if !dialer.Deadline.IsZero() {
		deadlineTimeout := dialer.Deadline.Sub(time.Now())
		if timeout == 0 || deadlineTimeout < timeout {
			timeout = deadlineTimeout
		}
	}

	var errChannel chan error

	if timeout != 0 {
		errChannel = make(chan error, 2)
		time.AfterFunc(timeout, func() {
			errChannel <- errors.New("")
		})
	}

	rawConn, err := dialer.Dial(network, addr)
	if err != nil {
		return nil, err
	}
	// When we set up a TCP connection for hijack, there could be long periods
	// of inactivity (a long running command with no output) that in certain
	// network setups may cause ECONNTIMEOUT, leaving the client in an unknown
	// state. Setting TCP KeepAlive on the socket connection will prohibit
	// ECONNTIMEOUT unless the socket connection truly is broken
	if tcpConn, ok := rawConn.(*net.TCPConn); ok {
		tcpConn.SetKeepAlive(true)
		tcpConn.SetKeepAlivePeriod(30 * time.Second)
	}

	colonPos := strings.LastIndex(addr, ":")
	if colonPos == -1 {
		colonPos = len(addr)
	}
	hostname := addr[:colonPos]

	// If no ServerName is set, infer the ServerName
	// from the hostname we're connecting to.
	if config.ServerName == "" {
		// Make a copy to avoid polluting argument or default.
		c := *config
		c.ServerName = hostname
		config = &c
	}

	conn := tls.Client(rawConn, config)

	if timeout == 0 {
		err = conn.Handshake()
	} else {
		go func() {
			errChannel <- conn.Handshake()
		}()

		err = <-errChannel
	}

	if err != nil {
		rawConn.Close()
		return nil, err
	}

	// This is where we differ from the standard crypto/tls package: we return
	// a wrapper which holds both the TLS and raw connections.
	return &tlsClientCon{conn, rawConn}, nil
}

func (cli *DockerCli) dial() (net.Conn, error) {
	if cli.tlsConfig != nil && cli.proto != "unix" {
		// Notice this isn't Go standard's tls.Dial function
		return tlsDial(cli.proto, cli.addr, cli.tlsConfig)
	}
	return net.Dial(cli.proto, cli.addr)
}

// hijack sends a request that upgrades the HTTP connection to a raw stream
// and wires the container's stdio to the client's streams.
func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.ReadCloser, stdout, stderr io.Writer, started chan io.Closer, data interface{}) error {
	defer func() {
		if started != nil {
			close(started)
		}
	}()

	params, err := cli.encodeData(data)
	if err != nil {
		return err
	}
	req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), params)
	if err != nil {
		return err
	}
	req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION)
	req.Header.Set("Content-Type", "text/plain")
	req.Header.Set("Connection", "Upgrade")
	req.Header.Set("Upgrade", "tcp")
	req.Host = cli.addr

	// Check the dial error before touching the connection.
	dial, err := cli.dial()
	if err != nil {
		if strings.Contains(err.Error(), "connection refused") {
			return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
		}
		return err
	}
	// When we set up a TCP connection for hijack, there could be long periods
	// of inactivity (a long running command with no output) that in certain
	// network setups may cause ECONNTIMEOUT, leaving the client in an unknown
	// state. Setting TCP KeepAlive on the socket connection will prohibit
	// ECONNTIMEOUT unless the socket connection truly is broken
	if tcpConn, ok := dial.(*net.TCPConn); ok {
		tcpConn.SetKeepAlive(true)
		tcpConn.SetKeepAlivePeriod(30 * time.Second)
	}
	clientconn := httputil.NewClientConn(dial, nil)
	defer clientconn.Close()

	// Server hijacks the connection, error 'connection closed' expected
	clientconn.Do(req)

	rwc, br := clientconn.Hijack()
	defer rwc.Close()

	if started != nil {
		started <- rwc
	}

	var receiveStdout chan error

	var oldState *term.State

	if in != nil && setRawTerminal && cli.isTerminalIn && os.Getenv("NORAW") == "" {
		oldState, err = term.SetRawTerminal(cli.inFd)
		if err != nil {
			return err
		}
		defer term.RestoreTerminal(cli.inFd, oldState)
	}

	if stdout != nil || stderr != nil {
		receiveStdout = promise.Go(func() (err error) {
			defer func() {
				if in != nil {
					if setRawTerminal && cli.isTerminalIn {
						term.RestoreTerminal(cli.inFd, oldState)
					}
					// For some reason this Close call blocks on darwin..
					// As the client exits right after, simply discard the close
					// until we find a better solution.
					if runtime.GOOS != "darwin" {
						in.Close()
					}
				}
			}()

			// When TTY is ON, use regular copy
			if setRawTerminal && stdout != nil {
				_, err = io.Copy(stdout, br)
			} else {
				_, err = stdcopy.StdCopy(stdout, stderr, br)
			}
			log.Debugf("[hijack] End of stdout")
			return err
		})
	}

	sendStdin := promise.Go(func() error {
		if in != nil {
			io.Copy(rwc, in)
			log.Debugf("[hijack] End of stdin")
		}

		if conn, ok := rwc.(interface {
			CloseWrite() error
		}); ok {
			if err := conn.CloseWrite(); err != nil {
				log.Debugf("Couldn't send EOF: %s", err)
			}
		}
		// Discard errors due to pipe interruption
		return nil
	})

	if stdout != nil || stderr != nil {
		if err := <-receiveStdout; err != nil {
			log.Debugf("Error receiveStdout: %s", err)
			return err
		}
	}

	if !cli.isTerminalIn {
		if err := <-sendStdin; err != nil {
			log.Debugf("Error sendStdin: %s", err)
			return err
		}
	}
	return nil
}

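One detail worth noting in the hijack code above: raw-terminal mode is
skipped whenever a `NORAW` environment variable is set (see the
`os.Getenv("NORAW")` check), which can help when debugging attach behavior.
For example:

```bash
NORAW=1 docker run -i -t ubuntu /bin/bash
```
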
@@ -1,334 +0,0 @@
package client

import (
	"bytes"
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"os"
	gosignal "os/signal"
	"runtime"
	"strconv"
	"strings"
	"time"

	log "github.com/Sirupsen/logrus"
	"github.com/docker/docker/api"
	"github.com/docker/docker/autogen/dockerversion"
	"github.com/docker/docker/engine"
	"github.com/docker/docker/pkg/signal"
	"github.com/docker/docker/pkg/stdcopy"
	"github.com/docker/docker/pkg/term"
	"github.com/docker/docker/registry"
	"github.com/docker/docker/utils"
)

var (
	ErrConnectionRefused = errors.New("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
)

// HTTPClient returns an http.Client that reuses the CLI's transport.
func (cli *DockerCli) HTTPClient() *http.Client {
	return &http.Client{Transport: cli.transport}
}

// encodeData serializes data to a buffer, using engine.Env encoding when
// available and JSON otherwise.
func (cli *DockerCli) encodeData(data interface{}) (*bytes.Buffer, error) {
	params := bytes.NewBuffer(nil)
	if data != nil {
		if env, ok := data.(engine.Env); ok {
			if err := env.Encode(params); err != nil {
				return nil, err
			}
		} else {
			buf, err := json.Marshal(data)
			if err != nil {
				return nil, err
			}
			if _, err := params.Write(buf); err != nil {
				return nil, err
			}
		}
	}
	return params, nil
}

// clientRequest performs an HTTP request against the daemon's versioned API
// and returns the response body, content type and status code.
func (cli *DockerCli) clientRequest(method, path string, in io.Reader, headers map[string][]string) (io.ReadCloser, string, int, error) {
	expectedPayload := (method == "POST" || method == "PUT")
	if expectedPayload && in == nil {
		in = bytes.NewReader([]byte{})
	}
	req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), in)
	if err != nil {
		return nil, "", -1, err
	}
	req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION)
	req.URL.Host = cli.addr
	req.URL.Scheme = cli.scheme
	if headers != nil {
		for k, v := range headers {
			req.Header[k] = v
		}
	}
	if expectedPayload && req.Header.Get("Content-Type") == "" {
		req.Header.Set("Content-Type", "text/plain")
	}

	resp, err := cli.HTTPClient().Do(req)
	statusCode := -1
	if resp != nil {
		statusCode = resp.StatusCode
	}
	if err != nil {
		if strings.Contains(err.Error(), "connection refused") {
			return nil, "", statusCode, ErrConnectionRefused
		}

		if cli.tlsConfig == nil {
			return nil, "", statusCode, fmt.Errorf("%v. Are you trying to connect to a TLS-enabled daemon without TLS?", err)
		}

		return nil, "", statusCode, fmt.Errorf("An error occurred trying to connect: %v", err)
	}

	if statusCode < 200 || statusCode >= 400 {
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return nil, "", statusCode, err
		}
		if len(body) == 0 {
			return nil, "", statusCode, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(statusCode), req.URL)
		}
		return nil, "", statusCode, fmt.Errorf("Error response from daemon: %s", bytes.TrimSpace(body))
	}

	return resp.Body, resp.Header.Get("Content-Type"), statusCode, nil
}

// clientRequestAttemptLogin performs a request, prompting for a registry
// login and retrying once if the daemon answers 401 Unauthorized.
func (cli *DockerCli) clientRequestAttemptLogin(method, path string, in io.Reader, out io.Writer, index *registry.IndexInfo, cmdName string) (io.ReadCloser, int, error) {
	cmdAttempt := func(authConfig registry.AuthConfig) (io.ReadCloser, int, error) {
		buf, err := json.Marshal(authConfig)
		if err != nil {
			return nil, -1, err
		}
		registryAuthHeader := []string{
			base64.URLEncoding.EncodeToString(buf),
		}

		// begin the request
		body, contentType, statusCode, err := cli.clientRequest(method, path, in, map[string][]string{
			"X-Registry-Auth": registryAuthHeader,
		})
		if err == nil && out != nil {
			// If we are streaming output, complete the stream since
			// errors may not appear until later.
			err = cli.streamBody(body, contentType, true, out, nil)
		}
		if err != nil {
			// Since errors in a stream appear after status 200 has been written,
			// we may need to change the status code.
			if strings.Contains(err.Error(), "Authentication is required") ||
				strings.Contains(err.Error(), "Status 401") ||
				strings.Contains(err.Error(), "status code 401") {
				statusCode = http.StatusUnauthorized
			}
		}
		return body, statusCode, err
	}

	// Resolve the Auth config relevant for this server
	authConfig := cli.configFile.ResolveAuthConfig(index)
	body, statusCode, err := cmdAttempt(authConfig)
	if statusCode == http.StatusUnauthorized {
		fmt.Fprintf(cli.out, "\nPlease login prior to %s:\n", cmdName)
		if err = cli.CmdLogin(index.GetAuthConfigKey()); err != nil {
			return nil, -1, err
		}
		authConfig = cli.configFile.ResolveAuthConfig(index)
		return cmdAttempt(authConfig)
	}
	return body, statusCode, err
}

func (cli *DockerCli) call(method, path string, data interface{}, headers map[string][]string) (io.ReadCloser, int, error) {
	params, err := cli.encodeData(data)
	if err != nil {
		return nil, -1, err
	}

	if data != nil {
		if headers == nil {
			headers = make(map[string][]string)
		}
		headers["Content-Type"] = []string{"application/json"}
	}

	body, _, statusCode, err := cli.clientRequest(method, path, params, headers)
	return body, statusCode, err
}

func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, headers map[string][]string) error {
	return cli.streamHelper(method, path, true, in, out, nil, headers)
}

func (cli *DockerCli) streamHelper(method, path string, setRawTerminal bool, in io.Reader, stdout, stderr io.Writer, headers map[string][]string) error {
	body, contentType, _, err := cli.clientRequest(method, path, in, headers)
	if err != nil {
		return err
	}
	return cli.streamBody(body, contentType, setRawTerminal, stdout, stderr)
}

func (cli *DockerCli) streamBody(body io.ReadCloser, contentType string, setRawTerminal bool, stdout, stderr io.Writer) error {
	defer body.Close()

	if api.MatchesContentType(contentType, "application/json") {
		return utils.DisplayJSONMessagesStream(body, stdout, cli.outFd, cli.isTerminalOut)
	}
	if stdout != nil || stderr != nil {
		// When TTY is ON, use regular copy
		var err error
		if setRawTerminal {
			_, err = io.Copy(stdout, body)
		} else {
			_, err = stdcopy.StdCopy(stdout, stderr, body)
		}
		log.Debugf("[stream] End of stdout")
		return err
	}
	return nil
}

func (cli *DockerCli) resizeTty(id string, isExec bool) {
	height, width := cli.getTtySize()
	if height == 0 && width == 0 {
		return
	}
	v := url.Values{}
	v.Set("h", strconv.Itoa(height))
	v.Set("w", strconv.Itoa(width))

	path := ""
	if !isExec {
		path = "/containers/" + id + "/resize?"
	} else {
		path = "/exec/" + id + "/resize?"
	}

	if _, _, err := readBody(cli.call("POST", path+v.Encode(), nil, nil)); err != nil {
		log.Debugf("Error resize: %s", err)
	}
}

func waitForExit(cli *DockerCli, containerID string) (int, error) {
	stream, _, err := cli.call("POST", "/containers/"+containerID+"/wait", nil, nil)
	if err != nil {
		return -1, err
	}

	var out engine.Env
	if err := out.Decode(stream); err != nil {
		return -1, err
	}
	return out.GetInt("StatusCode"), nil
}

// getExitCode performs an inspect on the container. It returns
// the running state and the exit code.
func getExitCode(cli *DockerCli, containerID string) (bool, int, error) {
	stream, _, err := cli.call("GET", "/containers/"+containerID+"/json", nil, nil)
	if err != nil {
		// If we can't connect, then the daemon probably died.
		if err != ErrConnectionRefused {
			return false, -1, err
		}
		return false, -1, nil
	}

	var result engine.Env
	if err := result.Decode(stream); err != nil {
		return false, -1, err
	}

	state := result.GetSubEnv("State")
	return state.GetBool("Running"), state.GetInt("ExitCode"), nil
}

// getExecExitCode performs an inspect on the exec command. It returns
// the running state and the exit code.
func getExecExitCode(cli *DockerCli, execID string) (bool, int, error) {
	stream, _, err := cli.call("GET", "/exec/"+execID+"/json", nil, nil)
	if err != nil {
		// If we can't connect, then the daemon probably died.
		if err != ErrConnectionRefused {
			return false, -1, err
		}
		return false, -1, nil
	}

	var result engine.Env
	if err := result.Decode(stream); err != nil {
		return false, -1, err
	}

	return result.GetBool("Running"), result.GetInt("ExitCode"), nil
}

func (cli *DockerCli) monitorTtySize(id string, isExec bool) error {
	cli.resizeTty(id, isExec)

	if runtime.GOOS == "windows" {
		go func() {
			prevH, prevW := cli.getTtySize()
			for {
				time.Sleep(time.Millisecond * 250)
				h, w := cli.getTtySize()

				if prevW != w || prevH != h {
					cli.resizeTty(id, isExec)
				}
				prevH = h
				prevW = w
			}
		}()
	} else {
		sigchan := make(chan os.Signal, 1)
		gosignal.Notify(sigchan, signal.SIGWINCH)
		go func() {
			for _ = range sigchan {
				cli.resizeTty(id, isExec)
			}
		}()
	}
	return nil
}

func (cli *DockerCli) getTtySize() (int, int) {
	if !cli.isTerminalOut {
		return 0, 0
	}
	ws, err := term.GetWinsize(cli.outFd)
	if err != nil {
		log.Debugf("Error getting size: %s", err)
		if ws == nil {
			return 0, 0
		}
	}
	return int(ws.Height), int(ws.Width)
}

// readBody drains and closes the response stream, forwarding any error.
func readBody(stream io.ReadCloser, statusCode int, err error) ([]byte, int, error) {
	if stream != nil {
		defer stream.Close()
	}
	if err != nil {
		return nil, statusCode, err
	}
	body, err := ioutil.ReadAll(stream)
	if err != nil {
		return nil, -1, err
	}
	return body, statusCode, nil
}

132  api/common.go
@@ -1,132 +0,0 @@

package api

import (
	"fmt"
	"mime"
	"os"
	"path/filepath"
	"strings"

	log "github.com/Sirupsen/logrus"
	"github.com/docker/docker/engine"
	"github.com/docker/docker/pkg/parsers"
	"github.com/docker/docker/pkg/version"
	"github.com/docker/libtrust"
)

const (
	APIVERSION            version.Version = "1.18"
	DEFAULTHTTPHOST                       = "127.0.0.1"
	DEFAULTUNIXSOCKET                     = "/var/run/docker.sock"
	DefaultDockerfileName string          = "Dockerfile"
)

// ValidateHost parses and normalizes a daemon host string.
func ValidateHost(val string) (string, error) {
	host, err := parsers.ParseHost(DEFAULTHTTPHOST, DEFAULTUNIXSOCKET, val)
	if err != nil {
		return val, err
	}
	return host, nil
}

// TODO remove, used on < 1.5 in getContainersJSON
func DisplayablePorts(ports *engine.Table) string {
	var (
		result          = []string{}
		hostMappings    = []string{}
		firstInGroupMap map[string]int
		lastInGroupMap  map[string]int
	)
	firstInGroupMap = make(map[string]int)
	lastInGroupMap = make(map[string]int)
	ports.SetKey("PrivatePort")
	ports.Sort()
	for _, port := range ports.Data {
		var (
			current      = port.GetInt("PrivatePort")
			portKey      = port.Get("Type")
			firstInGroup int
			lastInGroup  int
		)
		if port.Get("IP") != "" {
			if port.GetInt("PublicPort") != current {
				hostMappings = append(hostMappings, fmt.Sprintf("%s:%d->%d/%s", port.Get("IP"), port.GetInt("PublicPort"), port.GetInt("PrivatePort"), port.Get("Type")))
				continue
			}
			portKey = fmt.Sprintf("%s/%s", port.Get("IP"), port.Get("Type"))
		}
		firstInGroup = firstInGroupMap[portKey]
		lastInGroup = lastInGroupMap[portKey]

		if firstInGroup == 0 {
			firstInGroupMap[portKey] = current
			lastInGroupMap[portKey] = current
			continue
		}

		if current == (lastInGroup + 1) {
			lastInGroupMap[portKey] = current
			continue
		}
		result = append(result, FormGroup(portKey, firstInGroup, lastInGroup))
		firstInGroupMap[portKey] = current
		lastInGroupMap[portKey] = current
	}
	for portKey, firstInGroup := range firstInGroupMap {
		result = append(result, FormGroup(portKey, firstInGroup, lastInGroupMap[portKey]))
	}
	result = append(result, hostMappings...)
	return strings.Join(result, ", ")
}

// FormGroup renders a port range (optionally with its host IP) such as
// "1.2.3.4:80-82->80-82/tcp".
func FormGroup(key string, start, last int) string {
	var (
		group     string
		parts     = strings.Split(key, "/")
		groupType = parts[0]
		ip        = ""
	)
	if len(parts) > 1 {
		ip = parts[0]
		groupType = parts[1]
	}
	if start == last {
		group = fmt.Sprintf("%d", start)
	} else {
		group = fmt.Sprintf("%d-%d", start, last)
	}
	if ip != "" {
		group = fmt.Sprintf("%s:%s->%s", ip, group, group)
	}
	return fmt.Sprintf("%s/%s", group, groupType)
}

// MatchesContentType reports whether contentType parses to the expected
// mime type.
func MatchesContentType(contentType, expectedType string) bool {
	mimetype, _, err := mime.ParseMediaType(contentType)
	if err != nil {
		log.Errorf("Error parsing media type: %s error: %v", contentType, err)
	}
	return err == nil && mimetype == expectedType
}

// LoadOrCreateTrustKey attempts to load the libtrust key at the given path,
// otherwise generates a new one
func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) {
	err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700)
	if err != nil {
		return nil, err
	}
	trustKey, err := libtrust.LoadKeyFile(trustKeyPath)
	if err == libtrust.ErrKeyFileDoesNotExist {
		trustKey, err = libtrust.GenerateECP256PrivateKey()
		if err != nil {
			return nil, fmt.Errorf("Error generating key: %s", err)
		}
		if err := libtrust.SaveKey(trustKeyPath, trustKey); err != nil {
			return nil, fmt.Errorf("Error saving key file: %s", err)
		}
	} else if err != nil {
		return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err)
	}
	return trustKey, nil
}

1614  api/server/server.go
File diff suppressed because it is too large

@@ -1,105 +0,0 @@
|
||||
// +build linux
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"syscall"
|
||||
|
||||
"github.com/docker/docker/engine"
|
||||
"github.com/docker/docker/pkg/systemd"
|
||||
)
|
||||
|
||||
// NewServer sets up the required Server and does protocol specific checking.
|
||||
func NewServer(proto, addr string, job *engine.Job) (Server, error) {
|
||||
// Basic error and sanity checking
|
||||
switch proto {
|
||||
case "fd":
|
||||
return nil, serveFd(addr, job)
|
||||
case "tcp":
|
||||
return setupTcpHttp(addr, job)
|
||||
case "unix":
|
||||
return setupUnixHttp(addr, job)
|
||||
default:
|
||||
return nil, fmt.Errorf("Invalid protocol format.")
|
||||
}
|
||||
}

func setupUnixHttp(addr string, job *engine.Job) (*HttpServer, error) {
	r := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("CorsHeaders"), job.Getenv("Version"))

	if err := syscall.Unlink(addr); err != nil && !os.IsNotExist(err) {
		return nil, err
	}
	mask := syscall.Umask(0777)
	defer syscall.Umask(mask)

	l, err := newListener("unix", addr, job.GetenvBool("BufferRequests"))
	if err != nil {
		return nil, err
	}

	if err := setSocketGroup(addr, job.Getenv("SocketGroup")); err != nil {
		return nil, err
	}

	if err := os.Chmod(addr, 0660); err != nil {
		return nil, err
	}

	return &HttpServer{&http.Server{Addr: addr, Handler: r}, l}, nil
}

// serveFd creates an http.Server and sets it up to serve given a socket activated
// argument.
func serveFd(addr string, job *engine.Job) error {
	r := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("CorsHeaders"), job.Getenv("Version"))

	ls, e := systemd.ListenFD(addr)
	if e != nil {
		return e
	}

	chErrors := make(chan error, len(ls))

	// We don't want to start serving on these sockets until the
	// daemon is initialized and installed. Otherwise required handlers
	// won't be ready.
	<-activationLock

	// Since ListenFD will return one or more sockets we have
	// to create a go func to spawn off multiple serves
	for i := range ls {
		listener := ls[i]
		go func() {
			httpSrv := http.Server{Handler: r}
			chErrors <- httpSrv.Serve(listener)
		}()
	}

	for i := 0; i < len(ls); i++ {
		err := <-chErrors
		if err != nil {
			return err
		}
	}

	return nil
}

// Called through eng.Job("acceptconnections")
func AcceptConnections(job *engine.Job) engine.Status {
	// Tell the init daemon we are accepting requests
	go systemd.SdNotify("READY=1")

	// close the lock so the listeners start accepting connections
	select {
	case <-activationLock:
	default:
		close(activationLock)
	}

	return engine.StatusOK
}
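
The activationLock used above is just a closed-channel latch: listeners block on a receive until AcceptConnections closes the channel. A minimal standalone sketch of the same pattern (all names here are hypothetical stand-ins):

package main

import (
	"fmt"
	"sync"
)

var activationGate = make(chan struct{}) // stand-in for activationLock

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			<-activationGate // blocks until the gate is closed
			fmt.Println("listener", id, "now serving")
		}(i)
	}
	// Mirrors AcceptConnections: close the gate exactly once.
	select {
	case <-activationGate:
	default:
		close(activationGate)
	}
	wg.Wait()
}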
@@ -1,553 +0,0 @@
package server

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"reflect"
	"strings"
	"testing"

	"github.com/docker/docker/api"
	"github.com/docker/docker/engine"
	"github.com/docker/docker/pkg/version"
)

func TestGetBoolParam(t *testing.T) {
	if ret, err := getBoolParam("true"); err != nil || !ret {
		t.Fatalf("true -> true, nil | got %t %s", ret, err)
	}
	if ret, err := getBoolParam("True"); err != nil || !ret {
		t.Fatalf("True -> true, nil | got %t %s", ret, err)
	}
	if ret, err := getBoolParam("1"); err != nil || !ret {
		t.Fatalf("1 -> true, nil | got %t %s", ret, err)
	}
	if ret, err := getBoolParam(""); err != nil || ret {
		t.Fatalf("\"\" -> false, nil | got %t %s", ret, err)
	}
	if ret, err := getBoolParam("false"); err != nil || ret {
		t.Fatalf("false -> false, nil | got %t %s", ret, err)
	}
	if ret, err := getBoolParam("0"); err != nil || ret {
		t.Fatalf("0 -> false, nil | got %t %s", ret, err)
	}
	if ret, err := getBoolParam("faux"); err == nil || ret {
		t.Fatalf("faux -> false, err | got %t %s", ret, err)
	}
}

func TestHttpError(t *testing.T) {
	r := httptest.NewRecorder()

	httpError(r, fmt.Errorf("No such method"))
	if r.Code != http.StatusNotFound {
		t.Fatalf("Expected %d, got %d", http.StatusNotFound, r.Code)
	}

	httpError(r, fmt.Errorf("This account hasn't been activated"))
	if r.Code != http.StatusForbidden {
		t.Fatalf("Expected %d, got %d", http.StatusForbidden, r.Code)
	}

	httpError(r, fmt.Errorf("Some error"))
	if r.Code != http.StatusInternalServerError {
		t.Fatalf("Expected %d, got %d", http.StatusInternalServerError, r.Code)
	}
}

func TestGetVersion(t *testing.T) {
	eng := engine.New()
	var called bool
	eng.Register("version", func(job *engine.Job) engine.Status {
		called = true
		v := &engine.Env{}
		v.SetJson("Version", "42.1")
		v.Set("ApiVersion", "1.1.1.1.1")
		v.Set("GoVersion", "2.42")
		v.Set("Os", "Linux")
		v.Set("Arch", "x86_64")
		if _, err := v.WriteTo(job.Stdout); err != nil {
			return job.Error(err)
		}
		return engine.StatusOK
	})
	r := serveRequest("GET", "/version", nil, eng, t)
	if !called {
		t.Fatalf("handler was not called")
	}
	v := readEnv(r.Body, t)
	if v.Get("Version") != "42.1" {
		t.Fatalf("%#v\n", v)
	}
	if r.HeaderMap.Get("Content-Type") != "application/json" {
		t.Fatalf("%#v\n", r)
	}
}

func TestGetInfo(t *testing.T) {
	eng := engine.New()
	var called bool
	eng.Register("info", func(job *engine.Job) engine.Status {
		called = true
		v := &engine.Env{}
		v.SetInt("Containers", 1)
		v.SetInt("Images", 42000)
		if _, err := v.WriteTo(job.Stdout); err != nil {
			return job.Error(err)
		}
		return engine.StatusOK
	})
	r := serveRequest("GET", "/info", nil, eng, t)
	if !called {
		t.Fatalf("handler was not called")
	}
	v := readEnv(r.Body, t)
	if v.GetInt("Images") != 42000 {
		t.Fatalf("%#v\n", v)
	}
	if v.GetInt("Containers") != 1 {
		t.Fatalf("%#v\n", v)
	}
	assertContentType(r, "application/json", t)
}

func TestGetImagesJSON(t *testing.T) {
	eng := engine.New()
	var called bool
	eng.Register("images", func(job *engine.Job) engine.Status {
		called = true
		v := createEnvFromGetImagesJSONStruct(sampleImage)
		if _, err := v.WriteTo(job.Stdout); err != nil {
			return job.Error(err)
		}
		return engine.StatusOK
	})
	r := serveRequest("GET", "/images/json", nil, eng, t)
	if !called {
		t.Fatal("handler was not called")
	}
	assertHttpNotError(r, t)
	assertContentType(r, "application/json", t)
	var observed getImagesJSONStruct
	if err := json.Unmarshal(r.Body.Bytes(), &observed); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(observed, sampleImage) {
		t.Errorf("Expected %#v but got %#v", sampleImage, observed)
	}
}

func TestGetImagesJSONFilter(t *testing.T) {
	eng := engine.New()
	filter := "nothing"
	eng.Register("images", func(job *engine.Job) engine.Status {
		filter = job.Getenv("filter")
		return engine.StatusOK
	})
	serveRequest("GET", "/images/json?filter=aaaa", nil, eng, t)
	if filter != "aaaa" {
		t.Errorf("%#v", filter)
	}
}

func TestGetImagesJSONFilters(t *testing.T) {
	eng := engine.New()
	filter := "nothing"
	eng.Register("images", func(job *engine.Job) engine.Status {
		filter = job.Getenv("filters")
		return engine.StatusOK
	})
	serveRequest("GET", "/images/json?filters=nnnn", nil, eng, t)
	if filter != "nnnn" {
		t.Errorf("%#v", filter)
	}
}

func TestGetImagesJSONAll(t *testing.T) {
	eng := engine.New()
	allFilter := "-1"
	eng.Register("images", func(job *engine.Job) engine.Status {
		allFilter = job.Getenv("all")
		return engine.StatusOK
	})
	serveRequest("GET", "/images/json?all=1", nil, eng, t)
	if allFilter != "1" {
		t.Errorf("%#v", allFilter)
	}
}

func TestGetImagesJSONLegacyFormat(t *testing.T) {
	eng := engine.New()
	var called bool
	eng.Register("images", func(job *engine.Job) engine.Status {
		called = true
		outsLegacy := engine.NewTable("Created", 0)
		outsLegacy.Add(createEnvFromGetImagesJSONStruct(sampleImage))
		if _, err := outsLegacy.WriteListTo(job.Stdout); err != nil {
			return job.Error(err)
		}
		return engine.StatusOK
	})
	r := serveRequestUsingVersion("GET", "/images/json", "1.6", nil, eng, t)
	if !called {
		t.Fatal("handler was not called")
	}
	assertHttpNotError(r, t)
	assertContentType(r, "application/json", t)
	images := engine.NewTable("Created", 0)
	if _, err := images.ReadListFrom(r.Body.Bytes()); err != nil {
		t.Fatal(err)
	}
	if images.Len() != 1 {
		t.Fatalf("Expected 1 image, %d found", images.Len())
	}
	image := images.Data[0]
	if image.Get("Tag") != "test-tag" {
		t.Errorf("Expected tag 'test-tag', found '%s'", image.Get("Tag"))
	}
	if image.Get("Repository") != "test-name" {
		t.Errorf("Expected repository 'test-name', found '%s'", image.Get("Repository"))
	}
}

func TestGetContainersByName(t *testing.T) {
	eng := engine.New()
	name := "container_name"
	var called bool
	eng.Register("container_inspect", func(job *engine.Job) engine.Status {
		called = true
		if job.Args[0] != name {
			t.Errorf("name != '%s': %#v", name, job.Args[0])
		}
		if api.APIVERSION.LessThan("1.12") && !job.GetenvBool("dirty") {
			t.Errorf("dirty env variable not set")
		} else if api.APIVERSION.GreaterThanOrEqualTo("1.12") && job.GetenvBool("dirty") {
			t.Errorf("dirty env variable set when it shouldn't")
		}
		v := &engine.Env{}
		v.SetBool("dirty", true)
		if _, err := v.WriteTo(job.Stdout); err != nil {
			return job.Error(err)
		}
		return engine.StatusOK
	})
	r := serveRequest("GET", "/containers/"+name+"/json", nil, eng, t)
	if !called {
		t.Fatal("handler was not called")
	}
	assertContentType(r, "application/json", t)
	var stdoutJson interface{}
	if err := json.Unmarshal(r.Body.Bytes(), &stdoutJson); err != nil {
		t.Fatalf("%#v", err)
	}
	if stdoutJson.(map[string]interface{})["dirty"].(float64) != 1 {
		t.Fatalf("%#v", stdoutJson)
	}
}

func TestGetEvents(t *testing.T) {
	eng := engine.New()
	var called bool
	eng.Register("events", func(job *engine.Job) engine.Status {
		called = true
		since := job.Getenv("since")
		if since != "1" {
			t.Fatalf("'since' should be 1, found %#v instead", since)
		}
		until := job.Getenv("until")
		if until != "0" {
			t.Fatalf("'until' should be 0, found %#v instead", until)
		}
		v := &engine.Env{}
		v.Set("since", since)
		v.Set("until", until)
		if _, err := v.WriteTo(job.Stdout); err != nil {
			return job.Error(err)
		}
		return engine.StatusOK
	})
	r := serveRequest("GET", "/events?since=1&until=0", nil, eng, t)
	if !called {
		t.Fatal("handler was not called")
	}
	assertContentType(r, "application/json", t)
	var stdout_json struct {
		Since int
		Until int
	}
	if err := json.Unmarshal(r.Body.Bytes(), &stdout_json); err != nil {
		t.Fatal(err)
	}
	if stdout_json.Since != 1 {
		t.Errorf("since != 1: %#v", stdout_json.Since)
	}
	if stdout_json.Until != 0 {
		t.Errorf("until != 0: %#v", stdout_json.Until)
	}
}

func TestLogs(t *testing.T) {
	eng := engine.New()
	var inspect bool
	var logs bool
	eng.Register("container_inspect", func(job *engine.Job) engine.Status {
		inspect = true
		if len(job.Args) == 0 {
			t.Fatal("Job arguments are empty")
		}
		if job.Args[0] != "test" {
			t.Fatalf("Container name %s, must be test", job.Args[0])
		}
		return engine.StatusOK
	})
	expected := "logs"
	eng.Register("logs", func(job *engine.Job) engine.Status {
		logs = true
		if len(job.Args) == 0 {
			t.Fatal("Job arguments are empty")
		}
		if job.Args[0] != "test" {
			t.Fatalf("Container name %s, must be test", job.Args[0])
		}
		follow := job.Getenv("follow")
		if follow != "1" {
			t.Fatalf("follow: %s, must be 1", follow)
		}
		stdout := job.Getenv("stdout")
		if stdout != "1" {
			t.Fatalf("stdout %s, must be 1", stdout)
		}
		stderr := job.Getenv("stderr")
		if stderr != "" {
			t.Fatalf("stderr %s, must be empty", stderr)
		}
		timestamps := job.Getenv("timestamps")
		if timestamps != "1" {
			t.Fatalf("timestamps %s, must be 1", timestamps)
		}
		job.Stdout.Write([]byte(expected))
		return engine.StatusOK
	})
	r := serveRequest("GET", "/containers/test/logs?follow=1&stdout=1&timestamps=1", nil, eng, t)
	if r.Code != http.StatusOK {
		t.Fatalf("Got status %d, expected %d", r.Code, http.StatusOK)
	}
	if !inspect {
		t.Fatal("container_inspect job was not called")
	}
	if !logs {
		t.Fatal("logs job was not called")
	}
	res := r.Body.String()
	if res != expected {
		t.Fatalf("Output %s, expected %s", res, expected)
	}
}

func TestLogsNoStreams(t *testing.T) {
	eng := engine.New()
	var inspect bool
	var logs bool
	eng.Register("container_inspect", func(job *engine.Job) engine.Status {
		inspect = true
		if len(job.Args) == 0 {
			t.Fatal("Job arguments are empty")
		}
		if job.Args[0] != "test" {
			t.Fatalf("Container name %s, must be test", job.Args[0])
		}
		return engine.StatusOK
	})
	eng.Register("logs", func(job *engine.Job) engine.Status {
		logs = true
		return engine.StatusOK
	})
	r := serveRequest("GET", "/containers/test/logs", nil, eng, t)
	if r.Code != http.StatusBadRequest {
		t.Fatalf("Got status %d, expected %d", r.Code, http.StatusBadRequest)
	}
	if inspect {
		t.Fatal("container_inspect job was called, but it shouldn't have been")
	}
	if logs {
		t.Fatal("logs job was called, but it shouldn't have been")
	}
	res := strings.TrimSpace(r.Body.String())
	expected := "Bad parameters: you must choose at least one stream"
	if !strings.Contains(res, expected) {
		t.Fatalf("Output %s, expected %s in it", res, expected)
	}
}

func TestGetImagesHistory(t *testing.T) {
	eng := engine.New()
	imageName := "docker-test-image"
	var called bool
	eng.Register("history", func(job *engine.Job) engine.Status {
		called = true
		if len(job.Args) == 0 {
			t.Fatal("Job arguments are empty")
		}
		if job.Args[0] != imageName {
			t.Fatalf("name != '%s': %#v", imageName, job.Args[0])
		}
		v := &engine.Env{}
		if _, err := v.WriteTo(job.Stdout); err != nil {
			return job.Error(err)
		}
		return engine.StatusOK
	})
	r := serveRequest("GET", "/images/"+imageName+"/history", nil, eng, t)
	if !called {
		t.Fatalf("handler was not called")
	}
	if r.Code != http.StatusOK {
		t.Fatalf("Got status %d, expected %d", r.Code, http.StatusOK)
	}
	if r.HeaderMap.Get("Content-Type") != "application/json" {
		t.Fatalf("%#v\n", r)
	}
}

func TestGetImagesByName(t *testing.T) {
	eng := engine.New()
	name := "image_name"
	var called bool
	eng.Register("image_inspect", func(job *engine.Job) engine.Status {
		called = true
		if job.Args[0] != name {
			t.Fatalf("name != '%s': %#v", name, job.Args[0])
		}
		if api.APIVERSION.LessThan("1.12") && !job.GetenvBool("dirty") {
			t.Fatal("dirty env variable not set")
		} else if api.APIVERSION.GreaterThanOrEqualTo("1.12") && job.GetenvBool("dirty") {
			t.Fatal("dirty env variable set when it shouldn't")
		}
		v := &engine.Env{}
		v.SetBool("dirty", true)
		if _, err := v.WriteTo(job.Stdout); err != nil {
			return job.Error(err)
		}
		return engine.StatusOK
	})
	r := serveRequest("GET", "/images/"+name+"/json", nil, eng, t)
	if !called {
		t.Fatal("handler was not called")
	}
	if r.HeaderMap.Get("Content-Type") != "application/json" {
		t.Fatalf("%#v\n", r)
	}
	var stdoutJson interface{}
	if err := json.Unmarshal(r.Body.Bytes(), &stdoutJson); err != nil {
		t.Fatalf("%#v", err)
	}
	if stdoutJson.(map[string]interface{})["dirty"].(float64) != 1 {
		t.Fatalf("%#v", stdoutJson)
	}
}

func TestDeleteContainers(t *testing.T) {
	eng := engine.New()
	name := "foo"
	var called bool
	eng.Register("rm", func(job *engine.Job) engine.Status {
		called = true
		if len(job.Args) == 0 {
			t.Fatalf("Job arguments are empty")
		}
		if job.Args[0] != name {
			t.Fatalf("name != '%s': %#v", name, job.Args[0])
		}
		return engine.StatusOK
	})
	r := serveRequest("DELETE", "/containers/"+name, nil, eng, t)
	if !called {
		t.Fatalf("handler was not called")
	}
	if r.Code != http.StatusNoContent {
		t.Fatalf("Got status %d, expected %d", r.Code, http.StatusNoContent)
	}
}

func serveRequest(method, target string, body io.Reader, eng *engine.Engine, t *testing.T) *httptest.ResponseRecorder {
	return serveRequestUsingVersion(method, target, api.APIVERSION, body, eng, t)
}

func serveRequestUsingVersion(method, target string, version version.Version, body io.Reader, eng *engine.Engine, t *testing.T) *httptest.ResponseRecorder {
	r := httptest.NewRecorder()
	req, err := http.NewRequest(method, target, body)
	if err != nil {
		t.Fatal(err)
	}
	ServeRequest(eng, version, r, req)
	return r
}

func readEnv(src io.Reader, t *testing.T) *engine.Env {
	out := engine.NewOutput()
	v, err := out.AddEnv()
	if err != nil {
		t.Fatal(err)
	}
	if _, err := io.Copy(out, src); err != nil {
		t.Fatal(err)
	}
	out.Close()
	return v
}

func toJson(data interface{}, t *testing.T) io.Reader {
	var buf bytes.Buffer
	if err := json.NewEncoder(&buf).Encode(data); err != nil {
		t.Fatal(err)
	}
	return &buf
}

func assertContentType(recorder *httptest.ResponseRecorder, content_type string, t *testing.T) {
	if recorder.HeaderMap.Get("Content-Type") != content_type {
		t.Fatalf("%#v\n", recorder)
	}
}

// XXX: Duplicated from integration/utils_test.go, but maybe that's OK as that
// should die as soon as we convert all the integration tests?
// assertHttpNotError expects the given response to not have an error.
// Otherwise it causes the test to fail.
func assertHttpNotError(r *httptest.ResponseRecorder, t *testing.T) {
	// Non-error http statuses are [200, 400)
	if r.Code < http.StatusOK || r.Code >= http.StatusBadRequest {
		t.Fatal(fmt.Errorf("Unexpected http error: %v", r.Code))
	}
}

func createEnvFromGetImagesJSONStruct(data getImagesJSONStruct) *engine.Env {
	v := &engine.Env{}
	v.SetList("RepoTags", data.RepoTags)
	v.Set("Id", data.Id)
	v.SetInt64("Created", data.Created)
	v.SetInt64("Size", data.Size)
	v.SetInt64("VirtualSize", data.VirtualSize)
	return v
}

type getImagesJSONStruct struct {
	RepoTags    []string
	Id          string
	Created     int64
	Size        int64
	VirtualSize int64
}

var sampleImage getImagesJSONStruct = getImagesJSONStruct{
	RepoTags:    []string{"test-name:test-tag"},
	Id:          "ID",
	Created:     999,
	Size:        777,
	VirtualSize: 666,
}
@@ -1,31 +0,0 @@
// +build windows

package server

import (
	"fmt"

	"github.com/docker/docker/engine"
)

// NewServer sets up the required Server and does protocol specific checking.
func NewServer(proto, addr string, job *engine.Job) (Server, error) {
	// Basic error and sanity checking
	switch proto {
	case "tcp":
		return setupTcpHttp(addr, job)
	default:
		return nil, fmt.Errorf("Invalid protocol format. Windows only supports tcp.")
	}
}

// Called through eng.Job("acceptconnections")
func AcceptConnections(job *engine.Job) engine.Status {
	// close the lock so the listeners start accepting connections
	if activationLock != nil {
		close(activationLock)
	}

	return engine.StatusOK
}
@@ -1,87 +0,0 @@
// This package is used for API stability in the types and responses served to
// consumers of the API stats endpoint.
package types

import "time"

type ThrottlingData struct {
	// Number of periods with throttling active
	Periods uint64 `json:"periods"`
	// Number of periods when the container hit its throttling limit.
	ThrottledPeriods uint64 `json:"throttled_periods"`
	// Aggregate time the container was throttled for in nanoseconds.
	ThrottledTime uint64 `json:"throttled_time"`
}

// All CPU stats are aggregated since container inception.
type CpuUsage struct {
	// Total CPU time consumed.
	// Units: nanoseconds.
	TotalUsage uint64 `json:"total_usage"`
	// Total CPU time consumed per core.
	// Units: nanoseconds.
	PercpuUsage []uint64 `json:"percpu_usage"`
	// Time spent by tasks of the cgroup in kernel mode.
	// Units: nanoseconds.
	UsageInKernelmode uint64 `json:"usage_in_kernelmode"`
	// Time spent by tasks of the cgroup in user mode.
	// Units: nanoseconds.
	UsageInUsermode uint64 `json:"usage_in_usermode"`
}

type CpuStats struct {
	CpuUsage       CpuUsage       `json:"cpu_usage"`
	SystemUsage    uint64         `json:"system_cpu_usage"`
	ThrottlingData ThrottlingData `json:"throttling_data,omitempty"`
}

type MemoryStats struct {
	// current res_counter usage for memory
	Usage uint64 `json:"usage"`
	// maximum usage ever recorded.
	MaxUsage uint64 `json:"max_usage"`
	// TODO(vishh): Export these as stronger types.
	// all the stats exported via memory.stat.
	Stats map[string]uint64 `json:"stats"`
	// number of times memory usage hits limits.
	Failcnt uint64 `json:"failcnt"`
	Limit   uint64 `json:"limit"`
}

type BlkioStatEntry struct {
	Major uint64 `json:"major"`
	Minor uint64 `json:"minor"`
	Op    string `json:"op"`
	Value uint64 `json:"value"`
}

type BlkioStats struct {
	// number of bytes transferred to and from the block device
	IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"`
	IoServicedRecursive     []BlkioStatEntry `json:"io_serviced_recursive"`
	IoQueuedRecursive       []BlkioStatEntry `json:"io_queue_recursive"`
	IoServiceTimeRecursive  []BlkioStatEntry `json:"io_service_time_recursive"`
	IoWaitTimeRecursive     []BlkioStatEntry `json:"io_wait_time_recursive"`
	IoMergedRecursive       []BlkioStatEntry `json:"io_merged_recursive"`
	IoTimeRecursive         []BlkioStatEntry `json:"io_time_recursive"`
	SectorsRecursive        []BlkioStatEntry `json:"sectors_recursive"`
}

type Network struct {
	RxBytes   uint64 `json:"rx_bytes"`
	RxPackets uint64 `json:"rx_packets"`
	RxErrors  uint64 `json:"rx_errors"`
	RxDropped uint64 `json:"rx_dropped"`
	TxBytes   uint64 `json:"tx_bytes"`
	TxPackets uint64 `json:"tx_packets"`
	TxErrors  uint64 `json:"tx_errors"`
	TxDropped uint64 `json:"tx_dropped"`
}

type Stats struct {
	Read        time.Time   `json:"read"`
	Network     Network     `json:"network,omitempty"`
	CpuStats    CpuStats    `json:"cpu_stats,omitempty"`
	MemoryStats MemoryStats `json:"memory_stats,omitempty"`
	BlkioStats  BlkioStats  `json:"blkio_stats,omitempty"`
}
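
Since these types exist purely for wire stability, a sketch of decoding a stats payload into them (the payload values are made up; assumes the types above are in scope):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	raw := []byte(`{"read":"2015-01-01T00:00:00Z","network":{"rx_bytes":1024}}`)
	var s Stats
	if err := json.Unmarshal(raw, &s); err != nil {
		panic(err)
	}
	fmt.Println(s.Network.RxBytes) // 1024
}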
@@ -1,11 +0,0 @@
package types

// ContainerCreateResponse contains the information returned to a client on the
// creation of a new container.
type ContainerCreateResponse struct {
	// ID is the ID of the created container.
	ID string `json:"Id"`

	// Warnings are any warnings encountered during the creation of the container.
	Warnings []string `json:"Warnings"`
}
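
A sketch of the JSON this struct round-trips (values are made up; assumes ContainerCreateResponse above is in scope):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	resp := ContainerCreateResponse{ID: "abc123", Warnings: []string{"low disk space"}}
	out, err := json.Marshal(resp)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"Id":"abc123","Warnings":["low disk space"]}
}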
124  archive.go  Normal file
@@ -0,0 +1,124 @@
package docker

import (
	"errors"
	"io"
	"io/ioutil"
	"os"
	"os/exec"
)

type Archive io.Reader

type Compression uint32

const (
	Uncompressed Compression = iota
	Bzip2
	Gzip
	Xz
)

func (compression *Compression) Flag() string {
	switch *compression {
	case Bzip2:
		return "j"
	case Gzip:
		return "z"
	case Xz:
		return "J"
	}
	return ""
}

func Tar(path string, compression Compression) (io.Reader, error) {
	cmd := exec.Command("bsdtar", "-f", "-", "-C", path, "-c"+compression.Flag(), ".")
	return CmdStream(cmd)
}

func Untar(archive io.Reader, path string) error {
	cmd := exec.Command("bsdtar", "-f", "-", "-C", path, "-x")
	cmd.Stdin = archive
	output, err := cmd.CombinedOutput()
	if err != nil {
		return errors.New(err.Error() + ": " + string(output))
	}
	return nil
}

// CmdStream executes a command, and returns its stdout as a stream.
// If the command fails to run or doesn't complete successfully, an error
// will be returned, including anything written on stderr.
func CmdStream(cmd *exec.Cmd) (io.Reader, error) {
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return nil, err
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return nil, err
	}
	pipeR, pipeW := io.Pipe()
	errChan := make(chan []byte)
	// Collect stderr, we will use it in case of an error
	go func() {
		errText, e := ioutil.ReadAll(stderr)
		if e != nil {
			errText = []byte("(...couldn't fetch stderr: " + e.Error() + ")")
		}
		errChan <- errText
	}()
	// Copy stdout to the returned pipe
	go func() {
		_, err := io.Copy(pipeW, stdout)
		if err != nil {
			pipeW.CloseWithError(err)
		}
		errText := <-errChan
		if err := cmd.Wait(); err != nil {
			pipeW.CloseWithError(errors.New(err.Error() + ": " + string(errText)))
		} else {
			pipeW.Close()
		}
	}()
	// Run the command and return the pipe
	if err := cmd.Start(); err != nil {
		return nil, err
	}
	return pipeR, nil
}
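
A usage sketch for CmdStream (assumes it is in scope; note that stderr only surfaces through the returned error when the command fails):

package main

import (
	"fmt"
	"io/ioutil"
	"os/exec"
)

func main() {
	out, err := CmdStream(exec.Command("echo", "hello"))
	if err != nil {
		panic(err)
	}
	data, err := ioutil.ReadAll(out) // a failing command reports its stderr via this err
	if err != nil {
		panic(err)
	}
	fmt.Print(string(data)) // hello
}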

// NewTempArchive reads the content of src into a temporary file, and returns the contents
// of that file as an archive. The archive can only be read once - as soon as reading completes,
// the file will be deleted.
func NewTempArchive(src Archive, dir string) (*TempArchive, error) {
	f, err := ioutil.TempFile(dir, "")
	if err != nil {
		return nil, err
	}
	if _, err := io.Copy(f, src); err != nil {
		return nil, err
	}
	if _, err := f.Seek(0, 0); err != nil {
		return nil, err
	}
	st, err := f.Stat()
	if err != nil {
		return nil, err
	}
	size := st.Size()
	return &TempArchive{f, size}, nil
}

type TempArchive struct {
	*os.File
	Size int64 // Pre-computed from Stat().Size() as a convenience
}

func (archive *TempArchive) Read(data []byte) (int, error) {
	n, err := archive.File.Read(data)
	if err != nil {
		os.Remove(archive.File.Name())
	}
	return n, err
}
77  archive_test.go  Normal file
@@ -0,0 +1,77 @@
package docker

import (
	"io"
	"io/ioutil"
	"os"
	"os/exec"
	"testing"
	"time"
)

func TestCmdStreamLargeStderr(t *testing.T) {
	cmd := exec.Command("/bin/sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello")
	out, err := CmdStream(cmd)
	if err != nil {
		t.Fatalf("Failed to start command: " + err.Error())
	}
	errCh := make(chan error)
	go func() {
		_, err := io.Copy(ioutil.Discard, out)
		errCh <- err
	}()
	select {
	case err := <-errCh:
		if err != nil {
			t.Fatalf("Command should not have failed (err=%s...)", err.Error()[:100])
		}
	case <-time.After(5 * time.Second):
		t.Fatalf("Command did not complete in 5 seconds; probable deadlock")
	}
}

func TestCmdStreamBad(t *testing.T) {
	badCmd := exec.Command("/bin/sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1")
	out, err := CmdStream(badCmd)
	if err != nil {
		t.Fatalf("Failed to start command: " + err.Error())
	}
	if output, err := ioutil.ReadAll(out); err == nil {
		t.Fatalf("Command should have failed")
	} else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" {
		t.Fatalf("Wrong error value (%s)", err.Error())
	} else if s := string(output); s != "hello\n" {
		t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output)
	}
}

func TestCmdStreamGood(t *testing.T) {
	cmd := exec.Command("/bin/sh", "-c", "echo hello; exit 0")
	out, err := CmdStream(cmd)
	if err != nil {
		t.Fatal(err)
	}
	if output, err := ioutil.ReadAll(out); err != nil {
		t.Fatalf("Command should not have failed (err=%s)", err)
	} else if s := string(output); s != "hello\n" {
		t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output)
	}
}

func TestTarUntar(t *testing.T) {
	archive, err := Tar(".", Uncompressed)
	if err != nil {
		t.Fatal(err)
	}
	tmp, err := ioutil.TempDir("", "docker-test-untar")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmp)
	if err := Untar(archive, tmp); err != nil {
		t.Fatal(err)
	}
	if _, err := os.Stat(tmp); err != nil {
		t.Fatalf("Error stating %s: %s", tmp, err.Error())
	}
}
168  auth/auth.go  Normal file
@@ -0,0 +1,168 @@
package auth

import (
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"net/http"
	"os"
	"path"
	"strings"
)

// Where we store the config file
const CONFIGFILE = ".dockercfg"

// the registry server we want to log in against
const REGISTRY_SERVER = "https://registry.docker.io"

type AuthConfig struct {
	Username string `json:"username"`
	Password string `json:"password"`
	Email    string `json:"email"`
	rootPath string `json:"-"`
}

func NewAuthConfig(username, password, email, rootPath string) *AuthConfig {
	return &AuthConfig{
		Username: username,
		Password: password,
		Email:    email,
		rootPath: rootPath,
	}
}

// create a base64 encoded auth string to store in config
func EncodeAuth(authConfig *AuthConfig) string {
	authStr := authConfig.Username + ":" + authConfig.Password
	msg := []byte(authStr)
	encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg)))
	base64.StdEncoding.Encode(encoded, msg)
	return string(encoded)
}

// decode the auth string
func DecodeAuth(authStr string) (*AuthConfig, error) {
	decLen := base64.StdEncoding.DecodedLen(len(authStr))
	decoded := make([]byte, decLen)
	authByte := []byte(authStr)
	n, err := base64.StdEncoding.Decode(decoded, authByte)
	if err != nil {
		return nil, err
	}
	if n > decLen {
		return nil, fmt.Errorf("Something went wrong decoding auth config")
	}
	arr := strings.Split(string(decoded), ":")
	if len(arr) != 2 {
		return nil, fmt.Errorf("Invalid auth configuration file")
	}
	password := strings.Trim(arr[1], "\x00")
	return &AuthConfig{Username: arr[0], Password: password}, nil
}

// load up the auth config information and return values
// FIXME: use the internal golang config parser
func LoadConfig(rootPath string) (*AuthConfig, error) {
	confFile := path.Join(rootPath, CONFIGFILE)
	if _, err := os.Stat(confFile); err != nil {
		return &AuthConfig{}, fmt.Errorf("The Auth config file is missing")
	}
	b, err := ioutil.ReadFile(confFile)
	if err != nil {
		return nil, err
	}
	arr := strings.Split(string(b), "\n")
	origAuth := strings.Split(arr[0], " = ")
	origEmail := strings.Split(arr[1], " = ")
	authConfig, err := DecodeAuth(origAuth[1])
	if err != nil {
		return nil, err
	}
	authConfig.Email = origEmail[1]
	authConfig.rootPath = rootPath
	return authConfig, nil
}

// save the auth config
func saveConfig(rootPath, authStr string, email string) error {
	lines := "auth = " + authStr + "\n" + "email = " + email + "\n"
	b := []byte(lines)
	err := ioutil.WriteFile(path.Join(rootPath, CONFIGFILE), b, 0600)
	if err != nil {
		return err
	}
	return nil
}
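
A hypothetical round-trip sketch tying the helpers above together; it would have to live inside the auth package, since saveConfig is unexported:

package auth

import (
	"io/ioutil"
	"testing"
)

func TestConfigRoundTrip(t *testing.T) {
	dir, err := ioutil.TempDir("", "dockercfg-demo") // made-up temp location
	if err != nil {
		t.Fatal(err)
	}
	cfg := NewAuthConfig("ken", "test", "test@example.com", dir)
	if err := saveConfig(dir, EncodeAuth(cfg), cfg.Email); err != nil {
		t.Fatal(err)
	}
	loaded, err := LoadConfig(dir)
	if err != nil {
		t.Fatal(err)
	}
	if loaded.Username != "ken" || loaded.Email != "test@example.com" {
		t.Fatalf("unexpected round-trip result: %#v", loaded)
	}
}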

// try to register/login to the registry server
func Login(authConfig *AuthConfig) (string, error) {
	storeConfig := false
	reqStatusCode := 0
	var status string
	var errMsg string
	var reqBody []byte
	jsonBody, err := json.Marshal(authConfig)
	if err != nil {
		errMsg = fmt.Sprintf("Config Error: %s", err)
		return "", errors.New(errMsg)
	}

	// using `bytes.NewReader(jsonBody)` here causes the server to respond with a 411 status.
	b := strings.NewReader(string(jsonBody))
	req1, err := http.Post(REGISTRY_SERVER+"/v1/users", "application/json; charset=utf-8", b)
	if err != nil {
		errMsg = fmt.Sprintf("Server Error: %s", err)
		return "", errors.New(errMsg)
	}

	reqStatusCode = req1.StatusCode
	defer req1.Body.Close()
	reqBody, err = ioutil.ReadAll(req1.Body)
	if err != nil {
		errMsg = fmt.Sprintf("Server Error: [%#v] %s", reqStatusCode, err)
		return "", errors.New(errMsg)
	}

	if reqStatusCode == 201 {
		status = "Account Created\n"
		storeConfig = true
	} else if reqStatusCode == 400 {
		// FIXME: This should be 'exists', not 'exist'. Need to change on the server first.
		if string(reqBody) == "Username or email already exist" {
			client := &http.Client{}
			req, err := http.NewRequest("GET", REGISTRY_SERVER+"/v1/users", nil)
			req.SetBasicAuth(authConfig.Username, authConfig.Password)
			resp, err := client.Do(req)
			if err != nil {
				return "", err
			}
			defer resp.Body.Close()
			body, err := ioutil.ReadAll(resp.Body)
			if err != nil {
				return "", err
			}
			if resp.StatusCode == 200 {
				status = "Login Succeeded\n"
				storeConfig = true
			} else {
				status = fmt.Sprintf("Login: %s", body)
				return "", errors.New(status)
			}
		} else {
			status = fmt.Sprintf("Registration: %s", reqBody)
			return "", errors.New(status)
		}
	} else {
		status = fmt.Sprintf("[%d] : %s", reqStatusCode, reqBody)
		return "", errors.New(status)
	}
	if storeConfig {
		authStr := EncodeAuth(authConfig)
		saveConfig(authConfig.rootPath, authStr, authConfig.Email)
	}
	return status, nil
}
23  auth/auth_test.go  Normal file
@@ -0,0 +1,23 @@
package auth

import (
	"testing"
)

func TestEncodeAuth(t *testing.T) {
	newAuthConfig := &AuthConfig{Username: "ken", Password: "test", Email: "test@example.com"}
	authStr := EncodeAuth(newAuthConfig)
	decAuthConfig, err := DecodeAuth(authStr)
	if err != nil {
		t.Fatal(err)
	}
	if newAuthConfig.Username != decAuthConfig.Username {
		t.Fatal("Encoded Username doesn't match decoded Username")
	}
	if newAuthConfig.Password != decAuthConfig.Password {
		t.Fatal("Encoded Password doesn't match decoded Password")
	}
	if authStr != "a2VuOnRlc3Q=" {
		t.Fatal("AuthString encoding isn't correct.")
	}
}
20  buildbot/README.rst  Normal file
@@ -0,0 +1,20 @@
Buildbot
========

Buildbot is a continuous integration system designed to automate the
build/test cycle. By automatically rebuilding and testing the tree each time
something has changed, build problems are pinpointed quickly, before other
developers are inconvenienced by the failure.

Running 'make hack' at the docker root directory spawns a virtual
machine in the background running a buildbot instance and adds a git
post-commit hook that automatically runs the docker tests for you.

You can check your buildbot instance at http://192.168.33.21:8010/waterfall


Buildbot dependencies
---------------------

vagrant, virtualbox packages and the python package requests
28  buildbot/Vagrantfile  vendored  Normal file
@@ -0,0 +1,28 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

$BUILDBOT_IP = '192.168.33.21'

def v10(config)
  config.vm.box = "quantal64_3.5.0-25"
  config.vm.box_url = "http://get.docker.io/vbox/ubuntu/12.10/quantal64_3.5.0-25.box"
  config.vm.share_folder 'v-data', '/data/docker', File.dirname(__FILE__) + '/..'
  config.vm.network :hostonly, $BUILDBOT_IP

  # Ensure puppet is installed on the instance
  config.vm.provision :shell, :inline => 'apt-get -qq update; apt-get install -y puppet'

  config.vm.provision :puppet do |puppet|
    puppet.manifests_path = '.'
    puppet.manifest_file = 'buildbot.pp'
    puppet.options = ['--templatedir', '.']
  end
end

Vagrant::VERSION < '1.1.0' and Vagrant::Config.run do |config|
  v10(config)
end

Vagrant::VERSION >= '1.1.0' and Vagrant.configure('1') do |config|
  v10(config)
end
43  buildbot/buildbot-cfg/buildbot-cfg.sh  Executable file
@@ -0,0 +1,43 @@
#!/bin/bash

# Auto setup of the buildbot configuration. Package installation is done
# in buildbot.pp
# Dependencies: buildbot, buildbot-slave, supervisor

SLAVE_NAME='buildworker'
SLAVE_SOCKET='localhost:9989'
BUILDBOT_PWD='pass-docker'
USER='vagrant'
ROOT_PATH='/data/buildbot'
DOCKER_PATH='/data/docker'
BUILDBOT_CFG="$DOCKER_PATH/buildbot/buildbot-cfg"
IP=$(grep BUILDBOT_IP /data/docker/buildbot/Vagrantfile | awk -F "'" '{ print $2; }')

function run { su $USER -c "$1"; }

export PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin

# Exit if buildbot has already been installed
[ -d "$ROOT_PATH" ] && exit 0

# Setup buildbot
run "mkdir -p ${ROOT_PATH}"
cd ${ROOT_PATH}
run "buildbot create-master master"
run "cp $BUILDBOT_CFG/master.cfg master"
run "sed -i 's/localhost/$IP/' master/master.cfg"
run "buildslave create-slave slave $SLAVE_SOCKET $SLAVE_NAME $BUILDBOT_PWD"

# Allow buildbot subprocesses (docker tests) to properly run in containers,
# in particular with docker -u
run "sed -i 's/^umask = None/umask = 000/' ${ROOT_PATH}/slave/buildbot.tac"

# Setup supervisor
cp $BUILDBOT_CFG/buildbot.conf /etc/supervisor/conf.d/buildbot.conf
sed -i "s/^chmod=0700.*0700./chmod=0770\nchown=root:$USER/" /etc/supervisor/supervisord.conf
kill -HUP `pgrep -f "/usr/bin/python /usr/bin/supervisord"`

# Add git hook
cp $BUILDBOT_CFG/post-commit $DOCKER_PATH/.git/hooks
sed -i "s/localhost/$IP/" $DOCKER_PATH/.git/hooks/post-commit
18  buildbot/buildbot-cfg/buildbot.conf  Normal file
@@ -0,0 +1,18 @@
[program:buildmaster]
command=su vagrant -c "buildbot start master"
directory=/data/buildbot
chown=root:root
redirect_stderr=true
stdout_logfile=/var/log/supervisor/buildbot-master.log
stderr_logfile=/var/log/supervisor/buildbot-master.log

[program:buildworker]
command=buildslave start slave
directory=/data/buildbot
chown=root:root
redirect_stderr=true
stdout_logfile=/var/log/supervisor/buildbot-slave.log
stderr_logfile=/var/log/supervisor/buildbot-slave.log

[group:buildbot]
programs=buildmaster,buildworker
46  buildbot/buildbot-cfg/master.cfg  Normal file
@@ -0,0 +1,46 @@
import os
from buildbot.buildslave import BuildSlave
from buildbot.schedulers.forcesched import ForceScheduler
from buildbot.config import BuilderConfig
from buildbot.process.factory import BuildFactory
from buildbot.steps.shell import ShellCommand
from buildbot.status import html
from buildbot.status.web import authz, auth

PORT_WEB = 8010               # Buildbot webserver port
PORT_MASTER = 9989            # Port where the buildbot master listens for buildworkers
TEST_USER = 'buildbot'        # Credential to authenticate build triggers
TEST_PWD = 'docker'           # Credential to authenticate build triggers
BUILDER_NAME = 'docker'
BUILDPASSWORD = 'pass-docker' # Credential to authenticate buildworkers
DOCKER_PATH = '/data/docker'


c = BuildmasterConfig = {}

c['title'] = "Docker"
c['titleURL'] = "waterfall"
c['buildbotURL'] = "http://localhost:{0}/".format(PORT_WEB)
c['db'] = {'db_url': "sqlite:///state.sqlite"}
c['slaves'] = [BuildSlave('buildworker', BUILDPASSWORD)]
c['slavePortnum'] = PORT_MASTER

c['schedulers'] = [ForceScheduler(name='trigger', builderNames=[BUILDER_NAME])]

# Docker test command
test_cmd = """(
cd {0}/..; rm -rf docker-tmp; git clone docker docker-tmp;
cd docker-tmp; make test; exit_status=$?;
cd ..; rm -rf docker-tmp; exit $exit_status)""".format(DOCKER_PATH)

# Builder
factory = BuildFactory()
factory.addStep(ShellCommand(description='Docker', logEnviron=False,
    usePTY=True, command=test_cmd))
c['builders'] = [BuilderConfig(name=BUILDER_NAME, slavenames=['buildworker'],
    factory=factory)]

# Status
authz_cfg = authz.Authz(auth=auth.BasicAuth([(TEST_USER, TEST_PWD)]),
    forceBuild='auth')
c['status'] = [html.WebStatus(http_port=PORT_WEB, authz=authz_cfg)]
21  buildbot/buildbot-cfg/post-commit  Executable file
@@ -0,0 +1,21 @@
#!/usr/bin/env python

'''Trigger buildbot docker test build

post-commit git hook designed to automatically trigger buildbot on
the provided vagrant docker VM.'''

import requests

USERNAME = 'buildbot'
PASSWORD = 'docker'
BASE_URL = 'http://localhost:8010'
path = lambda s: BASE_URL + '/' + s

try:
    session = requests.session()
    session.post(path('login'), data={'username': USERNAME, 'passwd': PASSWORD})
    session.post(path('builders/docker/force'),
                 data={'forcescheduler': 'trigger', 'reason': 'Test commit'})
except:
    pass
32  buildbot/buildbot.pp  Normal file
@@ -0,0 +1,32 @@
node default {
  $USER = 'vagrant'
  $ROOT_PATH = '/data/buildbot'
  $DOCKER_PATH = '/data/docker'

  exec {'apt_update': command => '/usr/bin/apt-get update' }
  Package { require => Exec['apt_update'] }
  group {'puppet': ensure => 'present'}

  # Install dependencies
  Package { ensure => 'installed' }
  package { ['python-dev','python-pip','supervisor','lxc','bsdtar','git','golang']: }

  file {[ '/data' ]:
    owner => $USER, group => $USER, ensure => 'directory' }

  file {'/var/tmp/requirements.txt':
    content => template('requirements.txt') }

  exec {'requirements':
    require => [ Package['python-dev'], Package['python-pip'],
      File['/var/tmp/requirements.txt'] ],
    cwd => '/var/tmp',
    command => "/bin/sh -c '(/usr/bin/pip install -r requirements.txt;
      rm /var/tmp/requirements.txt)'" }

  exec {'buildbot-cfg-sh':
    require => [ Package['supervisor'], Exec['requirements']],
    path => '/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin',
    cwd => '/data',
    command => "$DOCKER_PATH/buildbot/buildbot-cfg/buildbot-cfg.sh" }
}
6  buildbot/requirements.txt  Normal file
@@ -0,0 +1,6 @@
sqlalchemy<=0.7.9
sqlalchemy-migrate>=0.7.2
buildbot==0.8.7p1
buildbot_slave==0.8.7p1
nose==1.2.1
requests==1.1.0
263  builder.go  Normal file
@@ -0,0 +1,263 @@
package docker

import (
	"bufio"
	"fmt"
	"io"
	"os"
	"path"
	"strings"
	"time"
)

type Builder struct {
	runtime      *Runtime
	repositories *TagStore
	graph        *Graph
}

func NewBuilder(runtime *Runtime) *Builder {
	return &Builder{
		runtime:      runtime,
		graph:        runtime.graph,
		repositories: runtime.repositories,
	}
}
func (builder *Builder) Create(config *Config) (*Container, error) {
|
||||
// Lookup image
|
||||
img, err := builder.repositories.LookupImage(config.Image)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Generate id
|
||||
id := GenerateId()
|
||||
// Generate default hostname
|
||||
// FIXME: the lxc template no longer needs to set a default hostname
|
||||
if config.Hostname == "" {
|
||||
config.Hostname = id[:12]
|
||||
}
|
||||
|
||||
container := &Container{
|
||||
// FIXME: we should generate the ID here instead of receiving it as an argument
|
||||
Id: id,
|
||||
Created: time.Now(),
|
||||
Path: config.Cmd[0],
|
||||
Args: config.Cmd[1:], //FIXME: de-duplicate from config
|
||||
Config: config,
|
||||
Image: img.Id, // Always use the resolved image id
|
||||
NetworkSettings: &NetworkSettings{},
|
||||
// FIXME: do we need to store this in the container?
|
||||
SysInitPath: sysInitPath,
|
||||
}
|
||||
container.root = builder.runtime.containerRoot(container.Id)
|
||||
// Step 1: create the container directory.
|
||||
// This doubles as a barrier to avoid race conditions.
|
||||
if err := os.Mkdir(container.root, 0700); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If custom dns exists, then create a resolv.conf for the container
|
||||
if len(config.Dns) > 0 {
|
||||
container.ResolvConfPath = path.Join(container.root, "resolv.conf")
|
||||
f, err := os.Create(container.ResolvConfPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
for _, dns := range config.Dns {
|
||||
if _, err := f.Write([]byte("nameserver " + dns + "\n")); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
container.ResolvConfPath = "/etc/resolv.conf"
|
||||
}
|
||||
|
||||
// Step 2: save the container json
|
||||
if err := container.ToDisk(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Step 3: register the container
|
||||
if err := builder.runtime.Register(container); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return container, nil
|
||||
}
|
||||
|
||||
// Commit creates a new filesystem image from the current state of a container.
|
||||
// The image can optionally be tagged into a repository
|
||||
func (builder *Builder) Commit(container *Container, repository, tag, comment, author string) (*Image, error) {
|
||||
// FIXME: freeze the container before copying it to avoid data corruption?
|
||||
// FIXME: this shouldn't be in commands.
|
||||
rwTar, err := container.ExportRw()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Create a new image from the container's base layers + a new layer from container changes
|
||||
img, err := builder.graph.Create(rwTar, container, comment, author)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Register the image if needed
|
||||
if repository != "" {
|
||||
if err := builder.repositories.Set(repository, tag, img.Id, true); err != nil {
|
||||
return img, err
|
||||
}
|
||||
}
|
||||
return img, nil
|
||||
}
|
||||
|
||||
func (builder *Builder) clearTmp(containers, images map[string]struct{}) {
|
||||
for c := range containers {
|
||||
tmp := builder.runtime.Get(c)
|
||||
builder.runtime.Destroy(tmp)
|
||||
Debugf("Removing container %s", c)
|
||||
}
|
||||
for i := range images {
|
||||
builder.runtime.graph.Delete(i)
|
||||
Debugf("Removing image %s", i)
|
||||
}
|
||||
}
|
||||
|
||||
func (builder *Builder) Build(dockerfile io.Reader, stdout io.Writer) (*Image, error) {
|
||||
var (
|
||||
image, base *Image
|
||||
tmpContainers map[string]struct{} = make(map[string]struct{})
|
||||
tmpImages map[string]struct{} = make(map[string]struct{})
|
||||
)
|
||||
defer builder.clearTmp(tmpContainers, tmpImages)
|
||||
|
||||
file := bufio.NewReader(dockerfile)
|
||||
for {
|
||||
line, err := file.ReadString('\n')
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
line = strings.TrimSpace(line)
|
||||
// Skip comments and empty line
|
||||
if len(line) == 0 || line[0] == '#' {
|
||||
continue
|
||||
}
|
||||
tmp := strings.SplitN(line, " ", 2)
|
||||
if len(tmp) != 2 {
|
||||
return nil, fmt.Errorf("Invalid Dockerfile format")
|
||||
}
|
||||
instruction := tmp[0]
|
||||
arguments := tmp[1]
|
||||
switch strings.ToLower(instruction) {
|
||||
case "from":
|
||||
fmt.Fprintf(stdout, "FROM %s\n", arguments)
|
||||
image, err = builder.runtime.repositories.LookupImage(arguments)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
break
|
||||
case "run":
|
||||
fmt.Fprintf(stdout, "RUN %s\n", arguments)
|
||||
if image == nil {
|
||||
return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
|
||||
}
|
||||
config, err := ParseRun([]string{image.Id, "/bin/sh", "-c", arguments}, nil, builder.runtime.capabilities)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Create the container and start it
|
||||
c, err := builder.Create(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := c.Start(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tmpContainers[c.Id] = struct{}{}
|
||||
|
||||
// Wait for it to finish
|
||||
if result := c.Wait(); result != 0 {
|
||||
return nil, fmt.Errorf("!!! '%s' return non-zero exit code '%d'. Aborting.", arguments, result)
|
||||
}
|
||||
|
||||
// Commit the container
|
||||
base, err = builder.Commit(c, "", "", "", "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tmpImages[base.Id] = struct{}{}
|
||||
|
||||
fmt.Fprintf(stdout, "===> %s\n", base.ShortId())
|
||||
|
||||
// use the base as the new image
|
||||
image = base
|
||||
|
||||
break
|
||||
case "insert":
|
||||
if image == nil {
|
||||
return nil, fmt.Errorf("Please provide a source image with `from` prior to copy")
|
||||
}
|
||||
tmp = strings.SplitN(arguments, " ", 2)
|
||||
if len(tmp) != 2 {
|
||||
return nil, fmt.Errorf("Invalid INSERT format")
|
||||
}
|
||||
sourceUrl := tmp[0]
|
||||
destPath := tmp[1]
|
||||
fmt.Fprintf(stdout, "COPY %s to %s in %s\n", sourceUrl, destPath, base.ShortId())
|
||||
|
||||
file, err := Download(sourceUrl, stdout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer file.Body.Close()
|
||||
|
||||
config, err := ParseRun([]string{base.Id, "echo", "insert", sourceUrl, destPath}, nil, builder.runtime.capabilities)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c, err := builder.Create(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := c.Start(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Wait for echo to finish
|
||||
if result := c.Wait(); result != 0 {
|
||||
return nil, fmt.Errorf("!!! '%s' return non-zero exit code '%d'. Aborting.", arguments, result)
|
||||
}
|
||||
|
||||
if err := c.Inject(file.Body, destPath); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
base, err = builder.Commit(c, "", "", "", "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fmt.Fprintf(stdout, "===> %s\n", base.ShortId())
|
||||
|
||||
image = base
|
||||
|
||||
break
|
||||
default:
|
||||
fmt.Fprintf(stdout, "Skipping unknown instruction %s\n", instruction)
|
||||
}
|
||||
}
|
||||
if base != nil {
|
||||
// The build is successful, keep the temporary containers and images
|
||||
for i := range tmpImages {
|
||||
delete(tmpImages, i)
|
||||
}
|
||||
for i := range tmpContainers {
|
||||
delete(tmpContainers, i)
|
||||
}
|
||||
fmt.Fprintf(stdout, "Build finished. image id: %s\n", base.ShortId())
|
||||
} else {
|
||||
fmt.Fprintf(stdout, "An error occured during the build\n")
|
||||
}
|
||||
return base, nil
|
||||
}
|
||||
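
For context, the loop above splits each line on the first space and matches instruction names case-insensitively, so the minimal Dockerfiles this early builder accepts look like the following (a made-up example):

# comments and blank lines are skipped
from base-image
run touch /etc/hello
insert http://example.com/file.txt /tmp/file.txt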
@@ -1,39 +0,0 @@
// This package contains the set of Dockerfile commands.
package command

const (
	Env        = "env"
	Label      = "label"
	Maintainer = "maintainer"
	Add        = "add"
	Copy       = "copy"
	From       = "from"
	Onbuild    = "onbuild"
	Workdir    = "workdir"
	Run        = "run"
	Cmd        = "cmd"
	Entrypoint = "entrypoint"
	Expose     = "expose"
	Volume     = "volume"
	User       = "user"
	Insert     = "insert"
)

// Commands is the list of all Dockerfile commands
var Commands = map[string]struct{}{
	Env:        {},
	Label:      {},
	Maintainer: {},
	Add:        {},
	Copy:       {},
	From:       {},
	Onbuild:    {},
	Workdir:    {},
	Run:        {},
	Cmd:        {},
	Entrypoint: {},
	Expose:     {},
	Volume:     {},
	User:       {},
	Insert:     {},
}
@@ -1,445 +0,0 @@
|
||||
package builder
|
||||
|
||||
// This file contains the dispatchers for each command. Note that
|
||||
// `nullDispatch` is not actually a command, but support for commands we parse
|
||||
// but do nothing with.
|
||||
//
|
||||
// See evaluator.go for a higher level discussion of the whole evaluator
|
||||
// package.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
log "github.com/Sirupsen/logrus"
|
||||
"github.com/docker/docker/nat"
|
||||
flag "github.com/docker/docker/pkg/mflag"
|
||||
"github.com/docker/docker/runconfig"
|
||||
)
|
||||
|
||||
const (
|
||||
// NoBaseImageSpecifier is the symbol used by the FROM
|
||||
// command to specify that no base image is to be used.
|
||||
NoBaseImageSpecifier string = "scratch"
|
||||
)
|
||||
|
||||
// dispatch with no layer / parsing. This is effectively not a command.
|
||||
func nullDispatch(b *Builder, args []string, attributes map[string]bool, original string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ENV foo bar
|
||||
//
|
||||
// Sets the environment variable foo to bar, also makes interpolation
|
||||
// in the dockerfile available from the next statement on via ${foo}.
|
||||
//
|
||||
func env(b *Builder, args []string, attributes map[string]bool, original string) error {
|
||||
if len(args) == 0 {
|
||||
return fmt.Errorf("ENV requires at least one argument")
|
||||
}
|
||||
|
||||
if len(args)%2 != 0 {
|
||||
// should never get here, but just in case
|
||||
return fmt.Errorf("Bad input to ENV, too many args")
|
||||
}
|
||||
|
||||
commitStr := "ENV"
|
||||
|
||||
for j := 0; j < len(args); j++ {
|
||||
// name ==> args[j]
|
||||
// value ==> args[j+1]
|
||||
newVar := args[j] + "=" + args[j+1] + ""
|
||||
commitStr += " " + newVar
|
||||
|
||||
gotOne := false
|
||||
for i, envVar := range b.Config.Env {
|
||||
envParts := strings.SplitN(envVar, "=", 2)
|
||||
if envParts[0] == args[j] {
|
||||
b.Config.Env[i] = newVar
|
||||
gotOne = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !gotOne {
|
||||
b.Config.Env = append(b.Config.Env, newVar)
|
||||
}
|
||||
j++
|
||||
}
|
||||
|
||||
return b.commit("", b.Config.Cmd, commitStr)
|
||||
}
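
// For illustration, a sketch of what the loop above produces (assuming the
// parser has already split the statement into key/value pairs, as
// evaluator.go does):
//
//    ENV foo bar baz qux  ==>  args = []string{"foo", "bar", "baz", "qux"}
//
// leaves b.Config.Env containing "foo=bar" and "baz=qux", and commits the
// layer with the string "ENV foo=bar baz=qux".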

// MAINTAINER some text <maybe@an.email.address>
//
// Sets the maintainer metadata.
func maintainer(b *Builder, args []string, attributes map[string]bool, original string) error {
    if len(args) != 1 {
        return fmt.Errorf("MAINTAINER requires exactly one argument")
    }

    b.maintainer = args[0]
    return b.commit("", b.Config.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer))
}

// LABEL some json data describing the image
//
// Sets the label foo to bar.
//
func label(b *Builder, args []string, attributes map[string]bool, original string) error {
    if len(args) == 0 {
        return fmt.Errorf("LABEL requires at least one argument")
    }
    if len(args)%2 != 0 {
        // should never get here, but just in case
        return fmt.Errorf("Bad input to LABEL, too many args")
    }

    commitStr := "LABEL"

    if b.Config.Labels == nil {
        b.Config.Labels = map[string]string{}
    }

    for j := 0; j < len(args); j++ {
        // name  ==> args[j]
        // value ==> args[j+1]
        newVar := args[j] + "=" + args[j+1] + ""
        commitStr += " " + newVar

        b.Config.Labels[args[j]] = args[j+1]
        j++
    }
    return b.commit("", b.Config.Cmd, commitStr)
}

// ADD foo /path
//
// Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling
// exist here. If you do not wish to have this automatic handling, use COPY.
//
func add(b *Builder, args []string, attributes map[string]bool, original string) error {
    if len(args) < 2 {
        return fmt.Errorf("ADD requires at least two arguments")
    }

    return b.runContextCommand(args, true, true, "ADD")
}

// COPY foo /path
//
// Same as 'ADD' but without the tar and remote url handling.
//
func dispatchCopy(b *Builder, args []string, attributes map[string]bool, original string) error {
    if len(args) < 2 {
        return fmt.Errorf("COPY requires at least two arguments")
    }

    return b.runContextCommand(args, false, false, "COPY")
}

// FROM imagename
//
// This sets the image the dockerfile will build on top of.
//
func from(b *Builder, args []string, attributes map[string]bool, original string) error {
    if len(args) != 1 {
        return fmt.Errorf("FROM requires one argument")
    }

    name := args[0]

    if name == NoBaseImageSpecifier {
        b.image = ""
        b.noBaseImage = true
        return nil
    }

    image, err := b.Daemon.Repositories().LookupImage(name)
    if b.Pull {
        image, err = b.pullImage(name)
        if err != nil {
            return err
        }
    }
    if err != nil {
        if b.Daemon.Graph().IsNotExist(err) {
            image, err = b.pullImage(name)
        }

        // note that the top level err will still be !nil here if IsNotExist is
        // not the error. This approach just simplifies the logic a bit.
        if err != nil {
            return err
        }
    }

    return b.processImageFrom(image)
}

// ONBUILD RUN echo yo
//
// ONBUILD triggers run when the image is used in a FROM statement.
//
// ONBUILD handling has a lot of special-case functionality, the heading in
// evaluator.go and comments around dispatch() in the same file explain the
// special cases. Search for 'OnBuild' in internals.go for additional special
// cases.
//
func onbuild(b *Builder, args []string, attributes map[string]bool, original string) error {
    if len(args) == 0 {
        return fmt.Errorf("ONBUILD requires at least one argument")
    }

    triggerInstruction := strings.ToUpper(strings.TrimSpace(args[0]))
    switch triggerInstruction {
    case "ONBUILD":
        return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
    case "MAINTAINER", "FROM":
        return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction)
    }

    original = regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(original, "")

    b.Config.OnBuild = append(b.Config.OnBuild, original)
    return b.commit("", b.Config.Cmd, fmt.Sprintf("ONBUILD %s", original))
}

// WORKDIR /tmp
//
// Set the working directory for future RUN/CMD/etc statements.
//
func workdir(b *Builder, args []string, attributes map[string]bool, original string) error {
    if len(args) != 1 {
        return fmt.Errorf("WORKDIR requires exactly one argument")
    }

    workdir := args[0]

    if !filepath.IsAbs(workdir) {
        workdir = filepath.Join("/", b.Config.WorkingDir, workdir)
    }

    b.Config.WorkingDir = workdir

    return b.commit("", b.Config.Cmd, fmt.Sprintf("WORKDIR %v", workdir))
}
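
// For illustration: relative paths accumulate against the previous working
// directory via the filepath.Join above, e.g.
//
//    WORKDIR /a  ==>  b.Config.WorkingDir == "/a"
//    WORKDIR b   ==>  filepath.Join("/", "/a", "b") == "/a/b"
//    WORKDIR c   ==>  "/a/b/c"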

// RUN some command yo
//
// Run a command and commit the image. Args are automatically prepended with
// 'sh -c' in the event there is only one argument. The difference in
// processing:
//
// RUN echo hi          # sh -c echo hi
// RUN [ "echo", "hi" ] # echo hi
//
func run(b *Builder, args []string, attributes map[string]bool, original string) error {
    if b.image == "" && !b.noBaseImage {
        return fmt.Errorf("Please provide a source image with `from` prior to run")
    }

    args = handleJsonArgs(args, attributes)

    if !attributes["json"] {
        args = append([]string{"/bin/sh", "-c"}, args...)
    }

    runCmd := flag.NewFlagSet("run", flag.ContinueOnError)
    runCmd.SetOutput(ioutil.Discard)
    runCmd.Usage = nil

    config, _, _, err := runconfig.Parse(runCmd, append([]string{b.image}, args...))
    if err != nil {
        return err
    }

    cmd := b.Config.Cmd
    // set Cmd manually, this is special case only for Dockerfiles
    b.Config.Cmd = config.Cmd
    runconfig.Merge(b.Config, config)

    defer func(cmd []string) { b.Config.Cmd = cmd }(cmd)

    log.Debugf("[BUILDER] Command to be executed: %v", b.Config.Cmd)

    hit, err := b.probeCache()
    if err != nil {
        return err
    }
    if hit {
        return nil
    }

    c, err := b.create()
    if err != nil {
        return err
    }

    // Ensure that we keep the container mounted until the commit
    // to avoid unmounting and then mounting directly again
    c.Mount()
    defer c.Unmount()

    err = b.run(c)
    if err != nil {
        return err
    }
    if err := b.commit(c.ID, cmd, "run"); err != nil {
        return err
    }

    return nil
}

// CMD foo
//
// Set the default command to run in the container (which may be empty).
// Argument handling is the same as RUN.
//
func cmd(b *Builder, args []string, attributes map[string]bool, original string) error {
    b.Config.Cmd = handleJsonArgs(args, attributes)

    if !attributes["json"] {
        b.Config.Cmd = append([]string{"/bin/sh", "-c"}, b.Config.Cmd...)
    }

    if err := b.commit("", b.Config.Cmd, fmt.Sprintf("CMD %q", b.Config.Cmd)); err != nil {
        return err
    }

    if len(args) != 0 {
        b.cmdSet = true
    }

    return nil
}

// ENTRYPOINT /usr/sbin/nginx
//
// Set the entrypoint (which defaults to sh -c) to /usr/sbin/nginx. Will
// accept the CMD as the arguments to /usr/sbin/nginx.
//
// Handles command processing similar to CMD and RUN, only b.Config.Entrypoint
// is initialized at NewBuilder time instead of through argument parsing.
//
func entrypoint(b *Builder, args []string, attributes map[string]bool, original string) error {
    parsed := handleJsonArgs(args, attributes)

    switch {
    case attributes["json"]:
        // ENTRYPOINT ["echo", "hi"]
        b.Config.Entrypoint = parsed
    case len(parsed) == 0:
        // ENTRYPOINT []
        b.Config.Entrypoint = nil
    default:
        // ENTRYPOINT echo hi
        b.Config.Entrypoint = []string{"/bin/sh", "-c", parsed[0]}
    }

    // when setting the entrypoint, if a CMD was not explicitly set then
    // set the command to nil
    if !b.cmdSet {
        b.Config.Cmd = nil
    }

    if err := b.commit("", b.Config.Cmd, fmt.Sprintf("ENTRYPOINT %q", b.Config.Entrypoint)); err != nil {
        return err
    }

    return nil
}

// EXPOSE 6667/tcp 7000/tcp
//
// Expose ports for links and port mappings. This all ends up in
// b.Config.ExposedPorts for runconfig.
//
func expose(b *Builder, args []string, attributes map[string]bool, original string) error {
    portsTab := args

    if len(args) == 0 {
        return fmt.Errorf("EXPOSE requires at least one argument")
    }

    if b.Config.ExposedPorts == nil {
        b.Config.ExposedPorts = make(nat.PortSet)
    }

    ports, bindingMap, err := nat.ParsePortSpecs(append(portsTab, b.Config.PortSpecs...))
    if err != nil {
        return err
    }

    for _, bindings := range bindingMap {
        if bindings[0].HostIp != "" || bindings[0].HostPort != "" {
            fmt.Fprintf(b.ErrStream, " ---> Using Dockerfile's EXPOSE instruction"+
                " to map host ports to container ports (ip:hostPort:containerPort) is deprecated.\n"+
                " Please use -p to publish the ports.\n")
        }
    }

    // instead of using ports directly, we build a list of ports and sort it so
    // the order is consistent. This prevents cache busts when map ordering
    // changes between builds
    portList := make([]string, len(ports))
    var i int
    for port := range ports {
        if _, exists := b.Config.ExposedPorts[port]; !exists {
            b.Config.ExposedPorts[port] = struct{}{}
        }
        portList[i] = string(port)
        i++
    }
    sort.Strings(portList)
    b.Config.PortSpecs = nil
    return b.commit("", b.Config.Cmd, fmt.Sprintf("EXPOSE %s", strings.Join(portList, " ")))
}
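
// For illustration, a sketch: `EXPOSE 7000 6667/tcp` yields the sorted
// portList []string{"6667/tcp", "7000/tcp"} (nat.ParsePortSpecs defaults a
// bare port to tcp), so the committed "EXPOSE ..." string is stable across
// builds even though map iteration order is not.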

// USER foo
//
// Set the user to 'foo' for future commands and when running the
// ENTRYPOINT/CMD at container run time.
//
func user(b *Builder, args []string, attributes map[string]bool, original string) error {
    if len(args) != 1 {
        return fmt.Errorf("USER requires exactly one argument")
    }

    b.Config.User = args[0]
    return b.commit("", b.Config.Cmd, fmt.Sprintf("USER %v", args))
}

// VOLUME /foo
//
// Expose the volume /foo for use. Will also accept the JSON array form.
//
func volume(b *Builder, args []string, attributes map[string]bool, original string) error {
    if len(args) == 0 {
        return fmt.Errorf("VOLUME requires at least one argument")
    }

    if b.Config.Volumes == nil {
        b.Config.Volumes = map[string]struct{}{}
    }
    for _, v := range args {
        v = strings.TrimSpace(v)
        if v == "" {
            return fmt.Errorf("Volume specified can not be an empty string")
        }
        b.Config.Volumes[v] = struct{}{}
    }
    if err := b.commit("", b.Config.Cmd, fmt.Sprintf("VOLUME %v", args)); err != nil {
        return err
    }
    return nil
}

// INSERT is no longer accepted, but we still parse it.
func insert(b *Builder, args []string, attributes map[string]bool, original string) error {
    return fmt.Errorf("INSERT has been deprecated. Please use ADD instead")
}
@@ -1,336 +0,0 @@
// builder is the evaluation step in the Dockerfile parse/evaluate pipeline.
//
// It incorporates a dispatch table based on the parser.Node values (see the
// parser package for more information) that are yielded from the parser itself.
// Calling NewBuilder with the BuildOpts struct can be used to customize the
// experience for execution purposes only. Parsing is controlled in the parser
// package, and this division of responsibility should be respected.
//
// Please see the jump table targets for the actual invocations, most of which
// will call out to the functions in internals.go to deal with their tasks.
//
// ONBUILD is a special case, which is covered in the onbuild() func in
// dispatchers.go.
//
// The evaluator uses the concept of "steps", which are usually each processable
// line in the Dockerfile. Each step is numbered and certain actions are taken
// before and after each step, such as creating an image ID and removing temporary
// containers and images. Note that ONBUILD creates a kinda-sorta "sub run" which
// includes its own set of steps (usually only one of them).
package builder

import (
    "errors"
    "fmt"
    "io"
    "os"
    "path/filepath"
    "strings"

    log "github.com/Sirupsen/logrus"
    "github.com/docker/docker/api"
    "github.com/docker/docker/builder/command"
    "github.com/docker/docker/builder/parser"
    "github.com/docker/docker/daemon"
    "github.com/docker/docker/engine"
    "github.com/docker/docker/pkg/common"
    "github.com/docker/docker/pkg/fileutils"
    "github.com/docker/docker/pkg/symlink"
    "github.com/docker/docker/pkg/tarsum"
    "github.com/docker/docker/registry"
    "github.com/docker/docker/runconfig"
    "github.com/docker/docker/utils"
)

var (
    ErrDockerfileEmpty = errors.New("Dockerfile cannot be empty")
)

// Environment variable interpolation will happen on these statements only.
var replaceEnvAllowed = map[string]struct{}{
    command.Env:     {},
    command.Label:   {},
    command.Add:     {},
    command.Copy:    {},
    command.Workdir: {},
    command.Expose:  {},
    command.Volume:  {},
    command.User:    {},
}
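
// For illustration, a sketch: with `ENV dir /app` in effect, a later
// `WORKDIR ${dir}/src` is expanded by ProcessWord in dispatch() because
// "workdir" appears in the map above, while `RUN echo ${dir}` is passed
// through verbatim and left for /bin/sh to expand at run time.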

var evaluateTable map[string]func(*Builder, []string, map[string]bool, string) error

func init() {
    evaluateTable = map[string]func(*Builder, []string, map[string]bool, string) error{
        command.Env:        env,
        command.Label:      label,
        command.Maintainer: maintainer,
        command.Add:        add,
        command.Copy:       dispatchCopy, // copy() is a go builtin
        command.From:       from,
        command.Onbuild:    onbuild,
        command.Workdir:    workdir,
        command.Run:        run,
        command.Cmd:        cmd,
        command.Entrypoint: entrypoint,
        command.Expose:     expose,
        command.Volume:     volume,
        command.User:       user,
        command.Insert:     insert,
    }
}
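
// For illustration, a sketch of how a parsed statement reaches its handler
// through this table (names as defined in this package):
//
//    f, ok := evaluateTable["workdir"] // ok == true, f == workdir
//    err := f(b, []string{"/app"}, attrs, "WORKDIR /app")
//
// Values missing from the table fall through and dispatch() reports them as
// "Unknown instruction".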

// internal struct, used to maintain configuration of the Dockerfile's
// processing as it evaluates the parsing result.
type Builder struct {
    Daemon *daemon.Daemon
    Engine *engine.Engine

    // effectively stdio for the run. Because it is not stdio, I said
    // "Effectively". Do not use stdio anywhere in this package for any reason.
    OutStream io.Writer
    ErrStream io.Writer

    Verbose      bool
    UtilizeCache bool
    cacheBusted  bool

    // controls how images and containers are handled between steps.
    Remove      bool
    ForceRemove bool
    Pull        bool

    // set this to true if we want the builder to not commit between steps.
    // This is useful when we only want to use the evaluator table to generate
    // the final configs of the Dockerfile but don't want the layers
    disableCommit bool

    AuthConfig     *registry.AuthConfig
    AuthConfigFile *registry.ConfigFile

    // Deprecated, original writer used for ImagePull. To be removed.
    OutOld          io.Writer
    StreamFormatter *utils.StreamFormatter

    Config *runconfig.Config // runconfig for cmd, run, entrypoint etc.

    // both of these are controlled by the Remove and ForceRemove options in BuildOpts
    TmpContainers map[string]struct{} // a map of containers used for removes

    dockerfileName string        // name of Dockerfile
    dockerfile     *parser.Node  // the syntax tree of the dockerfile
    image          string        // image name for commit processing
    maintainer     string        // maintainer name. could probably be removed.
    cmdSet         bool          // indicates whether CMD was set in the current Dockerfile
    context        tarsum.TarSum // the context is a tarball that is uploaded by the client
    contextPath    string        // the path of the temporary directory the local context is unpacked to (server side)
    noBaseImage    bool          // indicates that this build does not start from any base image, but is being built from an empty file system.

    // Set resource restrictions for build containers
    cpuSetCpus string
    cpuShares  int64
    memory     int64
    memorySwap int64

    cancelled <-chan struct{} // When closed, job was cancelled.
}

// Run the builder with the context. This is the lynchpin of this package. This
// will (barring errors):
//
// * call readContext() which will set up the temporary directory and unpack
//   the context into it.
// * read the dockerfile
// * parse the dockerfile
// * walk the parse tree and execute it by dispatching to handlers. If Remove
//   or ForceRemove is set, additional cleanup around containers happens after
//   processing.
// * print a happy message and return the image ID.
//
func (b *Builder) Run(context io.Reader) (string, error) {
    if err := b.readContext(context); err != nil {
        return "", err
    }

    defer func() {
        if err := os.RemoveAll(b.contextPath); err != nil {
            log.Debugf("[BUILDER] failed to remove temporary context: %s", err)
        }
    }()

    if err := b.readDockerfile(); err != nil {
        return "", err
    }

    // some initializations that would not have been supplied by the caller.
    b.Config = &runconfig.Config{}

    b.TmpContainers = map[string]struct{}{}

    for i, n := range b.dockerfile.Children {
        select {
        case <-b.cancelled:
            log.Debug("Builder: build cancelled!")
            fmt.Fprintf(b.OutStream, "Build cancelled")
            return "", fmt.Errorf("Build cancelled")
        default:
            // Not cancelled yet, keep going...
        }
        if err := b.dispatch(i, n); err != nil {
            if b.ForceRemove {
                b.clearTmp()
            }
            return "", err
        }
        fmt.Fprintf(b.OutStream, " ---> %s\n", common.TruncateID(b.image))
        if b.Remove {
            b.clearTmp()
        }
    }

    if b.image == "" {
        return "", fmt.Errorf("No image was generated. Is your Dockerfile empty?")
    }

    fmt.Fprintf(b.OutStream, "Successfully built %s\n", common.TruncateID(b.image))
    return b.image, nil
}

// Reads a Dockerfile from the current context. It assumes that the
// 'filename' is a relative path from the root of the context.
func (b *Builder) readDockerfile() error {
    // If no -f was specified then look for 'Dockerfile'. If we can't find
    // that then look for 'dockerfile'. If neither are found then default
    // back to 'Dockerfile' and use that in the error message.
    if b.dockerfileName == "" {
        b.dockerfileName = api.DefaultDockerfileName
        tmpFN := filepath.Join(b.contextPath, api.DefaultDockerfileName)
        if _, err := os.Lstat(tmpFN); err != nil {
            tmpFN = filepath.Join(b.contextPath, strings.ToLower(api.DefaultDockerfileName))
            if _, err := os.Lstat(tmpFN); err == nil {
                b.dockerfileName = strings.ToLower(api.DefaultDockerfileName)
            }
        }
    }

    origFile := b.dockerfileName

    filename, err := symlink.FollowSymlinkInScope(filepath.Join(b.contextPath, origFile), b.contextPath)
    if err != nil {
        return fmt.Errorf("The Dockerfile (%s) must be within the build context", origFile)
    }

    fi, err := os.Lstat(filename)
    if os.IsNotExist(err) {
        return fmt.Errorf("Cannot locate specified Dockerfile: %s", origFile)
    }
    if fi.Size() == 0 {
        return ErrDockerfileEmpty
    }

    f, err := os.Open(filename)
    if err != nil {
        return err
    }

    b.dockerfile, err = parser.Parse(f)
    f.Close()

    if err != nil {
        return err
    }

    // After the Dockerfile has been parsed, we need to check the .dockerignore
    // file for either "Dockerfile" or ".dockerignore", and if either are
    // present then erase them from the build context. These files should never
    // have been sent from the client but we did send them to make sure that
    // we had the Dockerfile to actually parse, and then we also need the
    // .dockerignore file to know whether either file should be removed.
    // Note that this assumes the Dockerfile has been read into memory and
    // is now safe to be removed.

    excludes, _ := utils.ReadDockerIgnore(filepath.Join(b.contextPath, ".dockerignore"))
    if rm, _ := fileutils.Matches(".dockerignore", excludes); rm {
        os.Remove(filepath.Join(b.contextPath, ".dockerignore"))
        b.context.(tarsum.BuilderContext).Remove(".dockerignore")
    }
    if rm, _ := fileutils.Matches(b.dockerfileName, excludes); rm {
        os.Remove(filepath.Join(b.contextPath, b.dockerfileName))
        b.context.(tarsum.BuilderContext).Remove(b.dockerfileName)
    }

    return nil
}

// This method is the entrypoint to all statement handling routines.
//
// Almost all nodes will have this structure:
// Child[Node, Node, Node] where Child is from parser.Node.Children and each
// node comes from parser.Node.Next. This forms a "line" with a statement and
// arguments and we process them in this normalized form by hitting
// evaluateTable with the leaf nodes of the command and the Builder object.
//
// ONBUILD is a special case; in this case the parser will emit:
// Child[Node, Child[Node, Node...]] where the first node is the literal
// "onbuild" and the child entrypoint is the command of the ONBUILD statement,
// such as `RUN` in ONBUILD RUN foo. There is special case logic in here to
// deal with that, at least until it becomes more of a general concern with new
// features.
func (b *Builder) dispatch(stepN int, ast *parser.Node) error {
    cmd := ast.Value
    attrs := ast.Attributes
    original := ast.Original
    strs := []string{}
    msg := fmt.Sprintf("Step %d : %s", stepN, strings.ToUpper(cmd))

    if cmd == "onbuild" {
        if ast.Next == nil {
            return fmt.Errorf("ONBUILD requires at least one argument")
        }
        ast = ast.Next.Children[0]
        strs = append(strs, ast.Value)
        msg += " " + ast.Value
    }

    // count the number of nodes that we are going to traverse first
    // so we can pre-create the argument and message arrays. This speeds up
    // their allocation a lot when they have a lot of arguments
    cursor := ast
    var n int
    for cursor.Next != nil {
        cursor = cursor.Next
        n++
    }
    l := len(strs)
    strList := make([]string, n+l)
    copy(strList, strs)
    msgList := make([]string, n)

    var i int
    for ast.Next != nil {
        ast = ast.Next
        str := ast.Value
        if _, ok := replaceEnvAllowed[cmd]; ok {
            var err error
            str, err = ProcessWord(ast.Value, b.Config.Env)
            if err != nil {
                return err
            }
        }
        strList[i+l] = str
        msgList[i] = ast.Value
        i++
    }

    msg += " " + strings.Join(msgList, " ")
    fmt.Fprintln(b.OutStream, msg)

    // XXX yes, we skip any cmds that are not valid; the parser should have
    // picked these out already.
    if f, ok := evaluateTable[cmd]; ok {
        return f(b, strList, attrs, original)
    }

    return fmt.Errorf("Unknown instruction: %s", strings.ToUpper(cmd))
}
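
// For illustration, a sketch of the ONBUILD special case above: for
// `ONBUILD ADD . /app/src` the parser emits a nested node, roughly
// (onbuild (add "." "/app/src")). The re-rooting (ast = ast.Next.Children[0])
// walks into the inner statement, so evaluateTable["onbuild"] is invoked with
// args []string{"add", ".", "/app/src"} - the trigger name followed by its
// arguments.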
@@ -1,763 +0,0 @@
package builder

// internals for handling commands. Covers many areas and a lot of
// non-contiguous functionality. Please read the comments.

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
    "io"
    "io/ioutil"
    "net/http"
    "net/url"
    "os"
    "path"
    "path/filepath"
    "sort"
    "strings"
    "syscall"
    "time"

    log "github.com/Sirupsen/logrus"
    "github.com/docker/docker/builder/parser"
    "github.com/docker/docker/daemon"
    imagepkg "github.com/docker/docker/image"
    "github.com/docker/docker/pkg/archive"
    "github.com/docker/docker/pkg/chrootarchive"
    "github.com/docker/docker/pkg/common"
    "github.com/docker/docker/pkg/ioutils"
    "github.com/docker/docker/pkg/parsers"
    "github.com/docker/docker/pkg/progressreader"
    "github.com/docker/docker/pkg/symlink"
    "github.com/docker/docker/pkg/system"
    "github.com/docker/docker/pkg/tarsum"
    "github.com/docker/docker/pkg/urlutil"
    "github.com/docker/docker/registry"
    "github.com/docker/docker/runconfig"
    "github.com/docker/docker/utils"
)

func (b *Builder) readContext(context io.Reader) error {
    tmpdirPath, err := ioutil.TempDir("", "docker-build")
    if err != nil {
        return err
    }

    decompressedStream, err := archive.DecompressStream(context)
    if err != nil {
        return err
    }

    if b.context, err = tarsum.NewTarSum(decompressedStream, true, tarsum.Version0); err != nil {
        return err
    }

    if err := chrootarchive.Untar(b.context, tmpdirPath, nil); err != nil {
        return err
    }

    b.contextPath = tmpdirPath
    return nil
}

func (b *Builder) commit(id string, autoCmd []string, comment string) error {
    if b.disableCommit {
        return nil
    }
    if b.image == "" && !b.noBaseImage {
        return fmt.Errorf("Please provide a source image with `from` prior to commit")
    }
    b.Config.Image = b.image
    if id == "" {
        cmd := b.Config.Cmd
        b.Config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + comment}
        defer func(cmd []string) { b.Config.Cmd = cmd }(cmd)

        hit, err := b.probeCache()
        if err != nil {
            return err
        }
        if hit {
            return nil
        }

        container, err := b.create()
        if err != nil {
            return err
        }
        id = container.ID

        if err := container.Mount(); err != nil {
            return err
        }
        defer container.Unmount()
    }
    container, err := b.Daemon.Get(id)
    if err != nil {
        return err
    }

    // Note: Actually copy the struct
    autoConfig := *b.Config
    autoConfig.Cmd = autoCmd

    // Commit the container
    image, err := b.Daemon.Commit(container, "", "", "", b.maintainer, true, &autoConfig)
    if err != nil {
        return err
    }
    b.image = image.ID
    return nil
}

type copyInfo struct {
    origPath   string
    destPath   string
    hash       string
    decompress bool
    tmpDir     string
}

func (b *Builder) runContextCommand(args []string, allowRemote bool, allowDecompression bool, cmdName string) error {
    if b.context == nil {
        return fmt.Errorf("No context given. Impossible to use %s", cmdName)
    }

    if len(args) < 2 {
        return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName)
    }

    dest := args[len(args)-1] // last one is always the dest

    copyInfos := []*copyInfo{}

    b.Config.Image = b.image

    defer func() {
        for _, ci := range copyInfos {
            if ci.tmpDir != "" {
                os.RemoveAll(ci.tmpDir)
            }
        }
    }()

    // Loop through each src file and calculate the info we need to
    // do the copy (e.g. hash value if cached). Don't actually do
    // the copy until we've looked at all src files
    for _, orig := range args[0 : len(args)-1] {
        err := calcCopyInfo(b, cmdName, &copyInfos, orig, dest, allowRemote, allowDecompression)
        if err != nil {
            return err
        }
    }

    if len(copyInfos) == 0 {
        return fmt.Errorf("No source files were specified")
    }

    if len(copyInfos) > 1 && !strings.HasSuffix(dest, "/") {
        return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
    }

    // For backwards compat, if there's just one CI then use it as the
    // cache look-up string, otherwise hash 'em all into one
    var srcHash string
    var origPaths string

    if len(copyInfos) == 1 {
        srcHash = copyInfos[0].hash
        origPaths = copyInfos[0].origPath
    } else {
        var hashs []string
        var origs []string
        for _, ci := range copyInfos {
            hashs = append(hashs, ci.hash)
            origs = append(origs, ci.origPath)
        }
        hasher := sha256.New()
        hasher.Write([]byte(strings.Join(hashs, ",")))
        srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil))
        origPaths = strings.Join(origs, " ")
    }

    cmd := b.Config.Cmd
    b.Config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest)}
    defer func(cmd []string) { b.Config.Cmd = cmd }(cmd)

    hit, err := b.probeCache()
    if err != nil {
        return err
    }

    if hit {
        return nil
    }

    container, _, err := b.Daemon.Create(b.Config, nil, "")
    if err != nil {
        return err
    }
    b.TmpContainers[container.ID] = struct{}{}

    if err := container.Mount(); err != nil {
        return err
    }
    defer container.Unmount()

    for _, ci := range copyInfos {
        if err := b.addContext(container, ci.origPath, ci.destPath, ci.decompress); err != nil {
            return err
        }
    }

    if err := b.commit(container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)); err != nil {
        return err
    }
    return nil
}

func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool) error {

    if origPath != "" && origPath[0] == '/' && len(origPath) > 1 {
        origPath = origPath[1:]
    }
    origPath = strings.TrimPrefix(origPath, "./")

    // Twiddle the destPath when it's a relative path - meaning, make it
    // relative to the WORKINGDIR
    if !filepath.IsAbs(destPath) {
        hasSlash := strings.HasSuffix(destPath, "/")
        destPath = filepath.Join("/", b.Config.WorkingDir, destPath)

        // Make sure we preserve any trailing slash
        if hasSlash {
            destPath += "/"
        }
    }

    // In the remote/URL case, download it and gen its hashcode
    if urlutil.IsURL(origPath) {
        if !allowRemote {
            return fmt.Errorf("Source can't be a URL for %s", cmdName)
        }

        ci := copyInfo{}
        ci.origPath = origPath
        ci.hash = origPath // default to this but can change
        ci.destPath = destPath
        ci.decompress = false
        *cInfos = append(*cInfos, &ci)

        // Initiate the download
        resp, err := utils.Download(ci.origPath)
        if err != nil {
            return err
        }

        // Create a tmp dir
        tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
        if err != nil {
            return err
        }
        ci.tmpDir = tmpDirName

        // Create a tmp file within our tmp dir
        tmpFileName := path.Join(tmpDirName, "tmp")
        tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
        if err != nil {
            return err
        }

        // Download and dump result to tmp file
        if _, err := io.Copy(tmpFile, progressreader.New(progressreader.Config{
            In:        resp.Body,
            Out:       b.OutOld,
            Formatter: b.StreamFormatter,
            Size:      int(resp.ContentLength),
            NewLines:  true,
            ID:        "",
            Action:    "Downloading",
        })); err != nil {
            tmpFile.Close()
            return err
        }
        fmt.Fprintf(b.OutStream, "\n")
        tmpFile.Close()

        // Set the mtime to the Last-Modified header value if present
        // Otherwise just remove atime and mtime
        times := make([]syscall.Timespec, 2)

        lastMod := resp.Header.Get("Last-Modified")
        if lastMod != "" {
            mTime, err := http.ParseTime(lastMod)
            // If we can't parse it then just let it default to 'zero'
            // otherwise use the parsed time value
            if err == nil {
                times[1] = syscall.NsecToTimespec(mTime.UnixNano())
            }
        }

        if err := system.UtimesNano(tmpFileName, times); err != nil {
            return err
        }

        ci.origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))

        // If the destination is a directory, figure out the filename.
        if strings.HasSuffix(ci.destPath, "/") {
            u, err := url.Parse(origPath)
            if err != nil {
                return err
            }
            path := u.Path
            if strings.HasSuffix(path, "/") {
                path = path[:len(path)-1]
            }
            parts := strings.Split(path, "/")
            filename := parts[len(parts)-1]
            if filename == "" {
                return fmt.Errorf("cannot determine filename from url: %s", u)
            }
            ci.destPath = ci.destPath + filename
        }

        // Calc the checksum, even if we're using the cache
        r, err := archive.Tar(tmpFileName, archive.Uncompressed)
        if err != nil {
            return err
        }
        tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version0)
        if err != nil {
            return err
        }
        if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
            return err
        }
        ci.hash = tarSum.Sum(nil)
        r.Close()

        return nil
    }

    // Deal with wildcards
    if ContainsWildcards(origPath) {
        for _, fileInfo := range b.context.GetSums() {
            if fileInfo.Name() == "" {
                continue
            }
            match, _ := path.Match(origPath, fileInfo.Name())
            if !match {
                continue
            }

            calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression)
        }
        return nil
    }

    // Must be a dir or a file

    if err := b.checkPathForAddition(origPath); err != nil {
        return err
    }
    fi, _ := os.Stat(path.Join(b.contextPath, origPath))

    ci := copyInfo{}
    ci.origPath = origPath
    ci.hash = origPath
    ci.destPath = destPath
    ci.decompress = allowDecompression
    *cInfos = append(*cInfos, &ci)

    // Deal with the single file case
    if !fi.IsDir() {
        // This will match the first file in the sums of the archive
        fis := b.context.GetSums().GetFile(ci.origPath)
        if fis != nil {
            ci.hash = "file:" + fis.Sum()
        }
        return nil
    }

    // Must be a dir
    var subfiles []string
    absOrigPath := path.Join(b.contextPath, ci.origPath)

    // Add a trailing / to make sure we only pick up nested files under
    // the dir and not sibling files of the dir that just happen to
    // start with the same chars
    if !strings.HasSuffix(absOrigPath, "/") {
        absOrigPath += "/"
    }

    // Need path w/o / too to find matching dir w/o trailing /
    absOrigPathNoSlash := absOrigPath[:len(absOrigPath)-1]

    for _, fileInfo := range b.context.GetSums() {
        absFile := path.Join(b.contextPath, fileInfo.Name())
        // Any file in the context that starts with the given path will be
        // picked up and its hashcode used. However, we'll exclude the
        // root dir itself. We do this for a couple of reasons:
        // 1 - ADD/COPY will not copy the dir itself, just its children
        //     so there's no reason to include it in the hash calc
        // 2 - the metadata on the dir will change when any child file
        //     changes. This will lead to a miss in the cache check if that
        //     child file is in the .dockerignore list.
        if strings.HasPrefix(absFile, absOrigPath) && absFile != absOrigPathNoSlash {
            subfiles = append(subfiles, fileInfo.Sum())
        }
    }
    sort.Strings(subfiles)
    hasher := sha256.New()
    hasher.Write([]byte(strings.Join(subfiles, ",")))
    ci.hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))

    return nil
}
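
// For illustration, the three cache-key shapes produced above for an
// ADD/COPY source:
//
//    remote URL  : ci.hash = tarsum of the downloaded file
//    single file : ci.hash = "file:" + <tarsum of that file>
//    directory   : ci.hash = "dir:" + sha256 over the sorted child sums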

func ContainsWildcards(name string) bool {
    for i := 0; i < len(name); i++ {
        ch := name[i]
        if ch == '\\' {
            i++
        } else if ch == '*' || ch == '?' || ch == '[' {
            return true
        }
    }
    return false
}
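
// For illustration:
//
//    ContainsWildcards("docs/*.md")    // true: '*' is a wildcard
//    ContainsWildcards(`docs/\*.md`)   // false: the backslash escapes '*'
//    ContainsWildcards("docs/README")  // false: no wildcard characters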

func (b *Builder) pullImage(name string) (*imagepkg.Image, error) {
    remote, tag := parsers.ParseRepositoryTag(name)
    if tag == "" {
        tag = "latest"
    }
    job := b.Engine.Job("pull", remote, tag)
    pullRegistryAuth := b.AuthConfig
    if len(b.AuthConfigFile.Configs) > 0 {
        // The request came with a full auth config file, we prefer to use that
        repoInfo, err := registry.ResolveRepositoryInfo(job, remote)
        if err != nil {
            return nil, err
        }
        resolvedAuth := b.AuthConfigFile.ResolveAuthConfig(repoInfo.Index)
        pullRegistryAuth = &resolvedAuth
    }
    job.SetenvBool("json", b.StreamFormatter.Json())
    job.SetenvBool("parallel", true)
    job.SetenvJson("authConfig", pullRegistryAuth)
    job.Stdout.Add(ioutils.NopWriteCloser(b.OutOld))
    if err := job.Run(); err != nil {
        return nil, err
    }
    image, err := b.Daemon.Repositories().LookupImage(name)
    if err != nil {
        return nil, err
    }

    return image, nil
}

func (b *Builder) processImageFrom(img *imagepkg.Image) error {
    b.image = img.ID

    if img.Config != nil {
        b.Config = img.Config
    }

    if len(b.Config.Env) == 0 {
        b.Config.Env = append(b.Config.Env, "PATH="+daemon.DefaultPathEnv)
    }

    // Process ONBUILD triggers if they exist
    if nTriggers := len(b.Config.OnBuild); nTriggers != 0 {
        fmt.Fprintf(b.ErrStream, "# Executing %d build triggers\n", nTriggers)
    }

    // Copy the ONBUILD triggers, and remove them from the config, since the config will be committed.
    onBuildTriggers := b.Config.OnBuild
    b.Config.OnBuild = []string{}

    // parse the ONBUILD triggers by invoking the parser
    for stepN, step := range onBuildTriggers {
        ast, err := parser.Parse(strings.NewReader(step))
        if err != nil {
            return err
        }

        for i, n := range ast.Children {
            switch strings.ToUpper(n.Value) {
            case "ONBUILD":
                return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
            case "MAINTAINER", "FROM":
                return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value)
            }

            fmt.Fprintf(b.OutStream, "Trigger %d, %s\n", stepN, step)

            if err := b.dispatch(i, n); err != nil {
                return err
            }
        }
    }

    return nil
}

// probeCache checks to see if image-caching is enabled (`b.UtilizeCache`)
// and if so attempts to look up the current `b.image` and `b.Config` pair
// in the current server `b.Daemon`. If an image is found, probeCache returns
// `(true, nil)`. If no image is found, it returns `(false, nil)`. If there
// is any error, it returns `(false, err)`.
func (b *Builder) probeCache() (bool, error) {
    if !b.UtilizeCache || b.cacheBusted {
        return false, nil
    }

    cache, err := b.Daemon.ImageGetCached(b.image, b.Config)
    if err != nil {
        return false, err
    }
    if cache == nil {
        log.Debugf("[BUILDER] Cache miss")
        b.cacheBusted = true
        return false, nil
    }

    fmt.Fprintf(b.OutStream, " ---> Using cache\n")
    log.Debugf("[BUILDER] Use cached version")
    b.image = cache.ID
    return true, nil
}
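
// For illustration, the calling pattern used throughout this package:
//
//    hit, err := b.probeCache()
//    if err != nil {
//        return err
//    }
//    if hit {
//        return nil // reuse the cached image; skip creating a container
//    }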

func (b *Builder) create() (*daemon.Container, error) {
    if b.image == "" && !b.noBaseImage {
        return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
    }
    b.Config.Image = b.image

    hostConfig := &runconfig.HostConfig{
        CpuShares:  b.cpuShares,
        CpusetCpus: b.cpuSetCpus,
        Memory:     b.memory,
        MemorySwap: b.memorySwap,
    }

    config := *b.Config

    // Create the container
    c, warnings, err := b.Daemon.Create(b.Config, hostConfig, "")
    if err != nil {
        return nil, err
    }
    for _, warning := range warnings {
        fmt.Fprintf(b.OutStream, " ---> [Warning] %s\n", warning)
    }

    b.TmpContainers[c.ID] = struct{}{}
    fmt.Fprintf(b.OutStream, " ---> Running in %s\n", common.TruncateID(c.ID))

    if len(config.Cmd) > 0 {
        // override the entry point that may have been picked up from the base image
        c.Path = config.Cmd[0]
        c.Args = config.Cmd[1:]
    } else {
        config.Cmd = []string{}
    }

    return c, nil
}

func (b *Builder) run(c *daemon.Container) error {
    var errCh chan error
    if b.Verbose {
        errCh = b.Daemon.Attach(&c.StreamConfig, c.Config.OpenStdin, c.Config.StdinOnce, c.Config.Tty, nil, b.OutStream, b.ErrStream)
    }

    // start the container
    if err := c.Start(); err != nil {
        return err
    }

    finished := make(chan struct{})
    defer close(finished)
    go func() {
        select {
        case <-b.cancelled:
            log.Debugln("Build cancelled, killing container:", c.ID)
            c.Kill()
        case <-finished:
        }
    }()

    if b.Verbose {
        // Block on reading output from container, stop on err or chan closed
        if err := <-errCh; err != nil {
            return err
        }
    }

    // Wait for it to finish
    if ret, _ := c.WaitStop(-1 * time.Second); ret != 0 {
        err := &utils.JSONError{
            Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.Config.Cmd, ret),
            Code:    ret,
        }
        return err
    }

    return nil
}

func (b *Builder) checkPathForAddition(orig string) error {
    origPath := path.Join(b.contextPath, orig)
    origPath, err := filepath.EvalSymlinks(origPath)
    if err != nil {
        if os.IsNotExist(err) {
            return fmt.Errorf("%s: no such file or directory", orig)
        }
        return err
    }
    if !strings.HasPrefix(origPath, b.contextPath) {
        return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath)
    }
    if _, err := os.Stat(origPath); err != nil {
        if os.IsNotExist(err) {
            return fmt.Errorf("%s: no such file or directory", orig)
        }
        return err
    }
    return nil
}

func (b *Builder) addContext(container *daemon.Container, orig, dest string, decompress bool) error {
    var (
        err        error
        destExists = true
        origPath   = path.Join(b.contextPath, orig)
        destPath   = path.Join(container.RootfsPath(), dest)
    )

    if destPath != container.RootfsPath() {
        destPath, err = symlink.FollowSymlinkInScope(destPath, container.RootfsPath())
        if err != nil {
            return err
        }
    }

    // Preserve the trailing '/'
    if strings.HasSuffix(dest, "/") || dest == "." {
        destPath = destPath + "/"
    }

    destStat, err := os.Stat(destPath)
    if err != nil {
        if !os.IsNotExist(err) {
            return err
        }
        destExists = false
    }

    fi, err := os.Stat(origPath)
    if err != nil {
        if os.IsNotExist(err) {
            return fmt.Errorf("%s: no such file or directory", orig)
        }
        return err
    }

    if fi.IsDir() {
        return copyAsDirectory(origPath, destPath, destExists)
    }

    // If we are adding a remote file (or we've been told not to decompress), do not try to untar it
    if decompress {
        // First try to unpack the source as an archive
        // to support the untar feature we need to clean up the path a little bit
        // because tar is very forgiving. First we need to strip off the archive's
        // filename from the path but this is only added if it does not end in / .
        tarDest := destPath
        if strings.HasSuffix(tarDest, "/") {
            tarDest = filepath.Dir(destPath)
        }

        // try to successfully untar the orig
        if err := chrootarchive.UntarPath(origPath, tarDest); err == nil {
            return nil
        } else if err != io.EOF {
            log.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
        }
    }

    if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil {
        return err
    }
    if err := chrootarchive.CopyWithTar(origPath, destPath); err != nil {
        return err
    }

    resPath := destPath
    if destExists && destStat.IsDir() {
        resPath = path.Join(destPath, path.Base(origPath))
    }

    return fixPermissions(origPath, resPath, 0, 0, destExists)
}

func copyAsDirectory(source, destination string, destExisted bool) error {
    if err := chrootarchive.CopyWithTar(source, destination); err != nil {
        return err
    }
    return fixPermissions(source, destination, 0, 0, destExisted)
}

func fixPermissions(source, destination string, uid, gid int, destExisted bool) error {
    // If the destination didn't already exist, or the destination isn't a
    // directory, then we should Lchown the destination. Otherwise, we shouldn't
    // Lchown the destination.
    destStat, err := os.Stat(destination)
    if err != nil {
        // This should *never* be reached, because the destination must've already
        // been created while untar-ing the context.
        return err
    }
    doChownDestination := !destExisted || !destStat.IsDir()

    // We Walk on the source rather than on the destination because we don't
    // want to change permissions on things we haven't created or modified.
    return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error {
        // Do not alter the walk root if it existed before, as it doesn't fall under
        // the domain of "things we should chown".
        if !doChownDestination && (source == fullpath) {
            return nil
        }

        // Path is prefixed by source: substitute with destination instead.
        cleaned, err := filepath.Rel(source, fullpath)
        if err != nil {
            return err
        }

        fullpath = path.Join(destination, cleaned)
        return os.Lchown(fullpath, uid, gid)
    })
}

func (b *Builder) clearTmp() {
    for c := range b.TmpContainers {
        tmp, err := b.Daemon.Get(c)
        if err != nil {
            fmt.Fprint(b.OutStream, err.Error())
        }

        if err := b.Daemon.Rm(tmp); err != nil {
            fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %v\n", common.TruncateID(c), err)
            return
        }
        b.Daemon.DeleteVolumes(tmp.VolumePaths())
        delete(b.TmpContainers, c)
        fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", common.TruncateID(c))
    }
}
builder/job.go
@@ -1,215 +0,0 @@
package builder

import (
    "bytes"
    "encoding/json"
    "io"
    "io/ioutil"
    "os"
    "os/exec"
    "strings"

    "github.com/docker/docker/api"
    "github.com/docker/docker/builder/parser"
    "github.com/docker/docker/daemon"
    "github.com/docker/docker/engine"
    "github.com/docker/docker/graph"
    "github.com/docker/docker/pkg/archive"
    "github.com/docker/docker/pkg/parsers"
    "github.com/docker/docker/pkg/urlutil"
    "github.com/docker/docker/registry"
    "github.com/docker/docker/runconfig"
    "github.com/docker/docker/utils"
)

// whitelist of commands allowed for a commit/import
var validCommitCommands = map[string]bool{
    "entrypoint": true,
    "cmd":        true,
    "user":       true,
    "workdir":    true,
    "env":        true,
    "volume":     true,
    "expose":     true,
    "onbuild":    true,
}
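
// For illustration, a sketch (the client-side flag is an assumption, not
// shown in this file): a change such as `--change 'ENV DEBUG true'` passed on
// commit/import arrives as one entry in the "changes" list consumed by
// CmdBuildConfig below; statements outside validCommitCommands (e.g. RUN)
// are rejected with "is not a valid change command".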
|
||||
|
||||
type BuilderJob struct {
|
||||
Engine *engine.Engine
|
||||
Daemon *daemon.Daemon
|
||||
}
|
||||
|
||||
func (b *BuilderJob) Install() {
|
||||
b.Engine.Register("build", b.CmdBuild)
|
||||
b.Engine.Register("build_config", b.CmdBuildConfig)
|
||||
}
|
||||
|
||||
func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
|
||||
if len(job.Args) != 0 {
|
||||
return job.Errorf("Usage: %s\n", job.Name)
|
||||
}
|
||||
var (
|
||||
dockerfileName = job.Getenv("dockerfile")
|
||||
remoteURL = job.Getenv("remote")
|
||||
repoName = job.Getenv("t")
|
||||
suppressOutput = job.GetenvBool("q")
|
||||
noCache = job.GetenvBool("nocache")
|
||||
rm = job.GetenvBool("rm")
|
||||
forceRm = job.GetenvBool("forcerm")
|
||||
pull = job.GetenvBool("pull")
|
||||
memory = job.GetenvInt64("memory")
|
||||
memorySwap = job.GetenvInt64("memswap")
|
||||
cpuShares = job.GetenvInt64("cpushares")
|
||||
cpuSetCpus = job.Getenv("cpusetcpus")
|
||||
authConfig = ®istry.AuthConfig{}
|
||||
configFile = ®istry.ConfigFile{}
|
||||
tag string
|
||||
context io.ReadCloser
|
||||
)
|
||||
|
||||
job.GetenvJson("authConfig", authConfig)
|
||||
job.GetenvJson("configFile", configFile)
|
||||
|
||||
repoName, tag = parsers.ParseRepositoryTag(repoName)
|
||||
if repoName != "" {
|
||||
if err := registry.ValidateRepositoryName(repoName); err != nil {
|
||||
return job.Error(err)
|
||||
}
|
||||
if len(tag) > 0 {
|
||||
if err := graph.ValidateTagName(tag); err != nil {
|
||||
return job.Error(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if remoteURL == "" {
|
||||
context = ioutil.NopCloser(job.Stdin)
|
||||
} else if urlutil.IsGitURL(remoteURL) {
|
||||
if !urlutil.IsGitTransport(remoteURL) {
|
||||
remoteURL = "https://" + remoteURL
|
||||
}
|
||||
root, err := ioutil.TempDir("", "docker-build-git")
|
||||
if err != nil {
|
||||
return job.Error(err)
|
||||
}
|
||||
defer os.RemoveAll(root)
|
||||
|
||||
if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil {
|
||||
return job.Errorf("Error trying to use git: %s (%s)", err, output)
|
||||
}
|
||||
|
||||
c, err := archive.Tar(root, archive.Uncompressed)
|
||||
if err != nil {
|
||||
return job.Error(err)
|
||||
}
|
||||
context = c
|
||||
} else if urlutil.IsURL(remoteURL) {
|
||||
f, err := utils.Download(remoteURL)
|
||||
if err != nil {
|
||||
return job.Error(err)
|
||||
}
|
||||
defer f.Body.Close()
|
||||
dockerFile, err := ioutil.ReadAll(f.Body)
|
||||
if err != nil {
|
||||
return job.Error(err)
|
||||
}
|
||||
|
||||
// When we're downloading just a Dockerfile put it in
|
||||
// the default name - don't allow the client to move/specify it
|
||||
dockerfileName = api.DefaultDockerfileName
|
||||
|
||||
c, err := archive.Generate(dockerfileName, string(dockerFile))
|
||||
if err != nil {
|
||||
return job.Error(err)
|
||||
}
|
||||
context = c
|
||||
}
|
||||
defer context.Close()
|
||||
|
||||
sf := utils.NewStreamFormatter(job.GetenvBool("json"))
|
||||
|
||||
builder := &Builder{
|
||||
Daemon: b.Daemon,
|
||||
Engine: b.Engine,
|
||||
OutStream: &utils.StdoutFormater{
|
||||
Writer: job.Stdout,
|
||||
StreamFormatter: sf,
|
||||
},
|
||||
ErrStream: &utils.StderrFormater{
|
||||
Writer: job.Stdout,
|
||||
StreamFormatter: sf,
|
||||
},
|
||||
Verbose: !suppressOutput,
|
||||
UtilizeCache: !noCache,
|
||||
Remove: rm,
|
||||
ForceRemove: forceRm,
|
||||
Pull: pull,
|
||||
OutOld: job.Stdout,
|
||||
StreamFormatter: sf,
|
||||
AuthConfig: authConfig,
|
||||
AuthConfigFile: configFile,
|
||||
dockerfileName: dockerfileName,
|
||||
cpuShares: cpuShares,
|
||||
cpuSetCpus: cpuSetCpus,
|
||||
memory: memory,
|
||||
memorySwap: memorySwap,
|
||||
cancelled: job.WaitCancelled(),
|
||||
}
|
||||
|
||||
id, err := builder.Run(context)
|
||||
if err != nil {
|
||||
return job.Error(err)
|
||||
}
|
||||
|
||||
if repoName != "" {
|
||||
b.Daemon.Repositories().Set(repoName, tag, id, true)
|
||||
}
|
||||
return engine.StatusOK
|
||||
}
|
||||
|
||||

func (b *BuilderJob) CmdBuildConfig(job *engine.Job) engine.Status {
	if len(job.Args) != 0 {
		return job.Errorf("Usage: %s\n", job.Name)
	}

	var (
		changes   = job.GetenvList("changes")
		newConfig runconfig.Config
	)

	if err := job.GetenvJson("config", &newConfig); err != nil {
		return job.Error(err)
	}

	ast, err := parser.Parse(bytes.NewBufferString(strings.Join(changes, "\n")))
	if err != nil {
		return job.Error(err)
	}

	// ensure that the commands are valid
	for _, n := range ast.Children {
		if !validCommitCommands[n.Value] {
			return job.Errorf("%s is not a valid change command", n.Value)
		}
	}

	builder := &Builder{
		Daemon:        b.Daemon,
		Engine:        b.Engine,
		Config:        &newConfig,
		OutStream:     ioutil.Discard,
		ErrStream:     ioutil.Discard,
		disableCommit: true,
	}

	for i, n := range ast.Children {
		if err := builder.dispatch(i, n); err != nil {
			return job.Error(err)
		}
	}

	if err := json.NewEncoder(job.Stdout).Encode(builder.Config); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
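For context: CmdBuildConfig treats each "changes" entry as a one-instruction Dockerfile fragment, parses them as a single pseudo-Dockerfile, and rejects any instruction not in validCommitCommands before dispatching. A minimal standalone sketch of that same validation pass, using only the exported parser API; the allow-list below is illustrative, since validCommitCommands itself is unexported:

package main

import (
	"fmt"
	"strings"

	"github.com/docker/docker/builder/parser"
)

// Illustrative stand-in for the builder's unexported validCommitCommands map.
var validCommitCommands = map[string]bool{
	"entrypoint": true, "cmd": true, "user": true, "workdir": true,
	"env": true, "volume": true, "expose": true, "onbuild": true,
}

func main() {
	changes := []string{"ENV DEBUG=1", "EXPOSE 8080"}

	// Join the change lines into one pseudo-Dockerfile, exactly as
	// CmdBuildConfig does above.
	ast, err := parser.Parse(strings.NewReader(strings.Join(changes, "\n")))
	if err != nil {
		panic(err)
	}

	// Each top-level child is one instruction; reject anything outside
	// the allow-list (instruction names arrive lowercased from the parser).
	for _, n := range ast.Children {
		if !validCommitCommands[n.Value] {
			panic(fmt.Sprintf("%s is not a valid change command", n.Value))
		}
	}
	fmt.Println("all changes valid")
}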
@@ -1,32 +0,0 @@
package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/builder/parser"
)

func main() {
	var f *os.File
	var err error

	if len(os.Args) < 2 {
		fmt.Println("please supply filename(s)")
		os.Exit(1)
	}

	for _, fn := range os.Args[1:] {
		f, err = os.Open(fn)
		if err != nil {
			panic(err)
		}

		ast, err := parser.Parse(f)
		if err != nil {
			panic(err)
		} else {
			fmt.Println(ast.Dump())
		}
	}
}
@@ -1,55 +0,0 @@
package parser

import (
	"testing"
)

var invalidJSONArraysOfStrings = []string{
	`["a",42,"b"]`,
	`["a",123.456,"b"]`,
	`["a",{},"b"]`,
	`["a",{"c": "d"},"b"]`,
	`["a",["c"],"b"]`,
	`["a",true,"b"]`,
	`["a",false,"b"]`,
	`["a",null,"b"]`,
}

var validJSONArraysOfStrings = map[string][]string{
	`[]`:           {},
	`[""]`:         {""},
	`["a"]`:        {"a"},
	`["a","b"]`:    {"a", "b"},
	`[ "a", "b" ]`: {"a", "b"},
	`[	"a",	"b"	]`: {"a", "b"},
	`	[ "a", "b" ]	`: {"a", "b"},
	`["abc 123", "♥", "☃", "\" \\ \/ \b \f \n \r \t \u0000"]`: {"abc 123", "♥", "☃", "\" \\ / \b \f \n \r \t \u0000"},
}

func TestJSONArraysOfStrings(t *testing.T) {
	for json, expected := range validJSONArraysOfStrings {
		if node, _, err := parseJSON(json); err != nil {
			t.Fatalf("%q should be a valid JSON array of strings, but wasn't! (err: %q)", json, err)
		} else {
			i := 0
			for node != nil {
				if i >= len(expected) {
					t.Fatalf("expected result is shorter than parsed result (%d vs %d+) in %q", len(expected), i+1, json)
				}
				if node.Value != expected[i] {
					t.Fatalf("expected %q (not %q) in %q at pos %d", expected[i], node.Value, json, i)
				}
				node = node.Next
				i++
			}
			if i != len(expected) {
				t.Fatalf("expected result is longer than parsed result (%d vs %d) in %q", len(expected), i+1, json)
			}
		}
	}
	for _, json := range invalidJSONArraysOfStrings {
		if _, _, err := parseJSON(json); err != errDockerfileNotStringArray {
			t.Fatalf("%q should be an invalid JSON array of strings, but wasn't!", json)
		}
	}
}
@@ -1,294 +0,0 @@
package parser

// line parsers are dispatch calls that parse a single unit of text into a
// Node object which contains the whole statement. Dockerfiles have varied
// (but not usually unique, see ONBUILD for a unique example) parsing rules
// per-command, and these unify the processing in a way that makes it
// manageable.

import (
	"encoding/json"
	"errors"
	"fmt"
	"strings"
	"unicode"
)

var (
	errDockerfileNotStringArray = errors.New("When using JSON array syntax, arrays must be comprised of strings only.")
)

// ignore the current argument. This will still leave a command parsed, but
// will not incorporate the arguments into the ast.
func parseIgnore(rest string) (*Node, map[string]bool, error) {
	return &Node{}, nil, nil
}

// used for onbuild. Could potentially be used for anything that represents a
// statement with sub-statements.
//
// ONBUILD RUN foo bar -> (onbuild (run foo bar))
//
func parseSubCommand(rest string) (*Node, map[string]bool, error) {
	if rest == "" {
		return nil, nil, nil
	}

	_, child, err := parseLine(rest)
	if err != nil {
		return nil, nil, err
	}

	return &Node{Children: []*Node{child}}, nil, nil
}

// parse environment like statements. Note that this does *not* handle
// variable interpolation, which will be handled in the evaluator.
func parseNameVal(rest string, key string) (*Node, map[string]bool, error) {
	// This is kind of tricky because we need to support the old
	// variant: KEY name value
	// as well as the new one: KEY name=value ...
	// The trigger to know which one is being used will be whether we hit
	// a space or = first. space ==> old, "=" ==> new

	const (
		inSpaces = iota // looking for start of a word
		inWord
		inQuote
	)

	words := []string{}
	phase := inSpaces
	word := ""
	quote := '\000'
	blankOK := false
	var ch rune

	for pos := 0; pos <= len(rest); pos++ {
		if pos != len(rest) {
			ch = rune(rest[pos])
		}

		if phase == inSpaces { // Looking for start of word
			if pos == len(rest) { // end of input
				break
			}
			if unicode.IsSpace(ch) { // skip spaces
				continue
			}
			phase = inWord // found it, fall thru
		}
		if (phase == inWord || phase == inQuote) && (pos == len(rest)) {
			if blankOK || len(word) > 0 {
				words = append(words, word)
			}
			break
		}
		if phase == inWord {
			if unicode.IsSpace(ch) {
				phase = inSpaces
				if blankOK || len(word) > 0 {
					words = append(words, word)

					// Look for = and if not there assume
					// we're doing the old stuff and
					// just read the rest of the line
					if !strings.Contains(word, "=") {
						word = strings.TrimSpace(rest[pos:])
						words = append(words, word)
						break
					}
				}
				word = ""
				blankOK = false
				continue
			}
			if ch == '\'' || ch == '"' {
				quote = ch
				blankOK = true
				phase = inQuote
			}
			if ch == '\\' {
				if pos+1 == len(rest) {
					continue // just skip \ at end
				}
				// If we're not quoted and we see a \, then always just
				// add \ plus the char to the word, even if the char
				// is a quote.
				word += string(ch)
				pos++
				ch = rune(rest[pos])
			}
			word += string(ch)
			continue
		}
		if phase == inQuote {
			if ch == quote {
				phase = inWord
			}
			// \ is special except for ' quotes - can't escape anything for '
			if ch == '\\' && quote != '\'' {
				if pos+1 == len(rest) {
					phase = inWord
					continue // just skip \ at end
				}
				pos++
				nextCh := rune(rest[pos])
				word += string(ch)
				ch = nextCh
			}
			word += string(ch)
		}
	}

	if len(words) == 0 {
		return nil, nil, nil
	}

	// Old format (KEY name value)
	var rootnode *Node

	if !strings.Contains(words[0], "=") {
		node := &Node{}
		rootnode = node
		strs := TOKEN_WHITESPACE.Split(rest, 2)

		if len(strs) < 2 {
			return nil, nil, fmt.Errorf(key + " must have two arguments")
		}

		node.Value = strs[0]
		node.Next = &Node{}
		node.Next.Value = strs[1]
	} else {
		var prevNode *Node
		for i, word := range words {
			if !strings.Contains(word, "=") {
				return nil, nil, fmt.Errorf("Syntax error - can't find = in %q. Must be of the form: name=value", word)
			}
			parts := strings.SplitN(word, "=", 2)

			name := &Node{}
			value := &Node{}

			name.Next = value
			name.Value = parts[0]
			value.Value = parts[1]

			if i == 0 {
				rootnode = name
			} else {
				prevNode.Next = name
			}
			prevNode = value
		}
	}

	return rootnode, nil, nil
}

func parseEnv(rest string) (*Node, map[string]bool, error) {
	return parseNameVal(rest, "ENV")
}

func parseLabel(rest string) (*Node, map[string]bool, error) {
	return parseNameVal(rest, "LABEL")
}

// parses a whitespace-delimited set of arguments. The result is effectively a
// linked list of string arguments.
func parseStringsWhitespaceDelimited(rest string) (*Node, map[string]bool, error) {
	if rest == "" {
		return nil, nil, nil
	}

	node := &Node{}
	rootnode := node
	prevnode := node
	for _, str := range TOKEN_WHITESPACE.Split(rest, -1) { // use regexp
		prevnode = node
		node.Value = str
		node.Next = &Node{}
		node = node.Next
	}

	// XXX to get around regexp.Split *always* providing an empty string at the
	// end due to how our loop is constructed, nil out the last node in the
	// chain.
	prevnode.Next = nil

	return rootnode, nil, nil
}

// parsestring just wraps the string in quotes and returns a working node.
func parseString(rest string) (*Node, map[string]bool, error) {
	if rest == "" {
		return nil, nil, nil
	}
	n := &Node{}
	n.Value = rest
	return n, nil, nil
}

// parseJSON converts JSON arrays to an AST.
func parseJSON(rest string) (*Node, map[string]bool, error) {
	var myJson []interface{}
	if err := json.Unmarshal([]byte(rest), &myJson); err != nil {
		return nil, nil, err
	}

	var top, prev *Node
	for _, str := range myJson {
		if s, ok := str.(string); !ok {
			return nil, nil, errDockerfileNotStringArray
		} else {
			node := &Node{Value: s}
			if prev == nil {
				top = node
			} else {
				prev.Next = node
			}
			prev = node
		}
	}

	return top, map[string]bool{"json": true}, nil
}

// parseMaybeJSON determines if the argument appears to be a JSON array. If
// so, passes to parseJSON; if not, quotes the result and returns a single
// node.
func parseMaybeJSON(rest string) (*Node, map[string]bool, error) {
	if rest == "" {
		return nil, nil, nil
	}

	node, attrs, err := parseJSON(rest)

	if err == nil {
		return node, attrs, nil
	}
	if err == errDockerfileNotStringArray {
		return nil, nil, err
	}

	node = &Node{}
	node.Value = rest
	return node, nil, nil
}

// parseMaybeJSONToList determines if the argument appears to be a JSON array. If
// so, passes to parseJSON; if not, attempts to parse it as a whitespace
// delimited string.
func parseMaybeJSONToList(rest string) (*Node, map[string]bool, error) {
	node, attrs, err := parseJSON(rest)

	if err == nil {
		return node, attrs, nil
	}
	if err == errDockerfileNotStringArray {
		return nil, nil, err
	}

	return parseStringsWhitespaceDelimited(rest)
}
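The old/new grammar split in parseNameVal above is the subtle part of this file: `ENV name value` swallows the whole remainder as a single value, while `ENV name=value ...` produces an alternating name/value node chain and keeps quotes verbatim. A hypothetical in-package test sketch (parseNameVal is unexported, so this only runs inside the package; expectations follow the code and the env fixtures below):

package parser

import "testing"

func TestParseNameValSketch(t *testing.T) {
	// Old variant: everything after the first word is one value.
	node, _, err := parseNameVal("name value with spaces", "ENV")
	if err != nil || node.Value != "name" || node.Next.Value != "value with spaces" {
		t.Fatalf("old-form mismatch: %+v (err: %v)", node, err)
	}

	// New variant: alternating name/value nodes; quotes are kept verbatim.
	node, _, err = parseNameVal(`k1=v1 k2="v 2"`, "ENV")
	if err != nil {
		t.Fatal(err)
	}
	want := []string{"k1", "v1", "k2", `"v 2"`}
	for i := 0; node != nil; i, node = i+1, node.Next {
		if i >= len(want) || node.Value != want[i] {
			t.Fatalf("new-form mismatch at %d: %q", i, node.Value)
		}
	}
}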
@@ -1,142 +0,0 @@
// This package implements a parser and parse tree dumper for Dockerfiles.
package parser

import (
	"bufio"
	"io"
	"regexp"
	"strings"
	"unicode"

	"github.com/docker/docker/builder/command"
)

// Node is a structure used to represent a parse tree.
//
// In the node there are three fields, Value, Next, and Children. Value is the
// current token's string value. Next is always the next non-child token, and
// children contains all the children. Here's an example:
//
// (value next (child child-next child-next-next) next-next)
//
// This data structure is frankly pretty lousy for handling complex languages,
// but lucky for us the Dockerfile isn't very complicated. This structure
// works a little more effectively than a "proper" parse tree for our needs.
//
type Node struct {
	Value      string          // actual content
	Next       *Node           // the next item in the current sexp
	Children   []*Node         // the children of this sexp
	Attributes map[string]bool // special attributes for this node
	Original   string          // original line used before parsing
}

var (
	dispatch                map[string]func(string) (*Node, map[string]bool, error)
	TOKEN_WHITESPACE        = regexp.MustCompile(`[\t\v\f\r ]+`)
	TOKEN_LINE_CONTINUATION = regexp.MustCompile(`\\[ \t]*$`)
	TOKEN_COMMENT           = regexp.MustCompile(`^#.*$`)
)

func init() {
	// Dispatch Table. see line_parsers.go for the parse functions.
	// The command is parsed and mapped to the line parser. The line parser
	// receives the arguments but not the command, and returns an AST after
	// reformulating the arguments according to the rules in the parser
	// functions. Errors are propagated up by Parse() and the resulting AST can
	// be incorporated directly into the existing AST as a next.
	dispatch = map[string]func(string) (*Node, map[string]bool, error){
		command.User:       parseString,
		command.Onbuild:    parseSubCommand,
		command.Workdir:    parseString,
		command.Env:        parseEnv,
		command.Label:      parseLabel,
		command.Maintainer: parseString,
		command.From:       parseString,
		command.Add:        parseMaybeJSONToList,
		command.Copy:       parseMaybeJSONToList,
		command.Run:        parseMaybeJSON,
		command.Cmd:        parseMaybeJSON,
		command.Entrypoint: parseMaybeJSON,
		command.Expose:     parseStringsWhitespaceDelimited,
		command.Volume:     parseMaybeJSONToList,
		command.Insert:     parseIgnore,
	}
}

// parse a line and return the remainder.
func parseLine(line string) (string, *Node, error) {
	if line = stripComments(line); line == "" {
		return "", nil, nil
	}

	if TOKEN_LINE_CONTINUATION.MatchString(line) {
		line = TOKEN_LINE_CONTINUATION.ReplaceAllString(line, "")
		return line, nil, nil
	}

	cmd, args, err := splitCommand(line)
	if err != nil {
		return "", nil, err
	}

	node := &Node{}
	node.Value = cmd

	sexp, attrs, err := fullDispatch(cmd, args)
	if err != nil {
		return "", nil, err
	}

	node.Next = sexp
	node.Attributes = attrs
	node.Original = line

	return "", node, nil
}

// The main parse routine. Handles an io.ReadWriteCloser and returns the root
// of the AST.
func Parse(rwc io.Reader) (*Node, error) {
	root := &Node{}
	scanner := bufio.NewScanner(rwc)

	for scanner.Scan() {
		scannedLine := strings.TrimLeftFunc(scanner.Text(), unicode.IsSpace)
		line, child, err := parseLine(scannedLine)
		if err != nil {
			return nil, err
		}

		if line != "" && child == nil {
			for scanner.Scan() {
				newline := scanner.Text()

				if stripComments(strings.TrimSpace(newline)) == "" {
					continue
				}

				line, child, err = parseLine(line + newline)
				if err != nil {
					return nil, err
				}

				if child != nil {
					break
				}
			}
			if child == nil && line != "" {
				line, child, err = parseLine(line)
				if err != nil {
					return nil, err
				}
			}
		}

		if child != nil {
			root.Children = append(root.Children, child)
		}
	}

	return root, nil
}
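Parse plus Node's Dump method are the whole surface used by the dumper tool above and the tests and fixtures that follow; a minimal sketch of driving them directly (the s-expression output format matches the "result" files further down):

package main

import (
	"fmt"
	"strings"

	"github.com/docker/docker/builder/parser"
)

func main() {
	// A small Dockerfile with a line continuation; Parse returns the AST root.
	dockerfile := "FROM busybox\nRUN echo hello \\\nworld\n"

	ast, err := parser.Parse(strings.NewReader(dockerfile))
	if err != nil {
		panic(err)
	}

	// One s-expression per instruction, e.g. (from "busybox") followed by
	// the joined RUN line.
	fmt.Println(ast.Dump())
}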
@@ -1,75 +0,0 @@
package parser

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"
)

const testDir = "testfiles"
const negativeTestDir = "testfiles-negative"

func getDirs(t *testing.T, dir string) []string {
	f, err := os.Open(dir)
	if err != nil {
		t.Fatal(err)
	}

	defer f.Close()

	dirs, err := f.Readdirnames(0)
	if err != nil {
		t.Fatal(err)
	}

	return dirs
}

func TestTestNegative(t *testing.T) {
	for _, dir := range getDirs(t, negativeTestDir) {
		dockerfile := filepath.Join(negativeTestDir, dir, "Dockerfile")

		df, err := os.Open(dockerfile)
		if err != nil {
			t.Fatalf("Dockerfile missing for %s: %v", dir, err)
		}

		_, err = Parse(df)
		if err == nil {
			t.Fatalf("No error parsing broken dockerfile for %s", dir)
		}

		df.Close()
	}
}

func TestTestData(t *testing.T) {
	for _, dir := range getDirs(t, testDir) {
		dockerfile := filepath.Join(testDir, dir, "Dockerfile")
		resultfile := filepath.Join(testDir, dir, "result")

		df, err := os.Open(dockerfile)
		if err != nil {
			t.Fatalf("Dockerfile missing for %s: %v", dir, err)
		}
		defer df.Close()

		ast, err := Parse(df)
		if err != nil {
			t.Fatalf("Error parsing %s's dockerfile: %v", dir, err)
		}

		content, err := ioutil.ReadFile(resultfile)
		if err != nil {
			t.Fatalf("Error reading %s's result file: %v", dir, err)
		}

		if ast.Dump()+"\n" != string(content) {
			fmt.Fprintln(os.Stderr, "Result:\n"+ast.Dump())
			fmt.Fprintln(os.Stderr, "Expected:\n"+string(content))
			t.Fatalf("%s: AST dump of dockerfile does not match result", dir)
		}
	}
}
@@ -1,3 +0,0 @@
FROM busybox

ENV PATH
@@ -1 +0,0 @@
CMD [ "echo", [ "nested json" ] ]
@@ -1,9 +0,0 @@
FROM ubuntu:14.04
MAINTAINER Seongyeol Lim <seongyeol37@gmail.com>

COPY . /go/src/github.com/docker/docker
ADD . /
ADD [ "vimrc", "/tmp" ]
COPY [ "bashrc", "/tmp" ]
COPY [ "test file", "/tmp" ]
ADD [ "test file", "/tmp/test file" ]
@@ -1,8 +0,0 @@
(from "ubuntu:14.04")
(maintainer "Seongyeol Lim <seongyeol37@gmail.com>")
(copy "." "/go/src/github.com/docker/docker")
(add "." "/")
(add "vimrc" "/tmp")
(copy "bashrc" "/tmp")
(copy "test file" "/tmp")
(add "test file" "/tmp/test file")
@@ -1,25 +0,0 @@
FROM brimstone/ubuntu:14.04

MAINTAINER brimstone@the.narro.ws

# TORUN -v /var/run/docker.sock:/var/run/docker.sock

ENV GOPATH /go

# Set our command
ENTRYPOINT ["/usr/local/bin/consuldock"]

# Install the packages we need, clean up after them and us
RUN apt-get update \
&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \
&& apt-get install -y --no-install-recommends git golang ca-certificates \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists \

&& go get -v github.com/brimstone/consuldock \
&& mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \

&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \
&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \
&& rm /tmp/dpkg.* \
&& rm -rf $GOPATH
@@ -1,5 +0,0 @@
(from "brimstone/ubuntu:14.04")
(maintainer "brimstone@the.narro.ws")
(env "GOPATH" "/go")
(entrypoint "/usr/local/bin/consuldock")
(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates && apt-get clean && rm -rf /var/lib/apt/lists \t&& go get -v github.com/brimstone/consuldock && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.* \t&& rm -rf $GOPATH")
@@ -1,52 +0,0 @@
FROM brimstone/ubuntu:14.04

CMD []

ENTRYPOINT ["/usr/bin/consul", "agent", "-server", "-data-dir=/consul", "-client=0.0.0.0", "-ui-dir=/webui"]

EXPOSE 8500 8600 8400 8301 8302

RUN apt-get update \
&& apt-get install -y unzip wget \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists

RUN cd /tmp \
&& wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \
-O web_ui.zip \
&& unzip web_ui.zip \
&& mv dist /webui \
&& rm web_ui.zip

RUN apt-get update \
&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \
&& apt-get install -y --no-install-recommends unzip wget \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists \

&& cd /tmp \
&& wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \
-O web_ui.zip \
&& unzip web_ui.zip \
&& mv dist /webui \
&& rm web_ui.zip \

&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \
&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \
&& rm /tmp/dpkg.*

ENV GOPATH /go

RUN apt-get update \
&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \
&& apt-get install -y --no-install-recommends git golang ca-certificates build-essential \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists \

&& go get -v github.com/hashicorp/consul \
&& mv $GOPATH/bin/consul /usr/bin/consul \

&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \
&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \
&& rm /tmp/dpkg.* \
&& rm -rf $GOPATH
@@ -1,9 +0,0 @@
(from "brimstone/ubuntu:14.04")
(cmd)
(entrypoint "/usr/bin/consul" "agent" "-server" "-data-dir=/consul" "-client=0.0.0.0" "-ui-dir=/webui")
(expose "8500" "8600" "8400" "8301" "8302")
(run "apt-get update && apt-get install -y unzip wget \t&& apt-get clean \t&& rm -rf /var/lib/apt/lists")
(run "cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip")
(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends unzip wget && apt-get clean && rm -rf /var/lib/apt/lists && cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.*")
(env "GOPATH" "/go")
(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates build-essential && apt-get clean && rm -rf /var/lib/apt/lists \t&& go get -v github.com/hashicorp/consul \t&& mv $GOPATH/bin/consul /usr/bin/consul \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.* \t&& rm -rf $GOPATH")
@@ -1,36 +0,0 @@
FROM ubuntu:14.04

RUN echo hello\
world\
goodnight \
moon\
light\
ning
RUN echo hello \
world
RUN echo hello \
world
RUN echo hello \
goodbye\
frog
RUN echo hello \
world
RUN echo hi \
\
world \
\
good\
\
night
RUN echo goodbye\
frog
RUN echo good\
bye\
frog

RUN echo hello \
# this is a comment

# this is a comment with a blank line surrounding it

this is some more useful stuff
@@ -1,10 +0,0 @@
(from "ubuntu:14.04")
(run "echo hello world goodnight moon lightning")
(run "echo hello world")
(run "echo hello world")
(run "echo hello goodbyefrog")
(run "echo hello world")
(run "echo hi world goodnight")
(run "echo goodbyefrog")
(run "echo goodbyefrog")
(run "echo hello this is some more useful stuff")
@@ -1,54 +0,0 @@
FROM cpuguy83/ubuntu
ENV NAGIOS_HOME /opt/nagios
ENV NAGIOS_USER nagios
ENV NAGIOS_GROUP nagios
ENV NAGIOS_CMDUSER nagios
ENV NAGIOS_CMDGROUP nagios
ENV NAGIOSADMIN_USER nagiosadmin
ENV NAGIOSADMIN_PASS nagios
ENV APACHE_RUN_USER nagios
ENV APACHE_RUN_GROUP nagios
ENV NAGIOS_TIMEZONE UTC

RUN sed -i 's/universe/universe multiverse/' /etc/apt/sources.list
RUN apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx
RUN ( egrep -i "^${NAGIOS_GROUP}" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i "^${NAGIOS_CMDGROUP}" /etc/group || groupadd $NAGIOS_CMDGROUP )
RUN ( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER )

ADD http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3 /tmp/nagios.tar.gz
RUN cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf
ADD http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz /tmp/
RUN cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install

RUN sed -i.bak 's/.*\=www\-data//g' /etc/apache2/envvars
RUN export DOC_ROOT="DocumentRoot $(echo $NAGIOS_HOME/share)"; sed -i "s,DocumentRoot.*,$DOC_ROOT," /etc/apache2/sites-enabled/000-default

RUN ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo

RUN echo "use_timezone=$NAGIOS_TIMEZONE" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo "SetEnv TZ \"${NAGIOS_TIMEZONE}\"" >> /etc/apache2/conf.d/nagios.conf

RUN mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs
RUN echo "cfg_dir=${NAGIOS_HOME}/etc/conf.d" >> ${NAGIOS_HOME}/etc/nagios.cfg
RUN echo "cfg_dir=${NAGIOS_HOME}/etc/monitor" >> ${NAGIOS_HOME}/etc/nagios.cfg
RUN download-mibs && echo "mibs +ALL" > /etc/snmp/snmp.conf

RUN sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && \
sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg
RUN cp /etc/services /var/spool/postfix/etc/

RUN mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix
ADD nagios.init /etc/sv/nagios/run
ADD apache.init /etc/sv/apache/run
ADD postfix.init /etc/sv/postfix/run
ADD postfix.stop /etc/sv/postfix/finish

ADD start.sh /usr/local/bin/start_nagios

ENV APACHE_LOCK_DIR /var/run
ENV APACHE_LOG_DIR /var/log/apache2

EXPOSE 80

VOLUME ["/opt/nagios/var", "/opt/nagios/etc", "/opt/nagios/libexec", "/var/log/apache2", "/usr/share/snmp/mibs"]

CMD ["/usr/local/bin/start_nagios"]
@@ -1,40 +0,0 @@
(from "cpuguy83/ubuntu")
(env "NAGIOS_HOME" "/opt/nagios")
(env "NAGIOS_USER" "nagios")
(env "NAGIOS_GROUP" "nagios")
(env "NAGIOS_CMDUSER" "nagios")
(env "NAGIOS_CMDGROUP" "nagios")
(env "NAGIOSADMIN_USER" "nagiosadmin")
(env "NAGIOSADMIN_PASS" "nagios")
(env "APACHE_RUN_USER" "nagios")
(env "APACHE_RUN_GROUP" "nagios")
(env "NAGIOS_TIMEZONE" "UTC")
(run "sed -i 's/universe/universe multiverse/' /etc/apt/sources.list")
(run "apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx")
(run "( egrep -i \"^${NAGIOS_GROUP}\" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i \"^${NAGIOS_CMDGROUP}\" /etc/group || groupadd $NAGIOS_CMDGROUP )")
(run "( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER )")
(add "http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3" "/tmp/nagios.tar.gz")
(run "cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf")
(add "http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz" "/tmp/")
(run "cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install")
(run "sed -i.bak 's/.*\\=www\\-data//g' /etc/apache2/envvars")
(run "export DOC_ROOT=\"DocumentRoot $(echo $NAGIOS_HOME/share)\"; sed -i \"s,DocumentRoot.*,$DOC_ROOT,\" /etc/apache2/sites-enabled/000-default")
(run "ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo")
(run "echo \"use_timezone=$NAGIOS_TIMEZONE\" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo \"SetEnv TZ \\\"${NAGIOS_TIMEZONE}\\\"\" >> /etc/apache2/conf.d/nagios.conf")
(run "mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs")
(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/conf.d\" >> ${NAGIOS_HOME}/etc/nagios.cfg")
(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/monitor\" >> ${NAGIOS_HOME}/etc/nagios.cfg")
(run "download-mibs && echo \"mibs +ALL\" > /etc/snmp/snmp.conf")
(run "sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg")
(run "cp /etc/services /var/spool/postfix/etc/")
(run "mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix")
(add "nagios.init" "/etc/sv/nagios/run")
(add "apache.init" "/etc/sv/apache/run")
(add "postfix.init" "/etc/sv/postfix/run")
(add "postfix.stop" "/etc/sv/postfix/finish")
(add "start.sh" "/usr/local/bin/start_nagios")
(env "APACHE_LOCK_DIR" "/var/run")
(env "APACHE_LOG_DIR" "/var/log/apache2")
(expose "80")
(volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs")
(cmd "/usr/local/bin/start_nagios")
@@ -1,104 +0,0 @@
# This file describes the standard way to build Docker, using docker
#
# Usage:
#
# # Assemble the full dev environment. This is slow the first time.
# docker build -t docker .
#
# # Mount your source in an interactive container for quick testing:
# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash
#
# # Run the test suite:
# docker run --privileged docker hack/make.sh test
#
# # Publish a release:
# docker run --privileged \
# -e AWS_S3_BUCKET=baz \
# -e AWS_ACCESS_KEY=foo \
# -e AWS_SECRET_KEY=bar \
# -e GPG_PASSPHRASE=gloubiboulga \
# docker hack/release.sh
#
# Note: Apparmor used to mess with privileged mode, but this is no longer
# the case. Therefore, you don't have to disable it anymore.
#

FROM ubuntu:14.04
MAINTAINER Tianon Gravi <admwiggin@gmail.com> (@tianon)

# Packaged dependencies
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \
apt-utils \
aufs-tools \
automake \
btrfs-tools \
build-essential \
curl \
dpkg-sig \
git \
iptables \
libapparmor-dev \
libcap-dev \
libsqlite3-dev \
lxc=1.0* \
mercurial \
pandoc \
parallel \
reprepro \
ruby1.9.1 \
ruby1.9.1-dev \
s3cmd=1.1.0* \
--no-install-recommends

# Get lvm2 source for compiling statically
RUN git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103
# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags
# note: we don't use "git clone -b" above because it then spews big nasty warnings about 'detached HEAD' state that we can't silence as easily as we can silence them using "git checkout" directly

# Compile and install lvm2
RUN cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper
# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL

# Install Go
RUN curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz
ENV PATH /usr/local/go/bin:$PATH
ENV GOPATH /go:/go/src/github.com/docker/docker/vendor
RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1

# Compile Go for cross compilation
ENV DOCKER_CROSSPLATFORMS \
linux/386 linux/arm \
darwin/amd64 darwin/386 \
freebsd/amd64 freebsd/386 freebsd/arm
# (set an explicit GOARM of 5 for maximum compatibility)
ENV GOARM 5
RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'

# Grab Go's cover tool for dead-simple code coverage testing
RUN go get golang.org/x/tools/cmd/cover

# TODO replace FPM with some very minimal debhelper stuff
RUN gem install --no-rdoc --no-ri fpm --version 1.0.2

# Get the "busybox" image source so we can build locally instead of pulling
RUN git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox

# Setup s3cmd config
RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg

# Set user.email so crosbymichael's in-container merge commits go smoothly
RUN git config --global user.email 'docker-dummy@example.com'

# Add an unprivileged user to be used for tests which need it
RUN groupadd -r docker
RUN useradd --create-home --gid docker unprivilegeduser

VOLUME /var/lib/docker
WORKDIR /go/src/github.com/docker/docker
ENV DOCKER_BUILDTAGS apparmor selinux

# Wrap all commands in the "docker-in-docker" script to allow nested containers
ENTRYPOINT ["hack/dind"]

# Upload docker source
COPY . /go/src/github.com/docker/docker
@@ -1,24 +0,0 @@
(from "ubuntu:14.04")
(maintainer "Tianon Gravi <admwiggin@gmail.com> (@tianon)")
(run "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \tapt-utils \taufs-tools \tautomake \tbtrfs-tools \tbuild-essential \tcurl \tdpkg-sig \tgit \tiptables \tlibapparmor-dev \tlibcap-dev \tlibsqlite3-dev \tlxc=1.0* \tmercurial \tpandoc \tparallel \treprepro \truby1.9.1 \truby1.9.1-dev \ts3cmd=1.1.0* \t--no-install-recommends")
(run "git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103")
(run "cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper")
(run "curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz")
(env "PATH" "/usr/local/go/bin:$PATH")
(env "GOPATH" "/go:/go/src/github.com/docker/docker/vendor")
(run "cd /usr/local/go/src && ./make.bash --no-clean 2>&1")
(env "DOCKER_CROSSPLATFORMS" "linux/386 linux/arm \tdarwin/amd64 darwin/386 \tfreebsd/amd64 freebsd/386 freebsd/arm")
(env "GOARM" "5")
(run "cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'")
(run "go get golang.org/x/tools/cmd/cover")
(run "gem install --no-rdoc --no-ri fpm --version 1.0.2")
(run "git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox")
(run "/bin/echo -e '[default]\\naccess_key=$AWS_ACCESS_KEY\\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg")
(run "git config --global user.email 'docker-dummy@example.com'")
(run "groupadd -r docker")
(run "useradd --create-home --gid docker unprivilegeduser")
(volume "/var/lib/docker")
(workdir "/go/src/github.com/docker/docker")
(env "DOCKER_BUILDTAGS" "apparmor selinux")
(entrypoint "hack/dind")
(copy "." "/go/src/github.com/docker/docker")
23
builder/parser/testfiles/env/Dockerfile
vendored
@@ -1,23 +0,0 @@
FROM ubuntu
ENV name value
ENV name=value
ENV name=value name2=value2
ENV name="value value1"
ENV name=value\ value2
ENV name="value'quote space'value2"
ENV name='value"double quote"value2'
ENV name=value\ value2 name2=value2\ value3
ENV name="a\"b"
ENV name="a\'b"
ENV name='a\'b'
ENV name='a\'b''
ENV name='a\"b'
ENV name="''"
# don't put anything after the next line - it must be the last line of the
# Dockerfile and it must end with \
ENV name=value \
name1=value1 \
name2="value2a \
value2b" \
name3="value3a\n\"value3b\"" \
name4="value4a\\nvalue4b" \
16
builder/parser/testfiles/env/result
vendored
@@ -1,16 +0,0 @@
(from "ubuntu")
(env "name" "value")
(env "name" "value")
(env "name" "value" "name2" "value2")
(env "name" "\"value value1\"")
(env "name" "value\\ value2")
(env "name" "\"value'quote space'value2\"")
(env "name" "'value\"double quote\"value2'")
(env "name" "value\\ value2" "name2" "value2\\ value3")
(env "name" "\"a\\\"b\"")
(env "name" "\"a\\'b\"")
(env "name" "'a\\'b'")
(env "name" "'a\\'b''")
(env "name" "'a\\\"b'")
(env "name" "\"''\"")
(env "name" "value" "name1" "value1" "name2" "\"value2a value2b\"" "name3" "\"value3a\\n\\\"value3b\\\"\"" "name4" "\"value4a\\\\nvalue4b\"")
@@ -1,14 +0,0 @@
FROM ubuntu:14.04
MAINTAINER Erik \\Hollensbe <erik@hollensbe.org>\"

RUN apt-get \update && \
apt-get \"install znc -y
ADD \conf\\" /.znc

RUN foo \

bar \

baz

CMD [ "\/usr\\\"/bin/znc", "-f", "-r" ]
@@ -1,6 +0,0 @@
(from "ubuntu:14.04")
(maintainer "Erik \\\\Hollensbe <erik@hollensbe.org>\\\"")
(run "apt-get \\update && apt-get \\\"install znc -y")
(add "\\conf\\\\\"" "/.znc")
(run "foo bar baz")
(cmd "/usr\\\"/bin/znc" "-f" "-r")
@@ -1,15 +0,0 @@
FROM ubuntu:14.04

RUN apt-get update && apt-get install wget -y
RUN wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb
RUN dpkg -i influxdb_latest_amd64.deb
RUN rm -r /opt/influxdb/shared

VOLUME /opt/influxdb/shared

CMD /usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml

EXPOSE 8083
EXPOSE 8086
EXPOSE 8090
EXPOSE 8099
@@ -1,11 +0,0 @@
(from "ubuntu:14.04")
(run "apt-get update && apt-get install wget -y")
(run "wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb")
(run "dpkg -i influxdb_latest_amd64.deb")
(run "rm -r /opt/influxdb/shared")
(volume "/opt/influxdb/shared")
(cmd "/usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml")
(expose "8083")
(expose "8086")
(expose "8090")
(expose "8099")
@@ -1 +0,0 @@
CMD "[\"echo\", \"Phew, I just managed to escaped those double quotes\"]"
@@ -1 +0,0 @@
(cmd "\"[\\\"echo\\\", \\\"Phew, I just managed to escaped those double quotes\\\"]\"")
@@ -1 +0,0 @@
CMD '["echo", "Well, JSON in a string is JSON too?"]'
@@ -1 +0,0 @@
(cmd "'[\"echo\", \"Well, JSON in a string is JSON too?\"]'")
@@ -1 +0,0 @@
CMD ['echo','single quotes are invalid JSON']
@@ -1 +0,0 @@
(cmd "['echo','single quotes are invalid JSON']")
@@ -1 +0,0 @@
CMD ["echo", "Please, close the brackets when you're done"
@@ -1 +0,0 @@
(cmd "[\"echo\", \"Please, close the brackets when you're done\"")
@@ -1 +0,0 @@
CMD ["echo", "look ma, no quote!]
@@ -1 +0,0 @@
(cmd "[\"echo\", \"look ma, no quote!]")
@@ -1,8 +0,0 @@
CMD []
CMD [""]
CMD ["a"]
CMD ["a","b"]
CMD [ "a", "b" ]
CMD [ "a", "b" ]
CMD [ "a", "b" ]
CMD ["abc 123", "♥", "☃", "\" \\ \/ \b \f \n \r \t \u0000"]
@@ -1,8 +0,0 @@
(cmd)
(cmd "")
(cmd "a")
(cmd "a" "b")
(cmd "a" "b")
(cmd "a" "b")
(cmd "a" "b")
(cmd "abc 123" "♥" "☃" "\" \\ / \b \f \n \r \t \x00")
@@ -1,7 +0,0 @@
FROM ubuntu:14.04
MAINTAINER James Turnbull "james@example.com"
ENV REFRESHED_AT 2014-06-01
RUN apt-get update
RUN apt-get -y install redis-server redis-tools
EXPOSE 6379
ENTRYPOINT [ "/usr/bin/redis-server" ]
@@ -1,7 +0,0 @@
(from "ubuntu:14.04")
(maintainer "James Turnbull \"james@example.com\"")
(env "REFRESHED_AT" "2014-06-01")
(run "apt-get update")
(run "apt-get -y install redis-server redis-tools")
(expose "6379")
(entrypoint "/usr/bin/redis-server")
@@ -1,48 +0,0 @@
FROM busybox:buildroot-2014.02

MAINTAINER docker <docker@docker.io>

ONBUILD RUN ["echo", "test"]
ONBUILD RUN echo test
ONBUILD COPY . /


# RUN Commands \
# linebreak in comment \
RUN ["ls", "-la"]
RUN ["echo", "'1234'"]
RUN echo "1234"
RUN echo 1234
RUN echo '1234' && \
echo "456" && \
echo 789
RUN sh -c 'echo root:testpass \
> /tmp/passwd'
RUN mkdir -p /test /test2 /test3/test

# ENV \
ENV SCUBA 1 DUBA 3
ENV SCUBA "1 DUBA 3"

# CMD \
CMD ["echo", "test"]
CMD echo test
CMD echo "test"
CMD echo 'test'
CMD echo 'test' | wc -

#EXPOSE\
EXPOSE 3000
EXPOSE 9000 5000 6000

USER docker
USER docker:root

VOLUME ["/test"]
VOLUME ["/test", "/test2"]
VOLUME /test3

WORKDIR /test

ADD . /
COPY . copy
@@ -1,29 +0,0 @@
(from "busybox:buildroot-2014.02")
(maintainer "docker <docker@docker.io>")
(onbuild (run "echo" "test"))
(onbuild (run "echo test"))
(onbuild (copy "." "/"))
(run "ls" "-la")
(run "echo" "'1234'")
(run "echo \"1234\"")
(run "echo 1234")
(run "echo '1234' && echo \"456\" && echo 789")
(run "sh -c 'echo root:testpass > /tmp/passwd'")
(run "mkdir -p /test /test2 /test3/test")
(env "SCUBA" "1 DUBA 3")
(env "SCUBA" "\"1 DUBA 3\"")
(cmd "echo" "test")
(cmd "echo test")
(cmd "echo \"test\"")
(cmd "echo 'test'")
(cmd "echo 'test' | wc -")
(expose "3000")
(expose "9000" "5000" "6000")
(user "docker")
(user "docker:root")
(volume "/test")
(volume "/test" "/test2")
(volume "/test3")
(workdir "/test")
(add "." "/")
(copy "." "copy")
@@ -1,16 +0,0 @@
FROM ubuntu:14.04

RUN apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y
ADD .muttrc /
ADD .offlineimaprc /
ADD .tmux.conf /
ADD mutt /.mutt
ADD vim /.vim
ADD vimrc /.vimrc
ADD crontab /etc/crontab
RUN chmod 644 /etc/crontab
RUN mkdir /Mail
RUN mkdir /.offlineimap
RUN echo "export TERM=screen-256color" >/.zshenv

CMD setsid cron; tmux -2
@@ -1,14 +0,0 @@
(from "ubuntu:14.04")
(run "apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y")
(add ".muttrc" "/")
(add ".offlineimaprc" "/")
(add ".tmux.conf" "/")
(add "mutt" "/.mutt")
(add "vim" "/.vim")
(add "vimrc" "/.vimrc")
(add "crontab" "/etc/crontab")
(run "chmod 644 /etc/crontab")
(run "mkdir /Mail")
(run "mkdir /.offlineimap")
(run "echo \"export TERM=screen-256color\" >/.zshenv")
(cmd "setsid cron; tmux -2")
@@ -1,3 +0,0 @@
FROM foo

VOLUME /opt/nagios/var /opt/nagios/etc /opt/nagios/libexec /var/log/apache2 /usr/share/snmp/mibs
@@ -1,2 +0,0 @@
(from "foo")
(volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs")
@@ -1,7 +0,0 @@
FROM ubuntu:14.04

RUN apt-get update && apt-get install libcap2-bin mumble-server -y

ADD ./mumble-server.ini /etc/mumble-server.ini

CMD /usr/sbin/murmurd
@@ -1,4 +0,0 @@
(from "ubuntu:14.04")
(run "apt-get update && apt-get install libcap2-bin mumble-server -y")
(add "./mumble-server.ini" "/etc/mumble-server.ini")
(cmd "/usr/sbin/murmurd")
@@ -1,14 +0,0 @@
FROM ubuntu:14.04
MAINTAINER Erik Hollensbe <erik@hollensbe.org>

RUN apt-get update && apt-get install nginx-full -y
RUN rm -rf /etc/nginx
ADD etc /etc/nginx
RUN chown -R root:root /etc/nginx
RUN /usr/sbin/nginx -qt
RUN mkdir /www

CMD ["/usr/sbin/nginx"]

VOLUME /www
EXPOSE 80
@@ -1,11 +0,0 @@
(from "ubuntu:14.04")
(maintainer "Erik Hollensbe <erik@hollensbe.org>")
(run "apt-get update && apt-get install nginx-full -y")
(run "rm -rf /etc/nginx")
(add "etc" "/etc/nginx")
(run "chown -R root:root /etc/nginx")
(run "/usr/sbin/nginx -qt")
(run "mkdir /www")
(cmd "/usr/sbin/nginx")
(volume "/www")
(expose "80")
@@ -1,23 +0,0 @@
FROM ubuntu:12.04

EXPOSE 27015
EXPOSE 27005
EXPOSE 26901
EXPOSE 27020

RUN apt-get update && apt-get install libc6-dev-i386 curl unzip -y
RUN mkdir -p /steam
RUN curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam
ADD ./script /steam/script
RUN /steam/steamcmd.sh +runscript /steam/script
RUN curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf
RUN curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf
ADD ./server.cfg /steam/tf2/tf/cfg/server.cfg
ADD ./ctf_2fort.cfg /steam/tf2/tf/cfg/ctf_2fort.cfg
ADD ./sourcemod.cfg /steam/tf2/tf/cfg/sourcemod/sourcemod.cfg
RUN rm -r /steam/tf2/tf/addons/sourcemod/configs
ADD ./configs /steam/tf2/tf/addons/sourcemod/configs
RUN mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en
RUN cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en

CMD cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill
@@ -1,20 +0,0 @@
(from "ubuntu:12.04")
(expose "27015")
(expose "27005")
(expose "26901")
(expose "27020")
(run "apt-get update && apt-get install libc6-dev-i386 curl unzip -y")
(run "mkdir -p /steam")
(run "curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam")
(add "./script" "/steam/script")
(run "/steam/steamcmd.sh +runscript /steam/script")
(run "curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf")
(run "curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf")
(add "./server.cfg" "/steam/tf2/tf/cfg/server.cfg")
(add "./ctf_2fort.cfg" "/steam/tf2/tf/cfg/ctf_2fort.cfg")
(add "./sourcemod.cfg" "/steam/tf2/tf/cfg/sourcemod/sourcemod.cfg")
(run "rm -r /steam/tf2/tf/addons/sourcemod/configs")
(add "./configs" "/steam/tf2/tf/addons/sourcemod/configs")
(run "mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en")
(run "cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en")
(cmd "cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill")
@@ -1,9 +0,0 @@
FROM ubuntu:14.04

RUN apt-get update -qy && apt-get install tmux zsh weechat-curses -y

ADD .weechat /.weechat
ADD .tmux.conf /
RUN echo "export TERM=screen-256color" >/.zshenv

CMD zsh -c weechat
@@ -1,6 +0,0 @@
(from "ubuntu:14.04")
(run "apt-get update -qy && apt-get install tmux zsh weechat-curses -y")
(add ".weechat" "/.weechat")
(add ".tmux.conf" "/")
(run "echo \"export TERM=screen-256color\" >/.zshenv")
(cmd "zsh -c weechat")
Some files were not shown because too many files have changed in this diff