Compare commits


7 Commits

Author  SHA1  Message  Date
Guillaume J. Charmes  38b8373434  Implement the COPY operator within the builder  2013-04-24 14:28:51 -07:00
Guillaume J. Charmes  03b5f8a585  Make sure the destination directory exists when using docker insert  2013-04-24 13:51:28 -07:00
Guillaume J. Charmes  bc260f0225  Add insert command in order to insert external files within an image  2013-04-24 13:37:00 -07:00
Guillaume J. Charmes  45dcd1125b  Add a Builder.Commit method  2013-04-24 13:35:57 -07:00
Guillaume J. Charmes  d2e063d9e1  make builder.Run public it now runs only given arguments without sh -c  2013-04-24 12:31:20 -07:00
Guillaume J. Charmes  567a484b66  Clear the containers/images upon failure  2013-04-24 12:02:00 -07:00
Guillaume J. Charmes  5d4b886ad6  Add build command  2013-04-24 11:03:01 -07:00
1658 changed files with 22423 additions and 242713 deletions

.dockerignore

@@ -1,2 +0,0 @@
bundles
.gopath

.drone.yml

@@ -1,14 +0,0 @@
image: dockercore/docker
env:
- AUTO_GOPATH=1
- DOCKER_GRAPHDRIVER=vfs
- DOCKER_EXECDRIVER=native
script:
# Setup the DockerInDocker environment.
- hack/dind
# Tests relying on StartWithBusybox make Drone time out.
- rm integration-cli/docker_cli_daemon_test.go
- rm integration-cli/docker_cli_exec_test.go
# Validate and test.
- hack/make.sh validate-dco validate-gofmt
- hack/make.sh binary cross test-unit test-integration-cli test-integration

.gitignore (18 changes)

@@ -1,6 +1,3 @@
# Docker project generated files to ignore
# if you want to ignore files created by your editor/tools,
# please consider a global .gitignore https://help.github.com/articles/ignoring-files
.vagrant*
bin
docker/docker
@@ -8,22 +5,13 @@ docker/docker
a.out
*.orig
build_src
command-line-arguments.test
.flymake*
docker.test
auth/auth.test
.idea
.DS_Store
docs/_build
docs/_static
docs/_templates
.gopath/
.dotcloud
*.test
bundles/
.hg/
.git/
vendor/pkg/
pyenv
Vagrantfile
docs/AWS_S3_BUCKET
docs/GIT_BRANCH
docs/VERSION
docs/GITCOMMIT

.mailmap (100 changes)

@@ -1,99 +1,19 @@
# Generate AUTHORS: hack/generate-authors.sh
# Tip for finding duplicates (besides scanning the output of AUTHORS for name
# duplicates that aren't also email duplicates): scan the output of:
# git log --format='%aE - %aN' | sort -uf
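As a concrete sketch of that tip (not part of the file itself), emails that still appear under more than one name can be listed with:
```
# Sketch: print author emails that appear under more than one name;
# each hit usually means a .mailmap entry is missing.
git log --format='%aE %aN' | sort -uf | awk '{print tolower($1)}' | uniq -d
```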
<charles.hooper@dotcloud.com> <chooper@plumata.com>
# Generate AUTHORS: git log --all --format='%aN <%aE>' | sort -uf | grep -v vagrant-ubuntu-12
<charles.hooper@dotcloud.com> <chooper@plumata.com>
<daniel.mizyrycki@dotcloud.com> <daniel@dotcloud.com>
<daniel.mizyrycki@dotcloud.com> <mzdaniel@glidelink.net>
Guillaume J. Charmes <guillaume.charmes@docker.com> <charmes.guillaume@gmail.com>
<guillaume.charmes@docker.com> <guillaume@dotcloud.com>
<guillaume.charmes@docker.com> <guillaume@docker.com>
<guillaume.charmes@docker.com> <guillaume.charmes@dotcloud.com>
<guillaume.charmes@docker.com> <guillaume@charmes.net>
Guillaume J. Charmes <guillaume.charmes@dotcloud.com> creack <charmes.guillaume@gmail.com>
<guillaume.charmes@dotcloud.com> <guillaume@dotcloud.com>
<kencochrane@gmail.com> <KenCochrane@gmail.com>
Thatcher Peskens <thatcher@docker.com>
Thatcher Peskens <thatcher@docker.com> <thatcher@dotcloud.com>
Thatcher Peskens <thatcher@docker.com> dhrp <thatcher@gmx.net>
<sridharr@activestate.com> <github@srid.name>
Thatcher Peskens <thatcher@dotcloud.com> dhrp <thatcher@dotcloud.com>
Thatcher Peskens <thatcher@dotcloud.com> dhrp <thatcher@gmx.net>
Jérôme Petazzoni <jerome.petazzoni@dotcloud.com> jpetazzo <jerome.petazzoni@dotcloud.com>
Jérôme Petazzoni <jerome.petazzoni@dotcloud.com> <jp@enix.org>
Joffrey F <joffrey@docker.com>
Joffrey F <joffrey@docker.com> <joffrey@dotcloud.com>
Joffrey F <joffrey@docker.com> <f.joffrey@gmail.com>
Joffrey F <joffrey@dotcloud.com>
<joffrey@dotcloud.com> <f.joffrey@gmail.com>
Tim Terhorst <mynamewastaken+git@gmail.com>
Andy Smith <github@anarkystic.com>
<kalessin@kalessin.fr> <louis@dotcloud.com>
<victor.vieux@docker.com> <victor.vieux@dotcloud.com>
<victor.vieux@docker.com> <victor@dotcloud.com>
<victor.vieux@docker.com> <dev@vvieux.com>
<victor.vieux@docker.com> <victor@docker.com>
<victor.vieux@docker.com> <vieux@docker.com>
<victor.vieux@dotcloud.com> <victor@dotcloud.com>
<dominik@honnef.co> <dominikh@fork-bomb.org>
<ehanchrow@ine.com> <eric.hanchrow@gmail.com>
Walter Stanish <walter@pratyeka.org>
<daniel@gasienica.ch> <dgasienica@zynga.com>
Roberto Hashioka <roberto_hashioka@hotmail.com>
Konstantin Pelykh <kpelykh@zettaset.com>
David Sissitka <me@dsissitka.com>
Nolan Darilek <nolan@thewordnerd.info>
<mastahyeti@gmail.com> <mastahyeti@users.noreply.github.com>
Benoit Chesneau <bchesneau@gmail.com>
Jordan Arentsen <blissdev@gmail.com>
Daniel Garcia <daniel@danielgarcia.info>
Miguel Angel Fernández <elmendalerenda@gmail.com>
Bhiraj Butala <abhiraj.butala@gmail.com>
Faiz Khan <faizkhan00@gmail.com>
Victor Lyuboslavsky <victor@victoreda.com>
Jean-Baptiste Barth <jeanbaptiste.barth@gmail.com>
Matthew Mueller <mattmuelle@gmail.com>
<mosoni@ebay.com> <mohitsoni1989@gmail.com>
Shih-Yuan Lee <fourdollars@gmail.com>
Daniel Mizyrycki <daniel.mizyrycki@dotcloud.com> root <root@vagrant-ubuntu-12.10.vagrantup.com>
Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
<proppy@google.com> <proppy@aminche.com>
<michael@docker.com> <michael@crosbymichael.com>
<michael@docker.com> <crosby.michael@gmail.com>
<github@developersupport.net> <github@metaliveblog.com>
<brandon@ifup.org> <brandon@ifup.co>
<dano@spotify.com> <daniel.norberg@gmail.com>
<danny@codeaholics.org> <Danny.Yates@mailonline.co.uk>
<gurjeet@singh.im> <singh.gurjeet@gmail.com>
<shawn@churchofgit.com> <shawnlandden@gmail.com>
<sjoerd-github@linuxonly.nl> <sjoerd@byte.nl>
<solomon@docker.com> <solomon.hykes@dotcloud.com>
<solomon@docker.com> <solomon@dotcloud.com>
Sven Dowideit <SvenDowideit@home.org.au>
Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@fosiki.com>
Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@docker.com>
Sven Dowideit <SvenDowideit@home.org.au> <¨SvenDowideit@home.org.au¨>
unclejack <unclejacksons@gmail.com> <unclejack@users.noreply.github.com>
<alexl@redhat.com> <alexander.larsson@gmail.com>
Alexandr Morozov <lk4d4math@gmail.com>
<git.nivoc@neverbox.com> <kuehnle@online.de>
O.S. Tezer <ostezer@gmail.com>
<ostezer@gmail.com> <ostezer@users.noreply.github.com>
Roberto G. Hashioka <roberto.hashioka@docker.com> <roberto_hashioka@hotmail.com>
<justin.p.simonelis@gmail.com> <justin.simonelis@PTS-JSIMON2.toronto.exclamation.com>
<taim@bosboot.org> <maztaim@users.noreply.github.com>
<viktor.vojnovski@amadeus.com> <vojnovski@gmail.com>
<vbatts@redhat.com> <vbatts@hashbangbash.com>
<altsysrq@gmail.com> <iamironbob@gmail.com>
Sridhar Ratnakumar <sridharr@activestate.com>
Sridhar Ratnakumar <sridharr@activestate.com> <github@srid.name>
Liang-Chi Hsieh <viirya@gmail.com>
Aleksa Sarai <cyphar@cyphar.com>
Will Weaver <monkey@buildingbananas.com>
Timothy Hobbs <timothyhobbs@seznam.cz>
Nathan LeClaire <nathan.leclaire@docker.com> <nathan.leclaire@gmail.com>
Nathan LeClaire <nathan.leclaire@docker.com> <nathanleclaire@gmail.com>
<github@hollensbe.org> <erik+github@hollensbe.org>
<github@albersweb.de> <albers@users.noreply.github.com>
<lsm5@fedoraproject.org> <lsm5@redhat.com>
<marc@marc-abramowitz.com> <msabramo@gmail.com>
Matthew Heon <mheon@redhat.com> <mheon@mheonlaptop.redhat.com>
<bernat@luffy.cx> <vincent@bernat.im>
<p@pwaller.net> <peter@scraperwiki.com>
<andrew.weiss@outlook.com> <andrew.weiss@microsoft.com>
Francisco Carriedo <fcarriedo@gmail.com>
<julienbordellier@gmail.com> <git@julienbordellier.com>

.travis.yml

@@ -1,39 +0,0 @@
# Note: right now we don't use go-specific features of travis.
# Later we might automate "go test" etc. (or do it inside a docker container...?)
language: go
go:
# This should match the version in the Dockerfile.
- 1.3.1
# Test against older versions too, just for a little extra retrocompat.
- 1.2
# Let us have pretty experimental Docker-based Travis workers.
# (These spin up much faster than the VM-based ones.)
sudo: false
# Disable the normal go build.
install:
- export DOCKER_BUILDTAGS='exclude_graphdriver_btrfs exclude_graphdriver_devicemapper' # btrfs and devicemapper fail to compile thanks to a couple missing headers (which we can't install thanks to "sudo: false")
- export AUTO_GOPATH=1
# some of Docker's unit tests don't work inside Travis (yet!), so we purge those test files for now
- rm -f daemon/graphdriver/btrfs/*_test.go # fails to compile (missing header)
- rm -f daemon/graphdriver/devmapper/*_test.go # fails to compile (missing header)
- rm -f daemon/execdriver/lxc/*_test.go # fails to run (missing "lxc-start")
- rm -f daemon/graphdriver/aufs/*_test.go # fails to run ("backing file system is unsupported for this graph driver")
- rm -f daemon/graphdriver/vfs/*_test.go # fails to run (not root, which these tests assume "/var/tmp/... no owned by uid 0")
- rm -f daemon/networkdriver/bridge/*_test.go # fails to run ("Failed to initialize network driver")
- rm -f graph/*_test.go # fails to run ("mkdir /tmp/docker-test.../vfs/dir/foo/etc/postgres: permission denied")
- rm -f pkg/mount/*_test.go # fails to run ("permission denied")
before_script:
- env | sort
script:
- hack/make.sh validate-dco
- hack/make.sh validate-gofmt
- DOCKER_CLIENTONLY=1 ./hack/make.sh dynbinary
- ./hack/make.sh dynbinary dyntest-unit
# vim:set sw=2 ts=2:

AUTHORS (566 changes)

@@ -1,597 +1,45 @@
# This file lists all individuals having contributed content to the repository.
# For how it is generated, see `hack/generate-authors.sh`.
Aanand Prasad <aanand.prasad@gmail.com>
Aaron Feng <aaron.feng@gmail.com>
Aaron Huslage <huslage@gmail.com>
Abel Muiño <amuino@gmail.com>
Adam Miller <admiller@redhat.com>
Adam Singer <financeCoding@gmail.com>
Aditya <aditya@netroy.in>
Adrian Mouat <adrian.mouat@gmail.com>
Adrien Folie <folie.adrien@gmail.com>
AJ Bowen <aj@gandi.net>
Al Tobey <al@ooyala.com>
alambike <alambike@gmail.com>
Albert Zhang <zhgwenming@gmail.com>
Aleksa Sarai <cyphar@cyphar.com>
Alex Gaynor <alex.gaynor@gmail.com>
Alex Warhawk <ax.warhawk@gmail.com>
Alexander Larsson <alexl@redhat.com>
Alexander Shopov <ash@kambanaria.org>
Alexandr Morozov <lk4d4math@gmail.com>
Alexey Kotlyarov <alexey@infoxchange.net.au>
Alexey Shamrin <shamrin@gmail.com>
Alexis THOMAS <fr.alexisthomas@gmail.com>
almoehi <almoehi@users.noreply.github.com>
amangoel <amangoel@gmail.com>
AnandkumarPatel <anandkumarpatel@gmail.com>
Andre Dublin <81dublin@gmail.com>
Andrea Luzzardi <aluzzardi@gmail.com>
Andrea Turli <andrea.turli@gmail.com>
Andreas Savvides <andreas@editd.com>
Andreas Tiefenthaler <at@an-ti.eu>
Andrew Duckworth <grillopress@gmail.com>
Andrew France <andrew@avito.co.uk>
Andrew Macgregor <andrew.macgregor@agworld.com.au>
Andrew Munsell <andrew@wizardapps.net>
Andrew Weiss <andrew.weiss@outlook.com>
Andrew Williams <williams.andrew@gmail.com>
Andrews Medina <andrewsmedina@gmail.com>
Andy Chambers <anchambers@paypal.com>
andy diller <dillera@gmail.com>
Andy Goldstein <agoldste@redhat.com>
Andy Kipp <andy@rstudio.com>
Andy Rothfusz <github@developersupport.net>
Andy Rothfusz <github@metaliveblog.com>
Andy Smith <github@anarkystic.com>
Anthony Bishopric <git@anthonybishopric.com>
Anton Löfgren <anton.lofgren@gmail.com>
Anton Nikitin <anton.k.nikitin@gmail.com>
Antony Messerli <amesserl@rackspace.com>
apocas <petermdias@gmail.com>
Arnaud Porterie <icecrime@gmail.com>
Asbjørn Enge <asbjorn@hanafjedle.net>
Barnaby Gray <barnaby@pickle.me.uk>
Barry Allard <barry.allard@gmail.com>
Bartłomiej Piotrowski <b@bpiotrowski.pl>
bdevloed <boris.de.vloed@gmail.com>
Ben Firshman <ben@firshman.co.uk>
Ben Sargent <ben@brokendigits.com>
Ben Toews <mastahyeti@gmail.com>
Ben Wiklund <ben@daisyowl.com>
Benjamin Atkin <ben@benatkin.com>
Benoit Chesneau <bchesneau@gmail.com>
Bernerd Schaefer <bj.schaefer@gmail.com>
Bhiraj Butala <abhiraj.butala@gmail.com>
bin liu <liubin0329@users.noreply.github.com>
Bouke Haarsma <bouke@webatoom.nl>
Boyd Hemphill <boyd@feedmagnet.com>
Brandon Liu <bdon@bdon.org>
Brandon Philips <brandon@ifup.org>
Brandon Rhodes <brandon@rhodesmill.org>
Brett Kochendorfer <brett.kochendorfer@gmail.com>
Brian (bex) Exelbierd <bexelbie@redhat.com>
Brian Dorsey <brian@dorseys.org>
Brian Flad <bflad417@gmail.com>
Brian Goff <cpuguy83@gmail.com>
Brian McCallister <brianm@skife.org>
Brian Olsen <brian@maven-group.org>
Brian Shumate <brian@couchbase.com>
Brice Jaglin <bjaglin@teads.tv>
Briehan Lombaard <briehan.lombaard@gmail.com>
Bruno Bigras <bigras.bruno@gmail.com>
Bruno Renié <brutasse@gmail.com>
Bryan Bess <squarejaw@bsbess.com>
Bryan Matsuo <bryan.matsuo@gmail.com>
Bryan Murphy <bmurphy1976@gmail.com>
Caleb Spare <cespare@gmail.com>
Calen Pennington <cale@edx.org>
Cameron Boehmer <cameron.boehmer@gmail.com>
Carl X. Su <bcbcarl@gmail.com>
Charles Hooper <charles.hooper@dotcloud.com>
Charles Lindsay <chaz@chazomatic.us>
Charles Merriam <charles.merriam@gmail.com>
Charlie Lewis <charliel@lab41.org>
Chewey <prosto-chewey@users.noreply.github.com>
Chia-liang Kao <clkao@clkao.org>
Chris Alfonso <calfonso@redhat.com>
Chris Snow <chsnow123@gmail.com>
Chris St. Pierre <chris.a.st.pierre@gmail.com>
chrismckinnel <chris.mckinnel@tangentlabs.co.uk>
Christian Berendt <berendt@b1-systems.de>
ChristoperBiscardi <biscarch@sketcht.com>
Christophe Troestler <christophe.Troestler@umons.ac.be>
Christopher Currie <codemonkey+github@gmail.com>
Christopher Rigor <crigor@gmail.com>
Ciro S. Costa <ciro.costa@usp.br>
Clayton Coleman <ccoleman@redhat.com>
Colin Dunklau <colin.dunklau@gmail.com>
Colin Rice <colin@daedrum.net>
Colin Walters <walters@verbum.org>
Cory Forsyth <cory.forsyth@gmail.com>
cpuguy83 <cpuguy83@gmail.com>
cressie176 <github@stephen-cresswell.net>
Cruceru Calin-Cristian <crucerucalincristian@gmail.com>
Daan van Berkel <daan.v.berkel.1980@gmail.com>
Dafydd Crosby <dtcrsby@gmail.com>
Dan Buch <d.buch@modcloth.com>
Dan Hirsch <thequux@upstandinghackers.com>
Dan Keder <dan.keder@gmail.com>
Dan McPherson <dmcphers@redhat.com>
Dan Stine <sw@stinemail.com>
Dan Walsh <dwalsh@redhat.com>
Dan Williams <me@deedubs.com>
Daniel Exner <dex@dragonslave.de>
Daniel Garcia <daniel@danielgarcia.info>
Daniel Gasienica <daniel@gasienica.ch>
Daniel Mizyrycki <daniel.mizyrycki@dotcloud.com>
Daniel Norberg <dano@spotify.com>
Daniel Nordberg <dnordberg@gmail.com>
Daniel Robinson <gottagetmac@gmail.com>
Daniel Von Fange <daniel@leancoder.com>
Daniel YC Lin <dlin.tw@gmail.com>
Daniel, Dao Quang Minh <dqminh89@gmail.com>
Danny Berger <dpb587@gmail.com>
Danny Yates <danny@codeaholics.org>
Darren Coxall <darren@darrencoxall.com>
Darren Shepherd <darren.s.shepherd@gmail.com>
David Anderson <dave@natulte.net>
David Calavera <david.calavera@gmail.com>
David Corking <dmc-source@dcorking.com>
David Gageot <david@gageot.net>
David Mcanulty <github@hellspark.com>
David Röthlisberger <david@rothlis.net>
David Sissitka <me@dsissitka.com>
Deni Bertovic <deni@kset.org>
Derek <crq@kernel.org>
Deric Crago <deric.crago@gmail.com>
Dinesh Subhraveti <dineshs@altiscale.com>
Djibril Koné <kone.djibril@gmail.com>
dkumor <daniel@dkumor.com>
Dmitry Demeshchuk <demeshchuk@gmail.com>
Dolph Mathews <dolph.mathews@gmail.com>
Dominik Honnef <dominik@honnef.co>
Don Spaulding <donspauldingii@gmail.com>
Doug Davis <dug@us.ibm.com>
doug tangren <d.tangren@gmail.com>
Dr Nic Williams <drnicwilliams@gmail.com>
Dražen Lučanin <kermit666@gmail.com>
Dustin Sallings <dustin@spy.net>
Edmund Wagner <edmund-wagner@web.de>
Eiichi Tsukata <devel@etsukata.com>
Eivind Uggedal <eivind@uggedal.com>
Elias Probst <mail@eliasprobst.eu>
Emil Hernvall <emil@quench.at>
Emily Rose <emily@contactvibe.com>
Eric Hanchrow <ehanchrow@ine.com>
Eric Lee <thenorthsecedes@gmail.com>
Eric Myhre <hash@exultant.us>
Eric Windisch <eric@windisch.us>
Eric Windisch <ewindisch@docker.com>
Erik Hollensbe <github@hollensbe.org>
Erik Inge Bolsø <knan@redpill-linpro.com>
Erno Hopearuoho <erno.hopearuoho@gmail.com>
eugenkrizo <eugen.krizo@gmail.com>
Evan Hazlett <ejhazlett@gmail.com>
Evan Krall <krall@yelp.com>
Evan Phoenix <evan@fallingsnow.net>
Evan Wies <evan@neomantra.net>
evanderkoogh <info@erronis.nl>
Eystein Måløy Stenberg <eystein.maloy.stenberg@cfengine.com>
ezbercih <cem.ezberci@gmail.com>
Fabio Falci <fabiofalci@gmail.com>
Fabio Rehm <fgrehm@gmail.com>
Fabrizio Regini <freegenie@gmail.com>
Faiz Khan <faizkhan00@gmail.com>
Fareed Dudhia <fareeddudhia@googlemail.com>
Felix Rabe <felix@rabe.io>
Fernando <fermayo@gmail.com>
Flavio Castelli <fcastelli@suse.com>
FLGMwt <ryan.stelly@live.com>
Francisco Carriedo <fcarriedo@gmail.com>
Francisco Souza <f@souza.cc>
Frank Macreery <frank@macreery.com>
Fred Lifton <fred.lifton@docker.com>
Frederick F. Kautz IV <fkautz@alumni.cmu.edu>
Frederik Loeffert <frederik@zitrusmedia.de>
Freek Kalter <freek@kalteronline.org>
Gabe Rosenhouse <gabe@missionst.com>
Gabor Nagy <mail@aigeruth.hu>
Gabriel Monroy <gabriel@opdemand.com>
Galen Sampson <galen.sampson@gmail.com>
Gareth Rushgrove <gareth@morethanseven.net>
Geoffrey Bachelet <grosfrais@gmail.com>
Gereon Frey <gereon.frey@dynport.de>
German DZ <germ@ndz.com.ar>
Gert van Valkenhoef <g.h.m.van.valkenhoef@rug.nl>
Giuseppe Mazzotta <gdm85@users.noreply.github.com>
Gleb Fotengauer-Malinovskiy <glebfm@altlinux.org>
Glyn Normington <gnormington@gopivotal.com>
Goffert van Gool <goffert@phusion.nl>
Graydon Hoare <graydon@pobox.com>
Greg Thornton <xdissent@me.com>
grunny <mwgrunny@gmail.com>
Guilherme Salgado <gsalgado@gmail.com>
Guillaume J. Charmes <guillaume.charmes@docker.com>
Gurjeet Singh <gurjeet@singh.im>
Guruprasad <lgp171188@gmail.com>
Harald Albers <github@albersweb.de>
Harley Laue <losinggeneration@gmail.com>
Hector Castro <hectcastro@gmail.com>
Henning Sprang <henning.sprang@gmail.com>
Hobofan <goisser94@gmail.com>
Hollie Teal <hollie.teal@docker.com>
Hollie Teal <hollietealok@users.noreply.github.com>
hollietealok <hollie@docker.com>
Guillaume J. Charmes <guillaume.charmes@dotcloud.com>
Hunter Blanks <hunter@twilio.com>
hyeongkyu.lee <hyeongkyu.lee@navercorp.com>
Ian Babrou <ibobrik@gmail.com>
Ian Bull <irbull@gmail.com>
Ian Main <imain@redhat.com>
Ian Truslove <ian.truslove@gmail.com>
ILYA Khlopotov <ilya.khlopotov@gmail.com>
inglesp <peter.inglesby@gmail.com>
Isaac Dupree <antispam@idupree.com>
Isabel Jimenez <contact.isabeljimenez@gmail.com>
Isao Jonas <isao.jonas@gmail.com>
Ivan Fraixedes <ifcdev@gmail.com>
Jack Danger Canty <jackdanger@squareup.com>
Jake Moshenko <jake@devtable.com>
jakedt <jake@devtable.com>
James Allen <jamesallen0108@gmail.com>
James Carr <james.r.carr@gmail.com>
James DeFelice <james.defelice@ishisystems.com>
James Harrison Fisher <jameshfisher@gmail.com>
James Kyle <james@jameskyle.org>
James Mills <prologic@shortcircuit.net.au>
James Turnbull <james@lovedthanlost.net>
Jan Pazdziora <jpazdziora@redhat.com>
Jan Toebes <jan@toebes.info>
Jaroslaw Zabiello <hipertracker@gmail.com>
jaseg <jaseg@jaseg.net>
Jason Giedymin <jasong@apache.org>
Jason Hall <imjasonh@gmail.com>
Jason Livesay <ithkuil@gmail.com>
Jason McVetta <jason.mcvetta@gmail.com>
Jason Plum <jplum@devonit.com>
Jean-Baptiste Barth <jeanbaptiste.barth@gmail.com>
Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
Jeff Lindsay <progrium@gmail.com>
Jeff Welch <whatthejeff@gmail.com>
Jeffrey Bolle <jeffreybolle@gmail.com>
Jeremy Grosser <jeremy@synack.me>
Jesse Dubay <jesse@thefortytwo.net>
Jezeniel Zapanta <jpzapanta22@gmail.com>
Jilles Oldenbeuving <ojilles@gmail.com>
Jim Alateras <jima@comware.com.au>
Jim Perrin <jperrin@centos.org>
Jimmy Cuadra <jimmy@jimmycuadra.com>
Jiří Župka <jzupka@redhat.com>
Joe Beda <joe.github@bedafamily.com>
Joe Shaw <joe@joeshaw.org>
Joe Van Dyk <joe@tanga.com>
Joel Handwell <joelhandwell@gmail.com>
Joffrey F <joffrey@docker.com>
Johan Euphrosine <proppy@google.com>
Johan Rydberg <johan.rydberg@gmail.com>
Johannes 'fish' Ziemke <github@freigeist.org>
Joffrey F <joffrey@dotcloud.com>
John Costa <john.costa@gmail.com>
John Feminella <jxf@jxf.me>
John Gardiner Myers <jgmyers@proofpoint.com>
John OBrien III <jobrieniii@yahoo.com>
John Warwick <jwarwick@gmail.com>
Jon Wedaman <jweede@gmail.com>
Jonas Pfenniger <jonas@pfenniger.name>
Jonathan Boulle <jonathanboulle@gmail.com>
Jonathan Camp <jonathan@irondojo.com>
Jonathan McCrohan <jmccrohan@gmail.com>
Jonathan Mueller <j.mueller@apoveda.ch>
Jonathan Pares <jonathanpa@users.noreply.github.com>
Jonathan Rudenberg <jonathan@titanous.com>
Joost Cassee <joost@cassee.net>
Jordan Arentsen <blissdev@gmail.com>
Jordan Sissel <jls@semicomplete.com>
Joseph Anthony Pasquale Holsten <joseph@josephholsten.com>
Joseph Hager <ajhager@gmail.com>
Josh <jokajak@gmail.com>
Josh Hawn <josh.hawn@docker.com>
Josh Poimboeuf <jpoimboe@redhat.com>
JP <jpellerin@leapfrogonline.com>
Julien Barbier <write0@gmail.com>
Julien Bordellier <julienbordellier@gmail.com>
Julien Dubois <julien.dubois@gmail.com>
Justin Force <justin.force@gmail.com>
Justin Plock <jplock@users.noreply.github.com>
Justin Simonelis <justin.p.simonelis@gmail.com>
Jérôme Petazzoni <jerome.petazzoni@dotcloud.com>
Karan Lyons <karan@karanlyons.com>
Karl Grzeszczak <karlgrz@gmail.com>
Kato Kazuyoshi <kato.kazuyoshi@gmail.com>
Kawsar Saiyeed <kawsar.saiyeed@projiris.com>
Keli Hu <dev@keli.hu>
Ken Cochrane <kencochrane@gmail.com>
Ken ICHIKAWA <ichikawa.ken@jp.fujitsu.com>
Kevin "qwazerty" Houdebert <kevin.houdebert@gmail.com>
Kevin Clark <kevin.clark@gmail.com>
Kevin J. Lynagh <kevin@keminglabs.com>
Kevin Menard <kevin@nirvdrum.com>
Kevin Wallace <kevin@pentabarf.net>
Keyvan Fatehi <keyvanfatehi@gmail.com>
kies <lleelm@gmail.com>
Kim BKC Carlbacker <kim.carlbacker@gmail.com>
kim0 <email.ahmedkamal@googlemail.com>
Kimbro Staken <kstaken@kstaken.com>
Kiran Gangadharan <kiran.daredevil@gmail.com>
knappe <tyler.knappe@gmail.com>
Kohei Tsuruta <coheyxyz@gmail.com>
Konstantin Pelykh <kpelykh@zettaset.com>
Kyle Conroy <kyle.j.conroy@gmail.com>
kyu <leehk1227@gmail.com>
Lachlan Coote <lcoote@vmware.com>
lalyos <lalyos@yahoo.com>
Lance Chen <cyen0312@gmail.com>
Lars R. Damerow <lars@pixar.com>
Laurie Voss <github@seldo.com>
leeplay <hyeongkyu.lee@navercorp.com>
Len Weincier <len@cloudafrica.net>
Levi Gross <levi@levigross.com>
Lewis Peckover <lew+github@lew.io>
Liang-Chi Hsieh <viirya@gmail.com>
Lokesh Mandvekar <lsm5@fedoraproject.org>
Louis Opter <kalessin@kalessin.fr>
lukaspustina <lukas.pustina@centerdevice.com>
lukemarsden <luke@digital-crocus.com>
Mahesh Tiyyagura <tmahesh@gmail.com>
Manfred Zabarauskas <manfredas@zabarauskas.com>
Manuel Meurer <manuel@krautcomputing.com>
Manuel Woelker <github@manuel.woelker.org>
Marc Abramowitz <marc@marc-abramowitz.com>
Marc Kuo <kuomarc2@gmail.com>
Marc Tamsky <mtamsky@gmail.com>
Marco Hennings <marco.hennings@freiheit.com>
Marcus Farkas <toothlessgear@finitebox.com>
Marcus Ramberg <marcus@nordaaker.com>
marcuslinke <marcus.linke@gmx.de>
Marek Goldmann <marek.goldmann@gmail.com>
Marius Voila <marius.voila@gmail.com>
Mark Allen <mrallen1@yahoo.com>
Mark McGranaghan <mmcgrana@gmail.com>
Marko Mikulicic <mmikulicic@gmail.com>
Markus Fix <lispmeister@gmail.com>
Martijn van Oosterhout <kleptog@svana.org>
Martin Redmond <martin@tinychat.com>
Mason Malone <mason.malone@gmail.com>
Mateusz Sulima <sulima.mateusz@gmail.com>
Mathieu Le Marec - Pasquet <kiorky@cryptelium.net>
Matt Apperson <me@mattapperson.com>
Matt Bachmann <bachmann.matt@gmail.com>
Matt Haggard <haggardii@gmail.com>
Matthew Heon <mheon@redhat.com>
Matthew Mueller <mattmuelle@gmail.com>
Matthias Klumpp <matthias@tenstral.net>
Matthias Kühnle <git.nivoc@neverbox.com>
mattymo <raytrac3r@gmail.com>
mattyw <mattyw@me.com>
Max Shytikov <mshytikov@gmail.com>
Maxim Treskin <zerthurd@gmail.com>
Maxime Petazzoni <max@signalfuse.com>
meejah <meejah@meejah.ca>
Michael Brown <michael@netdirect.ca>
Michael Crosby <michael@docker.com>
Michael Gorsuch <gorsuch@github.com>
Michael Neale <michael.neale@gmail.com>
Michael Prokop <github@michael-prokop.at>
Michael Stapelberg <michael+gh@stapelberg.de>
Michaël Pailloncy <mpapo.dev@gmail.com>
Michiel@unhosted <michiel@unhosted.org>
Miguel Angel Fernández <elmendalerenda@gmail.com>
Mike Chelen <michael.chelen@gmail.com>
Mike Gaffney <mike@uberu.com>
Mike MacCana <mike.maccana@gmail.com>
Mike Naberezny <mike@naberezny.com>
Mike Snitzer <snitzer@redhat.com>
Mikhail Sobolev <mss@mawhrin.net>
Mohit Soni <mosoni@ebay.com>
Morgante Pell <morgante.pell@morgante.net>
Morten Siebuhr <sbhr@sbhr.dk>
Mrunal Patel <mrunalp@gmail.com>
Nan Monnand Deng <monnand@gmail.com>
Naoki Orii <norii@cs.cmu.edu>
Nate Jones <nate@endot.org>
Nathan Kleyn <nathan@nathankleyn.com>
Nathan LeClaire <nathan.leclaire@docker.com>
Nelson Chen <crazysim@gmail.com>
Niall O'Higgins <niallo@unworkable.org>
Nick Payne <nick@kurai.co.uk>
Nick Stenning <nick.stenning@digital.cabinet-office.gov.uk>
Nick Stinemates <nick@stinemates.org>
Nicolas Dudebout <nicolas.dudebout@gatech.edu>
Nicolas Kaiser <nikai@nikai.net>
NikolaMandic <mn080202@gmail.com>
noducks <onemannoducks@gmail.com>
Nolan Darilek <nolan@thewordnerd.info>
O.S. Tezer <ostezer@gmail.com>
OddBloke <daniel@daniel-watkins.co.uk>
odk- <github@odkurzacz.org>
Oguz Bilgic <fisyonet@gmail.com>
Ole Reifschneider <mail@ole-reifschneider.de>
Olivier Gambier <dmp42@users.noreply.github.com>
pandrew <letters@paulnotcom.se>
Pascal Borreli <pascal@borreli.com>
Patrick Hemmer <patrick.hemmer@gmail.com>
pattichen <craftsbear@gmail.com>
Paul <paul9869@gmail.com>
Paul Annesley <paul@annesley.cc>
Paul Bowsher <pbowsher@globalpersonals.co.uk>
Paul Hammond <paul@paulhammond.org>
Paul Jimenez <pj@place.org>
Paul Lietar <paul@lietar.net>
Paul Morie <pmorie@gmail.com>
Paul Nasrat <pnasrat@gmail.com>
Paul Weaver <pauweave@cisco.com>
Peter Bourgon <peter@bourgon.org>
Peter Braden <peterbraden@peterbraden.co.uk>
Peter Waller <p@pwaller.net>
Phil <underscorephil@gmail.com>
Phil Spitler <pspitler@gmail.com>
Phillip Alexander <git@phillipalexander.io>
Piergiuliano Bossi <pgbossi@gmail.com>
Pierre-Alain RIVIERE <pariviere@ippon.fr>
Piotr Bogdan <ppbogdan@gmail.com>
pysqz <randomq@126.com>
Quentin Brossard <qbrossard@gmail.com>
r0n22 <cameron.regan@gmail.com>
Rafal Jeczalik <rjeczalik@gmail.com>
Rajat Pandit <rp@rajatpandit.com>
Rajdeep Dua <dua_rajdeep@yahoo.com>
Ralph Bean <rbean@redhat.com>
Ramkumar Ramachandra <artagnon@gmail.com>
Ramon van Alteren <ramon@vanalteren.nl>
Renato Riccieri Santos Zannon <renato.riccieri@gmail.com>
rgstephens <greg@udon.org>
Rhys Hiltner <rhys@twitch.tv>
Richard Harvey <richard@squarecows.com>
Richo Healey <richo@psych0tik.net>
Rick Bradley <rick@users.noreply.github.com>
Rick van de Loo <rickvandeloo@gmail.com>
Robert Bachmann <rb@robertbachmann.at>
Robert Obryk <robryk@gmail.com>
Roberto G. Hashioka <roberto.hashioka@docker.com>
Robin Speekenbrink <robin@kingsquare.nl>
robpc <rpcann@gmail.com>
Rodrigo Vaz <rodrigo.vaz@gmail.com>
Roel Van Nyen <roel.vannyen@gmail.com>
Roger Peppe <rogpeppe@gmail.com>
Rohit Jnagal <jnagal@google.com>
Roland Huß <roland@jolokia.org>
Roland Moriz <rmoriz@users.noreply.github.com>
Ron Smits <ron.smits@gmail.com>
Rovanion Luckey <rovanion.luckey@gmail.com>
Rudolph Gottesheim <r.gottesheim@loot.at>
Ryan Anderson <anderson.ryanc@gmail.com>
Ryan Aslett <github@mixologic.com>
Ryan Fowler <rwfowler@gmail.com>
Ryan O'Donnell <odonnellryanc@gmail.com>
Ryan Seto <ryanseto@yak.net>
Ryan Thomas <rthomas@atlassian.com>
s-ko <aleks@s-ko.net>
Sam Alba <sam.alba@gmail.com>
Sam Bailey <cyprix@cyprix.com.au>
Sam J Sharpe <sam.sharpe@digital.cabinet-office.gov.uk>
Sam Reis <sreis@atlassian.com>
Sam Rijs <srijs@airpost.net>
Samuel Andaya <samuel@andaya.net>
satoru <satorulogic@gmail.com>
Satoshi Amemiya <satoshi_amemiya@voyagegroup.com>
Scott Bessler <scottbessler@gmail.com>
Scott Collier <emailscottcollier@gmail.com>
Sean Cronin <seancron@gmail.com>
Sean P. Kane <skane@newrelic.com>
Sebastiaan van Stijn <github@gone.nl>
Sebastiaan van Stijn <thaJeztah@users.noreply.github.com>
Senthil Kumar Selvaraj <senthil.thecoder@gmail.com>
SeongJae Park <sj38.park@gmail.com>
Shane Canon <scanon@lbl.gov>
shaunol <shaunol@gmail.com>
Shawn Landden <shawn@churchofgit.com>
Shawn Siefkas <shawn.siefkas@meredith.com>
Shih-Yuan Lee <fourdollars@gmail.com>
Silas Sewell <silas@sewell.org>
Simon Taranto <simon.taranto@gmail.com>
Sindhu S <sindhus@live.in>
Sjoerd Langkemper <sjoerd-github@linuxonly.nl>
Solomon Hykes <solomon@docker.com>
Song Gao <song@gao.io>
Soulou <leo@unbekandt.eu>
soulshake <amy@gandi.net>
Sridatta Thatipamala <sthatipamala@gmail.com>
Solomon Hykes <solomon@dotcloud.com>
Sridhar Ratnakumar <sridharr@activestate.com>
Steeve Morin <steeve.morin@gmail.com>
Stefan Praszalowicz <stefan@greplin.com>
Stephen Crosby <stevecrozz@gmail.com>
Steven Burgess <steven.a.burgess@hotmail.com>
sudosurootdev <sudosurootdev@gmail.com>
Sven Dowideit <svendowideit@home.org.au>
Sylvain Bellemare <sylvain.bellemare@ezeep.com>
Sébastien <sebastien@yoozio.com>
Sébastien Luttringer <seblu@seblu.net>
Sébastien Stormacq <sebsto@users.noreply.github.com>
tang0th <tang0th@gmx.com>
Tatsuki Sugiura <sugi@nemui.org>
Tehmasp Chaudhri <tehmasp@gmail.com>
Thatcher Peskens <thatcher@docker.com>
Thermionix <bond711@gmail.com>
Thijs Terlouw <thijsterlouw@gmail.com>
Thomas Bikeev <thomas.bikeev@mac.com>
Thomas Frössman <thomasf@jossystem.se>
Thomas Hansen <thomas.hansen@gmail.com>
Thomas LEVEIL <thomasleveil@gmail.com>
Thomas Schroeter <thomas@cliqz.com>
Tianon Gravi <admwiggin@gmail.com>
Tibor Vass <teabee89@gmail.com>
Tim Bosse <taim@bosboot.org>
Tim Ruffles <oi@truffles.me.uk>
Tim Ruffles <timruffles@googlemail.com>
Thatcher Peskens <thatcher@dotcloud.com>
Tim Terhorst <mynamewastaken+git@gmail.com>
Timothy Hobbs <timothyhobbs@seznam.cz>
tjmehta <tj@init.me>
Tobias Bieniek <Tobias.Bieniek@gmx.de>
Tobias Gesellchen <tobias@gesellix.de>
Tobias Schmidt <ts@soundcloud.com>
Tobias Schwab <tobias.schwab@dynport.de>
Todd Lunter <tlunter@gmail.com>
Tom Fotherby <tom+github@peopleperhour.com>
Tom Hulihan <hulihan.tom159@gmail.com>
Tom Maaswinkel <tom.maaswinkel@12wiki.eu>
Tommaso Visconti <tommaso.visconti@gmail.com>
Tony Daws <tony@daws.ca>
tpng <benny.tpng@gmail.com>
Travis Cline <travis.cline@gmail.com>
Trent Ogren <tedwardo2@gmail.com>
Tyler Brock <tyler.brock@gmail.com>
Tzu-Jung Lee <roylee17@gmail.com>
Ulysse Carion <ulyssecarion@gmail.com>
Troy Howard <thoward37@gmail.com>
unclejack <unclejacksons@gmail.com>
vgeta <gopikannan.venugopalsamy@gmail.com>
Victor Coisne <victor.coisne@dotcloud.com>
Victor Lyuboslavsky <victor@victoreda.com>
Victor Marmol <vmarmol@google.com>
Victor Vieux <victor.vieux@docker.com>
Viktor Vojnovski <viktor.vojnovski@amadeus.com>
Vincent Batts <vbatts@redhat.com>
Vincent Bernat <bernat@luffy.cx>
Vincent Mayers <vincent.mayers@inbloom.org>
Vincent Woo <me@vincentwoo.com>
Vinod Kulkarni <vinod.kulkarni@gmail.com>
Vishnu Kannan <vishnuk@google.com>
Vitor Monteiro <vmrmonteiro@gmail.com>
Victor Vieux <victor.vieux@dotcloud.com>
Vivek Agarwal <me@vivek.im>
Vladimir Bulyga <xx@ccxx.cc>
Vladimir Kirillov <proger@wilab.org.ua>
Vladimir Rutsky <altsysrq@gmail.com>
waitingkuo <waitingkuo0527@gmail.com>
Walter Leibbrandt <github@wrl.co.za>
Walter Stanish <walter@pratyeka.org>
WarheadsSE <max@warheads.net>
Wes Morgan <cap10morgan@gmail.com>
Will Dietz <w@wdtz.org>
Will Rouesnel <w.rouesnel@gmail.com>
Will Weaver <monkey@buildingbananas.com>
William Delanoue <william.delanoue@gmail.com>
William Henry <whenry@redhat.com>
William Riancho <wr.wllm@gmail.com>
William Thurston <thurstw@amazon.com>
wyc <wayne@neverfear.org>
Xiuming Chen <cc@cxm.cc>
Yang Bai <hamo.by@gmail.com>
Yasunori Mahata <nori@mahata.net>
Yurii Rashkovskii <yrashk@gmail.com>
Zac Dover <zdover@redhat.com>
Zain Memon <zain@inzain.net>
Zaiste! <oh@zaiste.net>
Zane DeGraffenried <zane.deg@gmail.com>
Zilin Du <zilin.du@gmail.com>
zimbatm <zimbatm@zimbatm.com>
Zoltan Tombol <zoltan.tombol@gmail.com>
zqh <zqhxuyuan@gmail.com>
Álvaro Lázaro <alvaro.lazaro.g@gmail.com>

File diff suppressed because it is too large.

CONTRIBUTING.md

@@ -1,61 +1,10 @@
# Contributing to Docker
Want to hack on Docker? Awesome! Here are instructions to get you
started. They are probably not perfect, please let us know if anything
feels wrong or incomplete.
Want to hack on Docker? Awesome! There are instructions to get you
started on the website: http://docker.io/gettingstarted.html
## Topics
* [Security Reports](#security-reports)
* [Design and Cleanup Proposals](#design-and-cleanup-proposals)
* [Reporting Issues](#reporting-issues)
* [Build Environment](#build-environment)
* [Contribution Guidelines](#contribution-guidelines)
* [Community Guidelines](#docker-community-guidelines)
## Security Reports
Please **DO NOT** file an issue for security related issues. Please send your
reports to [security@docker.com](mailto:security@docker.com) instead.
## Design and Cleanup Proposals
When considering a design proposal, we are looking for:
* A description of the problem this design proposal solves
* An issue -- not a pull request -- that describes what you will take action on
* Please prefix your issue with `Proposal:` in the title
* Please review [the existing Proposals](https://github.com/dotcloud/docker/issues?direction=asc&labels=Proposal&page=1&sort=created&state=open)
before reporting a new issue. You can always pair with someone if you both
have the same idea.
When considering a cleanup task, we are looking for:
* A description of the refactors made
* Please note any logic changes if necessary
* A pull request with the code
* Please prefix your PR's title with `Cleanup:` so we can quickly address it.
* Your pull request must remain up to date with master, so rebase as necessary.
## Reporting Issues
When reporting [issues](https://github.com/docker/docker/issues) on
GitHub please include your host OS (Ubuntu 12.04, Fedora 19, etc).
Please include:
* The output of `uname -a`.
* The output of `docker version`.
* The output of `docker -D info`.
Please also include the steps required to reproduce the problem if
possible and applicable. This information will help us review and fix
your issue faster.
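Collecting those outputs takes one line; for example (the output file name is just a placeholder):
```
# Gather the environment details requested above for a bug report
{ uname -a; docker version; docker -D info; } > docker-issue.txt 2>&1
```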
## Build Environment
For instructions on setting up your development environment, please
see our dedicated [dev environment setup
docs](http://docs.docker.com/contributing/devenvironment/).
They are probably not perfect, please let us know if anything feels
wrong or incomplete.
## Contribution guidelines
@@ -72,12 +21,12 @@ received feedback on what to improve.
We're trying very hard to keep Docker lean and focused. We don't want it
to do everything for everybody. This means that we might decide against
incorporating a new feature. However, there might be a way to implement
that feature *on top of* Docker.
that feature *on top of* docker.
### Discuss your design on the mailing list
We recommend discussing your plans [on the mailing
list](https://groups.google.com/forum/?fromgroups#!forum/docker-dev)
list](https://groups.google.com/forum/?fromgroups#!forum/docker-club)
before starting to code - especially for more ambitious contributions.
This gives other contributors a chance to point you in the right
direction, give feedback on your design, and maybe point out if someone
@@ -85,8 +34,8 @@ else is working on the same thing.
### Create issues...
Any significant improvement should be documented as [a GitHub
issue](https://github.com/docker/docker/issues) before anybody
Any significant improvement should be documented as [a github
issue](https://github.com/dotcloud/docker/issues) before anybody
starts working on it.
### ...but check for existing issues first!
@@ -98,219 +47,47 @@ help prioritize the most common problems and requests.
### Conventions
Fork the repository and make changes on your fork in a feature branch:
Fork the repo and make changes on your fork in a feature branch:
- If it's a bug fix branch, name it XXXX-something where XXXX is the number of the
issue.
- If it's a bugfix branch, name it XXX-something where XXX is the number of the
issue
- If it's a feature branch, create an enhancement issue to announce your
intentions, and name it XXXX-something where XXXX is the number of the issue.
intentions, and name it XXX-something where XXX is the number of the issue.
Submit unit tests for your changes. Go has a great test framework built in; use
it! Take a look at existing tests for inspiration. Run the full test suite on
your branch before submitting a pull request.
Update the documentation when creating or modifying features. Test
your documentation changes for clarity, concision, and correctness, as
well as a clean documentation build. See `docs/README.md` for more
information on building the docs and how they get released.
Make sure you include relevant updates or additions to documentation when
creating or modifying features.
Write clean code. Universally formatted code promotes ease of writing, reading,
and maintenance. Always run `gofmt -s -w file.go` on each changed file before
committing your changes. Most editors have plug-ins that do this automatically.
and maintenance. Always run `go fmt` before committing your changes. Most
editors have plugins that do this automatically, and there's also a git
pre-commit hook:
```
curl -o .git/hooks/pre-commit https://raw.github.com/edsrzf/gofmt-git-hook/master/fmt-check && chmod +x .git/hooks/pre-commit
```
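If you would rather not download a hook, a minimal hand-rolled equivalent (a sketch, not the script behind the URL above) could be:
```
#!/bin/sh
# .git/hooks/pre-commit (sketch): block the commit if any staged .go
# file is not gofmt -s clean.
files=$(git diff --cached --name-only --diff-filter=ACM | grep '\.go$')
[ -z "$files" ] && exit 0
unformatted=$(gofmt -s -l $files)
if [ -n "$unformatted" ]; then
    echo "Please run gofmt -s -w on:" >&2
    echo "$unformatted" >&2
    exit 1
fi
```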
Pull requests descriptions should be as clear as possible and include a
reference to all the issues that they address.
Commit messages must start with a capitalized and short summary (max. 50
chars) written in the imperative, followed by an optional, more detailed
explanatory text which is separated from the summary by an empty line.
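A hypothetical message in that style:
```
Add COPY support to the builder

Explain the motivation and the approach here; this body is optional
and is separated from the capitalized, imperative summary above by
one empty line.
```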
Code review comments may be added to your pull request. Discuss, then make the
suggested modifications and push additional commits to your feature branch. Be
sure to post a comment after pushing. The new commits will show up in the pull
request automatically, but the reviewers will not be notified unless you
comment.
Pull requests must be cleanly rebased on top of master without multiple branches
mixed into the PR.
**Git tip**: If your PR no longer merges cleanly, use `rebase master` in your
feature branch to update your pull request rather than `merge master`.
Before the pull request is merged, make sure that you squash your commits into
logical units of work using `git rebase -i` and `git push -f`. After every
commit the test suite should be passing. Include documentation changes in the
same commit so that a revert would remove all traces of the feature or fix.
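Under those rules, a typical pre-review cleanup might look like this sketch (branch names are placeholders):
```
git fetch origin
git rebase -i origin/master    # squash fixups into logical units of work
git push -f origin my-feature  # force-push the rewritten feature branch
```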
Commits that fix or close an issue should include a reference like
`Closes #XXXX` or `Fixes #XXXX`, which will automatically close the
issue when merged.
Please do not add yourself to the `AUTHORS` file, as it is regenerated
regularly from the Git history.
### Merge approval
Docker maintainers use LGTM (Looks Good To Me) in comments on the code review
to indicate acceptance.
A change requires LGTMs from an absolute majority of the maintainers of each
component affected. For example, if a change affects `docs/` and `registry/`, it
needs an absolute majority from the maintainers of `docs/` AND, separately, an
absolute majority of the maintainers of `registry/`.
For more details see [MAINTAINERS.md](hack/MAINTAINERS.md)
### Sign your work
The sign-off is a simple line at the end of the explanation for the
patch, which certifies that you wrote it or otherwise have the right to
pass it on as an open-source patch. The rules are pretty simple: if you
can certify the below (from
[developercertificate.org](http://developercertificate.org/)):
```
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
```
Then you just add a line to every git commit message:
Signed-off-by: Joe Smith <joe.smith@email.com>
Using your real name (sorry, no pseudonyms or anonymous contributions).
If you set your `user.name` and `user.email` git configs, you can sign your
commit automatically with `git commit -s`.
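For example, with placeholder identity settings:
```
git config user.name "Joe Smith"
git config user.email "joe.smith@email.com"
git commit -s -m "Fix a typo in the builder docs"
# -s appends: Signed-off-by: Joe Smith <joe.smith@email.com>
```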
Note that the old-style `Docker-DCO-1.1-Signed-off-by: ...` format is still
accepted, so there is no need to update outstanding pull requests to the new
format right away, but please do adjust your processes for future contributions.
#### Small patch exception
There are several exceptions to the signing requirement. Currently these are:
* Your patch fixes spelling or grammar errors.
* Your patch is a single line change to documentation contained in the
`docs` directory.
* Your patch fixes Markdown formatting or syntax errors in the
documentation contained in the `docs` directory.
If you have any questions, please refer to the FAQ in the [docs](http://docs.docker.com)
### How can I become a maintainer?
* Step 1: Learn the component inside out
* Step 2: Make yourself useful by contributing code, bug fixes, support etc.
* Step 3: Volunteer on the IRC channel (#docker at Freenode)
* Step 4: Propose yourself at a scheduled docker meeting in #docker-dev
Don't forget: being a maintainer is a time investment. Make sure you
will have time to make yourself available. You don't have to be a
maintainer to make a difference on the project!
### IRC Meetings
There are two monthly meetings taking place on #docker-dev IRC to accommodate all timezones.
Anybody can ask for a topic to be discussed prior to the meeting.
If you feel the conversation is going off-topic, feel free to point it out.
For the exact dates and times, have a look at [the irc-minutes repo](https://github.com/docker/irc-minutes).
They also contain all the notes from previous meetings.
## Docker Community Guidelines
We want to keep the Docker community awesome, growing and collaborative. We
need your help to keep it that way. To help with this we've come up with some
general guidelines for the community as a whole:
* Be nice: Be courteous, respectful and polite to fellow community members: no
regional, racial, gender, or other abuse will be tolerated. We like nice people
way better than mean ones!
* Encourage diversity and participation: Make everyone in our community
feel welcome, regardless of their background and the extent of their
contributions, and do everything possible to encourage participation in
our community.
* Keep it legal: Basically, don't get us in trouble. Share only content that
you own, do not share private or sensitive information, and don't break the
law.
* Stay on topic: Make sure that you are posting to the correct channel
and avoid off-topic discussions. Remember when you update an issue or
respond to an email you are potentially sending to a large number of
people. Please consider this before you update. Also remember that
nobody likes spam.
### Guideline Violations — 3 Strikes Method
The point of this section is not to find opportunities to punish people, but we
do need a fair way to deal with people who are making our community suck.
1. First occurrence: We'll give you a friendly, but public reminder that the
behavior is inappropriate according to our guidelines.
2. Second occurrence: We will send you a private message with a warning that
any additional violations will result in removal from the community.
3. Third occurrence: Depending on the violation, we may need to delete or ban
your account.
**Notes:**
* Obvious spammers are banned on first occurrence. If we don't do this, we'll
have spam all over the place.
* Violations are forgiven after 6 months of good behavior, and we won't
hold a grudge.
* People who commit minor infractions will get some education,
rather than hammering them in the 3 strikes process.
* The rules apply equally to everyone in the community, no matter how
much you've contributed.
* Extreme violations of a threatening, abusive, destructive or illegal nature
will be addressed immediately and are not subject to 3 strikes or
forgiveness.
* Contact james@docker.com to report abuse or appeal violations. In the case of
appeals, we know that mistakes happen, and we'll work with you to come up with
a fair solution if there has been a misunderstanding.
Commits that fix or close an issue should include a reference like `Closes #XXX`
or `Fixes #XXX`, which will automatically close the issue when merged.
Add your name to the AUTHORS file, but make sure the list is sorted and your
name and email address match your git configuration. The AUTHORS file is
regenerated occasionally from the git commit history, so a mismatch may result
in your changes being overwritten.

Dockerfile

@@ -1,113 +0,0 @@
# This file describes the standard way to build Docker, using docker
#
# Usage:
#
# # Assemble the full dev environment. This is slow the first time.
# docker build -t docker .
#
# # Mount your source in an interactive container for quick testing:
# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash
#
# # Run the test suite:
# docker run --privileged docker hack/make.sh test
#
# # Publish a release:
# docker run --privileged \
# -e AWS_S3_BUCKET=baz \
# -e AWS_ACCESS_KEY=foo \
# -e AWS_SECRET_KEY=bar \
# -e GPG_PASSPHRASE=gloubiboulga \
# docker hack/release.sh
#
# Note: Apparmor used to mess with privileged mode, but this is no longer
# the case. Therefore, you don't have to disable it anymore.
#
docker-version 0.6.1
FROM ubuntu:14.04
MAINTAINER Tianon Gravi <admwiggin@gmail.com> (@tianon)
# Packaged dependencies
RUN apt-get update && apt-get install -y \
aufs-tools \
automake \
btrfs-tools \
build-essential \
curl \
dpkg-sig \
git \
iptables \
libapparmor-dev \
libcap-dev \
libsqlite3-dev \
lxc=1.0* \
mercurial \
parallel \
reprepro \
ruby1.9.1 \
ruby1.9.1-dev \
s3cmd=1.1.0* \
--no-install-recommends
# Get lvm2 source for compiling statically
RUN git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103
# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags
# note: we don't use "git clone -b" above because it then spews big nasty warnings about 'detached HEAD' state that we can't silence as easily as we can silence them using "git checkout" directly
# Compile and install lvm2
RUN cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper
# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL
# Install Go
RUN curl -sSL https://golang.org/dl/go1.3.3.src.tar.gz | tar -v -C /usr/local -xz
ENV PATH /usr/local/go/bin:$PATH
ENV GOPATH /go:/go/src/github.com/docker/docker/vendor
ENV PATH /go/bin:$PATH
RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1
# Compile Go for cross compilation
ENV DOCKER_CROSSPLATFORMS \
linux/386 linux/arm \
darwin/amd64 darwin/386 \
freebsd/amd64 freebsd/386 freebsd/arm
# (set an explicit GOARM of 5 for maximum compatibility)
ENV GOARM 5
RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'
# Grab Go's cover tool for dead-simple code coverage testing
RUN go get code.google.com/p/go.tools/cmd/cover
# TODO replace FPM with some very minimal debhelper stuff
RUN gem install --no-rdoc --no-ri fpm --version 1.0.2
# Install man page generator
RUN mkdir -p /go/src/github.com/cpuguy83 \
&& git clone -b v1 https://github.com/cpuguy83/go-md2man.git /go/src/github.com/cpuguy83/go-md2man \
&& cd /go/src/github.com/cpuguy83/go-md2man \
&& go get -v ./...
# Get the "busybox" image source so we can build locally instead of pulling
RUN git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox
# Get the "cirros" image source so we can import it instead of fetching it during tests
RUN curl -sSL -o /cirros.tar.gz https://github.com/ewindisch/docker-cirros/raw/1cded459668e8b9dbf4ef976c94c05add9bbd8e9/cirros-0.3.0-x86_64-lxc.tar.gz
# Setup s3cmd config
RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY' > $HOME/.s3cfg
# Set user.email so crosbymichael's in-container merge commits go smoothly
RUN git config --global user.email 'docker-dummy@example.com'
# Add an unprivileged user to be used for tests which need it
RUN groupadd -r docker
RUN useradd --create-home --gid docker unprivilegeduser
VOLUME /var/lib/docker
WORKDIR /go/src/github.com/docker/docker
ENV DOCKER_BUILDTAGS apparmor selinux
# Wrap all commands in the "docker-in-docker" script to allow nested containers
ENTRYPOINT ["hack/dind"]
# Upload docker source
COPY . /go/src/github.com/docker/docker

LICENSE (13 changes)

@@ -176,7 +176,18 @@
END OF TERMS AND CONDITIONS
Copyright 2014 Docker, Inc.
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

MAINTAINERS

@@ -1,9 +0,0 @@
Solomon Hykes <solomon@docker.com> (@shykes)
Victor Vieux <vieux@docker.com> (@vieux)
Michael Crosby <michael@crosbymichael.com> (@crosbymichael)
.mailmap: Tianon Gravi <admwiggin@gmail.com> (@tianon)
.travis.yml: Tianon Gravi <admwiggin@gmail.com> (@tianon)
AUTHORS: Tianon Gravi <admwiggin@gmail.com> (@tianon)
Dockerfile: Tianon Gravi <admwiggin@gmail.com> (@tianon)
Makefile: Tianon Gravi <admwiggin@gmail.com> (@tianon)
.dockerignore: Tianon Gravi <admwiggin@gmail.com> (@tianon)

Makefile (108 changes)

@@ -1,68 +1,78 @@
.PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration test-integration-cli validate
DOCKER_PACKAGE := github.com/dotcloud/docker
RELEASE_VERSION := $(shell git tag | grep -E "v[0-9\.]+$$" | sort -nr | head -n 1)
SRCRELEASE := docker-$(RELEASE_VERSION)
BINRELEASE := docker-$(RELEASE_VERSION).tgz
# to allow `make BINDDIR=. shell` or `make BINDDIR= test`
# (default to no bind mount if DOCKER_HOST is set)
BINDDIR := $(if $(DOCKER_HOST),,bundles)
# to allow `make DOCSPORT=9000 docs`
DOCSPORT := 8000
GIT_ROOT := $(shell git rev-parse --show-toplevel)
BUILD_DIR := $(CURDIR)/.gopath
GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null)
DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH))
DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH),:$(GIT_BRANCH))
DOCKER_MOUNT := $(if $(BINDDIR),-v "$(CURDIR)/$(BINDDIR):/go/src/github.com/docker/docker/$(BINDDIR)")
GOPATH ?= $(BUILD_DIR)
export GOPATH
DOCKER_RUN_DOCKER := docker run --rm -it --privileged -e TIMEOUT -e BUILDFLAGS -e TESTFLAGS -e TESTDIRS -e DOCKER_GRAPHDRIVER -e DOCKER_EXECDRIVER $(DOCKER_MOUNT) "$(DOCKER_IMAGE)"
# to allow `make DOCSDIR=docs docs-shell`
DOCKER_RUN_DOCS := docker run --rm -it $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR)) -e AWS_S3_BUCKET
GO_OPTIONS ?=
ifeq ($(VERBOSE), 1)
GO_OPTIONS += -v
endif
default: binary
GIT_COMMIT = $(shell git rev-parse --short HEAD)
GIT_STATUS = $(shell test -n "`git status --porcelain`" && echo "+CHANGES")
all: build
$(DOCKER_RUN_DOCKER) hack/make.sh
BUILD_OPTIONS = -ldflags "-X main.GIT_COMMIT $(GIT_COMMIT)$(GIT_STATUS)"
binary: build
$(DOCKER_RUN_DOCKER) hack/make.sh binary
SRC_DIR := $(GOPATH)/src
cross: build
$(DOCKER_RUN_DOCKER) hack/make.sh binary cross
DOCKER_DIR := $(SRC_DIR)/$(DOCKER_PACKAGE)
DOCKER_MAIN := $(DOCKER_DIR)/docker
docs: docs-build
$(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" mkdocs serve
DOCKER_BIN_RELATIVE := bin/docker
DOCKER_BIN := $(CURDIR)/$(DOCKER_BIN_RELATIVE)
docs-shell: docs-build
$(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" bash
.PHONY: all clean test hack release srcrelease $(BINRELEASE) $(SRCRELEASE) $(DOCKER_BIN) $(DOCKER_DIR)
docs-release: docs-build
$(DOCKER_RUN_DOCS) -e BUILD_ROOT "$(DOCKER_DOCS_IMAGE)" ./release.sh
all: $(DOCKER_BIN)
test: build
$(DOCKER_RUN_DOCKER) hack/make.sh binary cross test-unit test-integration test-integration-cli
$(DOCKER_BIN): $(DOCKER_DIR)
@mkdir -p $(dir $@)
@(cd $(DOCKER_MAIN); go build $(GO_OPTIONS) $(BUILD_OPTIONS) -o $@)
@echo $(DOCKER_BIN_RELATIVE) is created.
test-unit: build
$(DOCKER_RUN_DOCKER) hack/make.sh test-unit
$(DOCKER_DIR):
@mkdir -p $(dir $@)
@rm -f $@
@ln -sf $(CURDIR)/ $@
@(cd $(DOCKER_MAIN); go get $(GO_OPTIONS))
test-integration: build
$(DOCKER_RUN_DOCKER) hack/make.sh test-integration
whichrelease:
echo $(RELEASE_VERSION)
test-integration-cli: build
$(DOCKER_RUN_DOCKER) hack/make.sh binary test-integration-cli
release: $(BINRELEASE)
srcrelease: $(SRCRELEASE)
deps: $(DOCKER_DIR)
validate: build
$(DOCKER_RUN_DOCKER) hack/make.sh validate-gofmt validate-dco
# A clean checkout of $RELEASE_VERSION, with vendored dependencies
$(SRCRELEASE):
rm -fr $(SRCRELEASE)
git clone $(GIT_ROOT) $(SRCRELEASE)
cd $(SRCRELEASE); git checkout -q $(RELEASE_VERSION)
shell: build
$(DOCKER_RUN_DOCKER) bash
# A binary release ready to be uploaded to a mirror
$(BINRELEASE): $(SRCRELEASE)
rm -f $(BINRELEASE)
cd $(SRCRELEASE); make; cp -R bin docker-$(RELEASE_VERSION); tar -f ../$(BINRELEASE) -zv -c docker-$(RELEASE_VERSION)
build: bundles
docker build -t "$(DOCKER_IMAGE)" .
clean:
@rm -rf $(dir $(DOCKER_BIN))
ifeq ($(GOPATH), $(BUILD_DIR))
@rm -rf $(BUILD_DIR)
else ifneq ($(DOCKER_DIR), $(realpath $(DOCKER_DIR)))
@rm -f $(DOCKER_DIR)
endif
docs-build:
cp ./VERSION docs/VERSION
echo "$(GIT_BRANCH)" > docs/GIT_BRANCH
echo "$(AWS_S3_BUCKET)" > docs/AWS_S3_BUCKET
echo "$(GITCOMMIT)" > docs/GITCOMMIT
docker build -t "$(DOCKER_DOCS_IMAGE)" docs
test: all
@(cd $(DOCKER_DIR); sudo -E go test $(GO_OPTIONS))
bundles:
mkdir bundles
fmt:
@gofmt -s -l -w .
hack:
cd $(CURDIR)/buildbot && vagrant up
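A typical sequence with the new containerized targets, per the rules above (a sketch):
```
make build    # docker build -t "$(DOCKER_IMAGE)" .
make binary   # hack/make.sh binary inside the dev container
make test     # hack/make.sh binary cross test-unit test-integration test-integration-cli
```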

NOTICE (19 changes)

@@ -1,19 +1,6 @@
Docker
Copyright 2012-2014 Docker, Inc.
Copyright 2012-2013 dotCloud, inc.
This product includes software developed at Docker, Inc. (http://www.docker.com).
This product includes software developed at dotCloud, inc. (http://www.dotcloud.com).
This product contains software (https://github.com/kr/pty) developed
by Keith Rarick, licensed under the MIT License.
The following is courtesy of our legal counsel:
Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.
For more information, please see http://www.bis.doc.gov
See also http://www.apache.org/dev/crypto.html and/or seek legal counsel.
This product contains software (https://github.com/kr/pty) developed by Keith Rarick, licensed under the MIT License.

README.md (444 changes)

@@ -1,205 +1,317 @@
Docker: the Linux container engine
==================================
Docker: the Linux container runtime
===================================
Docker is an open source project to pack, ship and run any application
as a lightweight container.
Docker complements LXC with a high-level API which operates at the process level. It runs unix processes with strong guarantees of isolation and repeatability across servers.
Docker containers are both *hardware-agnostic* and *platform-agnostic*.
This means that they can run anywhere, from your laptop to the largest
EC2 compute instance and everything in between - and they don't require
that you use a particular language, framework or packaging system. That
makes them great building blocks for deploying and scaling web apps,
databases and backend services without depending on a particular stack
or provider.
Docker is a great building block for automating distributed systems: large-scale web deployments, database clusters, continuous deployment systems, private PaaS, service-oriented architectures, etc.
Docker is an open-source implementation of the deployment engine which
powers [dotCloud](http://dotcloud.com), a popular Platform-as-a-Service.
It benefits directly from the experience accumulated over several years
of large-scale operation and support of hundreds of thousands of
applications and databases.
![Docker L](docs/sources/static_files/lego_docker.jpg "Docker")
![Docker L](docs/theme/mkdocs/images/docker-logo-compressed.png "Docker")
* *Heterogeneous payloads*: any combination of binaries, libraries, configuration files, scripts, virtualenvs, jars, gems, tarballs, you name it. No more juggling between domain-specific tools. Docker can deploy and run them all.
## Security Disclosure
* *Any server*: docker can run on any x64 machine with a modern linux kernel - whether it's a laptop, a bare metal server or a VM. This makes it perfect for multi-cloud deployments.
Security is very important to us. If you have any issue regarding security,
please disclose the information responsibly by sending an email to
security@docker.com and not by creating a github issue.
* *Isolation*: docker isolates processes from each other and from the underlying host, using lightweight containers.
## Better than VMs
A common method for distributing applications and sandboxing their
execution is to use virtual machines, or VMs. Typical VM formats are
VMware's vmdk, Oracle VirtualBox's vdi, and Amazon EC2's ami. In theory
these formats should allow every developer to automatically package
their application into a "machine" for easy distribution and deployment.
In practice, that almost never happens, for a few reasons:
* *Size*: VMs are very large which makes them impractical to store
and transfer.
* *Performance*: running VMs consumes significant CPU and memory,
which makes them impractical in many scenarios, for example local
development of multi-tier applications, and large-scale deployment
of cpu and memory-intensive applications on large numbers of
machines.
* *Portability*: competing VM environments don't play well with each
other. Although conversion tools do exist, they are limited and
add even more overhead.
* *Hardware-centric*: VMs were designed with machine operators in
mind, not software developers. As a result, they offer very
limited tooling for what developers need most: building, testing
and running their software. For example, VMs offer no facilities
for application versioning, monitoring, configuration, logging or
service discovery.
By contrast, Docker relies on a different sandboxing method known as
*containerization*. Unlike traditional virtualization, containerization
takes place at the kernel level. Most modern operating system kernels
now support the primitives necessary for containerization, including
Linux with [openvz](http://openvz.org),
[vserver](http://linux-vserver.org) and more recently
[lxc](http://lxc.sourceforge.net), Solaris with
[zones](http://docs.oracle.com/cd/E26502_01/html/E29024/preface-1.html#scrolltoc)
and FreeBSD with
[Jails](http://www.freebsd.org/doc/handbook/jails.html).
Docker builds on top of these low-level primitives to offer developers a
portable format and runtime environment that solves all 4 problems.
Docker containers are small (and their transfer can be optimized with
layers), they have basically zero memory and cpu overhead, they are
completely portable and are designed from the ground up with an
application-centric design.
The best part: because Docker operates at the OS level, it can still be
run inside a VM!
## Plays well with others
Docker does not require that you buy into a particular programming
language, framework, packaging system or configuration language.
Is your application a Unix process? Does it use files, tcp connections,
environment variables, standard Unix streams and command-line arguments
as inputs and outputs? Then Docker can run it.
Can your application's build be expressed as a sequence of such
commands? Then Docker can build it.
## Escape dependency hell
A common problem for developers is the difficulty of managing all
their application's dependencies in a simple and automated way.
This is usually difficult for several reasons:
* *Cross-platform dependencies*. Modern applications often depend on
a combination of system libraries and binaries, language-specific
packages, framework-specific modules, internal components
developed for another project, etc. These dependencies live in
different "worlds" and require different tools - these tools
typically don't work well with each other, requiring awkward
custom integrations.
* *Conflicting dependencies*. Different applications may depend on
different versions of the same dependency. Packaging tools handle
these situations with various degrees of ease - but they all
handle them in different and incompatible ways, which again forces
the developer to do extra work.
* *Custom dependencies*. A developer may need to prepare a custom
version of their application's dependency. Some packaging systems
can handle custom versions of a dependency, others can't - and all
of them handle it differently.
* *Repeatability*: because containers are isolated in their own filesystem, they behave the same regardless of where, when, and alongside what they run.
Docker solves dependency hell by giving the developer a simple way to
express *all* their application's dependencies in one place, and
streamline the process of assembling them. If this makes you think of
[XKCD 927](http://xkcd.com/927/), don't worry. Docker doesn't
*replace* your favorite packaging systems. It simply orchestrates
their use in a simple and repeatable way. How does it do that? With
layers.
Notable features
-----------------
Docker defines a build as running a sequence of Unix commands, one
after the other, in the same container. Build commands modify the
contents of the container (usually by installing new files on the
filesystem), the next command modifies it some more, etc. Since each
build command inherits the result of the previous commands, the
*order* in which the commands are executed expresses *dependencies*.
* Filesystem isolation: each process container runs in a completely separate root filesystem.
Here's a typical Docker build process:
* Resource isolation: system resources like cpu and memory can be allocated differently to each process container, using cgroups.
* Network isolation: each process container runs in its own network namespace, with a virtual interface and IP address of its own.
* Copy-on-write: root filesystems are created using copy-on-write, which makes deployment extremely fast, memory-cheap and disk-cheap.
* Logging: the standard streams (stdout/stderr/stdin) of each process container are collected and logged for real-time or batch retrieval.
* Change management: changes to a container's filesystem can be committed into a new image and re-used to create more containers. No templating or manual configuration required.
* Interactive shell: docker can allocate a pseudo-tty and attach to the standard input of any container, for example to run a throwaway interactive shell.
Install instructions
==================
Quick install on Ubuntu 12.04 and 12.10
---------------------------------------
```bash
curl get.docker.io | sh -x
```

```
FROM ubuntu:12.04
RUN apt-get update && apt-get install -y python python-pip curl
RUN curl -sSL https://github.com/shykes/helloflask/archive/master.tar.gz | tar -xzv
RUN cd helloflask-master && pip install -r requirements.txt
```
Note that Docker doesn't care *how* dependencies are built - as long
as they can be built by running a Unix command in a container.
Binary installs
----------------
Docker supports the following binary installation methods.
Note that some methods are community contributions and not yet officially supported.
Getting started
===============
* [Ubuntu 12.04 and 12.10 (officially supported)](http://docs.docker.io/en/latest/installation/ubuntulinux/)
* [Arch Linux](http://docs.docker.io/en/latest/installation/archlinux/)
* [MacOS X (with Vagrant)](http://docs.docker.io/en/latest/installation/macos/)
* [Windows (with Vagrant)](http://docs.docker.io/en/latest/installation/windows/)
* [Amazon EC2 (with Vagrant)](http://docs.docker.io/en/latest/installation/amazon/)
Docker can be installed on your local machine as well as servers - both
bare metal and virtualized. It is available as a binary on most modern
Linux systems, or as a VM on Windows, Mac and other systems.
Installing from source
----------------------
We also offer an [interactive tutorial](http://www.docker.com/tryit/)
for quickly learning the basics of using Docker.
1. Make sure you have a [Go language](http://golang.org/doc/install) compiler and [git](http://git-scm.com) installed.
For up-to-date install instructions, see the [Docs](http://docs.docker.com).
2. Checkout the source code
```bash
git clone http://github.com/dotcloud/docker
```
3. Build the docker binary
```bash
cd docker
make VERBOSE=1
sudo cp ./bin/docker /usr/local/bin/docker
```
Usage examples
==============
Docker can be used to run short-lived commands, long-running daemons
(app servers, databases etc.), interactive shell sessions, etc.
First run the docker daemon
---------------------------
You can find a [list of real-world
examples](http://docs.docker.com/examples/) in the
documentation.
All the examples assume your machine is running the docker daemon. To run the docker daemon in the background, simply type:
```bash
# On a production system you want this running in an init script
sudo docker -d &
```
Now you can run docker in client mode: all commands will be forwarded to the docker daemon, so the client can run from any account.
```bash
# Now you can run docker commands from any account.
docker help
```
Throwaway shell in a base ubuntu image
--------------------------------------
```bash
docker pull ubuntu:12.10
# Run an interactive shell, allocate a tty, attach stdin and stdout
# To detach the tty without exiting the shell, use the escape sequence Ctrl-p + Ctrl-q
docker run -i -t ubuntu:12.10 /bin/bash
```
Starting a long-running worker process
--------------------------------------
```bash
# Start a very useful long-running process
JOB=$(docker run -d ubuntu /bin/sh -c "while true; do echo Hello world; sleep 1; done")
# Collect the output of the job so far
docker logs $JOB
# Kill the job
docker kill $JOB
```
Running an irc bouncer
----------------------
```bash
BOUNCER_ID=$(docker run -d -p 6667 -u irc shykes/znc $USER $PASSWORD)
echo "Configure your irc client to connect to port $(docker port $BOUNCER_ID 6667) of this machine"
```
Running Redis
-------------
```bash
REDIS_ID=$(docker run -d -p 6379 shykes/redis redis-server)
echo "Configure your redis client to connect to port $(docker port $REDIS_ID 6379) of this machine"
```
Share your own image!
---------------------
```bash
CONTAINER=$(docker run -d ubuntu:12.10 apt-get install -y curl)
docker commit -m "Installed curl" $CONTAINER $USER/betterbase
docker push $USER/betterbase
```
A list of publicly available images is [available here](https://github.com/dotcloud/docker/wiki/Public-docker-images).
Expose a service on a TCP port
------------------------------
```bash
# Expose port 4444 of this container, and tell netcat to listen on it
JOB=$(docker run -d -p 4444 base /bin/nc -l -p 4444)
# Which public port is NATed to my container?
PORT=$(docker port $JOB 4444)
# Connect to the public port via the host's public address
# Please note that because of how routing works, connecting to localhost or 127.0.0.1 on $PORT will not work.
IP=$(ifconfig eth0 | perl -n -e 'if (m/inet addr:([\d\.]+)/g) { print $1 }')
echo hello world | nc $IP $PORT
# Verify that the network connection worked
echo "Daemon received: $(docker logs $JOB)"
```
Under the hood
--------------
Under the hood, Docker is built on the following components:
* The
[cgroup](http://blog.dotcloud.com/kernel-secrets-from-the-paas-garage-part-24-c)
and
[namespacing](http://blog.dotcloud.com/under-the-hood-linux-kernels-on-dotcloud-part)
capabilities of the Linux kernel;
* The [Go](http://golang.org) programming language.
* The [cgroup](http://blog.dotcloud.com/kernel-secrets-from-the-paas-garage-part-24-c) and [namespacing](http://blog.dotcloud.com/under-the-hood-linux-kernels-on-dotcloud-part) capabilities of the Linux kernel;
* [AUFS](http://aufs.sourceforge.net/aufs.html), a powerful union filesystem with copy-on-write capabilities;
* The [Go](http://golang.org) programming language;
* [lxc](http://lxc.sourceforge.net/), a set of convenience scripts to simplify the creation of linux containers.
Contributing to Docker
======================
[![GoDoc](https://godoc.org/github.com/docker/docker?status.png)](https://godoc.org/github.com/docker/docker)
[![Travis](https://travis-ci.org/docker/docker.svg?branch=master)](https://travis-ci.org/docker/docker)
Want to hack on Docker? Awesome! There are instructions to get you started on the website: http://docs.docker.io/en/latest/contributing/contributing/
Want to hack on Docker? Awesome! There are instructions to get you
started [here](CONTRIBUTING.md).
They are probably not perfect, please let us know if anything feels
wrong or incomplete.
### Legal
*Brought to you courtesy of our legal counsel. For more context,
please see the Notice document.*
Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.
For more information, please see http://www.bis.doc.gov
They are probably not perfect, please let us know if anything feels wrong or incomplete.
Licensing
=========
Docker is licensed under the Apache License, Version 2.0. See LICENSE for full license text.
Note
----
We also keep the documentation in this repository. The website documentation is generated with Sphinx from these sources.
Please find it under docs/sources/ and read more about it at https://github.com/dotcloud/docker/blob/master/docs/README.md
Please feel free to fix / update the documentation and send us pull requests. More tutorials are also welcome.
Setting up a dev environment
----------------------------
Instructions that have been verified to work on Ubuntu 12.10:
```bash
sudo apt-get -y install lxc wget bsdtar curl golang git
export GOPATH=~/go/
export PATH=$GOPATH/bin:$PATH
mkdir -p $GOPATH/src/github.com/dotcloud
cd $GOPATH/src/github.com/dotcloud
git clone git@github.com:dotcloud/docker.git
cd docker
go get -v github.com/dotcloud/docker/...
go install -v github.com/dotcloud/docker/...
```
Then run the docker daemon:
```bash
sudo $GOPATH/bin/docker -d
```
Run the `go install` command (above) to recompile docker.
What is a Standard Container?
=============================
Docker defines a unit of software delivery called a Standard Container. The goal of a Standard Container is to encapsulate a software component and all its dependencies in
a format that is self-describing and portable, so that any compliant runtime can run it without extra dependencies, regardless of the underlying machine and the contents of the container.
The spec for Standard Containers is currently a work in progress, but it is very straightforward. It mostly defines 1) an image format, 2) a set of standard operations, and 3) an execution environment.
A great analogy for this is the shipping container. Just like Standard Containers are a fundamental unit of software delivery, shipping containers (http://bricks.argz.com/ins/7823-1/12) are a fundamental unit of physical delivery.
### 1. STANDARD OPERATIONS
Just like shipping containers, Standard Containers define a set of STANDARD OPERATIONS. Shipping containers can be lifted, stacked, locked, loaded, unloaded and labelled. Similarly, standard containers can be started, stopped, copied, snapshotted, downloaded, uploaded and tagged.
### 2. CONTENT-AGNOSTIC
Just like shipping containers, Standard Containers are CONTENT-AGNOSTIC: all standard operations have the same effect regardless of the contents. A shipping container will be stacked in exactly the same way whether it contains Vietnamese powder coffee or spare Maserati parts. Similarly, Standard Containers are started or uploaded in the same way whether they contain a postgres database, a php application with its dependencies and application server, or Java build artifacts.
### 3. INFRASTRUCTURE-AGNOSTIC
Both types of containers are INFRASTRUCTURE-AGNOSTIC: they can be transported to thousands of facilities around the world, and manipulated by a wide variety of equipment. A shipping container can be packed in a factory in Ukraine, transported by truck to the nearest routing center, stacked onto a train, loaded into a German boat by an Australian-built crane, stored in a warehouse at a US facility, etc. Similarly, a standard container can be bundled on my laptop, uploaded to S3, downloaded, run and snapshotted by a build server at Equinix in Virginia, uploaded to 10 staging servers in a home-made Openstack cluster, then sent to 30 production instances across 3 EC2 regions.
### 4. DESIGNED FOR AUTOMATION
Because they offer the same standard operations regardless of content and infrastructure, Standard Containers, just like their physical counterpart, are extremely well-suited for automation. In fact, you could say automation is their secret weapon.
Many things that once required time-consuming and error-prone human effort can now be programmed. Before shipping containers, a bag of powder coffee was hauled, dragged, dropped, rolled and stacked by 10 different people in 10 different locations by the time it reached its destination. 1 out of 50 disappeared. 1 out of 20 was damaged. The process was slow, inefficient and cost a fortune - and was entirely different depending on the facility and the type of goods.
Similarly, before Standard Containers, by the time a software component ran in production, it had been individually built, configured, bundled, documented, patched, vendored, templated, tweaked and instrumented by 10 different people on 10 different computers. Builds failed, libraries conflicted, mirrors crashed, post-it notes were lost, logs were misplaced, cluster updates were half-broken. The process was slow, inefficient and cost a fortune - and was entirely different depending on the language and infrastructure provider.
### 5. INDUSTRIAL-GRADE DELIVERY
There are 17 million shipping containers in existence, packed with every physical good imaginable. Every single one of them can be loaded on the same boats, by the same cranes, in the same facilities, and sent anywhere in the World with incredible efficiency. It is embarrassing to think that a 30 ton shipment of coffee can safely travel half-way across the World in *less time* than it takes a software team to deliver its code from one datacenter to another sitting 10 miles away.
With Standard Containers we can put an end to that embarrassment, by making INDUSTRIAL-GRADE DELIVERY of software a reality.
Standard Container Specification
--------------------------------
(TODO)
### Image format
### Standard operations
* Copy
* Run
* Stop
* Wait
* Commit
* Attach standard streams
* List filesystem changes
* ...
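For illustration only, here is a minimal, hypothetical Go sketch of these standard operations as an interface. Every name and signature below is an assumption chosen for readability, not part of the spec or of Docker's actual API:

```go
package spec

import "io"

// StandardContainer is an illustrative model of the content-agnostic
// operations listed above; it is NOT the actual spec.
type StandardContainer interface {
	Copy(dest string) error // duplicate the container
	Run() error             // start the contained process
	Stop() error            // stop it
	Wait() (exitCode int, err error)
	Commit(name string) (imageID string, err error) // snapshot into a new image
	Attach(stdin io.Reader, stdout, stderr io.Writer) error
	Changes() ([]string, error) // list filesystem changes
}
```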
### Execution environment
#### Root filesystem
#### Environment variables
#### Process arguments
#### Networking
#### Process namespacing
#### Resource limits
#### Process monitoring
#### Logging
#### Signals
#### Pseudo-terminal allocation
#### Security

SPECS/data-volumes.md

@@ -0,0 +1,71 @@
## Spec for data volumes
Spec owner: Solomon Hykes <solomon@dotcloud.com>
Data volumes (issue #111) are a much-requested feature which trigger much discussion and debate. Below is the current authoritative spec for implementing data volumes.
This spec will be deprecated once the feature is fully implemented.
Discussion, requests, trolls, demands, offerings, threats and other forms of supplications concerning this spec should be addressed to Solomon here: https://github.com/dotcloud/docker/issues/111
### 1. Creating data volumes
At container creation, parts of a container's filesystem can be mounted as separate data volumes. Volumes are defined with the -v flag.
For example:
```bash
$ docker run -v /var/lib/postgres -v /var/log postgres /usr/bin/postgres
```
In this example, a new container is created from the 'postgres' image. At the same time, docker creates 2 new data volumes: one will be mapped to the container at /var/lib/postgres, the other at /var/log.
2 important notes:
1) Volumes don't have top-level names. At no point does the user provide a name, nor is a name given to them. Volumes are identified by the path at which they are mounted inside their container.
2) The user doesn't choose the source of the volume. Docker only mounts volumes it created itself, in the same way that it only runs containers that it created itself. That is by design.
### 2. Sharing data volumes
Instead of creating its own volumes, a container can share another container's volumes. For example:
```bash
$ docker run --volumes-from $OTHER_CONTAINER_ID postgres /usr/local/bin/postgres-backup
```
In this example, a new container is created from the 'postgres' example. At the same time, docker will *re-use* the 2 data volumes created in the previous example. One volume will be mounted on the /var/lib/postgres of *both* containers, and the other will be mounted on the /var/log of both containers.
### 3. Under the hood
Docker stores volumes in /var/lib/docker/volumes. Each volume receives a globally unique ID at creation, and is stored at /var/lib/docker/volumes/ID.
At creation, volumes are attached to a single container - the source of truth for this mapping will be the container's configuration.
Mounting a volume consists of calling "mount --bind" from the volume's directory to the appropriate sub-directory of the container mountpoint. This may be done by Docker itself, or farmed out to lxc (which supports mount-binding) if possible.
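For illustration, here is a minimal sketch of such a bind mount in Go, assuming a Linux host and an already-created volume directory. The function and parameter names are hypothetical; Docker derives all of these values itself:

```go
package volumes

import (
	"fmt"
	"syscall"
)

// bindMountVolume bind-mounts a volume directory into a container's root
// filesystem. volumeID, containerRoot and target are illustrative names.
func bindMountVolume(volumeID, containerRoot, target string) error {
	src := "/var/lib/docker/volumes/" + volumeID
	dst := containerRoot + target
	// MS_BIND makes the same directory tree visible at dst as well.
	if err := syscall.Mount(src, dst, "", syscall.MS_BIND, ""); err != nil {
		return fmt.Errorf("bind mount %s -> %s: %s", src, dst, err)
	}
	return nil
}
```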
### 4. Backups, transfers and other volume operations
Volumes sometimes need to be backed up, transferred between hosts, synchronized, etc. These operations are typically application-specific or site-specific, e.g. rsync vs. S3 upload vs. replication vs...
Rather than attempting to implement all these scenarios directly, Docker will allow for custom implementations using an extension mechanism.
### 5. Custom volume handlers
Docker allows for arbitrary code to be executed against a container's volumes, to implement any custom action: backup, transfer, synchronization across hosts, etc.
Here's an example:
```bash
$ DB=$(docker run -d -v /var/lib/postgres -v /var/log postgres /usr/bin/postgres)
$ BACKUP_JOB=$(docker run -d --volumes-from $DB shykes/backuper /usr/local/bin/backup-postgres --s3creds=$S3CREDS)
$ docker wait $BACKUP_JOB
```
Congratulations, you just implemented a custom volume handler, using Docker's built-in ability to 1) execute arbitrary code and 2) share volumes between containers.


@@ -1 +0,0 @@
1.3.1

Vagrantfile

@@ -0,0 +1,82 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
def v10(config)
config.vm.box = 'precise64'
config.vm.box_url = 'http://files.vagrantup.com/precise64.box'
# Install lxc-docker from the dotcloud PPA
config.vm.provision :shell, :inline => "echo 'deb http://ppa.launchpad.net/dotcloud/lxc-docker/ubuntu precise main' >>/etc/apt/sources.list"
config.vm.provision :shell, :inline => 'export DEBIAN_FRONTEND=noninteractive; apt-get -qq update; apt-get install -qq -y --force-yes lxc-docker'
end
Vagrant::VERSION < "1.1.0" and Vagrant::Config.run do |config|
v10(config)
end
Vagrant::VERSION >= "1.1.0" and Vagrant.configure("1") do |config|
v10(config)
end
Vagrant::VERSION >= "1.1.0" and Vagrant.configure("2") do |config|
config.vm.provider :aws do |aws|
config.vm.box = "dummy"
config.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
aws.access_key_id = ENV["AWS_ACCESS_KEY_ID"]
aws.secret_access_key = ENV["AWS_SECRET_ACCESS_KEY"]
aws.keypair_name = ENV["AWS_KEYPAIR_NAME"]
aws.ssh_private_key_path = ENV["AWS_SSH_PRIVKEY"]
aws.region = "us-east-1"
aws.ami = "ami-d0f89fb9"
aws.ssh_username = "ubuntu"
aws.instance_type = "t1.micro"
end
config.vm.provider :rackspace do |rs|
config.vm.box = "dummy"
config.vm.box_url = "https://github.com/mitchellh/vagrant-rackspace/raw/master/dummy.box"
config.ssh.private_key_path = ENV["RS_PRIVATE_KEY"]
rs.username = ENV["RS_USERNAME"]
rs.api_key = ENV["RS_API_KEY"]
rs.public_key_path = ENV["RS_PUBLIC_KEY"]
rs.flavor = /512MB/
rs.image = /Ubuntu/
end
config.vm.provider :virtualbox do |vb|
config.vm.box = 'precise64'
config.vm.box_url = 'http://files.vagrantup.com/precise64.box'
end
end
Vagrant::VERSION >= "1.2.0" and Vagrant.configure("2") do |config|
config.vm.provider :aws do |aws, override|
config.vm.box = "dummy"
config.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
aws.access_key_id = ENV["AWS_ACCESS_KEY_ID"]
aws.secret_access_key = ENV["AWS_SECRET_ACCESS_KEY"]
aws.keypair_name = ENV["AWS_KEYPAIR_NAME"]
override.ssh.private_key_path = ENV["AWS_SSH_PRIVKEY"]
override.ssh.username = "ubuntu"
aws.region = "us-east-1"
aws.ami = "ami-d0f89fb9"
aws.instance_type = "t1.micro"
end
config.vm.provider :rackspace do |rs|
config.vm.box = "dummy"
config.vm.box_url = "https://github.com/mitchellh/vagrant-rackspace/raw/master/dummy.box"
config.ssh.private_key_path = ENV["RS_PRIVATE_KEY"]
rs.username = ENV["RS_USERNAME"]
rs.api_key = ENV["RS_API_KEY"]
rs.public_key_path = ENV["RS_PUBLIC_KEY"]
rs.flavor = /512MB/
rs.image = /Ubuntu/
end
config.vm.provider :virtualbox do |vb|
config.vm.box = 'precise64'
config.vm.box_url = 'http://files.vagrantup.com/precise64.box'
end
end


@@ -1 +0,0 @@
Victor Vieux <vieux@docker.com> (@vieux)


@@ -1,5 +0,0 @@
This directory contains code pertaining to the Docker API:
- Used by the docker client when communicating with the docker daemon
- Used by third party tools wishing to interface with the docker daemon


@@ -1,19 +0,0 @@
package api
import (
"testing"
)
func TestJsonContentType(t *testing.T) {
if !MatchesContentType("application/json", "application/json") {
t.Fail()
}
if !MatchesContentType("application/json; charset=utf-8", "application/json") {
t.Fail()
}
if MatchesContentType("dockerapplication/json", "application/json") {
t.Fail()
}
}


@@ -1,148 +0,0 @@
package client
import (
"crypto/tls"
"encoding/json"
"fmt"
"io"
"os"
"reflect"
"strings"
"text/template"
flag "github.com/docker/docker/pkg/mflag"
"github.com/docker/docker/pkg/term"
"github.com/docker/docker/registry"
"github.com/docker/libtrust"
)
type DockerCli struct {
proto string
addr string
configFile *registry.ConfigFile
in io.ReadCloser
out io.Writer
err io.Writer
key libtrust.PrivateKey
tlsConfig *tls.Config
scheme string
// inFd holds file descriptor of the client's STDIN, if it's a valid file
inFd uintptr
// outFd holds file descriptor of the client's STDOUT, if it's a valid file
outFd uintptr
// isTerminalIn describes if client's STDIN is a TTY
isTerminalIn bool
// isTerminalOut describes if client's STDOUT is a TTY
isTerminalOut bool
}
var funcMap = template.FuncMap{
"json": func(v interface{}) string {
a, _ := json.Marshal(v)
return string(a)
},
}
func (cli *DockerCli) getMethod(args ...string) (func(...string) error, bool) {
camelArgs := make([]string, len(args))
for i, s := range args {
if len(s) == 0 {
return nil, false
}
camelArgs[i] = strings.ToUpper(s[:1]) + strings.ToLower(s[1:])
}
methodName := "Cmd" + strings.Join(camelArgs, "")
method := reflect.ValueOf(cli).MethodByName(methodName)
if !method.IsValid() {
return nil, false
}
return method.Interface().(func(...string) error), true
}
// Cmd executes the specified command
func (cli *DockerCli) Cmd(args ...string) error {
if len(args) > 1 {
method, exists := cli.getMethod(args[:2]...)
if exists {
return method(args[2:]...)
}
}
if len(args) > 0 {
method, exists := cli.getMethod(args[0])
if !exists {
fmt.Println("Error: Command not found:", args[0])
return cli.CmdHelp(args[1:]...)
}
return method(args[1:]...)
}
return cli.CmdHelp(args...)
}
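// Illustration (not in the original source): a command line like
// `docker run -i -t ubuntu bash` reaches Cmd as
//
//	cli.Cmd("run", "-i", "-t", "ubuntu", "bash")
//
// Cmd first tries the two-word form, but getMethod("run", "-i") produces the
// invalid method name "CmdRun-i", so the reflection lookup fails; it then
// falls back to the one-word form, where getMethod("run") camel-cases the
// name to "CmdRun", finds it via MethodByName, and invokes it with the
// remaining arguments: CmdRun("-i", "-t", "ubuntu", "bash"). This assumes a
// CmdRun method is defined elsewhere on DockerCli.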
func (cli *DockerCli) Subcmd(name, signature, description string) *flag.FlagSet {
flags := flag.NewFlagSet(name, flag.ContinueOnError)
flags.Usage = func() {
options := ""
if flags.FlagCountUndeprecated() > 0 {
options = "[OPTIONS] "
}
fmt.Fprintf(cli.err, "\nUsage: docker %s %s%s\n\n%s\n\n", name, options, signature, description)
flags.PrintDefaults()
os.Exit(2)
}
return flags
}
func (cli *DockerCli) LoadConfigFile() (err error) {
cli.configFile, err = registry.LoadConfig(os.Getenv("HOME"))
if err != nil {
fmt.Fprintf(cli.err, "WARNING: %s\n", err)
}
return err
}
func NewDockerCli(in io.ReadCloser, out, err io.Writer, key libtrust.PrivateKey, proto, addr string, tlsConfig *tls.Config) *DockerCli {
var (
inFd uintptr
outFd uintptr
isTerminalIn = false
isTerminalOut = false
scheme = "http"
)
if tlsConfig != nil {
scheme = "https"
}
if in != nil {
if file, ok := in.(*os.File); ok {
inFd = file.Fd()
isTerminalIn = term.IsTerminal(inFd)
}
}
if out != nil {
if file, ok := out.(*os.File); ok {
outFd = file.Fd()
isTerminalOut = term.IsTerminal(outFd)
}
}
if err == nil {
err = out
}
return &DockerCli{
proto: proto,
addr: addr,
in: in,
out: out,
err: err,
key: key,
inFd: inFd,
outFd: outFd,
isTerminalIn: isTerminalIn,
isTerminalOut: isTerminalOut,
tlsConfig: tlsConfig,
scheme: scheme,
}
}

File diff suppressed because it is too large.


@@ -1,139 +0,0 @@
package client
import (
"crypto/tls"
"fmt"
"io"
"net"
"net/http"
"net/http/httputil"
"os"
"runtime"
"strings"
"github.com/docker/docker/api"
"github.com/docker/docker/dockerversion"
"github.com/docker/docker/pkg/log"
"github.com/docker/docker/pkg/promise"
"github.com/docker/docker/pkg/stdcopy"
"github.com/docker/docker/pkg/term"
)
func (cli *DockerCli) dial() (net.Conn, error) {
if cli.tlsConfig != nil && cli.proto != "unix" {
return tls.Dial(cli.proto, cli.addr, cli.tlsConfig)
}
return net.Dial(cli.proto, cli.addr)
}
func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.ReadCloser, stdout, stderr io.Writer, started chan io.Closer, data interface{}) error {
defer func() {
if started != nil {
close(started)
}
}()
params, err := cli.encodeData(data)
if err != nil {
return err
}
req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), params)
if err != nil {
return err
}
req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION)
req.Header.Set("Content-Type", "plain/text")
req.Host = cli.addr
dial, err := cli.dial()
if err != nil {
if strings.Contains(err.Error(), "connection refused") {
return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
}
return err
}
clientconn := httputil.NewClientConn(dial, nil)
defer clientconn.Close()
// Server hijacks the connection, error 'connection closed' expected
clientconn.Do(req)
rwc, br := clientconn.Hijack()
defer rwc.Close()
if started != nil {
started <- rwc
}
var receiveStdout chan error
var oldState *term.State
if in != nil && setRawTerminal && cli.isTerminalIn && os.Getenv("NORAW") == "" {
oldState, err = term.SetRawTerminal(cli.inFd)
if err != nil {
return err
}
defer term.RestoreTerminal(cli.inFd, oldState)
}
if stdout != nil || stderr != nil {
receiveStdout = promise.Go(func() (err error) {
defer func() {
if in != nil {
if setRawTerminal && cli.isTerminalIn {
term.RestoreTerminal(cli.inFd, oldState)
}
// For some reason this Close call blocks on darwin.
// As the client exits right after, simply discard the close
// until we find a better solution.
if runtime.GOOS != "darwin" {
in.Close()
}
}
}()
// When TTY is ON, use regular copy
if setRawTerminal && stdout != nil {
_, err = io.Copy(stdout, br)
} else {
_, err = stdcopy.StdCopy(stdout, stderr, br)
}
log.Debugf("[hijack] End of stdout")
return err
})
}
sendStdin := promise.Go(func() error {
if in != nil {
io.Copy(rwc, in)
log.Debugf("[hijack] End of stdin")
}
if tcpc, ok := rwc.(*net.TCPConn); ok {
if err := tcpc.CloseWrite(); err != nil {
log.Debugf("Couldn't send EOF: %s", err)
}
} else if unixc, ok := rwc.(*net.UnixConn); ok {
if err := unixc.CloseWrite(); err != nil {
log.Debugf("Couldn't send EOF: %s", err)
}
}
// Discard errors due to pipe interruption
return nil
})
if stdout != nil || stderr != nil {
if err := <-receiveStdout; err != nil {
log.Debugf("Error receiveStdout: %s", err)
return err
}
}
if !cli.isTerminalIn {
if err := <-sendStdin; err != nil {
log.Debugf("Error sendStdin: %s", err)
return err
}
}
return nil
}
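// Usage sketch (illustrative; the exact API path is an assumption): attach to
// a running container's streams, the way `docker attach` does:
//
//	err := cli.hijack("POST",
//		"/containers/"+id+"/attach?stream=1&stdin=1&stdout=1&stderr=1",
//		cli.isTerminalIn, cli.in, cli.out, cli.err, nil, nil)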


@@ -1,288 +0,0 @@
package client
import (
"bytes"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
gosignal "os/signal"
"strconv"
"strings"
"syscall"
"time"
"github.com/docker/docker/api"
"github.com/docker/docker/dockerversion"
"github.com/docker/docker/engine"
"github.com/docker/docker/pkg/log"
"github.com/docker/docker/pkg/stdcopy"
"github.com/docker/docker/pkg/term"
"github.com/docker/docker/registry"
"github.com/docker/docker/utils"
)
var (
ErrConnectionRefused = errors.New("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
)
func (cli *DockerCli) HTTPClient() *http.Client {
tr := &http.Transport{
TLSClientConfig: cli.tlsConfig,
Dial: func(network, addr string) (net.Conn, error) {
// Why 32? See issue 8035
return net.DialTimeout(cli.proto, cli.addr, 32*time.Second)
},
}
if cli.proto == "unix" {
// XXX workaround for net/http Transport which caches connections, but is
// intended for tcp connections, not unix sockets.
tr.DisableKeepAlives = true
// no need in compressing for local communications
tr.DisableCompression = true
}
return &http.Client{Transport: tr}
}
func (cli *DockerCli) encodeData(data interface{}) (*bytes.Buffer, error) {
params := bytes.NewBuffer(nil)
if data != nil {
if env, ok := data.(engine.Env); ok {
if err := env.Encode(params); err != nil {
return nil, err
}
} else {
buf, err := json.Marshal(data)
if err != nil {
return nil, err
}
if _, err := params.Write(buf); err != nil {
return nil, err
}
}
}
return params, nil
}
func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo bool) (io.ReadCloser, int, error) {
params, err := cli.encodeData(data)
if err != nil {
return nil, -1, err
}
req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), params)
if err != nil {
return nil, -1, err
}
if passAuthInfo {
cli.LoadConfigFile()
// Resolve the Auth config relevant for this server
authConfig := cli.configFile.ResolveAuthConfig(registry.IndexServerAddress())
getHeaders := func(authConfig registry.AuthConfig) (map[string][]string, error) {
buf, err := json.Marshal(authConfig)
if err != nil {
return nil, err
}
registryAuthHeader := []string{
base64.URLEncoding.EncodeToString(buf),
}
return map[string][]string{"X-Registry-Auth": registryAuthHeader}, nil
}
if headers, err := getHeaders(authConfig); err == nil && headers != nil {
for k, v := range headers {
req.Header[k] = v
}
}
}
req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION)
req.URL.Host = cli.addr
req.URL.Scheme = cli.scheme
if data != nil {
req.Header.Set("Content-Type", "application/json")
} else if method == "POST" {
req.Header.Set("Content-Type", "plain/text")
}
resp, err := cli.HTTPClient().Do(req)
if err != nil {
if strings.Contains(err.Error(), "connection refused") {
return nil, -1, ErrConnectionRefused
}
return nil, -1, err
}
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, -1, err
}
if len(body) == 0 {
return nil, resp.StatusCode, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(resp.StatusCode), req.URL)
}
return nil, resp.StatusCode, fmt.Errorf("Error response from daemon: %s", bytes.TrimSpace(body))
}
return resp.Body, resp.StatusCode, nil
}
func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, headers map[string][]string) error {
return cli.streamHelper(method, path, true, in, out, nil, headers)
}
func (cli *DockerCli) streamHelper(method, path string, setRawTerminal bool, in io.Reader, stdout, stderr io.Writer, headers map[string][]string) error {
if (method == "POST" || method == "PUT") && in == nil {
in = bytes.NewReader([]byte{})
}
req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), in)
if err != nil {
return err
}
req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION)
req.URL.Host = cli.addr
req.URL.Scheme = cli.scheme
if method == "POST" {
req.Header.Set("Content-Type", "plain/text")
}
if headers != nil {
for k, v := range headers {
req.Header[k] = v
}
}
resp, err := cli.HTTPClient().Do(req)
if err != nil {
if strings.Contains(err.Error(), "connection refused") {
return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
}
return err
}
defer resp.Body.Close()
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
if len(body) == 0 {
return fmt.Errorf("Error :%s", http.StatusText(resp.StatusCode))
}
return fmt.Errorf("Error: %s", bytes.TrimSpace(body))
}
if api.MatchesContentType(resp.Header.Get("Content-Type"), "application/json") {
return utils.DisplayJSONMessagesStream(resp.Body, stdout, cli.outFd, cli.isTerminalOut)
}
if stdout != nil || stderr != nil {
// When TTY is ON, use regular copy
if setRawTerminal {
_, err = io.Copy(stdout, resp.Body)
} else {
_, err = stdcopy.StdCopy(stdout, stderr, resp.Body)
}
log.Debugf("[stream] End of stdout")
return err
}
return nil
}
func (cli *DockerCli) resizeTty(id string, isExec bool) {
height, width := cli.getTtySize()
if height == 0 && width == 0 {
return
}
v := url.Values{}
v.Set("h", strconv.Itoa(height))
v.Set("w", strconv.Itoa(width))
path := ""
if !isExec {
path = "/containers/" + id + "/resize?"
} else {
path = "/exec/" + id + "/resize?"
}
if _, _, err := readBody(cli.call("POST", path+v.Encode(), nil, false)); err != nil {
log.Debugf("Error resize: %s", err)
}
}
func waitForExit(cli *DockerCli, containerId string) (int, error) {
stream, _, err := cli.call("POST", "/containers/"+containerId+"/wait", nil, false)
if err != nil {
return -1, err
}
var out engine.Env
if err := out.Decode(stream); err != nil {
return -1, err
}
return out.GetInt("StatusCode"), nil
}
// getExitCode performs an inspect on the container. It returns
// the running state and the exit code.
func getExitCode(cli *DockerCli, containerId string) (bool, int, error) {
stream, _, err := cli.call("GET", "/containers/"+containerId+"/json", nil, false)
if err != nil {
// If we can't connect, then the daemon probably died.
if err != ErrConnectionRefused {
return false, -1, err
}
return false, -1, nil
}
var result engine.Env
if err := result.Decode(stream); err != nil {
return false, -1, err
}
state := result.GetSubEnv("State")
return state.GetBool("Running"), state.GetInt("ExitCode"), nil
}
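// Usage sketch (illustrative, not in the original source): callers combine
// these helpers roughly as follows, where containerId is assumed to exist:
//
//	status, err := waitForExit(cli, containerId) // blocks until exit
//	if err != nil {
//		// the daemon may have died mid-wait; fall back to a non-blocking inspect
//		running, exitCode, _ := getExitCode(cli, containerId)
//		_, _ = running, exitCode
//	}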
func (cli *DockerCli) monitorTtySize(id string, isExec bool) error {
cli.resizeTty(id, isExec)
sigchan := make(chan os.Signal, 1)
gosignal.Notify(sigchan, syscall.SIGWINCH)
go func() {
for _ = range sigchan {
cli.resizeTty(id, isExec)
}
}()
return nil
}
func (cli *DockerCli) getTtySize() (int, int) {
if !cli.isTerminalOut {
return 0, 0
}
ws, err := term.GetWinsize(cli.outFd)
if err != nil {
log.Debugf("Error getting size: %s", err)
if ws == nil {
return 0, 0
}
}
return int(ws.Height), int(ws.Width)
}
func readBody(stream io.ReadCloser, statusCode int, err error) ([]byte, int, error) {
if stream != nil {
defer stream.Close()
}
if err != nil {
return nil, statusCode, err
}
body, err := ioutil.ReadAll(stream)
if err != nil {
return nil, -1, err
}
return body, statusCode, nil
}


@@ -1,49 +0,0 @@
package api
import (
"fmt"
"mime"
"strings"
"github.com/docker/docker/engine"
"github.com/docker/docker/pkg/log"
"github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/pkg/version"
)
const (
APIVERSION version.Version = "1.15"
DEFAULTHTTPHOST = "127.0.0.1"
DEFAULTUNIXSOCKET = "/var/run/docker.sock"
)
func ValidateHost(val string) (string, error) {
host, err := parsers.ParseHost(DEFAULTHTTPHOST, DEFAULTUNIXSOCKET, val)
if err != nil {
return val, err
}
return host, nil
}
//TODO remove, used on < 1.5 in getContainersJSON
func DisplayablePorts(ports *engine.Table) string {
result := []string{}
ports.SetKey("PublicPort")
ports.Sort()
for _, port := range ports.Data {
if port.Get("IP") == "" {
result = append(result, fmt.Sprintf("%d/%s", port.GetInt("PrivatePort"), port.Get("Type")))
} else {
result = append(result, fmt.Sprintf("%s:%d->%d/%s", port.Get("IP"), port.GetInt("PublicPort"), port.GetInt("PrivatePort"), port.Get("Type")))
}
}
return strings.Join(result, ", ")
}
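// Example (illustrative, not in the original source): a port entry built the
// way the daemon builds them renders as "6379/tcp" when unmapped, or as
// "0.0.0.0:49153->6379/tcp" when published:
//
//	ports := engine.NewTable("PublicPort", 0)
//	p := &engine.Env{}
//	p.Set("Type", "tcp")
//	p.SetInt("PrivatePort", 6379)
//	ports.Add(p)
//	// DisplayablePorts(ports) == "6379/tcp"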
func MatchesContentType(contentType, expectedType string) bool {
mimetype, _, err := mime.ParseMediaType(contentType)
if err != nil {
log.Errorf("Error parsing media type: %s error: %s", contentType, err.Error())
}
return err == nil && mimetype == expectedType
}
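// Usage sketch (mirrors how the client uses this helper): gate JSON decoding
// on the response's Content-Type header:
//
//	if MatchesContentType(resp.Header.Get("Content-Type"), "application/json") {
//		// safe to treat the body as a JSON message stream
//	}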


@@ -1,2 +0,0 @@
Victor Vieux <vieux@docker.com> (@vieux)
Johan Euphrosine <proppy@google.com> (@proppy)

File diff suppressed because it is too large.


@@ -1,555 +0,0 @@
package server
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"net/http/httptest"
"reflect"
"strings"
"testing"
"github.com/docker/docker/api"
"github.com/docker/docker/engine"
"github.com/docker/docker/pkg/version"
)
func TestGetBoolParam(t *testing.T) {
if ret, err := getBoolParam("true"); err != nil || !ret {
t.Fatalf("true -> true, nil | got %t %s", ret, err)
}
if ret, err := getBoolParam("True"); err != nil || !ret {
t.Fatalf("True -> true, nil | got %t %s", ret, err)
}
if ret, err := getBoolParam("1"); err != nil || !ret {
t.Fatalf("1 -> true, nil | got %t %s", ret, err)
}
if ret, err := getBoolParam(""); err != nil || ret {
t.Fatalf("\"\" -> false, nil | got %t %s", ret, err)
}
if ret, err := getBoolParam("false"); err != nil || ret {
t.Fatalf("false -> false, nil | got %t %s", ret, err)
}
if ret, err := getBoolParam("0"); err != nil || ret {
t.Fatalf("0 -> false, nil | got %t %s", ret, err)
}
if ret, err := getBoolParam("faux"); err == nil || ret {
t.Fatalf("faux -> false, err | got %t %s", ret, err)
}
}
func TestHttpError(t *testing.T) {
r := httptest.NewRecorder()
httpError(r, fmt.Errorf("No such method"))
if r.Code != http.StatusNotFound {
t.Fatalf("Expected %d, got %d", http.StatusNotFound, r.Code)
}
httpError(r, fmt.Errorf("This accound hasn't been activated"))
if r.Code != http.StatusForbidden {
t.Fatalf("Expected %d, got %d", http.StatusForbidden, r.Code)
}
httpError(r, fmt.Errorf("Some error"))
if r.Code != http.StatusInternalServerError {
t.Fatalf("Expected %d, got %d", http.StatusInternalServerError, r.Code)
}
}
func TestGetVersion(t *testing.T) {
eng := engine.New()
var called bool
eng.Register("version", func(job *engine.Job) engine.Status {
called = true
v := &engine.Env{}
v.SetJson("Version", "42.1")
v.Set("ApiVersion", "1.1.1.1.1")
v.Set("GoVersion", "2.42")
v.Set("Os", "Linux")
v.Set("Arch", "x86_64")
if _, err := v.WriteTo(job.Stdout); err != nil {
return job.Error(err)
}
return engine.StatusOK
})
r := serveRequest("GET", "/version", nil, eng, t)
if !called {
t.Fatalf("handler was not called")
}
v := readEnv(r.Body, t)
if v.Get("Version") != "42.1" {
t.Fatalf("%#v\n", v)
}
if r.HeaderMap.Get("Content-Type") != "application/json" {
t.Fatalf("%#v\n", r)
}
}
func TestGetInfo(t *testing.T) {
eng := engine.New()
var called bool
eng.Register("info", func(job *engine.Job) engine.Status {
called = true
v := &engine.Env{}
v.SetInt("Containers", 1)
v.SetInt("Images", 42000)
if _, err := v.WriteTo(job.Stdout); err != nil {
return job.Error(err)
}
return engine.StatusOK
})
r := serveRequest("GET", "/info", nil, eng, t)
if !called {
t.Fatalf("handler was not called")
}
v := readEnv(r.Body, t)
if v.GetInt("Images") != 42000 {
t.Fatalf("%#v\n", v)
}
if v.GetInt("Containers") != 1 {
t.Fatalf("%#v\n", v)
}
assertContentType(r, "application/json", t)
}
func TestGetImagesJSON(t *testing.T) {
eng := engine.New()
var called bool
eng.Register("images", func(job *engine.Job) engine.Status {
called = true
v := createEnvFromGetImagesJSONStruct(sampleImage)
if _, err := v.WriteTo(job.Stdout); err != nil {
return job.Error(err)
}
return engine.StatusOK
})
r := serveRequest("GET", "/images/json", nil, eng, t)
if !called {
t.Fatal("handler was not called")
}
assertHttpNotError(r, t)
assertContentType(r, "application/json", t)
var observed getImagesJSONStruct
if err := json.Unmarshal(r.Body.Bytes(), &observed); err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(observed, sampleImage) {
t.Errorf("Expected %#v but got %#v", sampleImage, observed)
}
}
func TestGetImagesJSONFilter(t *testing.T) {
eng := engine.New()
filter := "nothing"
eng.Register("images", func(job *engine.Job) engine.Status {
filter = job.Getenv("filter")
return engine.StatusOK
})
serveRequest("GET", "/images/json?filter=aaaa", nil, eng, t)
if filter != "aaaa" {
t.Errorf("%#v", filter)
}
}
func TestGetImagesJSONFilters(t *testing.T) {
eng := engine.New()
filter := "nothing"
eng.Register("images", func(job *engine.Job) engine.Status {
filter = job.Getenv("filters")
return engine.StatusOK
})
serveRequest("GET", "/images/json?filters=nnnn", nil, eng, t)
if filter != "nnnn" {
t.Errorf("%#v", filter)
}
}
func TestGetImagesJSONAll(t *testing.T) {
eng := engine.New()
allFilter := "-1"
eng.Register("images", func(job *engine.Job) engine.Status {
allFilter = job.Getenv("all")
return engine.StatusOK
})
serveRequest("GET", "/images/json?all=1", nil, eng, t)
if allFilter != "1" {
t.Errorf("%#v", allFilter)
}
}
func TestGetImagesJSONLegacyFormat(t *testing.T) {
eng := engine.New()
var called bool
eng.Register("images", func(job *engine.Job) engine.Status {
called = true
outsLegacy := engine.NewTable("Created", 0)
outsLegacy.Add(createEnvFromGetImagesJSONStruct(sampleImage))
if _, err := outsLegacy.WriteListTo(job.Stdout); err != nil {
return job.Error(err)
}
return engine.StatusOK
})
r := serveRequestUsingVersion("GET", "/images/json", "1.6", nil, eng, t)
if !called {
t.Fatal("handler was not called")
}
assertHttpNotError(r, t)
assertContentType(r, "application/json", t)
images := engine.NewTable("Created", 0)
if _, err := images.ReadListFrom(r.Body.Bytes()); err != nil {
t.Fatal(err)
}
if images.Len() != 1 {
t.Fatalf("Expected 1 image, %d found", images.Len())
}
image := images.Data[0]
if image.Get("Tag") != "test-tag" {
t.Errorf("Expected tag 'test-tag', found '%s'", image.Get("Tag"))
}
if image.Get("Repository") != "test-name" {
t.Errorf("Expected repository 'test-name', found '%s'", image.Get("Repository"))
}
}
func TestGetContainersByName(t *testing.T) {
eng := engine.New()
name := "container_name"
var called bool
eng.Register("container_inspect", func(job *engine.Job) engine.Status {
called = true
if job.Args[0] != name {
t.Errorf("name != '%s': %#v", name, job.Args[0])
}
if api.APIVERSION.LessThan("1.12") && !job.GetenvBool("dirty") {
t.Errorf("dirty env variable not set")
} else if api.APIVERSION.GreaterThanOrEqualTo("1.12") && job.GetenvBool("dirty") {
t.Errorf("dirty env variable set when it shouldn't")
}
v := &engine.Env{}
v.SetBool("dirty", true)
if _, err := v.WriteTo(job.Stdout); err != nil {
return job.Error(err)
}
return engine.StatusOK
})
r := serveRequest("GET", "/containers/"+name+"/json", nil, eng, t)
if !called {
t.Fatal("handler was not called")
}
assertContentType(r, "application/json", t)
var stdoutJson interface{}
if err := json.Unmarshal(r.Body.Bytes(), &stdoutJson); err != nil {
t.Fatalf("%#v", err)
}
if stdoutJson.(map[string]interface{})["dirty"].(float64) != 1 {
t.Fatalf("%#v", stdoutJson)
}
}
func TestGetEvents(t *testing.T) {
eng := engine.New()
var called bool
eng.Register("events", func(job *engine.Job) engine.Status {
called = true
since := job.Getenv("since")
if since != "1" {
t.Fatalf("'since' should be 1, found %#v instead", since)
}
until := job.Getenv("until")
if until != "0" {
t.Fatalf("'until' should be 0, found %#v instead", until)
}
v := &engine.Env{}
v.Set("since", since)
v.Set("until", until)
if _, err := v.WriteTo(job.Stdout); err != nil {
return job.Error(err)
}
return engine.StatusOK
})
r := serveRequest("GET", "/events?since=1&until=0", nil, eng, t)
if !called {
t.Fatal("handler was not called")
}
assertContentType(r, "application/json", t)
var stdout_json struct {
Since int
Until int
}
if err := json.Unmarshal(r.Body.Bytes(), &stdout_json); err != nil {
t.Fatal(err)
}
if stdout_json.Since != 1 {
t.Errorf("since != 1: %#v", stdout_json.Since)
}
if stdout_json.Until != 0 {
t.Errorf("until != 0: %#v", stdout_json.Until)
}
}
func TestLogs(t *testing.T) {
eng := engine.New()
var inspect bool
var logs bool
eng.Register("container_inspect", func(job *engine.Job) engine.Status {
inspect = true
if len(job.Args) == 0 {
t.Fatal("Job arguments is empty")
}
if job.Args[0] != "test" {
t.Fatalf("Container name %s, must be test", job.Args[0])
}
return engine.StatusOK
})
expected := "logs"
eng.Register("logs", func(job *engine.Job) engine.Status {
logs = true
if len(job.Args) == 0 {
t.Fatal("Job arguments is empty")
}
if job.Args[0] != "test" {
t.Fatalf("Container name %s, must be test", job.Args[0])
}
follow := job.Getenv("follow")
if follow != "1" {
t.Fatalf("follow: %s, must be 1", follow)
}
stdout := job.Getenv("stdout")
if stdout != "1" {
t.Fatalf("stdout %s, must be 1", stdout)
}
stderr := job.Getenv("stderr")
if stderr != "" {
t.Fatalf("stderr %s, must be empty", stderr)
}
timestamps := job.Getenv("timestamps")
if timestamps != "1" {
t.Fatalf("timestamps %s, must be 1", timestamps)
}
job.Stdout.Write([]byte(expected))
return engine.StatusOK
})
r := serveRequest("GET", "/containers/test/logs?follow=1&stdout=1&timestamps=1", nil, eng, t)
if r.Code != http.StatusOK {
t.Fatalf("Got status %d, expected %d", r.Code, http.StatusOK)
}
if !inspect {
t.Fatal("container_inspect job was not called")
}
if !logs {
t.Fatal("logs job was not called")
}
res := r.Body.String()
if res != expected {
t.Fatalf("Output %s, expected %s", res, expected)
}
}
func TestLogsNoStreams(t *testing.T) {
eng := engine.New()
var inspect bool
var logs bool
eng.Register("container_inspect", func(job *engine.Job) engine.Status {
inspect = true
if len(job.Args) == 0 {
t.Fatal("Job arguments is empty")
}
if job.Args[0] != "test" {
t.Fatalf("Container name %s, must be test", job.Args[0])
}
return engine.StatusOK
})
eng.Register("logs", func(job *engine.Job) engine.Status {
logs = true
return engine.StatusOK
})
r := serveRequest("GET", "/containers/test/logs", nil, eng, t)
if r.Code != http.StatusBadRequest {
t.Fatalf("Got status %d, expected %d", r.Code, http.StatusBadRequest)
}
if inspect {
t.Fatal("container_inspect job was called, but it shouldn't")
}
if logs {
t.Fatal("logs job was called, but it shouldn't")
}
res := strings.TrimSpace(r.Body.String())
expected := "Bad parameters: you must choose at least one stream"
if !strings.Contains(res, expected) {
t.Fatalf("Output %s, expected %s in it", res, expected)
}
}
func TestGetImagesHistory(t *testing.T) {
eng := engine.New()
imageName := "docker-test-image"
var called bool
eng.Register("history", func(job *engine.Job) engine.Status {
called = true
if len(job.Args) == 0 {
t.Fatal("Job arguments is empty")
}
if job.Args[0] != imageName {
t.Fatalf("name != '%s': %#v", imageName, job.Args[0])
}
v := &engine.Env{}
if _, err := v.WriteTo(job.Stdout); err != nil {
return job.Error(err)
}
return engine.StatusOK
})
r := serveRequest("GET", "/images/"+imageName+"/history", nil, eng, t)
if !called {
t.Fatalf("handler was not called")
}
if r.Code != http.StatusOK {
t.Fatalf("Got status %d, expected %d", r.Code, http.StatusOK)
}
if r.HeaderMap.Get("Content-Type") != "application/json" {
t.Fatalf("%#v\n", r)
}
}
func TestGetImagesByName(t *testing.T) {
eng := engine.New()
name := "image_name"
var called bool
eng.Register("image_inspect", func(job *engine.Job) engine.Status {
called = true
if job.Args[0] != name {
t.Fatalf("name != '%s': %#v", name, job.Args[0])
}
if api.APIVERSION.LessThan("1.12") && !job.GetenvBool("dirty") {
t.Fatal("dirty env variable not set")
} else if api.APIVERSION.GreaterThanOrEqualTo("1.12") && job.GetenvBool("dirty") {
t.Fatal("dirty env variable set when it shouldn't")
}
v := &engine.Env{}
v.SetBool("dirty", true)
if _, err := v.WriteTo(job.Stdout); err != nil {
return job.Error(err)
}
return engine.StatusOK
})
r := serveRequest("GET", "/images/"+name+"/json", nil, eng, t)
if !called {
t.Fatal("handler was not called")
}
if r.HeaderMap.Get("Content-Type") != "application/json" {
t.Fatalf("%#v\n", r)
}
var stdoutJson interface{}
if err := json.Unmarshal(r.Body.Bytes(), &stdoutJson); err != nil {
t.Fatalf("%#v", err)
}
if stdoutJson.(map[string]interface{})["dirty"].(float64) != 1 {
t.Fatalf("%#v", stdoutJson)
}
}
func TestDeleteContainers(t *testing.T) {
eng := engine.New()
name := "foo"
var called bool
eng.Register("rm", func(job *engine.Job) engine.Status {
called = true
if len(job.Args) == 0 {
t.Fatalf("Job arguments is empty")
}
if job.Args[0] != name {
t.Fatalf("name != '%s': %#v", name, job.Args[0])
}
return engine.StatusOK
})
r := serveRequest("DELETE", "/containers/"+name, nil, eng, t)
if !called {
t.Fatalf("handler was not called")
}
if r.Code != http.StatusNoContent {
t.Fatalf("Got status %d, expected %d", r.Code, http.StatusNoContent)
}
}
func serveRequest(method, target string, body io.Reader, eng *engine.Engine, t *testing.T) *httptest.ResponseRecorder {
return serveRequestUsingVersion(method, target, api.APIVERSION, body, eng, t)
}
func serveRequestUsingVersion(method, target string, version version.Version, body io.Reader, eng *engine.Engine, t *testing.T) *httptest.ResponseRecorder {
r := httptest.NewRecorder()
req, err := http.NewRequest(method, target, body)
if err != nil {
t.Fatal(err)
}
if err := ServeRequest(eng, version, r, req); err != nil {
t.Fatal(err)
}
return r
}
func readEnv(src io.Reader, t *testing.T) *engine.Env {
out := engine.NewOutput()
v, err := out.AddEnv()
if err != nil {
t.Fatal(err)
}
if _, err := io.Copy(out, src); err != nil {
t.Fatal(err)
}
out.Close()
return v
}
func toJson(data interface{}, t *testing.T) io.Reader {
var buf bytes.Buffer
if err := json.NewEncoder(&buf).Encode(data); err != nil {
t.Fatal(err)
}
return &buf
}
func assertContentType(recorder *httptest.ResponseRecorder, content_type string, t *testing.T) {
if recorder.HeaderMap.Get("Content-Type") != content_type {
t.Fatalf("%#v\n", recorder)
}
}
// XXX: Duplicated from integration/utils_test.go, but maybe that's OK as that
// should die as soon as we've converted all the integration tests?
// assertHttpNotError expects the given response to not have an error.
// Otherwise it causes the test to fail.
func assertHttpNotError(r *httptest.ResponseRecorder, t *testing.T) {
// Non-error http status are [200, 400)
if r.Code < http.StatusOK || r.Code >= http.StatusBadRequest {
t.Fatal(fmt.Errorf("Unexpected http error: %v", r.Code))
}
}
func createEnvFromGetImagesJSONStruct(data getImagesJSONStruct) *engine.Env {
v := &engine.Env{}
v.SetList("RepoTags", data.RepoTags)
v.Set("Id", data.Id)
v.SetInt64("Created", data.Created)
v.SetInt64("Size", data.Size)
v.SetInt64("VirtualSize", data.VirtualSize)
return v
}
type getImagesJSONStruct struct {
RepoTags []string
Id string
Created int64
Size int64
VirtualSize int64
}
var sampleImage getImagesJSONStruct = getImagesJSONStruct{
RepoTags: []string{"test-name:test-tag"},
Id: "ID",
Created: 999,
Size: 777,
VirtualSize: 666,
}

archive.go

@@ -0,0 +1,124 @@
package docker
import (
"errors"
"io"
"io/ioutil"
"os"
"os/exec"
)
type Archive io.Reader
type Compression uint32
const (
Uncompressed Compression = iota
Bzip2
Gzip
Xz
)
func (compression *Compression) Flag() string {
switch *compression {
case Bzip2:
return "j"
case Gzip:
return "z"
case Xz:
return "J"
}
return ""
}
func Tar(path string, compression Compression) (io.Reader, error) {
cmd := exec.Command("bsdtar", "-f", "-", "-C", path, "-c"+compression.Flag(), ".")
return CmdStream(cmd)
}
func Untar(archive io.Reader, path string) error {
cmd := exec.Command("bsdtar", "-f", "-", "-C", path, "-x")
cmd.Stdin = archive
output, err := cmd.CombinedOutput()
if err != nil {
return errors.New(err.Error() + ": " + string(output))
}
return nil
}
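// Round-trip sketch (illustrative, not in the original source); both paths
// are assumed to be existing directories:
//
//	archive, err := Tar("/tmp/src", Gzip) // runs: bsdtar -f - -C /tmp/src -cz .
//	if err == nil {
//		err = Untar(archive, "/tmp/dst") // runs: bsdtar -f - -C /tmp/dst -x
//	}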
// CmdStream executes a command, and returns its stdout as a stream.
// If the command fails to run or doesn't complete successfully, an error
// will be returned, including anything written on stderr.
func CmdStream(cmd *exec.Cmd) (io.Reader, error) {
stdout, err := cmd.StdoutPipe()
if err != nil {
return nil, err
}
stderr, err := cmd.StderrPipe()
if err != nil {
return nil, err
}
pipeR, pipeW := io.Pipe()
errChan := make(chan []byte)
// Collect stderr, we will use it in case of an error
go func() {
errText, e := ioutil.ReadAll(stderr)
if e != nil {
errText = []byte("(...couldn't fetch stderr: " + e.Error() + ")")
}
errChan <- errText
}()
// Copy stdout to the returned pipe
go func() {
_, err := io.Copy(pipeW, stdout)
if err != nil {
pipeW.CloseWithError(err)
}
errText := <-errChan
if err := cmd.Wait(); err != nil {
pipeW.CloseWithError(errors.New(err.Error() + ": " + string(errText)))
} else {
pipeW.Close()
}
}()
// Run the command and return the pipe
if err := cmd.Start(); err != nil {
return nil, err
}
return pipeR, nil
}
// NewTempArchive reads the content of src into a temporary file, and returns the contents
// of that file as an archive. The archive can only be read once - as soon as reading completes,
// the file will be deleted.
func NewTempArchive(src Archive, dir string) (*TempArchive, error) {
f, err := ioutil.TempFile(dir, "")
if err != nil {
return nil, err
}
if _, err := io.Copy(f, src); err != nil {
return nil, err
}
if _, err := f.Seek(0, 0); err != nil {
return nil, err
}
st, err := f.Stat()
if err != nil {
return nil, err
}
size := st.Size()
return &TempArchive{f, size}, nil
}
type TempArchive struct {
*os.File
Size int64 // Pre-computed from Stat().Size() as a convenience
}
func (archive *TempArchive) Read(data []byte) (int, error) {
n, err := archive.File.Read(data)
if err != nil {
os.Remove(archive.File.Name())
}
return n, err
}
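Since Tar and Untar are symmetric wrappers around bsdtar, they compose directly into a tree copy. A minimal sketch, assuming it lives in this package next to the functions above; the helper name is hypothetical:

func copyTree(src, dst string) error {
	// Stream src as an uncompressed tarball...
	archive, err := Tar(src, Uncompressed)
	if err != nil {
		return err
	}
	// ...and unpack it under dst. Untar drains the stream and surfaces
	// anything bsdtar wrote to stderr as part of the error.
	return Untar(archive, dst)
}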

77
archive_test.go Normal file

@@ -0,0 +1,77 @@
package docker
import (
"io"
"io/ioutil"
"os"
"os/exec"
"testing"
"time"
)
func TestCmdStreamLargeStderr(t *testing.T) {
cmd := exec.Command("/bin/sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello")
out, err := CmdStream(cmd)
if err != nil {
t.Fatalf("Failed to start command: " + err.Error())
}
errCh := make(chan error)
go func() {
_, err := io.Copy(ioutil.Discard, out)
errCh <- err
}()
select {
case err := <-errCh:
if err != nil {
t.Fatalf("Command should not have failed (err=%s...)", err.Error()[:100])
}
case <-time.After(5 * time.Second):
t.Fatalf("Command did not complete in 5 seconds; probable deadlock")
}
}
func TestCmdStreamBad(t *testing.T) {
badCmd := exec.Command("/bin/sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1")
out, err := CmdStream(badCmd)
if err != nil {
t.Fatalf("Failed to start command: " + err.Error())
}
if output, err := ioutil.ReadAll(out); err == nil {
t.Fatalf("Command should have failed")
} else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" {
t.Fatalf("Wrong error value (%s)", err.Error())
} else if s := string(output); s != "hello\n" {
t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output)
}
}
func TestCmdStreamGood(t *testing.T) {
cmd := exec.Command("/bin/sh", "-c", "echo hello; exit 0")
out, err := CmdStream(cmd)
if err != nil {
t.Fatal(err)
}
if output, err := ioutil.ReadAll(out); err != nil {
t.Fatalf("Command should not have failed (err=%s)", err)
} else if s := string(output); s != "hello\n" {
t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output)
}
}
func TestTarUntar(t *testing.T) {
archive, err := Tar(".", Uncompressed)
if err != nil {
t.Fatal(err)
}
tmp, err := ioutil.TempDir("", "docker-test-untar")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmp)
if err := Untar(archive, tmp); err != nil {
t.Fatal(err)
}
if _, err := os.Stat(tmp); err != nil {
t.Fatalf("Error stating %s: %s", tmp, err.Error())
}
}

168
auth/auth.go Normal file

@@ -0,0 +1,168 @@
package auth
import (
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"path"
"strings"
)
// Where we store the config file
const CONFIGFILE = ".dockercfg"
// The registry server we want to log in against
const REGISTRY_SERVER = "https://registry.docker.io"
type AuthConfig struct {
Username string `json:"username"`
Password string `json:"password"`
Email string `json:"email"`
rootPath string `json:"-"`
}
func NewAuthConfig(username, password, email, rootPath string) *AuthConfig {
return &AuthConfig{
Username: username,
Password: password,
Email: email,
rootPath: rootPath,
}
}
// create a base64 encoded auth string to store in config
func EncodeAuth(authConfig *AuthConfig) string {
authStr := authConfig.Username + ":" + authConfig.Password
msg := []byte(authStr)
encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg)))
base64.StdEncoding.Encode(encoded, msg)
return string(encoded)
}
// decode the auth string
func DecodeAuth(authStr string) (*AuthConfig, error) {
decLen := base64.StdEncoding.DecodedLen(len(authStr))
decoded := make([]byte, decLen)
authByte := []byte(authStr)
n, err := base64.StdEncoding.Decode(decoded, authByte)
if err != nil {
return nil, err
}
if n > decLen {
return nil, fmt.Errorf("Something went wrong decoding auth config")
}
arr := strings.Split(string(decoded), ":")
if len(arr) != 2 {
return nil, fmt.Errorf("Invalid auth configuration file")
}
password := strings.Trim(arr[1], "\x00")
return &AuthConfig{Username: arr[0], Password: password}, nil
}
// load up the auth config information and return values
// FIXME: use the internal golang config parser
func LoadConfig(rootPath string) (*AuthConfig, error) {
confFile := path.Join(rootPath, CONFIGFILE)
if _, err := os.Stat(confFile); err != nil {
return &AuthConfig{}, fmt.Errorf("The Auth config file is missing")
}
b, err := ioutil.ReadFile(confFile)
if err != nil {
return nil, err
}
arr := strings.Split(string(b), "\n")
origAuth := strings.Split(arr[0], " = ")
origEmail := strings.Split(arr[1], " = ")
authConfig, err := DecodeAuth(origAuth[1])
if err != nil {
return nil, err
}
authConfig.Email = origEmail[1]
authConfig.rootPath = rootPath
return authConfig, nil
}
// save the auth config
func saveConfig(rootPath, authStr string, email string) error {
lines := "auth = " + authStr + "\n" + "email = " + email + "\n"
b := []byte(lines)
err := ioutil.WriteFile(path.Join(rootPath, CONFIGFILE), b, 0600)
if err != nil {
return err
}
return nil
}
// try to register/login to the registry server
func Login(authConfig *AuthConfig) (string, error) {
storeConfig := false
reqStatusCode := 0
var status string
var errMsg string
var reqBody []byte
jsonBody, err := json.Marshal(authConfig)
if err != nil {
errMsg = fmt.Sprintf("Config Error: %s", err)
return "", errors.New(errMsg)
}
// using `bytes.NewReader(jsonBody)` here causes the server to respond with a 411 status.
b := strings.NewReader(string(jsonBody))
// http.Post returns a response, so name it accordingly
resp1, err := http.Post(REGISTRY_SERVER+"/v1/users", "application/json; charset=utf-8", b)
if err != nil {
errMsg = fmt.Sprintf("Server Error: %s", err)
return "", errors.New(errMsg)
}
reqStatusCode = resp1.StatusCode
defer resp1.Body.Close()
reqBody, err = ioutil.ReadAll(resp1.Body)
if err != nil {
errMsg = fmt.Sprintf("Server Error: [%#v] %s", reqStatusCode, err)
return "", errors.New(errMsg)
}
if reqStatusCode == 201 {
status = "Account Created\n"
storeConfig = true
} else if reqStatusCode == 400 {
// FIXME: This should be 'exists', not 'exist'. Need to change on the server first.
if string(reqBody) == "Username or email already exist" {
client := &http.Client{}
req, err := http.NewRequest("GET", REGISTRY_SERVER+"/v1/users", nil)
if err != nil {
return "", err
}
req.SetBasicAuth(authConfig.Username, authConfig.Password)
resp, err := client.Do(req)
if err != nil {
return "", err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", err
}
if resp.StatusCode == 200 {
status = "Login Succeeded\n"
storeConfig = true
} else {
status = fmt.Sprintf("Login: %s", body)
return "", errors.New(status)
}
} else {
status = fmt.Sprintf("Registration: %s", reqBody)
return "", errors.New(status)
}
} else {
status = fmt.Sprintf("[%s] : %s", reqStatusCode, reqBody)
return "", errors.New(status)
}
if storeConfig {
authStr := EncodeAuth(authConfig)
if err := saveConfig(authConfig.rootPath, authStr, authConfig.Email); err != nil {
return "", err
}
}
return status, nil
}
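For reference, the round trip through saveConfig and LoadConfig is easy to exercise. A minimal sketch, assuming it sits inside package auth (saveConfig is unexported) with os and fmt imported; the root path is hypothetical:

func exampleRoundTrip() error {
	root := "/tmp/docker-auth-example"
	if err := os.MkdirAll(root, 0700); err != nil {
		return err
	}
	cfg := NewAuthConfig("ken", "test", "test@example.com", root)
	// Writes "auth = a2VuOnRlc3Q=" and "email = test@example.com" to root/.dockercfg.
	if err := saveConfig(root, EncodeAuth(cfg), cfg.Email); err != nil {
		return err
	}
	loaded, err := LoadConfig(root)
	if err != nil {
		return err
	}
	fmt.Println(loaded.Username, loaded.Email) // ken test@example.com
	return nil
}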

23
auth/auth_test.go Normal file

@@ -0,0 +1,23 @@
package auth
import (
"testing"
)
func TestEncodeAuth(t *testing.T) {
newAuthConfig := &AuthConfig{Username: "ken", Password: "test", Email: "test@example.com"}
authStr := EncodeAuth(newAuthConfig)
decAuthConfig, err := DecodeAuth(authStr)
if err != nil {
t.Fatal(err)
}
if newAuthConfig.Username != decAuthConfig.Username {
t.Fatal("Encode Username doesn't match decoded Username")
}
if newAuthConfig.Password != decAuthConfig.Password {
t.Fatal("Encode Password doesn't match decoded Password")
}
if authStr != "a2VuOnRlc3Q=" {
t.Fatal("AuthString encoding isn't correct.")
}
}

20
buildbot/README.rst Normal file

@@ -0,0 +1,20 @@
Buildbot
========
Buildbot is a continuous integration system designed to automate the
build/test cycle. By automatically rebuilding and testing the tree each time
something has changed, build problems are pinpointed quickly, before other
developers are inconvenienced by the failure.
Running 'make hack' from the docker root directory spawns a virtual machine
in the background running a buildbot instance, and adds a git post-commit
hook that automatically runs the docker tests for you.
You can check your buildbot instance at http://192.168.33.21:8010/waterfall
Buildbot dependencies
---------------------
The vagrant and virtualbox packages, and the python requests package.

28
buildbot/Vagrantfile vendored Normal file

@@ -0,0 +1,28 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
$BUILDBOT_IP = '192.168.33.21'
def v10(config)
config.vm.box = "quantal64_3.5.0-25"
config.vm.box_url = "http://get.docker.io/vbox/ubuntu/12.10/quantal64_3.5.0-25.box"
config.vm.share_folder 'v-data', '/data/docker', File.dirname(__FILE__) + '/..'
config.vm.network :hostonly, $BUILDBOT_IP
# Ensure puppet is installed on the instance
config.vm.provision :shell, :inline => 'apt-get -qq update; apt-get install -y puppet'
config.vm.provision :puppet do |puppet|
puppet.manifests_path = '.'
puppet.manifest_file = 'buildbot.pp'
puppet.options = ['--templatedir','.']
end
end
Vagrant::VERSION < '1.1.0' and Vagrant::Config.run do |config|
v10(config)
end
Vagrant::VERSION >= '1.1.0' and Vagrant.configure('1') do |config|
v10(config)
end

buildbot/buildbot-cfg/buildbot-cfg.sh Normal file

@@ -0,0 +1,43 @@
#!/bin/bash
# Auto setup of buildbot configuration. Package installation is being done
# on buildbot.pp
# Dependencies: buildbot, buildbot-slave, supervisor
SLAVE_NAME='buildworker'
SLAVE_SOCKET='localhost:9989'
BUILDBOT_PWD='pass-docker'
USER='vagrant'
ROOT_PATH='/data/buildbot'
DOCKER_PATH='/data/docker'
BUILDBOT_CFG="$DOCKER_PATH/buildbot/buildbot-cfg"
IP=$(grep BUILDBOT_IP /data/docker/buildbot/Vagrantfile | awk -F "'" '{ print $2; }')
function run { su $USER -c "$1"; }
export PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin
# Exit if buildbot has already been installed
[ -d "$ROOT_PATH" ] && exit 0
# Setup buildbot
run "mkdir -p ${ROOT_PATH}"
cd ${ROOT_PATH}
run "buildbot create-master master"
run "cp $BUILDBOT_CFG/master.cfg master"
run "sed -i 's/localhost/$IP/' master/master.cfg"
run "buildslave create-slave slave $SLAVE_SOCKET $SLAVE_NAME $BUILDBOT_PWD"
# Allow buildbot subprocesses (docker tests) to properly run in containers,
# in particular with docker -u
run "sed -i 's/^umask = None/umask = 000/' ${ROOT_PATH}/slave/buildbot.tac"
# Setup supervisor
cp $BUILDBOT_CFG/buildbot.conf /etc/supervisor/conf.d/buildbot.conf
sed -i "s/^chmod=0700.*0700./chmod=0770\nchown=root:$USER/" /etc/supervisor/supervisord.conf
kill -HUP `pgrep -f "/usr/bin/python /usr/bin/supervisord"`
# Add git hook
cp $BUILDBOT_CFG/post-commit $DOCKER_PATH/.git/hooks
sed -i "s/localhost/$IP/" $DOCKER_PATH/.git/hooks/post-commit

buildbot/buildbot-cfg/buildbot.conf Normal file

@@ -0,0 +1,18 @@
[program:buildmaster]
command=su vagrant -c "buildbot start master"
directory=/data/buildbot
chown=root:root
redirect_stderr=true
stdout_logfile=/var/log/supervisor/buildbot-master.log
stderr_logfile=/var/log/supervisor/buildbot-master.log
[program:buildworker]
command=buildslave start slave
directory=/data/buildbot
chown=root:root
redirect_stderr=true
stdout_logfile=/var/log/supervisor/buildbot-slave.log
stderr_logfile=/var/log/supervisor/buildbot-slave.log
[group:buildbot]
programs=buildmaster,buildworker

buildbot/buildbot-cfg/master.cfg Normal file

@@ -0,0 +1,46 @@
import os
from buildbot.buildslave import BuildSlave
from buildbot.schedulers.forcesched import ForceScheduler
from buildbot.config import BuilderConfig
from buildbot.process.factory import BuildFactory
from buildbot.steps.shell import ShellCommand
from buildbot.status import html
from buildbot.status.web import authz, auth
PORT_WEB = 8010 # Buildbot webserver port
PORT_MASTER = 9989  # Port where the buildbot master listens for buildworkers
TEST_USER = 'buildbot' # Credential to authenticate build triggers
TEST_PWD = 'docker' # Credential to authenticate build triggers
BUILDER_NAME = 'docker'
BUILDPASSWORD = 'pass-docker' # Credential to authenticate buildworkers
DOCKER_PATH = '/data/docker'
c = BuildmasterConfig = {}
c['title'] = "Docker"
c['titleURL'] = "waterfall"
c['buildbotURL'] = "http://localhost:{0}/".format(PORT_WEB)
c['db'] = {'db_url':"sqlite:///state.sqlite"}
c['slaves'] = [BuildSlave('buildworker', BUILDPASSWORD)]
c['slavePortnum'] = PORT_MASTER
c['schedulers'] = [ForceScheduler(name='trigger',builderNames=[BUILDER_NAME])]
# Docker test command
test_cmd = """(
cd {0}/..; rm -rf docker-tmp; git clone docker docker-tmp;
cd docker-tmp; make test; exit_status=$?;
cd ..; rm -rf docker-tmp; exit $exit_status)""".format(DOCKER_PATH)
# Builder
factory = BuildFactory()
factory.addStep(ShellCommand(description='Docker',logEnviron=False,
usePTY=True,command=test_cmd))
c['builders'] = [BuilderConfig(name=BUILDER_NAME,slavenames=['buildworker'],
factory=factory)]
# Status
authz_cfg=authz.Authz(auth=auth.BasicAuth([(TEST_USER,TEST_PWD)]),
forceBuild='auth')
c['status'] = [html.WebStatus(http_port=PORT_WEB, authz=authz_cfg)]

buildbot/buildbot-cfg/post-commit Normal file

@@ -0,0 +1,21 @@
#!/usr/bin/env python
'''Trigger buildbot docker test build
post-commit git hook designed to automatically trigger buildbot on
the provided vagrant docker VM.'''
import requests
USERNAME = 'buildbot'
PASSWORD = 'docker'
BASE_URL = 'http://localhost:8010'
path = lambda s: BASE_URL + '/' + s
try:
session = requests.session()
session.post(path('login'),data={'username':USERNAME,'passwd':PASSWORD})
session.post(path('builders/docker/force'),
data={'forcescheduler':'trigger','reason':'Test commit'})
except:
pass

32
buildbot/buildbot.pp Normal file

@@ -0,0 +1,32 @@
node default {
$USER = 'vagrant'
$ROOT_PATH = '/data/buildbot'
$DOCKER_PATH = '/data/docker'
exec {'apt_update': command => '/usr/bin/apt-get update' }
Package { require => Exec['apt_update'] }
group {'puppet': ensure => 'present'}
# Install dependencies
Package { ensure => 'installed' }
package { ['python-dev','python-pip','supervisor','lxc','bsdtar','git','golang']: }
file{[ '/data' ]:
owner => $USER, group => $USER, ensure => 'directory' }
file {'/var/tmp/requirements.txt':
content => template('requirements.txt') }
exec {'requirements':
require => [ Package['python-dev'], Package['python-pip'],
File['/var/tmp/requirements.txt'] ],
cwd => '/var/tmp',
command => "/bin/sh -c '(/usr/bin/pip install -r requirements.txt;
rm /var/tmp/requirements.txt)'" }
exec {'buildbot-cfg-sh':
require => [ Package['supervisor'], Exec['requirements']],
path => '/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin',
cwd => '/data',
command => "$DOCKER_PATH/buildbot/buildbot-cfg/buildbot-cfg.sh" }
}

buildbot/requirements.txt Normal file

@@ -0,0 +1,6 @@
sqlalchemy<=0.7.9
sqlalchemy-migrate>=0.7.2
buildbot==0.8.7p1
buildbot_slave==0.8.7p1
nose==1.2.1
requests==1.1.0

169
builder.go Normal file

@@ -0,0 +1,169 @@
package docker
import (
"bufio"
"fmt"
"io"
"strings"
)
type Builder struct {
runtime *Runtime
}
func NewBuilder(runtime *Runtime) *Builder {
return &Builder{
runtime: runtime,
}
}
func (builder *Builder) Run(image *Image, cmd ...string) (*Container, error) {
// FIXME: pass a NopWriter instead of nil
config, err := ParseRun(append([]string{"-d", image.Id}, cmd...), nil, builder.runtime.capabilities)
if err != nil {
return nil, err
}
if config.Image == "" {
return nil, fmt.Errorf("Image not specified")
}
if len(config.Cmd) == 0 {
return nil, fmt.Errorf("Command not specified")
}
if config.Tty {
return nil, fmt.Errorf("The tty mode is not supported within the builder")
}
// Create new container
container, err := builder.runtime.Create(config)
if err != nil {
return nil, err
}
if err := container.Start(); err != nil {
return nil, err
}
return container, nil
}
func (builder *Builder) Commit(container *Container, repository, tag, comment, author string) (*Image, error) {
return builder.runtime.Commit(container.Id, repository, tag, comment, author)
}
func (builder *Builder) clearTmp(containers, images map[string]struct{}) {
for c := range containers {
tmp := builder.runtime.Get(c)
builder.runtime.Destroy(tmp)
Debugf("Removing container %s", c)
}
for i := range images {
builder.runtime.graph.Delete(i)
Debugf("Removing image %s", i)
}
}
func (builder *Builder) Build(dockerfile io.Reader, stdout io.Writer) error {
var (
image, base *Image
tmpContainers = make(map[string]struct{})
tmpImages = make(map[string]struct{})
)
defer builder.clearTmp(tmpContainers, tmpImages)
file := bufio.NewReader(dockerfile)
for {
line, err := file.ReadString('\n')
if err != nil {
if err == io.EOF {
break
}
return err
}
line = strings.TrimSpace(line)
// Skip comments and empty line
if len(line) == 0 || line[0] == '#' {
continue
}
tmp := strings.SplitN(line, " ", 2)
if len(tmp) != 2 {
return fmt.Errorf("Invalid Dockerfile format")
}
switch tmp[0] {
case "from":
fmt.Fprintf(stdout, "FROM %s\n", tmp[1])
image, err = builder.runtime.repositories.LookupImage(tmp[1])
if err != nil {
return err
}
case "run":
fmt.Fprintf(stdout, "RUN %s\n", tmp[1])
if image == nil {
return fmt.Errorf("Please provide a source image with `from` prior to run")
}
// Create the container and start it
c, err := builder.Run(image, "/bin/sh", "-c", tmp[1])
if err != nil {
return err
}
tmpContainers[c.Id] = struct{}{}
// Wait for it to finish
if result := c.Wait(); result != 0 {
return fmt.Errorf("!!! '%s' return non-zero exit code '%d'. Aborting.", tmp[1], result)
}
// Commit the container
base, err = builder.Commit(c, "", "", "", "")
if err != nil {
return err
}
tmpImages[base.Id] = struct{}{}
fmt.Fprintf(stdout, "===> %s\n", base.ShortId())
case "copy":
if image == nil {
return fmt.Errorf("Please provide a source image with `from` prior to copy")
}
tmp2 := strings.SplitN(tmp[1], " ", 2)
if len(tmp) != 2 {
return fmt.Errorf("Invalid COPY format")
}
fmt.Fprintf(stdout, "COPY %s to %s in %s\n", tmp2[0], tmp2[1], base.ShortId())
file, err := Download(tmp2[0], stdout)
if err != nil {
return err
}
defer file.Body.Close()
c, err := builder.Run(base, "echo", "insert", tmp2[0], tmp2[1])
if err != nil {
return err
}
if err := c.Inject(file.Body, tmp2[1]); err != nil {
return err
}
base, err = builder.Commit(c, "", "", "", "")
if err != nil {
return err
}
fmt.Fprintf(stdout, "===> %s\n", base.ShortId())
default:
fmt.Fprintf(stdout, "Skipping unknown op %s\n", tmp[0])
}
}
if base != nil {
// The build is successful, keep the temporary containers and images
for i := range tmpImages {
delete(tmpImages, i)
}
for i := range tmpContainers {
delete(tmpContainers, i)
}
fmt.Fprintf(stdout, "Build finished. image id: %s\n", base.ShortId())
} else {
fmt.Fprintf(stdout, "An error occured during the build\n")
}
return nil
}
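Note that the switch in Build matches the lowercase keywords "from", "run" and "copy" verbatim, so instructions in this early Dockerfile dialect are lowercase. A minimal sketch of driving the builder, assuming a *Runtime supplied by the caller and an extra os import; the image name and URL are hypothetical:

func buildExample(runtime *Runtime) error {
	dockerfile := "from base\n" +
		"run touch /etc/hello\n" +
		"copy http://example.com/file.txt /file.txt\n"
	builder := NewBuilder(runtime)
	// Build streams progress to stdout and returns on the first failing step.
	return builder.Build(strings.NewReader(dockerfile), os.Stdout)
}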


@@ -1,2 +0,0 @@
Tibor Vass <teabee89@gmail.com> (@tiborvass)
Erik Hollensbe <github@hollensbe.org> (@erikh)

builder/dispatchers.go

@@ -1,353 +0,0 @@
package builder
// This file contains the dispatchers for each command. Note that
// `nullDispatch` is not actually a command, but support for commands we parse
// but do nothing with.
//
// See evaluator.go for a higher level discussion of the whole evaluator
// package.
import (
"fmt"
"io/ioutil"
"path/filepath"
"regexp"
"strings"
"github.com/docker/docker/nat"
"github.com/docker/docker/pkg/log"
flag "github.com/docker/docker/pkg/mflag"
"github.com/docker/docker/runconfig"
)
// dispatch with no layer / parsing. This is effectively not a command.
func nullDispatch(b *Builder, args []string, attributes map[string]bool, original string) error {
return nil
}
// ENV foo bar
//
// Sets the environment variable foo to bar, also makes interpolation
// in the dockerfile available from the next statement on via ${foo}.
//
func env(b *Builder, args []string, attributes map[string]bool, original string) error {
if len(args) != 2 {
return fmt.Errorf("ENV accepts two arguments")
}
fullEnv := fmt.Sprintf("%s=%s", args[0], args[1])
for i, envVar := range b.Config.Env {
envParts := strings.SplitN(envVar, "=", 2)
if args[0] == envParts[0] {
b.Config.Env[i] = fullEnv
return b.commit("", b.Config.Cmd, fmt.Sprintf("ENV %s", fullEnv))
}
}
b.Config.Env = append(b.Config.Env, fullEnv)
return b.commit("", b.Config.Cmd, fmt.Sprintf("ENV %s", fullEnv))
}
// MAINTAINER some text <maybe@an.email.address>
//
// Sets the maintainer metadata.
func maintainer(b *Builder, args []string, attributes map[string]bool, original string) error {
if len(args) != 1 {
return fmt.Errorf("MAINTAINER requires only one argument")
}
b.maintainer = args[0]
return b.commit("", b.Config.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer))
}
// ADD foo /path
//
// Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling
// exist here. If you do not wish to have this automatic handling, use COPY.
//
func add(b *Builder, args []string, attributes map[string]bool, original string) error {
if len(args) < 2 {
return fmt.Errorf("ADD requires at least two arguments")
}
return b.runContextCommand(args, true, true, "ADD")
}
// COPY foo /path
//
// Same as 'ADD' but without the tar and remote url handling.
//
func dispatchCopy(b *Builder, args []string, attributes map[string]bool, original string) error {
if len(args) < 2 {
return fmt.Errorf("COPY requires at least two arguments")
}
return b.runContextCommand(args, false, false, "COPY")
}
// FROM imagename
//
// This sets the image the dockerfile will build on top of.
//
func from(b *Builder, args []string, attributes map[string]bool, original string) error {
if len(args) != 1 {
return fmt.Errorf("FROM requires one argument")
}
name := args[0]
image, err := b.Daemon.Repositories().LookupImage(name)
if err != nil {
if b.Daemon.Graph().IsNotExist(err) {
image, err = b.pullImage(name)
}
// note that the top level err will still be !nil here if IsNotExist is
// not the error. This approach just simplifies the logic a bit.
if err != nil {
return err
}
}
return b.processImageFrom(image)
}
// ONBUILD RUN echo yo
//
// ONBUILD triggers run when the image is used in a FROM statement.
//
// ONBUILD handling has a lot of special-case functionality, the heading in
// evaluator.go and comments around dispatch() in the same file explain the
// special cases. search for 'OnBuild' in internals.go for additional special
// cases.
//
func onbuild(b *Builder, args []string, attributes map[string]bool, original string) error {
triggerInstruction := strings.ToUpper(strings.TrimSpace(args[0]))
switch triggerInstruction {
case "ONBUILD":
return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
case "MAINTAINER", "FROM":
return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction)
}
original = regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(original, "")
b.Config.OnBuild = append(b.Config.OnBuild, original)
return b.commit("", b.Config.Cmd, fmt.Sprintf("ONBUILD %s", original))
}
// WORKDIR /tmp
//
// Set the working directory for future RUN/CMD/etc statements.
//
func workdir(b *Builder, args []string, attributes map[string]bool, original string) error {
if len(args) != 1 {
return fmt.Errorf("WORKDIR requires exactly one argument")
}
workdir := args[0]
if workdir[0] == '/' {
b.Config.WorkingDir = workdir
} else {
if b.Config.WorkingDir == "" {
b.Config.WorkingDir = "/"
}
b.Config.WorkingDir = filepath.Join(b.Config.WorkingDir, workdir)
}
return b.commit("", b.Config.Cmd, fmt.Sprintf("WORKDIR %v", workdir))
}
// RUN some command yo
//
// run a command and commit the image. Args are automatically prepended with
// 'sh -c' in the event there is only one argument. The difference in
// processing:
//
// RUN echo hi # sh -c echo hi
// RUN [ "echo", "hi" ] # echo hi
//
func run(b *Builder, args []string, attributes map[string]bool, original string) error {
if b.image == "" {
return fmt.Errorf("Please provide a source image with `from` prior to run")
}
args = handleJsonArgs(args, attributes)
if len(args) == 1 {
args = append([]string{"/bin/sh", "-c"}, args[0])
}
runCmd := flag.NewFlagSet("run", flag.ContinueOnError)
runCmd.SetOutput(ioutil.Discard)
runCmd.Usage = nil
config, _, _, err := runconfig.Parse(runCmd, append([]string{b.image}, args...), nil)
if err != nil {
return err
}
cmd := b.Config.Cmd
// set Cmd manually, this is special case only for Dockerfiles
b.Config.Cmd = config.Cmd
runconfig.Merge(b.Config, config)
defer func(cmd []string) { b.Config.Cmd = cmd }(cmd)
log.Debugf("[BUILDER] Command to be executed: %v", b.Config.Cmd)
hit, err := b.probeCache()
if err != nil {
return err
}
if hit {
return nil
}
c, err := b.create()
if err != nil {
return err
}
// Ensure that we keep the container mounted until the commit
// to avoid unmounting and then mounting directly again
c.Mount()
defer c.Unmount()
err = b.run(c)
if err != nil {
return err
}
if err := b.commit(c.ID, cmd, "run"); err != nil {
return err
}
return nil
}
// CMD foo
//
// Set the default command to run in the container (which may be empty).
// Argument handling is the same as RUN.
//
func cmd(b *Builder, args []string, attributes map[string]bool, original string) error {
b.Config.Cmd = handleJsonArgs(args, attributes)
if !attributes["json"] {
b.Config.Cmd = append([]string{"/bin/sh", "-c"}, b.Config.Cmd...)
}
if err := b.commit("", b.Config.Cmd, fmt.Sprintf("CMD %v", b.Config.Cmd)); err != nil {
return err
}
if len(args) != 0 {
b.cmdSet = true
}
return nil
}
// ENTRYPOINT /usr/sbin/nginx
//
// Set the entrypoint (which defaults to sh -c) to /usr/sbin/nginx. Will
// accept the CMD as the arguments to /usr/sbin/nginx.
//
// Handles command processing similar to CMD and RUN, only b.Config.Entrypoint
// is initialized at NewBuilder time instead of through argument parsing.
//
func entrypoint(b *Builder, args []string, attributes map[string]bool, original string) error {
parsed := handleJsonArgs(args, attributes)
switch {
case attributes["json"]:
// ENTRYPOINT ["echo", "hi"]
b.Config.Entrypoint = parsed
case len(parsed) == 0:
// ENTRYPOINT []
b.Config.Entrypoint = nil
default:
// ENTRYPOINT echo hi
b.Config.Entrypoint = []string{"/bin/sh", "-c", parsed[0]}
}
// when setting the entrypoint if a CMD was not explicitly set then
// set the command to nil
if !b.cmdSet {
b.Config.Cmd = nil
}
if err := b.commit("", b.Config.Cmd, fmt.Sprintf("ENTRYPOINT %v", b.Config.Entrypoint)); err != nil {
return err
}
return nil
}
// EXPOSE 6667/tcp 7000/tcp
//
// Expose ports for links and port mappings. This all ends up in
// b.Config.ExposedPorts for runconfig.
//
func expose(b *Builder, args []string, attributes map[string]bool, original string) error {
portsTab := args
if b.Config.ExposedPorts == nil {
b.Config.ExposedPorts = make(nat.PortSet)
}
ports, _, err := nat.ParsePortSpecs(append(portsTab, b.Config.PortSpecs...))
if err != nil {
return err
}
for port := range ports {
if _, exists := b.Config.ExposedPorts[port]; !exists {
b.Config.ExposedPorts[port] = struct{}{}
}
}
b.Config.PortSpecs = nil
return b.commit("", b.Config.Cmd, fmt.Sprintf("EXPOSE %v", ports))
}
// USER foo
//
// Set the user to 'foo' for future commands and when running the
// ENTRYPOINT/CMD at container run time.
//
func user(b *Builder, args []string, attributes map[string]bool, original string) error {
if len(args) != 1 {
return fmt.Errorf("USER requires exactly one argument")
}
b.Config.User = args[0]
return b.commit("", b.Config.Cmd, fmt.Sprintf("USER %v", args))
}
// VOLUME /foo
//
// Expose the volume /foo for use. Will also accept the JSON array form.
//
func volume(b *Builder, args []string, attributes map[string]bool, original string) error {
if len(args) == 0 {
return fmt.Errorf("Volume cannot be empty")
}
if b.Config.Volumes == nil {
b.Config.Volumes = map[string]struct{}{}
}
for _, v := range args {
b.Config.Volumes[v] = struct{}{}
}
if err := b.commit("", b.Config.Cmd, fmt.Sprintf("VOLUME %v", args)); err != nil {
return err
}
return nil
}
// INSERT is no longer accepted, but we still parse it.
func insert(b *Builder, args []string, attributes map[string]bool, original string) error {
return fmt.Errorf("INSERT has been deprecated. Please use ADD instead")
}

builder/evaluator.go

@@ -1,236 +0,0 @@
// builder is the evaluation step in the Dockerfile parse/evaluate pipeline.
//
// It incorporates a dispatch table based on the parser.Node values (see the
// parser package for more information) that are yielded from the parser itself.
// Calling NewBuilder with a BuildOpts struct can be used to customize the
// experience for execution purposes only. Parsing is controlled in the parser
// package, and this division of responsibility should be respected.
//
// Please see the jump table targets for the actual invocations, most of which
// will call out to the functions in internals.go to deal with their tasks.
//
// ONBUILD is a special case, which is covered in the onbuild() func in
// dispatchers.go.
//
// The evaluator uses the concept of "steps", which are usually each processable
// line in the Dockerfile. Each step is numbered and certain actions are taken
// before and after each step, such as creating an image ID and removing temporary
// containers and images. Note that ONBUILD creates a kinda-sorta "sub run" which
// includes its own set of steps (usually only one of them).
package builder
import (
"errors"
"fmt"
"io"
"os"
"path"
"strings"
"github.com/docker/docker/builder/parser"
"github.com/docker/docker/daemon"
"github.com/docker/docker/engine"
"github.com/docker/docker/pkg/log"
"github.com/docker/docker/pkg/tarsum"
"github.com/docker/docker/registry"
"github.com/docker/docker/runconfig"
"github.com/docker/docker/utils"
)
var (
ErrDockerfileEmpty = errors.New("Dockerfile cannot be empty")
)
// Environment variable interpolation will happen on these statements only.
var replaceEnvAllowed = map[string]struct{}{
"env": {},
"add": {},
"copy": {},
"workdir": {},
"expose": {},
"volume": {},
"user": {},
}
var evaluateTable map[string]func(*Builder, []string, map[string]bool, string) error
func init() {
evaluateTable = map[string]func(*Builder, []string, map[string]bool, string) error{
"env": env,
"maintainer": maintainer,
"add": add,
"copy": dispatchCopy, // copy() is a go builtin
"from": from,
"onbuild": onbuild,
"workdir": workdir,
"run": run,
"cmd": cmd,
"entrypoint": entrypoint,
"expose": expose,
"volume": volume,
"user": user,
"insert": insert,
}
}
// internal struct, used to maintain configuration of the Dockerfile's
// processing as it evaluates the parsing result.
type Builder struct {
Daemon *daemon.Daemon
Engine *engine.Engine
// effectively stdio for the run. Because it is not stdio, I said
// "Effectively". Do not use stdio anywhere in this package for any reason.
OutStream io.Writer
ErrStream io.Writer
Verbose bool
UtilizeCache bool
// controls how images and containers are handled between steps.
Remove bool
ForceRemove bool
AuthConfig *registry.AuthConfig
AuthConfigFile *registry.ConfigFile
// Deprecated, original writer used for ImagePull. To be removed.
OutOld io.Writer
StreamFormatter *utils.StreamFormatter
Config *runconfig.Config // runconfig for cmd, run, entrypoint etc.
// both of these are controlled by the Remove and ForceRemove options in BuildOpts
TmpContainers map[string]struct{} // a map of containers used for removes
dockerfile *parser.Node // the syntax tree of the dockerfile
image string // image name for commit processing
maintainer string // maintainer name. could probably be removed.
cmdSet bool // indicates if CMD was set in the current Dockerfile
context tarsum.TarSum // the context is a tarball that is uploaded by the client
contextPath string // the path of the temporary directory the local context is unpacked to (server side)
}
// Run the builder with the context. This is the lynchpin of this package. This
// will (barring errors):
//
// * call readContext() which will set up the temporary directory and unpack
// the context into it.
// * read the dockerfile
// * parse the dockerfile
// * walk the parse tree and execute it by dispatching to handlers. If Remove
// or ForceRemove is set, additional cleanup around containers happens after
// processing.
// * Print a happy message and return the image ID.
//
func (b *Builder) Run(context io.Reader) (string, error) {
if err := b.readContext(context); err != nil {
return "", err
}
defer func() {
if err := os.RemoveAll(b.contextPath); err != nil {
log.Debugf("[BUILDER] failed to remove temporary context: %s", err)
}
}()
filename := path.Join(b.contextPath, "Dockerfile")
fi, err := os.Stat(filename)
if os.IsNotExist(err) {
return "", fmt.Errorf("Cannot build a directory without a Dockerfile")
}
if fi.Size() == 0 {
return "", ErrDockerfileEmpty
}
f, err := os.Open(filename)
if err != nil {
return "", err
}
defer f.Close()
ast, err := parser.Parse(f)
if err != nil {
return "", err
}
b.dockerfile = ast
// some initializations that would not have been supplied by the caller.
b.Config = &runconfig.Config{}
b.TmpContainers = map[string]struct{}{}
for i, n := range b.dockerfile.Children {
if err := b.dispatch(i, n); err != nil {
if b.ForceRemove {
b.clearTmp()
}
return "", err
}
fmt.Fprintf(b.OutStream, " ---> %s\n", utils.TruncateID(b.image))
if b.Remove {
b.clearTmp()
}
}
if b.image == "" {
return "", fmt.Errorf("No image was generated. Is your Dockerfile empty?\n")
}
fmt.Fprintf(b.OutStream, "Successfully built %s\n", utils.TruncateID(b.image))
return b.image, nil
}
// This method is the entrypoint to all statement handling routines.
//
// Almost all nodes will have this structure:
// Child[Node, Node, Node] where Child is from parser.Node.Children and each
// node comes from parser.Node.Next. This forms a "line" with a statement and
// arguments and we process them in this normalized form by hitting
// evaluateTable with the leaf nodes of the command and the Builder object.
//
// ONBUILD is a special case; in this case the parser will emit:
// Child[Node, Child[Node, Node...]] where the first node is the literal
// "onbuild" and the child entrypoint is the command of the ONBUILD statmeent,
// such as `RUN` in ONBUILD RUN foo. There is special case logic in here to
// deal with that, at least until it becomes more of a general concern with new
// features.
func (b *Builder) dispatch(stepN int, ast *parser.Node) error {
cmd := ast.Value
attrs := ast.Attributes
original := ast.Original
strs := []string{}
msg := fmt.Sprintf("Step %d : %s", stepN, strings.ToUpper(cmd))
if cmd == "onbuild" {
ast = ast.Next.Children[0]
strs = append(strs, ast.Value)
msg += " " + ast.Value
}
for ast.Next != nil {
ast = ast.Next
str := ast.Value
if _, ok := replaceEnvAllowed[cmd]; ok {
str = b.replaceEnv(ast.Value)
}
strs = append(strs, str)
msg += " " + ast.Value
}
fmt.Fprintln(b.OutStream, msg)
// XXX yes, we skip any cmds that are not valid; the parser should have
// picked these out already.
if f, ok := evaluateTable[cmd]; ok {
return f(b, strs, attrs, original)
}
fmt.Fprintf(b.ErrStream, "# Skipping unknown instruction %s\n", strings.ToUpper(cmd))
return nil
}
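To make the control flow concrete, here is a minimal sketch of wiring a Builder and running it over a build context, reusing this package's imports plus os; the daemon, engine and context values are assumed to come from the caller, and the sketch omits the auth and stream-formatter fields that a real caller (see the BuilderJob wiring later in this diff) fills in:

func buildFromContext(d *daemon.Daemon, eng *engine.Engine, context io.Reader) (string, error) {
	b := &Builder{
		Daemon:       d,
		Engine:       eng,
		OutStream:    os.Stdout,
		ErrStream:    os.Stderr,
		Verbose:      true,
		UtilizeCache: true,
	}
	// Run unpacks the context, parses the Dockerfile, and dispatches each
	// step through evaluateTable, returning the final image ID.
	return b.Run(context)
}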

builder/internals.go

@@ -1,687 +0,0 @@
package builder
// internals for handling commands. Covers many areas and a lot of
// non-contiguous functionality. Please read the comments.
import (
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"net/url"
"os"
"path"
"path/filepath"
"sort"
"strings"
"syscall"
"time"
"github.com/docker/docker/builder/parser"
"github.com/docker/docker/daemon"
imagepkg "github.com/docker/docker/image"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/log"
"github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/pkg/promise"
"github.com/docker/docker/pkg/symlink"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/pkg/tarsum"
"github.com/docker/docker/registry"
"github.com/docker/docker/utils"
)
func (b *Builder) readContext(context io.Reader) error {
tmpdirPath, err := ioutil.TempDir("", "docker-build")
if err != nil {
return err
}
decompressedStream, err := archive.DecompressStream(context)
if err != nil {
return err
}
if b.context, err = tarsum.NewTarSum(decompressedStream, true, tarsum.Version0); err != nil {
return err
}
if err := archive.Untar(b.context, tmpdirPath, nil); err != nil {
return err
}
b.contextPath = tmpdirPath
return nil
}
func (b *Builder) commit(id string, autoCmd []string, comment string) error {
if b.image == "" {
return fmt.Errorf("Please provide a source image with `from` prior to commit")
}
b.Config.Image = b.image
if id == "" {
cmd := b.Config.Cmd
b.Config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + comment}
defer func(cmd []string) { b.Config.Cmd = cmd }(cmd)
hit, err := b.probeCache()
if err != nil {
return err
}
if hit {
return nil
}
container, err := b.create()
if err != nil {
return err
}
id = container.ID
if err := container.Mount(); err != nil {
return err
}
defer container.Unmount()
}
container := b.Daemon.Get(id)
if container == nil {
return fmt.Errorf("An error occured while creating the container")
}
// Note: Actually copy the struct
autoConfig := *b.Config
autoConfig.Cmd = autoCmd
// Commit the container
image, err := b.Daemon.Commit(container, "", "", "", b.maintainer, true, &autoConfig)
if err != nil {
return err
}
b.image = image.ID
return nil
}
type copyInfo struct {
origPath string
destPath string
hash string
decompress bool
tmpDir string
}
func (b *Builder) runContextCommand(args []string, allowRemote bool, allowDecompression bool, cmdName string) error {
if b.context == nil {
return fmt.Errorf("No context given. Impossible to use %s", cmdName)
}
if len(args) < 2 {
return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName)
}
dest := args[len(args)-1] // last one is always the dest
copyInfos := []*copyInfo{}
b.Config.Image = b.image
defer func() {
for _, ci := range copyInfos {
if ci.tmpDir != "" {
os.RemoveAll(ci.tmpDir)
}
}
}()
// Loop through each src file and calculate the info we need to
// do the copy (e.g. hash value if cached). Don't actually do
// the copy until we've looked at all src files
for _, orig := range args[0 : len(args)-1] {
err := calcCopyInfo(b, cmdName, &copyInfos, orig, dest, allowRemote, allowDecompression)
if err != nil {
return err
}
}
if len(copyInfos) == 0 {
return fmt.Errorf("No source files were specified")
}
if len(copyInfos) > 1 && !strings.HasSuffix(dest, "/") {
return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
}
// For backwards compat, if there's just one CI then use it as the
// cache look-up string, otherwise hash 'em all into one
var srcHash string
var origPaths string
if len(copyInfos) == 1 {
srcHash = copyInfos[0].hash
origPaths = copyInfos[0].origPath
} else {
var hashs []string
var origs []string
for _, ci := range copyInfos {
hashs = append(hashs, ci.hash)
origs = append(origs, ci.origPath)
}
hasher := sha256.New()
hasher.Write([]byte(strings.Join(hashs, ",")))
srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil))
origPaths = strings.Join(origs, " ")
}
cmd := b.Config.Cmd
b.Config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest)}
defer func(cmd []string) { b.Config.Cmd = cmd }(cmd)
hit, err := b.probeCache()
if err != nil {
return err
}
// If we do not have at least one hash, never use the cache
if hit && b.UtilizeCache {
return nil
}
container, _, err := b.Daemon.Create(b.Config, nil, "")
if err != nil {
return err
}
b.TmpContainers[container.ID] = struct{}{}
if err := container.Mount(); err != nil {
return err
}
defer container.Unmount()
for _, ci := range copyInfos {
if err := b.addContext(container, ci.origPath, ci.destPath, ci.decompress); err != nil {
return err
}
}
if err := b.commit(container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)); err != nil {
return err
}
return nil
}
func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool) error {
if origPath != "" && origPath[0] == '/' && len(origPath) > 1 {
origPath = origPath[1:]
}
origPath = strings.TrimPrefix(origPath, "./")
// In the remote/URL case, download it and gen its hashcode
if utils.IsURL(origPath) {
if !allowRemote {
return fmt.Errorf("Source can't be a URL for %s", cmdName)
}
ci := copyInfo{}
ci.origPath = origPath
ci.hash = origPath // default to this but can change
ci.destPath = destPath
ci.decompress = false
*cInfos = append(*cInfos, &ci)
// Initiate the download
resp, err := utils.Download(ci.origPath)
if err != nil {
return err
}
// Create a tmp dir
tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
if err != nil {
return err
}
ci.tmpDir = tmpDirName
// Create a tmp file within our tmp dir
tmpFileName := path.Join(tmpDirName, "tmp")
tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
if err != nil {
return err
}
// Download and dump result to tmp file
if _, err := io.Copy(tmpFile, utils.ProgressReader(resp.Body, int(resp.ContentLength), b.OutOld, b.StreamFormatter, true, "", "Downloading")); err != nil {
tmpFile.Close()
return err
}
fmt.Fprintf(b.OutStream, "\n")
tmpFile.Close()
// Remove the mtime of the newly created tmp file
if err := system.UtimesNano(tmpFileName, make([]syscall.Timespec, 2)); err != nil {
return err
}
ci.origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))
// If the destination is a directory, figure out the filename.
if strings.HasSuffix(ci.destPath, "/") {
u, err := url.Parse(origPath)
if err != nil {
return err
}
path := u.Path
if strings.HasSuffix(path, "/") {
path = path[:len(path)-1]
}
parts := strings.Split(path, "/")
filename := parts[len(parts)-1]
if filename == "" {
return fmt.Errorf("cannot determine filename from url: %s", u)
}
ci.destPath = ci.destPath + filename
}
// Calc the checksum, only if we're using the cache
if b.UtilizeCache {
r, err := archive.Tar(tmpFileName, archive.Uncompressed)
if err != nil {
return err
}
tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version0)
if err != nil {
return err
}
if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
return err
}
ci.hash = tarSum.Sum(nil)
r.Close()
}
return nil
}
// Deal with wildcards
if ContainsWildcards(origPath) {
for _, fileInfo := range b.context.GetSums() {
if fileInfo.Name() == "" {
continue
}
match, _ := path.Match(origPath, fileInfo.Name())
if !match {
continue
}
calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression)
}
return nil
}
// Must be a dir or a file
if err := b.checkPathForAddition(origPath); err != nil {
return err
}
fi, _ := os.Stat(path.Join(b.contextPath, origPath))
ci := copyInfo{}
ci.origPath = origPath
ci.hash = origPath
ci.destPath = destPath
ci.decompress = allowDecompression
*cInfos = append(*cInfos, &ci)
// If not using cache don't need to do anything else.
// If we are using a cache then calc the hash for the src file/dir
if !b.UtilizeCache {
return nil
}
// Deal with the single file case
if !fi.IsDir() {
// This will match first file in sums of the archive
fis := b.context.GetSums().GetFile(ci.origPath)
if fis != nil {
ci.hash = "file:" + fis.Sum()
}
return nil
}
// Must be a dir
var subfiles []string
absOrigPath := path.Join(b.contextPath, ci.origPath)
// Add a trailing / to make sure we only pick up nested files under
// the dir and not sibling files of the dir that just happen to
// start with the same chars
if !strings.HasSuffix(absOrigPath, "/") {
absOrigPath += "/"
}
// Need path w/o / too to find matching dir w/o trailing /
absOrigPathNoSlash := absOrigPath[:len(absOrigPath)-1]
for _, fileInfo := range b.context.GetSums() {
absFile := path.Join(b.contextPath, fileInfo.Name())
if strings.HasPrefix(absFile, absOrigPath) || absFile == absOrigPathNoSlash {
subfiles = append(subfiles, fileInfo.Sum())
}
}
sort.Strings(subfiles)
hasher := sha256.New()
hasher.Write([]byte(strings.Join(subfiles, ",")))
ci.hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))
return nil
}
func ContainsWildcards(name string) bool {
for i := 0; i < len(name); i++ {
ch := name[i]
if ch == '\\' {
i++
} else if ch == '*' || ch == '?' || ch == '[' {
return true
}
}
return false
}
func (b *Builder) pullImage(name string) (*imagepkg.Image, error) {
remote, tag := parsers.ParseRepositoryTag(name)
if tag == "" {
tag = "latest"
}
pullRegistryAuth := b.AuthConfig
if len(b.AuthConfigFile.Configs) > 0 {
// The request came with a full auth config file, we prefer to use that
endpoint, _, err := registry.ResolveRepositoryName(remote)
if err != nil {
return nil, err
}
resolvedAuth := b.AuthConfigFile.ResolveAuthConfig(endpoint)
pullRegistryAuth = &resolvedAuth
}
job := b.Engine.Job("pull", remote, tag)
job.SetenvBool("json", b.StreamFormatter.Json())
job.SetenvBool("parallel", true)
job.SetenvJson("authConfig", pullRegistryAuth)
job.Stdout.Add(b.OutOld)
if err := job.Run(); err != nil {
return nil, err
}
image, err := b.Daemon.Repositories().LookupImage(name)
if err != nil {
return nil, err
}
return image, nil
}
func (b *Builder) processImageFrom(img *imagepkg.Image) error {
b.image = img.ID
if img.Config != nil {
b.Config = img.Config
}
if len(b.Config.Env) == 0 {
b.Config.Env = append(b.Config.Env, "PATH="+daemon.DefaultPathEnv)
}
// Process ONBUILD triggers if they exist
if nTriggers := len(b.Config.OnBuild); nTriggers != 0 {
fmt.Fprintf(b.ErrStream, "# Executing %d build triggers\n", nTriggers)
}
// Copy the ONBUILD triggers, and remove them from the config, since the config will be committed.
onBuildTriggers := b.Config.OnBuild
b.Config.OnBuild = []string{}
// parse the ONBUILD triggers by invoking the parser
for stepN, step := range onBuildTriggers {
ast, err := parser.Parse(strings.NewReader(step))
if err != nil {
return err
}
for i, n := range ast.Children {
switch strings.ToUpper(n.Value) {
case "ONBUILD":
return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
case "MAINTAINER", "FROM":
return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value)
}
fmt.Fprintf(b.OutStream, "Trigger %d, %s\n", stepN, step)
if err := b.dispatch(i, n); err != nil {
return err
}
}
}
return nil
}
// probeCache checks to see if image-caching is enabled (`b.UtilizeCache`)
// and if so attempts to look up the current `b.image` and `b.Config` pair
// in the current server `b.Daemon`. If an image is found, probeCache returns
// `(true, nil)`. If no image is found, it returns `(false, nil)`. If there
// is any error, it returns `(false, err)`.
func (b *Builder) probeCache() (bool, error) {
if b.UtilizeCache {
if cache, err := b.Daemon.ImageGetCached(b.image, b.Config); err != nil {
return false, err
} else if cache != nil {
fmt.Fprintf(b.OutStream, " ---> Using cache\n")
log.Debugf("[BUILDER] Use cached version")
b.image = cache.ID
return true, nil
} else {
log.Debugf("[BUILDER] Cache miss")
}
}
return false, nil
}
func (b *Builder) create() (*daemon.Container, error) {
if b.image == "" {
return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
}
b.Config.Image = b.image
config := *b.Config
// Create the container
c, warnings, err := b.Daemon.Create(b.Config, nil, "")
if err != nil {
return nil, err
}
for _, warning := range warnings {
fmt.Fprintf(b.OutStream, " ---> [Warning] %s\n", warning)
}
b.TmpContainers[c.ID] = struct{}{}
fmt.Fprintf(b.OutStream, " ---> Running in %s\n", utils.TruncateID(c.ID))
// override the entry point that may have been picked up from the base image
c.Path = config.Cmd[0]
c.Args = config.Cmd[1:]
return c, nil
}
func (b *Builder) run(c *daemon.Container) error {
var errCh chan error
if b.Verbose {
errCh = promise.Go(func() error {
// FIXME: call the 'attach' job so that daemon.Attach can be made private
//
// FIXME (LK4D4): Also, maybe makes sense to call "logs" job, it is like attach
// but without hijacking for stdin. Also, with attach there can be a race
// condition because some output may already have been printed before it.
return <-b.Daemon.Attach(&c.StreamConfig, c.Config.OpenStdin, c.Config.StdinOnce, c.Config.Tty, nil, nil, b.OutStream, b.ErrStream)
})
}
//start the container
if err := c.Start(); err != nil {
return err
}
if errCh != nil {
if err := <-errCh; err != nil {
return err
}
}
// Wait for it to finish
if ret, _ := c.WaitStop(-1 * time.Second); ret != 0 {
err := &utils.JSONError{
Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.Config.Cmd, ret),
Code: ret,
}
return err
}
return nil
}
func (b *Builder) checkPathForAddition(orig string) error {
origPath := path.Join(b.contextPath, orig)
origPath, err := filepath.EvalSymlinks(origPath)
if err != nil {
if os.IsNotExist(err) {
return fmt.Errorf("%s: no such file or directory", orig)
}
return err
}
if !strings.HasPrefix(origPath, b.contextPath) {
return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath)
}
if _, err := os.Stat(origPath); err != nil {
if os.IsNotExist(err) {
return fmt.Errorf("%s: no such file or directory", orig)
}
return err
}
return nil
}
func (b *Builder) addContext(container *daemon.Container, orig, dest string, decompress bool) error {
var (
err error
destExists = true
origPath = path.Join(b.contextPath, orig)
destPath = path.Join(container.RootfsPath(), dest)
)
if destPath != container.RootfsPath() {
destPath, err = symlink.FollowSymlinkInScope(destPath, container.RootfsPath())
if err != nil {
return err
}
}
// Preserve the trailing '/'
if strings.HasSuffix(dest, "/") || dest == "." {
destPath = destPath + "/"
}
destStat, err := os.Stat(destPath)
if err != nil {
if !os.IsNotExist(err) {
return err
}
destExists = false
}
fi, err := os.Stat(origPath)
if err != nil {
if os.IsNotExist(err) {
return fmt.Errorf("%s: no such file or directory", orig)
}
return err
}
if fi.IsDir() {
return copyAsDirectory(origPath, destPath, destExists)
}
// If we are adding a remote file (or we've been told not to decompress), do not try to untar it
if decompress {
// First try to unpack the source as an archive
// to support the untar feature we need to clean up the path a little bit
// because tar is very forgiving. First we need to strip off the archive's
// filename from the path but this is only added if it does not end in / .
tarDest := destPath
if strings.HasSuffix(tarDest, "/") {
tarDest = filepath.Dir(destPath)
}
// try to successfully untar the orig
if err := archive.UntarPath(origPath, tarDest); err == nil {
return nil
} else if err != io.EOF {
log.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
}
}
if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil {
return err
}
if err := archive.CopyWithTar(origPath, destPath); err != nil {
return err
}
resPath := destPath
if destExists && destStat.IsDir() {
resPath = path.Join(destPath, path.Base(origPath))
}
return fixPermissions(resPath, 0, 0)
}
func copyAsDirectory(source, destination string, destinationExists bool) error {
if err := archive.CopyWithTar(source, destination); err != nil {
return err
}
if destinationExists {
files, err := ioutil.ReadDir(source)
if err != nil {
return err
}
for _, file := range files {
if err := fixPermissions(filepath.Join(destination, file.Name()), 0, 0); err != nil {
return err
}
}
return nil
}
return fixPermissions(destination, 0, 0)
}
func fixPermissions(destination string, uid, gid int) error {
return filepath.Walk(destination, func(path string, info os.FileInfo, err error) error {
if err := os.Lchown(path, uid, gid); err != nil && !os.IsNotExist(err) {
return err
}
return nil
})
}
func (b *Builder) clearTmp() {
for c := range b.TmpContainers {
tmp := b.Daemon.Get(c)
if err := b.Daemon.Destroy(tmp); err != nil {
fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), err.Error())
return
}
b.Daemon.DeleteVolumes(tmp.VolumePaths())
delete(b.TmpContainers, c)
fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", utils.TruncateID(c))
}
}
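The cache key construction in runContextCommand is worth seeing in isolation: when several sources feed one ADD/COPY, their individual hashes are joined and re-hashed into a single "multi:" key. A standalone sketch of just that step (hypothetical main package, not part of the diff):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"strings"
)

// multiHash mirrors the multi-source branch of runContextCommand.
func multiHash(hashes []string) string {
	hasher := sha256.New()
	hasher.Write([]byte(strings.Join(hashes, ",")))
	return "multi:" + hex.EncodeToString(hasher.Sum(nil))
}

func main() {
	fmt.Println(multiHash([]string{"file:abc", "dir:def"}))
}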


@@ -1,130 +0,0 @@
package builder
import (
"io"
"io/ioutil"
"os"
"os/exec"
"strings"
"github.com/docker/docker/daemon"
"github.com/docker/docker/engine"
"github.com/docker/docker/graph"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/registry"
"github.com/docker/docker/utils"
)
type BuilderJob struct {
Engine *engine.Engine
Daemon *daemon.Daemon
}
func (b *BuilderJob) Install() {
b.Engine.Register("build", b.CmdBuild)
}
func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
if len(job.Args) != 0 {
return job.Errorf("Usage: %s\n", job.Name)
}
var (
remoteURL = job.Getenv("remote")
repoName = job.Getenv("t")
suppressOutput = job.GetenvBool("q")
noCache = job.GetenvBool("nocache")
rm = job.GetenvBool("rm")
forceRm = job.GetenvBool("forcerm")
authConfig = &registry.AuthConfig{}
configFile = &registry.ConfigFile{}
tag string
context io.ReadCloser
)
job.GetenvJson("authConfig", authConfig)
job.GetenvJson("configFile", configFile)
repoName, tag = parsers.ParseRepositoryTag(repoName)
if repoName != "" {
if _, _, err := registry.ResolveRepositoryName(repoName); err != nil {
return job.Error(err)
}
if len(tag) > 0 {
if err := graph.ValidateTagName(tag); err != nil {
return job.Error(err)
}
}
}
if remoteURL == "" {
context = ioutil.NopCloser(job.Stdin)
} else if utils.IsGIT(remoteURL) {
if !strings.HasPrefix(remoteURL, "git://") {
remoteURL = "https://" + remoteURL
}
root, err := ioutil.TempDir("", "docker-build-git")
if err != nil {
return job.Error(err)
}
defer os.RemoveAll(root)
if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil {
return job.Errorf("Error trying to use git: %s (%s)", err, output)
}
c, err := archive.Tar(root, archive.Uncompressed)
if err != nil {
return job.Error(err)
}
context = c
} else if utils.IsURL(remoteURL) {
f, err := utils.Download(remoteURL)
if err != nil {
return job.Error(err)
}
defer f.Body.Close()
dockerFile, err := ioutil.ReadAll(f.Body)
if err != nil {
return job.Error(err)
}
c, err := archive.Generate("Dockerfile", string(dockerFile))
if err != nil {
return job.Error(err)
}
context = c
}
defer context.Close()
sf := utils.NewStreamFormatter(job.GetenvBool("json"))
builder := &Builder{
Daemon: b.Daemon,
Engine: b.Engine,
OutStream: &utils.StdoutFormater{
Writer: job.Stdout,
StreamFormatter: sf,
},
ErrStream: &utils.StderrFormater{
Writer: job.Stdout,
StreamFormatter: sf,
},
Verbose: !suppressOutput,
UtilizeCache: !noCache,
Remove: rm,
ForceRemove: forceRm,
OutOld: job.Stdout,
StreamFormatter: sf,
AuthConfig: authConfig,
AuthConfigFile: configFile,
}
id, err := builder.Run(context)
if err != nil {
return job.Error(err)
}
if repoName != "" {
b.Daemon.Repositories().Set(repoName, tag, id, false)
}
return engine.StatusOK
}
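From the engine side, the job registered by Install is driven with the usual Job API. A minimal sketch, assuming io and os imports and a tar stream for the context; the repository name is hypothetical:

func runBuild(eng *engine.Engine, context io.Reader) error {
	job := eng.Job("build")
	job.Setenv("t", "example/repo")
	job.SetenvBool("rm", true)
	// The build context arrives on the job's stdin as a tarball.
	if err := job.Stdin.Add(context); err != nil {
		return err
	}
	job.Stdout.Add(os.Stdout)
	return job.Run()
}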


@@ -1,32 +0,0 @@
package main
import (
"fmt"
"os"
"github.com/docker/docker/builder/parser"
)
func main() {
var f *os.File
var err error
if len(os.Args) < 2 {
fmt.Println("please supply filename(s)")
os.Exit(1)
}
for _, fn := range os.Args[1:] {
f, err = os.Open(fn)
if err != nil {
panic(err)
}
ast, err := parser.Parse(f)
if err != nil {
panic(err)
} else {
fmt.Println(ast.Dump())
}
}
}


@@ -1,155 +0,0 @@
package parser
// line parsers are dispatch calls that parse a single unit of text into a
// Node object which contains the whole statement. Dockerfiles have varied
// (but not usually unique, see ONBUILD for a unique example) parsing rules
// per-command, and these unify the processing in a way that makes it
// manageable.
import (
"encoding/json"
"errors"
"fmt"
"strconv"
"strings"
)
var (
errDockerfileJSONNesting = errors.New("You may not nest arrays in Dockerfile statements.")
)
// ignore the current argument. This will still leave a command parsed, but
// will not incorporate the arguments into the ast.
func parseIgnore(rest string) (*Node, map[string]bool, error) {
return &Node{}, nil, nil
}
// used for onbuild. Could potentially be used for anything that represents a
// statement with sub-statements.
//
// ONBUILD RUN foo bar -> (onbuild (run foo bar))
//
func parseSubCommand(rest string) (*Node, map[string]bool, error) {
_, child, err := parseLine(rest)
if err != nil {
return nil, nil, err
}
return &Node{Children: []*Node{child}}, nil, nil
}
// parseEnv parses environment (ENV) statements. Note that this does *not*
// handle variable interpolation, which is handled later in the evaluator.
func parseEnv(rest string) (*Node, map[string]bool, error) {
node := &Node{}
rootnode := node
strs := TOKEN_WHITESPACE.Split(rest, 2)
if len(strs) < 2 {
return nil, nil, fmt.Errorf("ENV must have two arguments")
}
node.Value = strs[0]
node.Next = &Node{}
node.Next.Value = strs[1]
return rootnode, nil, nil
}
// parses a whitespace-delimited set of arguments. The result is effectively a
// linked list of string arguments.
func parseStringsWhitespaceDelimited(rest string) (*Node, map[string]bool, error) {
node := &Node{}
rootnode := node
prevnode := node
for _, str := range TOKEN_WHITESPACE.Split(rest, -1) { // split on runs of whitespace
prevnode = node
node.Value = str
node.Next = &Node{}
node = node.Next
}
// The loop above always allocates one empty Node past the final token, so
// nil out the last link in the chain to drop it.
prevnode.Next = nil
return rootnode, nil, nil
}
// parseString returns the rest of the line verbatim as a single node value;
// quoting is applied later, when the AST is dumped.
func parseString(rest string) (*Node, map[string]bool, error) {
n := &Node{}
n.Value = rest
return n, nil, nil
}
// parseJSON converts JSON arrays to an AST.
func parseJSON(rest string) (*Node, map[string]bool, error) {
var (
myJson []interface{}
next = &Node{}
orignext = next
prevnode = next
)
if err := json.Unmarshal([]byte(rest), &myJson); err != nil {
return nil, nil, err
}
for _, str := range myJson {
switch str.(type) {
case string:
case float64:
str = strconv.FormatFloat(str.(float64), 'G', -1, 64)
default:
return nil, nil, errDockerfileJSONNesting
}
next.Value = str.(string)
next.Next = &Node{}
prevnode = next
next = next.Next
}
prevnode.Next = nil
return orignext, map[string]bool{"json": true}, nil
}
// parseMaybeJSON determines if the argument appears to be a JSON array. If
// so, passes to parseJSON; if not, returns the trimmed argument as a single
// node.
func parseMaybeJSON(rest string) (*Node, map[string]bool, error) {
rest = strings.TrimSpace(rest)
node, attrs, err := parseJSON(rest)
if err == nil {
return node, attrs, nil
}
if err == errDockerfileJSONNesting {
return nil, nil, err
}
node = &Node{}
node.Value = rest
return node, nil, nil
}
// parseMaybeJSONToList determines if the argument appears to be a JSON array. If
// so, passes to parseJSON; if not, attempts to parse it as a
// whitespace-delimited string.
func parseMaybeJSONToList(rest string) (*Node, map[string]bool, error) {
rest = strings.TrimSpace(rest)
node, attrs, err := parseJSON(rest)
if err == nil {
return node, attrs, nil
}
if err == errDockerfileJSONNesting {
return nil, nil, err
}
return parseStringsWhitespaceDelimited(rest)
}
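The practical differences between these line parsers are easiest to see side by side. A hedged, in-package sketch; the inputs and the helper name are illustrative:

package parser

import "fmt"

// sketchLineParsers is illustrative only.
func sketchLineParsers() {
	// ENV splits into exactly two nodes: the key, then everything after it.
	env, _, _ := parseEnv(`PATH /usr/local/bin`)
	fmt.Println(env.Value, env.Next.Value) // PATH /usr/local/bin

	// A JSON array becomes one node per element, flagged with json=true.
	cmd, attrs, _ := parseMaybeJSON(`["echo", "hi"]`)
	fmt.Println(cmd.Value, cmd.Next.Value, attrs["json"]) // echo hi true

	// Anything that fails to unmarshal is kept verbatim as a single node.
	run, _, _ := parseMaybeJSON(`echo hi`)
	fmt.Println(run.Value) // echo hi
}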


@@ -1,140 +0,0 @@
// This package implements a parser and parse tree dumper for Dockerfiles.
package parser
import (
"bufio"
"io"
"regexp"
"strings"
"unicode"
)
// Node is a structure used to represent a parse tree.
//
// Each node has three core fields: Value, Next, and Children. Value is the
// current token's string value. Next is always the next non-child token, and
// Children contains all the children. Here's an example:
//
// (value next (child child-next child-next-next) next-next)
//
// Attributes and Original carry metadata about the parsed line. This data
// structure is admittedly weak for handling complex languages, but luckily
// Dockerfiles aren't very complicated. It works a little more effectively
// than a "proper" parse tree for our needs.
//
type Node struct {
Value string // actual content
Next *Node // the next item in the current sexp
Children []*Node // the children of this sexp
Attributes map[string]bool // special attributes for this node
Original string // original line used before parsing
}
var (
dispatch map[string]func(string) (*Node, map[string]bool, error)
TOKEN_WHITESPACE = regexp.MustCompile(`[\t\v\f\r ]+`)
TOKEN_LINE_CONTINUATION = regexp.MustCompile(`\\\s*$`)
TOKEN_COMMENT = regexp.MustCompile(`^#.*$`)
)
func init() {
// Dispatch Table. see line_parsers.go for the parse functions.
// The command is parsed and mapped to the line parser. The line parser
// receives the arguments but not the command, and returns an AST after
// reformulating the arguments according to the rules in the parser
// functions. Errors are propagated up by Parse() and the resulting AST can
// be incorporated directly into the existing AST as a next.
dispatch = map[string]func(string) (*Node, map[string]bool, error){
"user": parseString,
"onbuild": parseSubCommand,
"workdir": parseString,
"env": parseEnv,
"maintainer": parseString,
"from": parseString,
"add": parseStringsWhitespaceDelimited,
"copy": parseStringsWhitespaceDelimited,
"run": parseMaybeJSON,
"cmd": parseMaybeJSON,
"entrypoint": parseMaybeJSON,
"expose": parseStringsWhitespaceDelimited,
"volume": parseMaybeJSONToList,
"insert": parseIgnore,
}
}
// parseLine parses a single line. If the line ends in a continuation, the
// unfinished text is returned with a nil node; otherwise the parsed node is
// returned with an empty remainder.
func parseLine(line string) (string, *Node, error) {
if line = stripComments(line); line == "" {
return "", nil, nil
}
if TOKEN_LINE_CONTINUATION.MatchString(line) {
line = TOKEN_LINE_CONTINUATION.ReplaceAllString(line, "")
return line, nil, nil
}
cmd, args, err := splitCommand(line)
if err != nil {
return "", nil, err
}
node := &Node{}
node.Value = cmd
sexp, attrs, err := fullDispatch(cmd, args)
if err != nil {
return "", nil, err
}
if sexp.Value != "" || sexp.Next != nil || sexp.Children != nil {
node.Next = sexp
}
node.Attributes = attrs
node.Original = line
return "", node, nil
}
// Parse is the main parse routine. It handles an io.Reader and returns the
// root of the AST.
func Parse(rwc io.Reader) (*Node, error) {
root := &Node{}
scanner := bufio.NewScanner(rwc)
for scanner.Scan() {
scannedLine := strings.TrimLeftFunc(scanner.Text(), unicode.IsSpace)
if stripComments(scannedLine) == "" {
continue
}
line, child, err := parseLine(scannedLine)
if err != nil {
return nil, err
}
if line != "" && child == nil {
for scanner.Scan() {
newline := scanner.Text()
if stripComments(strings.TrimSpace(newline)) == "" {
continue
}
line, child, err = parseLine(line + newline)
if err != nil {
return nil, err
}
if child != nil {
break
}
}
}
if child != nil {
root.Children = append(root.Children, child)
}
}
return root, nil
}
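Since Parse only needs an io.Reader, exercising the parser takes a few lines. A minimal sketch, assuming the import path used elsewhere in this diff:

package main

import (
	"fmt"
	"strings"

	"github.com/docker/docker/builder/parser"
)

func main() {
	dockerfile := "FROM ubuntu:14.04\nRUN echo hello \\\nworld\n"
	ast, err := parser.Parse(strings.NewReader(dockerfile))
	if err != nil {
		panic(err)
	}
	// Prints the same sexp form used by the "result" fixtures below:
	//   (from "ubuntu:14.04")
	//   (run "echo hello world")
	fmt.Println(ast.Dump())
}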


@@ -1,82 +0,0 @@
package parser
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
)
const testDir = "testfiles"
const negativeTestDir = "testfiles-negative"
func getDirs(t *testing.T, dir string) []os.FileInfo {
f, err := os.Open(dir)
if err != nil {
t.Fatal(err)
}
defer f.Close()
dirs, err := f.Readdir(0)
if err != nil {
t.Fatal(err)
}
return dirs
}
func TestTestNegative(t *testing.T) {
for _, dir := range getDirs(t, negativeTestDir) {
dockerfile := filepath.Join(negativeTestDir, dir.Name(), "Dockerfile")
df, err := os.Open(dockerfile)
if err != nil {
t.Fatalf("Dockerfile missing for %s: %s", dir.Name(), err.Error())
}
_, err = Parse(df)
if err == nil {
t.Fatalf("No error parsing broken dockerfile for %s", dir.Name())
}
df.Close()
}
}
func TestTestData(t *testing.T) {
for _, dir := range getDirs(t, testDir) {
dockerfile := filepath.Join(testDir, dir.Name(), "Dockerfile")
resultfile := filepath.Join(testDir, dir.Name(), "result")
df, err := os.Open(dockerfile)
if err != nil {
t.Fatalf("Dockerfile missing for %s: %s", dir.Name(), err.Error())
}
rf, err := os.Open(resultfile)
if err != nil {
t.Fatalf("Result file missing for %s: %s", dir.Name(), err.Error())
}
ast, err := Parse(df)
if err != nil {
t.Fatalf("Error parsing %s's dockerfile: %s", dir.Name(), err.Error())
}
content, err := ioutil.ReadAll(rf)
if err != nil {
t.Fatalf("Error reading %s's result file: %s", dir.Name(), err.Error())
}
if ast.Dump()+"\n" != string(content) {
fmt.Fprintln(os.Stderr, "Result:\n"+ast.Dump())
fmt.Fprintln(os.Stderr, "Expected:\n"+string(content))
t.Fatalf("%s: AST dump of dockerfile does not match result", dir.Name())
}
df.Close()
rf.Close()
}
}


@@ -1,3 +0,0 @@
FROM busybox
ENV PATH=PATH


@@ -1 +0,0 @@
CMD [ "echo", [ "nested json" ] ]


@@ -1,25 +0,0 @@
FROM brimstone/ubuntu:14.04
MAINTAINER brimstone@the.narro.ws
# TORUN -v /var/run/docker.sock:/var/run/docker.sock
ENV GOPATH /go
# Set our command
ENTRYPOINT ["/usr/local/bin/consuldock"]
# Install the packages we need, clean up after them and us
RUN apt-get update \
&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \
&& apt-get install -y --no-install-recommends git golang ca-certificates \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists \
&& go get -v github.com/brimstone/consuldock \
&& mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \
&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \
&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \
&& rm /tmp/dpkg.* \
&& rm -rf $GOPATH


@@ -1,5 +0,0 @@
(from "brimstone/ubuntu:14.04")
(maintainer "brimstone@the.narro.ws")
(env "GOPATH" "/go")
(entrypoint "/usr/local/bin/consuldock")
(run "apt-get update && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates && apt-get clean && rm -rf /var/lib/apt/lists && go get -v github.com/brimstone/consuldock && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') && rm /tmp/dpkg.* && rm -rf $GOPATH")


@@ -1,52 +0,0 @@
FROM brimstone/ubuntu:14.04
CMD []
ENTRYPOINT ["/usr/bin/consul", "agent", "-server", "-data-dir=/consul", "-client=0.0.0.0", "-ui-dir=/webui"]
EXPOSE 8500 8600 8400 8301 8302
RUN apt-get update \
&& apt-get install -y unzip wget \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists
RUN cd /tmp \
&& wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \
-O web_ui.zip \
&& unzip web_ui.zip \
&& mv dist /webui \
&& rm web_ui.zip
RUN apt-get update \
&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \
&& apt-get install -y --no-install-recommends unzip wget \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists \
&& cd /tmp \
&& wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \
-O web_ui.zip \
&& unzip web_ui.zip \
&& mv dist /webui \
&& rm web_ui.zip \
&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \
&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \
&& rm /tmp/dpkg.*
ENV GOPATH /go
RUN apt-get update \
&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \
&& apt-get install -y --no-install-recommends git golang ca-certificates build-essential \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists \
&& go get -v github.com/hashicorp/consul \
&& mv $GOPATH/bin/consul /usr/bin/consul \
&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \
&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \
&& rm /tmp/dpkg.* \
&& rm -rf $GOPATH


@@ -1,9 +0,0 @@
(from "brimstone/ubuntu:14.04")
(cmd)
(entrypoint "/usr/bin/consul" "agent" "-server" "-data-dir=/consul" "-client=0.0.0.0" "-ui-dir=/webui")
(expose "8500" "8600" "8400" "8301" "8302")
(run "apt-get update && apt-get install -y unzip wget && apt-get clean && rm -rf /var/lib/apt/lists")
(run "cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip")
(run "apt-get update && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends unzip wget && apt-get clean && rm -rf /var/lib/apt/lists && cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') && rm /tmp/dpkg.*")
(env "GOPATH" "/go")
(run "apt-get update && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates build-essential && apt-get clean && rm -rf /var/lib/apt/lists && go get -v github.com/hashicorp/consul && mv $GOPATH/bin/consul /usr/bin/consul && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') && rm /tmp/dpkg.* && rm -rf $GOPATH")


@@ -1,36 +0,0 @@
FROM ubuntu:14.04
RUN echo hello\
world\
goodnight \
moon\
light\
ning
RUN echo hello \
world
RUN echo hello \
world
RUN echo hello \
goodbye\
frog
RUN echo hello \
world
RUN echo hi \
\
world \
\
good\
\
night
RUN echo goodbye\
frog
RUN echo good\
bye\
frog
RUN echo hello \
# this is a comment
# this is a comment with a blank line surrounding it
this is some more useful stuff


@@ -1,10 +0,0 @@
(from "ubuntu:14.04")
(run "echo hello world goodnight moon lightning")
(run "echo hello world")
(run "echo hello world")
(run "echo hello goodbyefrog")
(run "echo hello world")
(run "echo hi world goodnight")
(run "echo goodbyefrog")
(run "echo goodbyefrog")
(run "echo hello this is some more useful stuff")


@@ -1,54 +0,0 @@
FROM cpuguy83/ubuntu
ENV NAGIOS_HOME /opt/nagios
ENV NAGIOS_USER nagios
ENV NAGIOS_GROUP nagios
ENV NAGIOS_CMDUSER nagios
ENV NAGIOS_CMDGROUP nagios
ENV NAGIOSADMIN_USER nagiosadmin
ENV NAGIOSADMIN_PASS nagios
ENV APACHE_RUN_USER nagios
ENV APACHE_RUN_GROUP nagios
ENV NAGIOS_TIMEZONE UTC
RUN sed -i 's/universe/universe multiverse/' /etc/apt/sources.list
RUN apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx
RUN ( egrep -i "^${NAGIOS_GROUP}" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i "^${NAGIOS_CMDGROUP}" /etc/group || groupadd $NAGIOS_CMDGROUP )
RUN ( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER )
ADD http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3 /tmp/nagios.tar.gz
RUN cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf
ADD http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz /tmp/
RUN cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install
RUN sed -i.bak 's/.*\=www\-data//g' /etc/apache2/envvars
RUN export DOC_ROOT="DocumentRoot $(echo $NAGIOS_HOME/share)"; sed -i "s,DocumentRoot.*,$DOC_ROOT," /etc/apache2/sites-enabled/000-default
RUN ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo
RUN echo "use_timezone=$NAGIOS_TIMEZONE" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo "SetEnv TZ \"${NAGIOS_TIMEZONE}\"" >> /etc/apache2/conf.d/nagios.conf
RUN mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs
RUN echo "cfg_dir=${NAGIOS_HOME}/etc/conf.d" >> ${NAGIOS_HOME}/etc/nagios.cfg
RUN echo "cfg_dir=${NAGIOS_HOME}/etc/monitor" >> ${NAGIOS_HOME}/etc/nagios.cfg
RUN download-mibs && echo "mibs +ALL" > /etc/snmp/snmp.conf
RUN sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && \
sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg
RUN cp /etc/services /var/spool/postfix/etc/
RUN mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix
ADD nagios.init /etc/sv/nagios/run
ADD apache.init /etc/sv/apache/run
ADD postfix.init /etc/sv/postfix/run
ADD postfix.stop /etc/sv/postfix/finish
ADD start.sh /usr/local/bin/start_nagios
ENV APACHE_LOCK_DIR /var/run
ENV APACHE_LOG_DIR /var/log/apache2
EXPOSE 80
VOLUME ["/opt/nagios/var", "/opt/nagios/etc", "/opt/nagios/libexec", "/var/log/apache2", "/usr/share/snmp/mibs"]
CMD ["/usr/local/bin/start_nagios"]


@@ -1,40 +0,0 @@
(from "cpuguy83/ubuntu")
(env "NAGIOS_HOME" "/opt/nagios")
(env "NAGIOS_USER" "nagios")
(env "NAGIOS_GROUP" "nagios")
(env "NAGIOS_CMDUSER" "nagios")
(env "NAGIOS_CMDGROUP" "nagios")
(env "NAGIOSADMIN_USER" "nagiosadmin")
(env "NAGIOSADMIN_PASS" "nagios")
(env "APACHE_RUN_USER" "nagios")
(env "APACHE_RUN_GROUP" "nagios")
(env "NAGIOS_TIMEZONE" "UTC")
(run "sed -i 's/universe/universe multiverse/' /etc/apt/sources.list")
(run "apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx")
(run "( egrep -i \"^${NAGIOS_GROUP}\" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i \"^${NAGIOS_CMDGROUP}\" /etc/group || groupadd $NAGIOS_CMDGROUP )")
(run "( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER )")
(add "http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3" "/tmp/nagios.tar.gz")
(run "cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf")
(add "http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz" "/tmp/")
(run "cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install")
(run "sed -i.bak 's/.*\\=www\\-data//g' /etc/apache2/envvars")
(run "export DOC_ROOT=\"DocumentRoot $(echo $NAGIOS_HOME/share)\"; sed -i \"s,DocumentRoot.*,$DOC_ROOT,\" /etc/apache2/sites-enabled/000-default")
(run "ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo")
(run "echo \"use_timezone=$NAGIOS_TIMEZONE\" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo \"SetEnv TZ \\\"${NAGIOS_TIMEZONE}\\\"\" >> /etc/apache2/conf.d/nagios.conf")
(run "mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs")
(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/conf.d\" >> ${NAGIOS_HOME}/etc/nagios.cfg")
(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/monitor\" >> ${NAGIOS_HOME}/etc/nagios.cfg")
(run "download-mibs && echo \"mibs +ALL\" > /etc/snmp/snmp.conf")
(run "sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg")
(run "cp /etc/services /var/spool/postfix/etc/")
(run "mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix")
(add "nagios.init" "/etc/sv/nagios/run")
(add "apache.init" "/etc/sv/apache/run")
(add "postfix.init" "/etc/sv/postfix/run")
(add "postfix.stop" "/etc/sv/postfix/finish")
(add "start.sh" "/usr/local/bin/start_nagios")
(env "APACHE_LOCK_DIR" "/var/run")
(env "APACHE_LOG_DIR" "/var/log/apache2")
(expose "80")
(volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs")
(cmd "/usr/local/bin/start_nagios")


@@ -1,105 +0,0 @@
# This file describes the standard way to build Docker, using docker
#
# Usage:
#
# # Assemble the full dev environment. This is slow the first time.
# docker build -t docker .
#
# # Mount your source in an interactive container for quick testing:
# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash
#
# # Run the test suite:
# docker run --privileged docker hack/make.sh test
#
# # Publish a release:
# docker run --privileged \
# -e AWS_S3_BUCKET=baz \
# -e AWS_ACCESS_KEY=foo \
# -e AWS_SECRET_KEY=bar \
# -e GPG_PASSPHRASE=gloubiboulga \
# docker hack/release.sh
#
# Note: Apparmor used to mess with privileged mode, but this is no longer
# the case. Therefore, you don't have to disable it anymore.
#
docker-version 0.6.1
FROM ubuntu:14.04
MAINTAINER Tianon Gravi <admwiggin@gmail.com> (@tianon)
# Packaged dependencies
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \
apt-utils \
aufs-tools \
automake \
btrfs-tools \
build-essential \
curl \
dpkg-sig \
git \
iptables \
libapparmor-dev \
libcap-dev \
libsqlite3-dev \
lxc=1.0* \
mercurial \
pandoc \
parallel \
reprepro \
ruby1.9.1 \
ruby1.9.1-dev \
s3cmd=1.1.0* \
--no-install-recommends
# Get lvm2 source for compiling statically
RUN git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103
# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags
# note: we don't use "git clone -b" above because it then spews big nasty warnings about 'detached HEAD' state that we can't silence as easily as we can silence them using "git checkout" directly
# Compile and install lvm2
RUN cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper
# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL
# Install Go
RUN curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz
ENV PATH /usr/local/go/bin:$PATH
ENV GOPATH /go:/go/src/github.com/docker/docker/vendor
RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1
# Compile Go for cross compilation
ENV DOCKER_CROSSPLATFORMS \
linux/386 linux/arm \
darwin/amd64 darwin/386 \
freebsd/amd64 freebsd/386 freebsd/arm
# (set an explicit GOARM of 5 for maximum compatibility)
ENV GOARM 5
RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'
# Grab Go's cover tool for dead-simple code coverage testing
RUN go get code.google.com/p/go.tools/cmd/cover
# TODO replace FPM with some very minimal debhelper stuff
RUN gem install --no-rdoc --no-ri fpm --version 1.0.2
# Get the "busybox" image source so we can build locally instead of pulling
RUN git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox
# Setup s3cmd config
RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg
# Set user.email so crosbymichael's in-container merge commits go smoothly
RUN git config --global user.email 'docker-dummy@example.com'
# Add an unprivileged user to be used for tests which need it
RUN groupadd -r docker
RUN useradd --create-home --gid docker unprivilegeduser
VOLUME /var/lib/docker
WORKDIR /go/src/github.com/docker/docker
ENV DOCKER_BUILDTAGS apparmor selinux
# Wrap all commands in the "docker-in-docker" script to allow nested containers
ENTRYPOINT ["hack/dind"]
# Upload docker source
COPY . /go/src/github.com/docker/docker


@@ -1,25 +0,0 @@
(docker-version)
(from "ubuntu:14.04")
(maintainer "Tianon Gravi <admwiggin@gmail.com> (@tianon)")
(run "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq apt-utils aufs-tools automake btrfs-tools build-essential curl dpkg-sig git iptables libapparmor-dev libcap-dev libsqlite3-dev lxc=1.0* mercurial pandoc parallel reprepro ruby1.9.1 ruby1.9.1-dev s3cmd=1.1.0* --no-install-recommends")
(run "git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103")
(run "cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper")
(run "curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz")
(env "PATH" "/usr/local/go/bin:$PATH")
(env "GOPATH" "/go:/go/src/github.com/docker/docker/vendor")
(run "cd /usr/local/go/src && ./make.bash --no-clean 2>&1")
(env "DOCKER_CROSSPLATFORMS" "linux/386 linux/arm darwin/amd64 darwin/386 freebsd/amd64 freebsd/386 freebsd/arm")
(env "GOARM" "5")
(run "cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'")
(run "go get code.google.com/p/go.tools/cmd/cover")
(run "gem install --no-rdoc --no-ri fpm --version 1.0.2")
(run "git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox")
(run "/bin/echo -e '[default]\\naccess_key=$AWS_ACCESS_KEY\\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg")
(run "git config --global user.email 'docker-dummy@example.com'")
(run "groupadd -r docker")
(run "useradd --create-home --gid docker unprivilegeduser")
(volume "/var/lib/docker")
(workdir "/go/src/github.com/docker/docker")
(env "DOCKER_BUILDTAGS" "apparmor selinux")
(entrypoint "hack/dind")
(copy "." "/go/src/github.com/docker/docker")


@@ -1,14 +0,0 @@
FROM ubuntu:14.04
MAINTAINER Erik \\Hollensbe <erik@hollensbe.org>\"
RUN apt-get \update && \
apt-get \"install znc -y
ADD \conf\\" /.znc
RUN foo \
bar \
baz
CMD [ "\/usr\\\"/bin/znc", "-f", "-r" ]


@@ -1,6 +0,0 @@
(from "ubuntu:14.04")
(maintainer "Erik \\\\Hollensbe <erik@hollensbe.org>\\\"")
(run "apt-get \\update && apt-get \\\"install znc -y")
(add "\\conf\\\\\"" "/.znc")
(run "foo bar baz")
(cmd "/usr\\\"/bin/znc" "-f" "-r")


@@ -1,15 +0,0 @@
FROM ubuntu:14.04
RUN apt-get update && apt-get install wget -y
RUN wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb
RUN dpkg -i influxdb_latest_amd64.deb
RUN rm -r /opt/influxdb/shared
VOLUME /opt/influxdb/shared
CMD /usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml
EXPOSE 8083
EXPOSE 8086
EXPOSE 8090
EXPOSE 8099


@@ -1,11 +0,0 @@
(from "ubuntu:14.04")
(run "apt-get update && apt-get install wget -y")
(run "wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb")
(run "dpkg -i influxdb_latest_amd64.deb")
(run "rm -r /opt/influxdb/shared")
(volume "/opt/influxdb/shared")
(cmd "/usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml")
(expose "8083")
(expose "8086")
(expose "8090")
(expose "8099")


@@ -1 +0,0 @@
CMD "[\"echo\", \"Phew, I just managed to escaped those double quotes\"]"


@@ -1 +0,0 @@
(cmd "\"[\\\"echo\\\", \\\"Phew, I just managed to escaped those double quotes\\\"]\"")


@@ -1 +0,0 @@
CMD '["echo", "Well, JSON in a string is JSON too?"]'


@@ -1 +0,0 @@
(cmd "'[\"echo\", \"Well, JSON in a string is JSON too?\"]'")


@@ -1 +0,0 @@
CMD ['echo','single quotes are invalid JSON']


@@ -1 +0,0 @@
(cmd "['echo','single quotes are invalid JSON']")


@@ -1 +0,0 @@
CMD ["echo", "Please, close the brackets when you're done"


@@ -1 +0,0 @@
(cmd "[\"echo\", \"Please, close the brackets when you're done\"")


@@ -1 +0,0 @@
CMD ["echo", "look ma, no quote!]


@@ -1 +0,0 @@
(cmd "[\"echo\", \"look ma, no quote!]")


@@ -1,7 +0,0 @@
FROM ubuntu:14.04
MAINTAINER James Turnbull "james@example.com"
ENV REFRESHED_AT 2014-06-01
RUN apt-get update
RUN apt-get -y install redis-server redis-tools
EXPOSE 6379
ENTRYPOINT [ "/usr/bin/redis-server" ]


@@ -1,7 +0,0 @@
(from "ubuntu:14.04")
(maintainer "James Turnbull \"james@example.com\"")
(env "REFRESHED_AT" "2014-06-01")
(run "apt-get update")
(run "apt-get -y install redis-server redis-tools")
(expose "6379")
(entrypoint "/usr/bin/redis-server")


@@ -1,48 +0,0 @@
FROM busybox:buildroot-2014.02
MAINTAINER docker <docker@docker.io>
ONBUILD RUN ["echo", "test"]
ONBUILD RUN echo test
ONBUILD COPY . /
# RUN Commands \
# linebreak in comment \
RUN ["ls", "-la"]
RUN ["echo", "'1234'"]
RUN echo "1234"
RUN echo 1234
RUN echo '1234' && \
echo "456" && \
echo 789
RUN sh -c 'echo root:testpass \
> /tmp/passwd'
RUN mkdir -p /test /test2 /test3/test
# ENV \
ENV SCUBA 1 DUBA 3
ENV SCUBA "1 DUBA 3"
# CMD \
CMD ["echo", "test"]
CMD echo test
CMD echo "test"
CMD echo 'test'
CMD echo 'test' | wc -
#EXPOSE\
EXPOSE 3000
EXPOSE 9000 5000 6000
USER docker
USER docker:root
VOLUME ["/test"]
VOLUME ["/test", "/test2"]
VOLUME /test3
WORKDIR /test
ADD . /
COPY . copy


@@ -1,29 +0,0 @@
(from "busybox:buildroot-2014.02")
(maintainer "docker <docker@docker.io>")
(onbuild (run "echo" "test"))
(onbuild (run "echo test"))
(onbuild (copy "." "/"))
(run "ls" "-la")
(run "echo" "'1234'")
(run "echo \"1234\"")
(run "echo 1234")
(run "echo '1234' && echo \"456\" && echo 789")
(run "sh -c 'echo root:testpass > /tmp/passwd'")
(run "mkdir -p /test /test2 /test3/test")
(env "SCUBA" "1 DUBA 3")
(env "SCUBA" "\"1 DUBA 3\"")
(cmd "echo" "test")
(cmd "echo test")
(cmd "echo \"test\"")
(cmd "echo 'test'")
(cmd "echo 'test' | wc -")
(expose "3000")
(expose "9000" "5000" "6000")
(user "docker")
(user "docker:root")
(volume "/test")
(volume "/test" "/test2")
(volume "/test3")
(workdir "/test")
(add "." "/")
(copy "." "copy")


@@ -1,16 +0,0 @@
FROM ubuntu:14.04
RUN apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y
ADD .muttrc /
ADD .offlineimaprc /
ADD .tmux.conf /
ADD mutt /.mutt
ADD vim /.vim
ADD vimrc /.vimrc
ADD crontab /etc/crontab
RUN chmod 644 /etc/crontab
RUN mkdir /Mail
RUN mkdir /.offlineimap
RUN echo "export TERM=screen-256color" >/.zshenv
CMD setsid cron; tmux -2


@@ -1,14 +0,0 @@
(from "ubuntu:14.04")
(run "apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y")
(add ".muttrc" "/")
(add ".offlineimaprc" "/")
(add ".tmux.conf" "/")
(add "mutt" "/.mutt")
(add "vim" "/.vim")
(add "vimrc" "/.vimrc")
(add "crontab" "/etc/crontab")
(run "chmod 644 /etc/crontab")
(run "mkdir /Mail")
(run "mkdir /.offlineimap")
(run "echo \"export TERM=screen-256color\" >/.zshenv")
(cmd "setsid cron; tmux -2")


@@ -1,3 +0,0 @@
FROM foo
VOLUME /opt/nagios/var /opt/nagios/etc /opt/nagios/libexec /var/log/apache2 /usr/share/snmp/mibs


@@ -1,2 +0,0 @@
(from "foo")
(volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs")


@@ -1,7 +0,0 @@
FROM ubuntu:14.04
RUN apt-get update && apt-get install libcap2-bin mumble-server -y
ADD ./mumble-server.ini /etc/mumble-server.ini
CMD /usr/sbin/murmurd


@@ -1,4 +0,0 @@
(from "ubuntu:14.04")
(run "apt-get update && apt-get install libcap2-bin mumble-server -y")
(add "./mumble-server.ini" "/etc/mumble-server.ini")
(cmd "/usr/sbin/murmurd")


@@ -1,14 +0,0 @@
FROM ubuntu:14.04
MAINTAINER Erik Hollensbe <erik@hollensbe.org>
RUN apt-get update && apt-get install nginx-full -y
RUN rm -rf /etc/nginx
ADD etc /etc/nginx
RUN chown -R root:root /etc/nginx
RUN /usr/sbin/nginx -qt
RUN mkdir /www
CMD ["/usr/sbin/nginx"]
VOLUME /www
EXPOSE 80


@@ -1,11 +0,0 @@
(from "ubuntu:14.04")
(maintainer "Erik Hollensbe <erik@hollensbe.org>")
(run "apt-get update && apt-get install nginx-full -y")
(run "rm -rf /etc/nginx")
(add "etc" "/etc/nginx")
(run "chown -R root:root /etc/nginx")
(run "/usr/sbin/nginx -qt")
(run "mkdir /www")
(cmd "/usr/sbin/nginx")
(volume "/www")
(expose "80")


@@ -1,23 +0,0 @@
FROM ubuntu:12.04
EXPOSE 27015
EXPOSE 27005
EXPOSE 26901
EXPOSE 27020
RUN apt-get update && apt-get install libc6-dev-i386 curl unzip -y
RUN mkdir -p /steam
RUN curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam
ADD ./script /steam/script
RUN /steam/steamcmd.sh +runscript /steam/script
RUN curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf
RUN curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf
ADD ./server.cfg /steam/tf2/tf/cfg/server.cfg
ADD ./ctf_2fort.cfg /steam/tf2/tf/cfg/ctf_2fort.cfg
ADD ./sourcemod.cfg /steam/tf2/tf/cfg/sourcemod/sourcemod.cfg
RUN rm -r /steam/tf2/tf/addons/sourcemod/configs
ADD ./configs /steam/tf2/tf/addons/sourcemod/configs
RUN mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en
RUN cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en
CMD cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill


@@ -1,20 +0,0 @@
(from "ubuntu:12.04")
(expose "27015")
(expose "27005")
(expose "26901")
(expose "27020")
(run "apt-get update && apt-get install libc6-dev-i386 curl unzip -y")
(run "mkdir -p /steam")
(run "curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam")
(add "./script" "/steam/script")
(run "/steam/steamcmd.sh +runscript /steam/script")
(run "curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf")
(run "curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf")
(add "./server.cfg" "/steam/tf2/tf/cfg/server.cfg")
(add "./ctf_2fort.cfg" "/steam/tf2/tf/cfg/ctf_2fort.cfg")
(add "./sourcemod.cfg" "/steam/tf2/tf/cfg/sourcemod/sourcemod.cfg")
(run "rm -r /steam/tf2/tf/addons/sourcemod/configs")
(add "./configs" "/steam/tf2/tf/addons/sourcemod/configs")
(run "mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en")
(run "cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en")
(cmd "cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill")


@@ -1,9 +0,0 @@
FROM ubuntu:14.04
RUN apt-get update -qy && apt-get install tmux zsh weechat-curses -y
ADD .weechat /.weechat
ADD .tmux.conf /
RUN echo "export TERM=screen-256color" >/.zshenv
CMD zsh -c weechat


@@ -1,6 +0,0 @@
(from "ubuntu:14.04")
(run "apt-get update -qy && apt-get install tmux zsh weechat-curses -y")
(add ".weechat" "/.weechat")
(add ".tmux.conf" "/")
(run "echo \"export TERM=screen-256color\" >/.zshenv")
(cmd "zsh -c weechat")


@@ -1,7 +0,0 @@
FROM ubuntu:14.04
MAINTAINER Erik Hollensbe <erik@hollensbe.org>
RUN apt-get update && apt-get install znc -y
ADD conf /.znc
CMD [ "/usr/bin/znc", "-f", "-r" ]


@@ -1,5 +0,0 @@
(from "ubuntu:14.04")
(maintainer "Erik Hollensbe <erik@hollensbe.org>")
(run "apt-get update && apt-get install znc -y")
(add "conf" "/.znc")
(cmd "/usr/bin/znc" "-f" "-r")


@@ -1,94 +0,0 @@
package parser
import (
"fmt"
"strings"
)
// QuoteString walks characters (after trimming), escapes any double quotes
// and backslashes, then wraps the whole thing in quotes. Very useful for generating
// argument output in nodes.
func QuoteString(str string) string {
result := ""
chars := strings.Split(strings.TrimSpace(str), "")
for _, char := range chars {
switch char {
case `"`:
result += `\"`
case `\`:
result += `\\`
default:
result += char
}
}
return `"` + result + `"`
}
// dumps the AST defined by `node` as a list of sexps. Returns a string
// suitable for printing.
func (node *Node) Dump() string {
str := ""
str += node.Value
for _, n := range node.Children {
str += "(" + n.Dump() + ")\n"
}
if node.Next != nil {
for n := node.Next; n != nil; n = n.Next {
if len(n.Children) > 0 {
str += " " + n.Dump()
} else {
str += " " + QuoteString(n.Value)
}
}
}
return strings.TrimSpace(str)
}
// fullDispatch performs the dispatch based on the two primary strings, cmd and args. Please
// look at the dispatch table in parser.go to see how these dispatchers work.
func fullDispatch(cmd, args string) (*Node, map[string]bool, error) {
fn := dispatch[cmd]
// Ignore invalid Dockerfile instructions
if fn == nil {
fn = parseIgnore
}
sexp, attrs, err := fn(args)
if err != nil {
return nil, nil, err
}
return sexp, attrs, nil
}
// splitCommand takes a single line of text and parses out the cmd and args,
// which are used for dispatching to more exact parsing functions.
func splitCommand(line string) (string, string, error) {
cmdline := TOKEN_WHITESPACE.Split(line, 2)
if len(cmdline) != 2 {
return "", "", fmt.Errorf("We do not understand this file. Please ensure it is a valid Dockerfile. Parser error at %q", line)
}
cmd := strings.ToLower(cmdline[0])
// the cmd should never have whitespace, but it's possible for the args to
// have trailing whitespace.
return cmd, strings.TrimSpace(cmdline[1]), nil
}
// covers comments and empty lines. Lines should be trimmed before passing to
// this function.
func stripComments(line string) string {
// string is already trimmed at this point
if TOKEN_COMMENT.MatchString(line) {
return TOKEN_COMMENT.ReplaceAllString(line, "")
}
return line
}
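A quick illustration of QuoteString's escaping, as an in-package sketch (the helper name is illustrative):

package parser

import "fmt"

// sketchQuoteString is illustrative only.
func sketchQuoteString() {
	fmt.Println(QuoteString(`say "hi" to C:\docker`))
	// prints: "say \"hi\" to C:\\docker"
}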


@@ -1,59 +0,0 @@
package builder
import (
"regexp"
"strings"
)
var (
// `\\|\\\\+|[^\\]|\b|\A` - match a lone backslash (so an escaped `$` can be detected), any number of "\\" (i.e., properly-escaped backslashes), a single non-backslash character, a word boundary, or beginning-of-line
// `\$` - match literal $
// `[[:alnum:]_]+` - match things like `$SOME_VAR`
// `{[[:alnum:]_]+}` - match things like `${SOME_VAR}`
tokenEnvInterpolation = regexp.MustCompile(`(\\|\\\\+|[^\\]|\b|\A)\$([[:alnum:]_]+|{[[:alnum:]_]+})`)
// this intentionally punts on more exotic interpolations like ${SOME_VAR%suffix} and lets the shell handle those directly
)
// handle environment replacement. Used in dispatcher.
func (b *Builder) replaceEnv(str string) string {
for _, match := range tokenEnvInterpolation.FindAllString(str, -1) {
idx := strings.Index(match, "\\$")
if idx != -1 {
if idx+2 >= len(match) {
str = strings.Replace(str, match, "\\$", -1)
continue
}
prefix := match[:idx]
stripped := match[idx+2:]
str = strings.Replace(str, match, prefix+"$"+stripped, -1)
continue
}
match = match[strings.Index(match, "$"):]
matchKey := strings.Trim(match, "${}")
for _, keyval := range b.Config.Env {
tmp := strings.SplitN(keyval, "=", 2)
if tmp[0] == matchKey {
str = strings.Replace(str, match, tmp[1], -1)
break
}
}
}
return str
}
func handleJsonArgs(args []string, attributes map[string]bool) []string {
if len(args) == 0 {
return []string{}
}
if attributes != nil && attributes["json"] {
return args
}
// literal string command, not an exec array
return []string{strings.Join(args, " ")}
}
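handleJsonArgs is what turns the parser's json attribute into exec-form versus shell-form arguments. An in-package sketch (the helper name is illustrative):

package builder

import "fmt"

// sketchHandleJsonArgs is illustrative only.
func sketchHandleJsonArgs() {
	// json=true (exec form): the arguments pass through untouched.
	fmt.Printf("%q\n", handleJsonArgs([]string{"echo", "hi"}, map[string]bool{"json": true}))
	// prints: ["echo" "hi"]

	// No json attribute (shell form): the arguments collapse into one string
	// for later execution through a shell.
	fmt.Printf("%q\n", handleJsonArgs([]string{"echo", "hi"}, nil))
	// prints: ["echo hi"]
}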


@@ -1,75 +0,0 @@
package builtins
import (
"runtime"
"github.com/docker/docker/api"
apiserver "github.com/docker/docker/api/server"
"github.com/docker/docker/daemon/networkdriver/bridge"
"github.com/docker/docker/dockerversion"
"github.com/docker/docker/engine"
"github.com/docker/docker/events"
"github.com/docker/docker/pkg/parsers/kernel"
)
func Register(eng *engine.Engine) error {
if err := daemon(eng); err != nil {
return err
}
if err := remote(eng); err != nil {
return err
}
if err := events.New().Install(eng); err != nil {
return err
}
if err := eng.Register("version", dockerVersion); err != nil {
return err
}
return nil
}
// remote: a RESTful api for cross-docker communication
func remote(eng *engine.Engine) error {
if err := eng.Register("serveapi", apiserver.ServeApi); err != nil {
return err
}
return eng.Register("acceptconnections", apiserver.AcceptConnections)
}
// daemon: a default execution and storage backend for Docker on Linux,
// with the following underlying components:
//
// * Pluggable storage drivers including aufs, vfs, lvm and btrfs.
// * Pluggable execution drivers including lxc and chroot.
//
// In practice `daemon` still includes most core Docker components, including:
//
// * The reference registry client implementation
// * Image management
// * The build facility
// * Logging
//
// These components should be broken off into plugins of their own.
//
func daemon(eng *engine.Engine) error {
return eng.Register("init_networkdriver", bridge.InitDriver)
}
// builtin jobs independent of any subsystem
func dockerVersion(job *engine.Job) engine.Status {
v := &engine.Env{}
v.SetJson("Version", dockerversion.VERSION)
v.SetJson("ApiVersion", api.APIVERSION)
v.SetJson("GitCommit", dockerversion.GITCOMMIT)
v.Set("GoVersion", runtime.Version())
v.Set("Os", runtime.GOOS)
v.Set("Arch", runtime.GOARCH)
if kernelVersion, err := kernel.GetKernelVersion(); err == nil {
v.Set("KernelVersion", kernelVersion.String())
}
if _, err := v.WriteTo(job.Stdout); err != nil {
return job.Error(err)
}
return engine.StatusOK
}
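A hedged sketch of wiring this up: builtins.Register and the "version" job come from the code above, while the zero-argument engine.New() constructor is an assumption (some revisions of the engine took a root directory instead).

package main

import (
	"os"

	"github.com/docker/docker/builtins"
	"github.com/docker/docker/engine"
)

func main() {
	eng := engine.New() // assumed zero-argument constructor
	if err := builtins.Register(eng); err != nil {
		panic(err)
	}
	// "version" was registered by Register above; it writes a JSON env to stdout.
	job := eng.Job("version")
	job.Stdout.Add(os.Stdout)
	if err := job.Run(); err != nil {
		panic(err)
	}
}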

changes.go Normal file

@@ -0,0 +1,106 @@
package docker
import (
"fmt"
"os"
"path/filepath"
"strings"
)
type ChangeType int
const (
ChangeModify ChangeType = iota
ChangeAdd
ChangeDelete
)
type Change struct {
Path string
Kind ChangeType
}
func (change *Change) String() string {
var kind string
switch change.Kind {
case ChangeModify:
kind = "C"
case ChangeAdd:
kind = "A"
case ChangeDelete:
kind = "D"
}
return fmt.Sprintf("%s %s", kind, change.Path)
}
func Changes(layers []string, rw string) ([]Change, error) {
var changes []Change
err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
if err != nil {
return err
}
// Rebase path
path, err = filepath.Rel(rw, path)
if err != nil {
return err
}
path = filepath.Join("/", path)
// Skip root
if path == "/" {
return nil
}
// Skip AUFS metadata
if matched, err := filepath.Match("/.wh..wh.*", path); err != nil || matched {
return err
}
change := Change{
Path: path,
}
// Find out what kind of modification happened
file := filepath.Base(path)
// If there is a whiteout, then the file was removed
if strings.HasPrefix(file, ".wh.") {
// TrimPrefix, not TrimLeft: TrimLeft treats ".wh." as a character set and
// would mangle names such as ".wh.whale" (stripping every leading '.', 'w', 'h').
originalFile := strings.TrimPrefix(file, ".wh.")
change.Path = filepath.Join(filepath.Dir(path), originalFile)
change.Kind = ChangeDelete
} else {
// Otherwise, the file was added
change.Kind = ChangeAdd
// ...Unless it already existed in a top layer, in which case, it's a modification
for _, layer := range layers {
stat, err := os.Stat(filepath.Join(layer, path))
if err != nil && !os.IsNotExist(err) {
return err
}
if err == nil {
// The file existed in the top layer, so that's a modification
// However, if it's a directory, maybe it wasn't actually modified.
// If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
if stat.IsDir() && f.IsDir() {
if f.Size() == stat.Size() && f.Mode() == stat.Mode() && f.ModTime() == stat.ModTime() {
// Both directories are the same, don't record the change
return nil
}
}
change.Kind = ChangeModify
break
}
}
}
// Record change
changes = append(changes, change)
return nil
})
if err != nil {
return nil, err
}
return changes, nil
}
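A hedged sketch of calling Changes; the directories are hypothetical stand-ins for whatever the storage backend actually provides.

package docker

import "fmt"

// sketchChanges is illustrative only; the paths below are assumptions.
func sketchChanges() {
	layers := []string{
		"/var/lib/docker/layers/base", // hypothetical read-only layer
		"/var/lib/docker/layers/mid",  // hypothetical read-only layer
	}
	changes, err := Changes(layers, "/var/lib/docker/containers/abc/rw")
	if err != nil {
		panic(err)
	}
	for _, c := range changes {
		fmt.Println(c.String()) // e.g. "A /etc/motd", "D /tmp/scratch", "C /etc/hosts"
	}
}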

commands.go Normal file

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff