Compare commits


19 Commits

Author SHA1 Message Date
Guillaume J. Charmes
c97a1aada6 Better variable names 2013-05-01 13:45:50 -07:00
Guillaume J. Charmes
803a8d86e5 Change dockerbuilder format, no more tabs and COPY becomes INSERT to avoid conflict with contrib script 2013-05-01 13:45:35 -07:00
Guillaume J. Charmes
5fd1ff014a Add doc for the builder 2013-05-01 13:37:32 -07:00
Guillaume J. Charmes
3c9ed5cdd6 Remove the open from CmdBuild 2013-04-30 18:03:15 -07:00
Guillaume J. Charmes
59b6a93504 Fix image pipe with Builder COPY 2013-04-27 21:45:05 -07:00
Guillaume J. Charmes
72d7c3847a Add builder_test.go 2013-04-25 11:20:56 -07:00
Guillaume J. Charmes
6e7b8efa92 Make Builder.Build return the built image 2013-04-25 11:20:45 -07:00
Guillaume J. Charmes
fa401da0ff Merge pull request #475 from justone/builder
use new image as base of next command
2013-04-25 08:56:46 -07:00
Nate Jones
26ec7b2e77 use new image as base of next command 2013-04-25 08:08:05 -07:00
Guillaume J. Charmes
03a9e41245 Update the unit tests to reflect the new API 2013-04-24 15:35:28 -07:00
Guillaume J. Charmes
55869531f5 Move runtime.Commit to builder.Commit 2013-04-24 15:24:14 -07:00
Guillaume J. Charmes
9193585d66 Moving runtime.Create to builder.Create 2013-04-24 15:14:10 -07:00
Guillaume J. Charmes
38b8373434 Implement the COPY operator within the builder 2013-04-24 14:28:51 -07:00
Guillaume J. Charmes
03b5f8a585 Make sure the destination directory exists when using docker insert 2013-04-24 13:51:28 -07:00
Guillaume J. Charmes
bc260f0225 Add insert command in order to insert external files within an image 2013-04-24 13:37:00 -07:00
Guillaume J. Charmes
45dcd1125b Add a Builder.Commit method 2013-04-24 13:35:57 -07:00
Guillaume J. Charmes
d2e063d9e1 Make builder.Run public; it now runs only the given arguments without sh -c 2013-04-24 12:31:20 -07:00
Guillaume J. Charmes
567a484b66 Clear the containers/images upon failure 2013-04-24 12:02:00 -07:00
Guillaume J. Charmes
5d4b886ad6 Add build command 2013-04-24 11:03:01 -07:00
4351 changed files with 26714 additions and 1079396 deletions

.dockerignore

@@ -1,4 +0,0 @@
bundles
.gopath
vendor/pkg
.go-pkg-cache

ISSUE_TEMPLATE.md

@@ -1,64 +0,0 @@
<!--
If you are reporting a new issue, make sure that we do not have any duplicates
already open. You can ensure this by searching the issue list for this
repository. If there is a duplicate, please close your issue and add a comment
to the existing issue instead.
If you suspect your issue is a bug, please edit your issue description to
include the BUG REPORT INFORMATION shown below. If you fail to provide this
information within 7 days, we cannot debug your issue and will close it. We
will, however, reopen it if you later provide the information.
For more information about reporting issues, see
https://github.com/docker/docker/blob/master/CONTRIBUTING.md#reporting-other-issues
---------------------------------------------------
GENERAL SUPPORT INFORMATION
---------------------------------------------------
The GitHub issue tracker is for bug reports and feature requests.
General support can be found at the following locations:
- Docker Support Forums - https://forums.docker.com
- IRC - irc.freenode.net #docker channel
- Post a question on StackOverflow, using the Docker tag
---------------------------------------------------
BUG REPORT INFORMATION
---------------------------------------------------
Use the commands below to provide key information from your environment:
You do NOT have to include this information if this is a FEATURE REQUEST
-->
**Description**
<!--
Briefly describe the problem you are having in a few paragraphs.
-->
**Steps to reproduce the issue:**
1.
2.
3.
**Describe the results you received:**
**Describe the results you expected:**
**Additional information you deem important (e.g. issue happens only occasionally):**
**Output of `docker version`:**
```
(paste your output here)
```
**Output of `docker info`:**
```
(paste your output here)
```
**Additional environment details (AWS, VirtualBox, physical, etc.):**

PULL_REQUEST_TEMPLATE.md

@@ -1,30 +0,0 @@
<!--
Please make sure you've read and understood our contributing guidelines;
https://github.com/docker/docker/blob/master/CONTRIBUTING.md
** Make sure all your commits include a signature generated with `git commit -s` **
For additional information on our contributing process, read our contributing
guide https://docs.docker.com/opensource/code/
If this is a bug fix, make sure your description includes "fixes #xxxx", or
"closes #xxxx"
Please provide the following information:
-->
**- What I did**
**- How I did it**
**- How to verify it**
**- Description for the changelog**
<!--
Write a short (one line) summary that describes the changes in this
pull request for inclusion in the changelog:
-->
**- A picture of a cute animal (not mandatory but encouraged)**

.gitignore (vendored, 40 lines changed)

@@ -1,33 +1,17 @@
# Docker project generated files to ignore
# if you want to ignore files created by your editor/tools,
# please consider a global .gitignore https://help.github.com/articles/ignoring-files
*.exe
*.exe~
*.orig
*.test
.vagrant*
bin
docker/docker
.*.swp
a.out
*.orig
build_src
command-line-arguments.test
.flymake*
docker.test
auth/auth.test
.idea
.DS_Store
# a .bashrc may be added to customize the build environment
.bashrc
.editorconfig
.gopath/
.go-pkg-cache/
autogen/
bundles/
cmd/dockerd/dockerd
cmd/docker/docker
dockerversion/version_autogen.go
dockerversion/version_autogen_unix.go
docs/AWS_S3_BUCKET
docs/GITCOMMIT
docs/GIT_BRANCH
docs/VERSION
docs/_build
docs/_static
docs/_templates
docs/changed-files
# generated by man/md2man-all.sh
man/man1
man/man5
man/man8
vendor/pkg/
.gopath/

.mailmap (276 lines changed)

@@ -1,275 +1,19 @@
# Generate AUTHORS: hack/generate-authors.sh
# Tip for finding duplicates (besides scanning the output of AUTHORS for name
# duplicates that aren't also email duplicates): scan the output of:
# git log --format='%aE - %aN' | sort -uf
#
# For explanation on this file format: man git-shortlog
Patrick Stapleton <github@gdi2290.com>
Shishir Mahajan <shishir.mahajan@redhat.com> <smahajan@redhat.com>
Erwin van der Koogh <info@erronis.nl>
Ahmed Kamal <email.ahmedkamal@googlemail.com>
Tejesh Mehta <tejesh.mehta@gmail.com> <tj@init.me>
Cristian Staretu <cristian.staretu@gmail.com>
Cristian Staretu <cristian.staretu@gmail.com> <unclejacksons@gmail.com>
Cristian Staretu <cristian.staretu@gmail.com> <unclejack@users.noreply.github.com>
Marcus Linke <marcus.linke@gmx.de>
Aleksandrs Fadins <aleks@s-ko.net>
Christopher Latham <sudosurootdev@gmail.com>
Hu Keping <hukeping@huawei.com>
Wayne Chang <wayne@neverfear.org>
Chen Chao <cc272309126@gmail.com>
Daehyeok Mun <daehyeok@gmail.com>
<daehyeok@gmail.com> <daehyeok@daehyeokui-MacBook-Air.local>
<jt@yadutaf.fr> <admin@jtlebi.fr>
<jeff@docker.com> <jefferya@programmerq.net>
<charles.hooper@dotcloud.com> <chooper@plumata.com>
# Generate AUTHORS: git log --all --format='%aN <%aE>' | sort -uf | grep -v vagrant-ubuntu-12
<charles.hooper@dotcloud.com> <chooper@plumata.com>
<daniel.mizyrycki@dotcloud.com> <daniel@dotcloud.com>
<daniel.mizyrycki@dotcloud.com> <mzdaniel@glidelink.net>
Guillaume J. Charmes <guillaume.charmes@docker.com> <charmes.guillaume@gmail.com>
<guillaume.charmes@docker.com> <guillaume@dotcloud.com>
<guillaume.charmes@docker.com> <guillaume@docker.com>
<guillaume.charmes@docker.com> <guillaume.charmes@dotcloud.com>
<guillaume.charmes@docker.com> <guillaume@charmes.net>
Guillaume J. Charmes <guillaume.charmes@dotcloud.com> creack <charmes.guillaume@gmail.com>
<guillaume.charmes@dotcloud.com> <guillaume@dotcloud.com>
<kencochrane@gmail.com> <KenCochrane@gmail.com>
Thatcher Peskens <thatcher@docker.com>
Thatcher Peskens <thatcher@docker.com> <thatcher@dotcloud.com>
Thatcher Peskens <thatcher@docker.com> dhrp <thatcher@gmx.net>
<sridharr@activestate.com> <github@srid.name>
Thatcher Peskens <thatcher@dotcloud.com> dhrp <thatcher@dotcloud.com>
Thatcher Peskens <thatcher@dotcloud.com> dhrp <thatcher@gmx.net>
Jérôme Petazzoni <jerome.petazzoni@dotcloud.com> jpetazzo <jerome.petazzoni@dotcloud.com>
Jérôme Petazzoni <jerome.petazzoni@dotcloud.com> <jp@enix.org>
Joffrey F <joffrey@docker.com>
Joffrey F <joffrey@docker.com> <joffrey@dotcloud.com>
Joffrey F <joffrey@docker.com> <f.joffrey@gmail.com>
Joffrey F <joffrey@dotcloud.com>
<joffrey@dotcloud.com> <f.joffrey@gmail.com>
Tim Terhorst <mynamewastaken+git@gmail.com>
Andy Smith <github@anarkystic.com>
<kalessin@kalessin.fr> <louis@dotcloud.com>
<victor.vieux@docker.com> <victor.vieux@dotcloud.com>
<victor.vieux@docker.com> <victor@dotcloud.com>
<victor.vieux@docker.com> <dev@vvieux.com>
<victor.vieux@docker.com> <victor@docker.com>
<victor.vieux@docker.com> <vieux@docker.com>
<victor.vieux@docker.com> <victorvieux@gmail.com>
<victor.vieux@dotcloud.com> <victor@dotcloud.com>
<dominik@honnef.co> <dominikh@fork-bomb.org>
<ehanchrow@ine.com> <eric.hanchrow@gmail.com>
Walter Stanish <walter@pratyeka.org>
<daniel@gasienica.ch> <dgasienica@zynga.com>
Roberto Hashioka <roberto_hashioka@hotmail.com>
Konstantin Pelykh <kpelykh@zettaset.com>
David Sissitka <me@dsissitka.com>
Nolan Darilek <nolan@thewordnerd.info>
<mastahyeti@gmail.com> <mastahyeti@users.noreply.github.com>
Benoit Chesneau <bchesneau@gmail.com>
Jordan Arentsen <blissdev@gmail.com>
Daniel Garcia <daniel@danielgarcia.info>
Miguel Angel Fernández <elmendalerenda@gmail.com>
Bhiraj Butala <abhiraj.butala@gmail.com>
Faiz Khan <faizkhan00@gmail.com>
Victor Lyuboslavsky <victor@victoreda.com>
Jean-Baptiste Barth <jeanbaptiste.barth@gmail.com>
Matthew Mueller <mattmuelle@gmail.com>
<mosoni@ebay.com> <mohitsoni1989@gmail.com>
Shih-Yuan Lee <fourdollars@gmail.com>
Daniel Mizyrycki <daniel.mizyrycki@dotcloud.com> root <root@vagrant-ubuntu-12.10.vagrantup.com>
Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
<proppy@google.com> <proppy@aminche.com>
<michael@docker.com> <michael@crosbymichael.com>
<michael@docker.com> <crosby.michael@gmail.com>
<michael@docker.com> <crosbymichael@gmail.com>
<github@developersupport.net> <github@metaliveblog.com>
<brandon@ifup.org> <brandon@ifup.co>
<dano@spotify.com> <daniel.norberg@gmail.com>
<danny@codeaholics.org> <Danny.Yates@mailonline.co.uk>
<gurjeet@singh.im> <singh.gurjeet@gmail.com>
<shawn@churchofgit.com> <shawnlandden@gmail.com>
<sjoerd-github@linuxonly.nl> <sjoerd@byte.nl>
<solomon@docker.com> <solomon.hykes@dotcloud.com>
<solomon@docker.com> <solomon@dotcloud.com>
<solomon@docker.com> <s@docker.com>
Sven Dowideit <SvenDowideit@home.org.au>
Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@fosiki.com>
Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@docker.com>
Sven Dowideit <SvenDowideit@home.org.au> <¨SvenDowideit@home.org.au¨>
Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@home.org.au>
Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@users.noreply.github.com>
Sven Dowideit <SvenDowideit@home.org.au> <sven@t440s.home.gateway>
<alexl@redhat.com> <alexander.larsson@gmail.com>
Alexander Morozov <lk4d4@docker.com> <lk4d4math@gmail.com>
Alexander Morozov <lk4d4@docker.com>
<git.nivoc@neverbox.com> <kuehnle@online.de>
O.S. Tezer <ostezer@gmail.com>
<ostezer@gmail.com> <ostezer@users.noreply.github.com>
Roberto G. Hashioka <roberto.hashioka@docker.com> <roberto_hashioka@hotmail.com>
<justin.p.simonelis@gmail.com> <justin.simonelis@PTS-JSIMON2.toronto.exclamation.com>
<taim@bosboot.org> <maztaim@users.noreply.github.com>
<viktor.vojnovski@amadeus.com> <vojnovski@gmail.com>
<vbatts@redhat.com> <vbatts@hashbangbash.com>
<altsysrq@gmail.com> <iamironbob@gmail.com>
Sridhar Ratnakumar <sridharr@activestate.com>
Sridhar Ratnakumar <sridharr@activestate.com> <github@srid.name>
Liang-Chi Hsieh <viirya@gmail.com>
Aleksa Sarai <asarai@suse.de>
Aleksa Sarai <asarai@suse.de> <asarai@suse.com>
Aleksa Sarai <asarai@suse.de> <cyphar@cyphar.com>
Will Weaver <monkey@buildingbananas.com>
Timothy Hobbs <timothyhobbs@seznam.cz>
Nathan LeClaire <nathan.leclaire@docker.com> <nathan.leclaire@gmail.com>
Nathan LeClaire <nathan.leclaire@docker.com> <nathanleclaire@gmail.com>
<github@hollensbe.org> <erik+github@hollensbe.org>
<github@albersweb.de> <albers@users.noreply.github.com>
<lsm5@fedoraproject.org> <lsm5@redhat.com>
<marc@marc-abramowitz.com> <msabramo@gmail.com>
Matthew Heon <mheon@redhat.com> <mheon@mheonlaptop.redhat.com>
<bernat@luffy.cx> <vincent@bernat.im>
<bernat@luffy.cx> <Vincent.Bernat@exoscale.ch>
<p@pwaller.net> <peter@scraperwiki.com>
<andrew.weiss@outlook.com> <andrew.weiss@microsoft.com>
Francisco Carriedo <fcarriedo@gmail.com>
<julienbordellier@gmail.com> <git@julienbordellier.com>
<ahmetb@microsoft.com> <ahmetalpbalkan@gmail.com>
<arnaud.porterie@docker.com> <icecrime@gmail.com>
<baloo@gandi.net> <superbaloo+registrations.github@superbaloo.net>
Brian Goff <cpuguy83@gmail.com>
<cpuguy83@gmail.com> <bgoff@cpuguy83-mbp.home>
<eric@windisch.us> <ewindisch@docker.com>
<frank.rosquin+github@gmail.com> <frank.rosquin@gmail.com>
Hollie Teal <hollie@docker.com>
<hollie@docker.com> <hollie.teal@docker.com>
<hollie@docker.com> <hollietealok@users.noreply.github.com>
<huu@prismskylabs.com> <whoshuu@gmail.com>
Jessica Frazelle <jessfraz@google.com>
Jessica Frazelle <jessfraz@google.com> <me@jessfraz.com>
Jessica Frazelle <jessfraz@google.com> <jess@mesosphere.com>
Jessica Frazelle <jessfraz@google.com> <jfrazelle@users.noreply.github.com>
Jessica Frazelle <jessfraz@google.com> <acidburn@docker.com>
Jessica Frazelle <jessfraz@google.com> <jess@docker.com>
Jessica Frazelle <jessfraz@google.com> <princess@docker.com>
<konrad.wilhelm.kleine@gmail.com> <kwk@users.noreply.github.com>
<tintypemolly@gmail.com> <tintypemolly@Ohui-MacBook-Pro.local>
<estesp@linux.vnet.ibm.com> <estesp@gmail.com>
<github@gone.nl> <thaJeztah@users.noreply.github.com>
Thomas LEVEIL <thomasleveil@gmail.com> Thomas LÉVEIL <thomasleveil@users.noreply.github.com>
<oi@truffles.me.uk> <timruffles@googlemail.com>
<Vincent.Bernat@exoscale.ch> <bernat@luffy.cx>
Antonio Murdaca <antonio.murdaca@gmail.com> <amurdaca@redhat.com>
Antonio Murdaca <antonio.murdaca@gmail.com> <runcom@redhat.com>
Antonio Murdaca <antonio.murdaca@gmail.com> <me@runcom.ninja>
Antonio Murdaca <antonio.murdaca@gmail.com> <runcom@linux.com>
Antonio Murdaca <antonio.murdaca@gmail.com> <runcom@users.noreply.github.com>
Darren Shepherd <darren.s.shepherd@gmail.com> <darren@rancher.com>
Deshi Xiao <dxiao@redhat.com> <dsxiao@dataman-inc.com>
Deshi Xiao <dxiao@redhat.com> <xiaods@gmail.com>
Doug Davis <dug@us.ibm.com> <duglin@users.noreply.github.com>
Jacob Atzen <jacob@jacobatzen.dk> <jatzen@gmail.com>
Jeff Nickoloff <jeff.nickoloff@gmail.com> <jeff@allingeek.com>
John Howard (VM) <John.Howard@microsoft.com> <jhowardmsft@users.noreply.github.com>
John Howard (VM) <John.Howard@microsoft.com>
John Howard (VM) <John.Howard@microsoft.com> <john.howard@microsoft.com>
John Howard (VM) <John.Howard@microsoft.com> <jhoward@microsoft.com>
Madhu Venugopal <madhu@socketplane.io> <madhu@docker.com>
Mary Anthony <mary.anthony@docker.com> <mary@docker.com>
Mary Anthony <mary.anthony@docker.com> moxiegirl <mary@docker.com>
Mary Anthony <mary.anthony@docker.com> <moxieandmore@gmail.com>
mattyw <mattyw@me.com> <gh@mattyw.net>
resouer <resouer@163.com> <resouer@gmail.com>
AJ Bowen <aj@gandi.net> soulshake <amy@gandi.net>
AJ Bowen <aj@gandi.net> soulshake <aj@gandi.net>
Tibor Vass <teabee89@gmail.com> <tibor@docker.com>
Tibor Vass <teabee89@gmail.com> <tiborvass@users.noreply.github.com>
Vincent Bernat <bernat@luffy.cx> <Vincent.Bernat@exoscale.ch>
Yestin Sun <sunyi0804@gmail.com> <yestin.sun@polyera.com>
bin liu <liubin0329@users.noreply.github.com> <liubin0329@gmail.com>
John Howard (VM) <John.Howard@microsoft.com> jhowardmsft <jhoward@microsoft.com>
Ankush Agarwal <ankushagarwal11@gmail.com> <ankushagarwal@users.noreply.github.com>
Tangi COLIN <tangicolin@gmail.com> tangicolin <tangicolin@gmail.com>
Allen Sun <allen.sun@daocloud.io>
Adrien Gallouët <adrien@gallouet.fr> <angt@users.noreply.github.com>
<aanm90@gmail.com> <martins@noironetworks.com>
Anuj Bahuguna <anujbahuguna.dev@gmail.com>
Anusha Ragunathan <anusha.ragunathan@docker.com> <anusha@docker.com>
Avi Miller <avi.miller@oracle.com> <avi.miller@gmail.com>
Brent Salisbury <brent.salisbury@docker.com> <brent@docker.com>
Chander G <chandergovind@gmail.com>
Chun Chen <ramichen@tencent.com> <chenchun.feed@gmail.com>
Ying Li <cyli@twistedmatrix.com>
Daehyeok Mun <daehyeok@gmail.com> <daehyeok@daehyeok-ui-MacBook-Air.local>
<dqminh@cloudflare.com> <dqminh89@gmail.com>
Daniel, Dao Quang Minh <dqminh@cloudflare.com>
Daniel Nephin <dnephin@docker.com> <dnephin@gmail.com>
Dave Tucker <dt@docker.com> <dave@dtucker.co.uk>
Doug Tangren <d.tangren@gmail.com>
Frederick F. Kautz IV <fkautz@redhat.com> <fkautz@alumni.cmu.edu>
Ben Golub <ben.golub@dotcloud.com>
Harold Cooper <hrldcpr@gmail.com>
hsinko <21551195@zju.edu.cn> <hsinko@users.noreply.github.com>
Josh Hawn <josh.hawn@docker.com> <jlhawn@berkeley.edu>
Justin Cormack <justin.cormack@docker.com>
<justin.cormack@docker.com> <justin.cormack@unikernel.com>
<justin.cormack@docker.com> <justin@specialbusservice.com>
Kamil Domański <kamil@domanski.co>
Lei Jitang <leijitang@huawei.com>
<leijitang@huawei.com> <leijitang@gmail.com>
Linus Heckemann <lheckemann@twig-world.com>
<lheckemann@twig-world.com> <anonymouse2048@gmail.com>
Lynda O'Leary <lyndaoleary29@gmail.com>
<lyndaoleary29@gmail.com> <lyndaoleary@hotmail.com>
Marianna Tessel <mtesselh@gmail.com>
Michael Huettermann <michael@huettermann.net>
Moysés Borges <moysesb@gmail.com>
<moysesb@gmail.com> <moyses.furtado@wplex.com.br>
Nigel Poulton <nigelpoulton@hotmail.com>
Qiang Huang <h.huangqiang@huawei.com>
<h.huangqiang@huawei.com> <qhuang@10.0.2.15>
Boaz Shuster <ripcurld.github@gmail.com>
Shuwei Hao <haosw@cn.ibm.com>
<haosw@cn.ibm.com> <haoshuwei24@gmail.com>
Soshi Katsuta <soshi.katsuta@gmail.com>
<soshi.katsuta@gmail.com> <katsuta_soshi@cyberagent.co.jp>
Stefan Berger <stefanb@linux.vnet.ibm.com>
<stefanb@linux.vnet.ibm.com> <stefanb@us.ibm.com>
Stephen Day <stephen.day@docker.com>
<stephen.day@docker.com> <stevvooe@users.noreply.github.com>
Toli Kuznets <toli@docker.com>
Tristan Carel <tristan@cogniteev.com>
<tristan@cogniteev.com> <tristan.carel@gmail.com>
Vincent Demeester <vincent@sbr.pm>
<vincent@sbr.pm> <vincent+github@demeester.fr>
Vishnu Kannan <vishnuk@google.com>
xlgao-zju <xlgao@zju.edu.cn> xlgao <xlgao@zju.edu.cn>
yuchangchun <yuchangchun1@huawei.com> y00277921 <yuchangchun1@huawei.com>
<zij@case.edu> <zjaffee@us.ibm.com>
<anujbahuguna.dev@gmail.com> <abahuguna@fiberlink.com>
<eungjun.yi@navercorp.com> <semtlenori@gmail.com>
<haosw@cn.ibm.com> <haoshuwei1989@163.com>
Hao Shu Wei <haosw@cn.ibm.com>
<matt.bentley@docker.com> <mbentley@mbentley.net>
<MihaiBorob@gmail.com> <MihaiBorobocea@gmail.com>
<redmond.martin@gmail.com> <xgithub@redmond5.com>
<redmond.martin@gmail.com> <martin@tinychat.com>
<srbrahma@us.ibm.com> <sbrahma@us.ibm.com>
<suda.akihiro@lab.ntt.co.jp> <suda.kyoto@gmail.com>
<thomas@gazagnaire.org> <thomas@gazagnaire.com>
Shengbo Song <thomassong@tencent.com> mYmNeo <mymneo@163.com>
Shengbo Song <thomassong@tencent.com>
<sylvain@ascribe.io> <sylvain.bellemare@ezeep.com>
Sylvain Bellemare <sylvain@ascribe.io>
<alexandre.beslic@gmail.com> <abronan@docker.com>
<bilal.amarni@gmail.com> <bamarni@users.noreply.github.com>
<misty@docker.com> <misty@apache.org>
Arnaud Porterie <arnaud.porterie@docker.com>
<cpuguy83@gmail.com> <bgoff@cpuguy83-mbp.local>
David M. Karr <davidmichaelkarr@gmail.com>
<diogo@docker.com> <diogo.monica@gmail.com>
<horwitz@addthis.com> <horwitzja@gmail.com>
<kherner@progress.com> <chosenken@gmail.com>
Kenfe-Mickaël Laventure <mickael.laventure@gmail.com>
<mauricio@medallia.com> <mauriciogaravaglia@gmail.com>
<dhenderson@gmail.com> <Dave.Henderson@ca.ibm.com>
<wkq5325@gmail.com> <wkqwu@cn.ibm.com>
<mike.goelzer@docker.com> <mgoelzer@docker.com>
<nicholasjamesrusso@gmail.com> <nicholasrusso@icloud.com>
Runshen Zhu <runshen.zhu@gmail.com>
Tom Barlow <tomwbarlow@gmail.com>
Xianlu Bird <xianlubird@gmail.com>
Dan Feldman <danf@jfrog.com>
Harry Zhang <harryz@hyper.sh> <harryzhang@zju.edu.cn>

AUTHORS (1625 lines changed)

File diff suppressed because it is too large

CONTRIBUTING.md

@@ -1,401 +1,93 @@
# Contributing to Docker
Want to hack on Docker? Awesome! We have a contributor's guide that explains
[setting up a Docker development environment and the contribution
process](https://docs.docker.com/opensource/project/who-written-for/).
Want to hack on Docker? Awesome! There are instructions to get you
started on the website: http://docker.io/gettingstarted.html
[![Contributors guide](docs/static_files/contributors.png)](https://docs.docker.com/opensource/project/who-written-for/)
They are probably not perfect; please let us know if anything feels
wrong or incomplete.
This page contains information about reporting issues as well as some tips and
guidelines useful to experienced open source contributors. Finally, make sure
you read our [community guidelines](#docker-community-guidelines) before you
start participating.
## Topics
* [Reporting Security Issues](#reporting-security-issues)
* [Design and Cleanup Proposals](#design-and-cleanup-proposals)
* [Reporting Issues](#reporting-other-issues)
* [Quick Contribution Tips and Guidelines](#quick-contribution-tips-and-guidelines)
* [Community Guidelines](#docker-community-guidelines)
## Reporting security issues
The Docker maintainers take security seriously. If you discover a security
issue, please bring it to their attention right away!
Please **DO NOT** file a public issue, instead send your report privately to
[security@docker.com](mailto:security@docker.com).
Security reports are greatly appreciated and we will publicly thank you for it.
We also like to send gifts&mdash;if you're into Docker schwag, make sure to let
us know. We currently do not offer a paid security bounty program, but are not
ruling it out in the future.
## Reporting other issues
A great way to contribute to the project is to send a detailed report when you
encounter an issue. We always appreciate a well-written, thorough bug report,
and will thank you for it!
Check that [our issue database](https://github.com/docker/docker/issues)
doesn't already include that problem or suggestion before submitting an issue.
If you find a match, you can use the "subscribe" button to get notified on
updates. Do *not* leave random "+1" or "I have this too" comments, as they
only clutter the discussion and don't help resolve it. However, if you have
ways to reproduce the issue or additional information that may help resolve
it, please leave a comment.
When reporting issues, always include:
* The output of `docker version`.
* The output of `docker info`.
Also include the steps required to reproduce the problem if possible and
applicable. This information will help us review and fix your issue faster.
When sending lengthy log files, consider posting them as a gist (https://gist.github.com).
Don't forget to remove sensitive data from your log files before posting (you can
replace those parts with "REDACTED").
## Quick contribution tips and guidelines
This section gives the experienced contributor some tips and guidelines.
## Contribution guidelines
### Pull requests are always welcome
Not sure if that typo is worth a pull request? Found a bug and know how to fix
it? Do it! We will appreciate it. Any significant improvement should be
documented as [a GitHub issue](https://github.com/docker/docker/issues) before
anybody starts working on it.
We are always thrilled to receive pull requests, and do our best to
process them as fast as possible. Not sure if that typo is worth a pull
request? Do it! We will appreciate it.
We are always thrilled to receive pull requests. We do our best to process them
quickly. If your pull request is not accepted on the first try,
don't get discouraged! Our contributor's guide explains [the review process we
use for simple changes](https://docs.docker.com/opensource/workflow/make-a-contribution/).
If your pull request is not accepted on the first try, don't be
discouraged! If there's a problem with the implementation, hopefully you
received feedback on what to improve.
### Design and cleanup proposals
We're trying very hard to keep Docker lean and focused. We don't want it
to do everything for everybody. This means that we might decide against
incorporating a new feature. However, there might be a way to implement
that feature *on top of* docker.
You can propose new designs for existing Docker features. You can also design
entirely new features. We really appreciate contributors who want to refactor or
otherwise cleanup our project. For information on making these types of
contributions, see [the advanced contribution
section](https://docs.docker.com/opensource/workflow/advanced-contributing/) in
the contributors guide.
### Discuss your design on the mailing list
We try hard to keep Docker lean and focused. Docker can't do everything for
everybody. This means that we might decide against incorporating a new feature.
However, there might be a way to implement that feature *on top of* Docker.
We recommend discussing your plans [on the mailing
list](https://groups.google.com/forum/?fromgroups#!forum/docker-club)
before starting to code - especially for more ambitious contributions.
This gives other contributors a chance to point you in the right
direction, give feedback on your design, and maybe point out if someone
else is working on the same thing.
### Talking to other Docker users and contributors
### Create issues...
<table class="tg">
<col width="45%">
<col width="65%">
<tr>
<td>Forums</td>
<td>
A public forum for users to discuss questions and explore current design patterns and
best practices about Docker and related projects in the Docker Ecosystem. To participate,
just log in with your Docker Hub account on <a href="https://forums.docker.com" target="_blank">https://forums.docker.com</a>.
</td>
</tr>
<tr>
<td>Internet&nbsp;Relay&nbsp;Chat&nbsp;(IRC)</td>
<td>
<p>
IRC is a direct line to our most knowledgeable Docker users; we have
both the <code>#docker</code> and <code>#docker-dev</code> channels on
<strong>irc.freenode.net</strong>.
IRC is a rich chat protocol but it can overwhelm new users. You can search
<a href="https://botbot.me/freenode/docker/#" target="_blank">our chat archives</a>.
</p>
<p>
Read our <a href="https://docs.docker.com/opensource/get-help/#irc-quickstart" target="_blank">IRC quickstart guide</a>
for an easy way to get started.
</p>
</td>
</tr>
<tr>
<td>Google Group</td>
<td>
The <a href="https://groups.google.com/forum/#!forum/docker-dev" target="_blank">docker-dev</a>
group is for contributors and other people contributing to the Docker project.
You can join them without a Google account by sending an email to
<a href="mailto:docker-dev+subscribe@googlegroups.com">docker-dev+subscribe@googlegroups.com</a>.
After receiving the join-request message, you can simply reply to that to confirm the subscription.
</td>
</tr>
<tr>
<td>Twitter</td>
<td>
You can follow <a href="https://twitter.com/docker/" target="_blank">Docker's Twitter feed</a>
to get updates on our products. You can also tweet us questions or just
share blogs or stories.
</td>
</tr>
<tr>
<td>Stack Overflow</td>
<td>
Stack Overflow has over 17000 Docker questions listed. We regularly
monitor <a href="https://stackoverflow.com/search?tab=newest&q=docker" target="_blank">Docker questions</a>
and so do many other knowledgeable Docker users.
</td>
</tr>
</table>
Any significant improvement should be documented as [a GitHub
issue](https://github.com/dotcloud/docker/issues) before anybody
starts working on it.
### ...but check for existing issues first!
Please take a moment to check that an issue doesn't already exist
documenting your bug report or improvement proposal. If it does, it
never hurts to add a quick "+1" or "I have this problem too". This will
help prioritize the most common problems and requests.
### Conventions
Fork the repository and make changes on your fork in a feature branch:
Fork the repo and make changes on your fork in a feature branch:
- If it's a bug fix branch, name it XXXX-something where XXXX is the number of
the issue.
- If it's a feature branch, create an enhancement issue to announce
your intentions, and name it XXXX-something where XXXX is the number of the
issue.
- If it's a bugfix branch, name it XXX-something where XXX is the number of the
issue.
- If it's a feature branch, create an enhancement issue to announce your
intentions, and name it XXX-something where XXX is the number of the issue.
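For example (issue number and branch name hypothetical), a bugfix branch could
be created like this:

```
# 1234 is the number of the issue being fixed
git checkout master
git checkout -b 1234-fix-builder-cleanup
```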
Submit unit tests for your changes. Go has a great test framework built in; use
it! Take a look at existing tests for inspiration. [Run the full test
suite](https://docs.docker.com/opensource/project/test-and-docs/) on your branch before
submitting a pull request.
Submit unit tests for your changes. Go has a great test framework built in; use
it! Take a look at existing tests for inspiration. Run the full test suite on
your branch before submitting a pull request.
Update the documentation when creating or modifying features. Test your
documentation changes for clarity, concision, and correctness, as well as a
clean documentation build. See our contributors guide for [our style
guide](https://docs.docker.com/opensource/doc-style) and instructions on [building
the documentation](https://docs.docker.com/opensource/project/test-and-docs/#build-and-test-the-documentation).
Make sure you include relevant updates or additions to documentation when
creating or modifying features.
Write clean code. Universally formatted code promotes ease of writing, reading,
and maintenance. Always run `gofmt -s -w file.go` on each changed file before
committing your changes. Most editors have plug-ins that do this automatically.
and maintenance. Always run `go fmt` before committing your changes. Most
editors have plugins that do this automatically, and there's also a git
pre-commit hook:
Pull request descriptions should be as clear as possible and include a reference
to all the issues that they address.
```
curl -o .git/hooks/pre-commit https://raw.github.com/edsrzf/gofmt-git-hook/master/fmt-check && chmod +x .git/hooks/pre-commit
```
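A minimal sketch of doing the same formatting by hand, assuming `origin/master`
is the base branch (the diff range and file pattern are illustrative):

```
# reformat only the Go files touched on this branch
git diff --name-only origin/master... -- '*.go' | xargs -r gofmt -s -w
```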
Commit messages must start with a capitalized and short summary (max. 50 chars)
written in the imperative, followed by an optional, more detailed explanatory
text which is separated from the summary by an empty line.
Pull request descriptions should be as clear as possible and include a
reference to all the issues that they address.
Code review comments may be added to your pull request. Discuss, then make the
suggested modifications and push additional commits to your feature branch. Post
a comment after pushing. New commits show up in the pull request automatically,
but the reviewers are notified only when you comment.
suggested modifications and push additional commits to your feature branch. Be
sure to post a comment after pushing. The new commits will show up in the pull
request automatically, but the reviewers will not be notified unless you
comment.
Pull requests must be cleanly rebased on top of master without multiple branches
mixed into the PR.
Before the pull request is merged, make sure that you squash your commits into
logical units of work using `git rebase -i` and `git push -f`. After every
commit the test suite should be passing. Include documentation changes in the
same commit so that a revert would remove all traces of the feature or fix.
**Git tip**: If your PR no longer merges cleanly, use `rebase master` in your
feature branch to update your pull request rather than `merge master`.
Commits that fix or close an issue should include a reference like `Closes #XXX`
or `Fixes #XXX`, which will automatically close the issue when merged.
Before you make a pull request, squash your commits into logical units of work
using `git rebase -i` and `git push -f`. A logical unit of work is a consistent
set of patches that should be reviewed together: for example, upgrading the
version of a vendored dependency and taking advantage of its now available new
feature constitute two separate units of work. Implementing a new function and
calling it in another file constitute a single logical unit of work. The vast
majority of submissions should have a single commit, so if in doubt: squash
down to one.
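As a sketch (branch name hypothetical), squashing a few work-in-progress
commits down to one before review looks like:

```
git rebase -i master     # mark the follow-up commits as "squash" or "fixup"
git push -f origin 1234-fix-builder-cleanup
```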
After every commit, [make sure the test suite passes](https://docs.docker.com/opensource/project/test-and-docs/).
Include documentation changes in the same pull request so that a revert would
remove all traces of the feature or fix.
Include an issue reference like `Closes #XXXX` or `Fixes #XXXX` in commits that
close an issue. Including references automatically closes the issue on a merge.
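Putting these rules together, a hypothetical commit message with a capitalized,
imperative summary and an issue reference might read:

```
Fix builder cleanup on failed build step

Containers created for a failing step were left behind.
Remove them before returning the error.

Closes #1234
```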
Please do not add yourself to the `AUTHORS` file, as it is regenerated regularly
from the Git history.
Please see the [Coding Style](#coding-style) for further guidelines.
### Merge approval
Docker maintainers use LGTM (Looks Good To Me) in comments on the code review to
indicate acceptance.
A change requires LGTMs from an absolute majority of the maintainers of each
component affected. For example, if a change affects `docs/` and `registry/`, it
needs an absolute majority from the maintainers of `docs/` AND, separately, an
absolute majority of the maintainers of `registry/`.
For more details, see the [MAINTAINERS](MAINTAINERS) page.
### Sign your work
The sign-off is a simple line at the end of the explanation for the patch. Your
signature certifies that you wrote the patch or otherwise have the right to pass
it on as an open-source patch. The rules are pretty simple: if you can certify
the below (from [developercertificate.org](http://developercertificate.org/)):
```
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
1 Letterman Drive
Suite D4700
San Francisco, CA, 94129
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
```
Then you just add a line to every git commit message:
Signed-off-by: Joe Smith <joe.smith@email.com>
Use your real name (sorry, no pseudonyms or anonymous contributions).
If you set your `user.name` and `user.email` git configs, you can sign your
commit automatically with `git commit -s`.
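For example, using the identity from above:

```
git config user.name "Joe Smith"
git config user.email joe.smith@email.com
git commit -s -m "Fix builder cleanup on failed build step"
```

The `-s` flag appends the `Signed-off-by:` line with those values.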
### How can I become a maintainer?
The procedures for adding new maintainers are explained in the
global [MAINTAINERS](https://github.com/docker/opensource/blob/master/MAINTAINERS)
file in the [https://github.com/docker/opensource/](https://github.com/docker/opensource/)
repository.
Don't forget: being a maintainer is a time investment. Make sure you
will have time to make yourself available. You don't have to be a
maintainer to make a difference on the project!
## Docker community guidelines
We want to keep the Docker community awesome, growing and collaborative. We need
your help to keep it that way. To help with this we've come up with some general
guidelines for the community as a whole:
* Be nice: Be courteous, respectful and polite to fellow community members:
no regional, racial, gender, or other abuse will be tolerated. We like
nice people way better than mean ones!
* Encourage diversity and participation: Make everyone in our community feel
welcome, regardless of their background and the extent of their
contributions, and do everything possible to encourage participation in
our community.
* Keep it legal: Basically, don't get us in trouble. Share only content that
you own, do not share private or sensitive information, and don't break
the law.
* Stay on topic: Make sure that you are posting to the correct channel and
avoid off-topic discussions. Remember when you update an issue or respond
to an email you are potentially sending to a large number of people. Please
consider this before you update. Also remember that nobody likes spam.
* Don't send email to the maintainers: There's no need to send email to the
maintainers to ask them to investigate an issue or to take a look at a
pull request. Instead of sending an email, GitHub mentions should be
used to ping maintainers to review a pull request, a proposal or an
issue.
### Guideline violations — 3 strikes method
The point of this section is not to find opportunities to punish people, but we
do need a fair way to deal with people who are making our community suck.
1. First occurrence: We'll give you a friendly, but public reminder that the
behavior is inappropriate according to our guidelines.
2. Second occurrence: We will send you a private message with a warning that
any additional violations will result in removal from the community.
3. Third occurrence: Depending on the violation, we may need to delete or ban
your account.
**Notes:**
* Obvious spammers are banned on first occurrence. If we don't do this, we'll
have spam all over the place.
* Violations are forgiven after 6 months of good behavior, and we won't hold a
grudge.
* People who commit minor infractions will get some education, rather than
being hammered by the 3 strikes process.
* The rules apply equally to everyone in the community, no matter how much
you've contributed.
* Extreme violations of a threatening, abusive, destructive or illegal nature
will be addressed immediately and are not subject to 3 strikes or forgiveness.
* Contact abuse@docker.com to report abuse or appeal violations. In the case of
appeals, we know that mistakes happen, and we'll work with you to come up with a
fair solution if there has been a misunderstanding.
## Coding Style
Unless explicitly stated, we follow all coding guidelines from the Go
community. While some of these standards may seem arbitrary, they somehow seem
to result in a solid, consistent codebase.
It is possible that the code base does not currently comply with these
guidelines. We are not looking for a massive PR that fixes this, since that
goes against the spirit of the guidelines. All new contributions should make a
best effort to clean up and make the code base better than they found it.
Obviously, apply your best judgement. Remember, the goal here is to make the
code base easier for humans to navigate and understand. Always keep that in
mind when nudging others to comply.
The rules:
1. All code should be formatted with `gofmt -s`.
2. All code should pass the default levels of
[`golint`](https://github.com/golang/lint).
3. All code should follow the guidelines covered in [Effective
Go](http://golang.org/doc/effective_go.html) and [Go Code Review
Comments](https://github.com/golang/go/wiki/CodeReviewComments).
4. Comment the code. Tell us the why, the history and the context.
5. Document _all_ declarations and methods, even private ones. Declare
expectations, caveats and anything else that may be important. If a type
gets exported, having the comments already there will ensure it's ready.
6. Variable name length should be proportional to its context and no longer.
`noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`.
In practice, short methods will have short variable names and globals will
have longer names.
7. No underscores in package names. If you need a compound name, step back,
and re-examine why you need a compound name. If you still think you need a
compound name, lose the underscore.
8. No utils or helpers packages. If a function is not general enough to
warrant its own package, it has not been written generally enough to be a
part of a util package. Just leave it unexported and well-documented.
9. All tests should run with `go test` and outside tooling should not be
required. No, we don't need another unit testing framework. Assertion
packages are acceptable if they provide _real_ incremental value.
10. Even though we call these "rules" above, they are actually just
guidelines. Since you've read all the rules, you now know that.
If you are having trouble getting into the mood of idiomatic Go, we recommend
reading through [Effective Go](https://golang.org/doc/effective_go.html). The
[Go Blog](https://blog.golang.org) is also a great resource. Drinking the
kool-aid is a lot easier than going thirsty.
Add your name to the AUTHORS file, but make sure the list is sorted and your
name and email address match your git configuration. The AUTHORS file is
regenerated occasionally from the git commit history, so a mismatch may result
in your changes being overwritten.

Dockerfile

@@ -1,246 +0,0 @@
# This file describes the standard way to build Docker, using docker
#
# Usage:
#
# # Assemble the full dev environment. This is slow the first time.
# docker build -t docker .
#
# # Mount your source in an interactive container for quick testing:
# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash
#
# # Run the test suite:
# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py
#
# # Publish a release:
# docker run --privileged \
# -e AWS_S3_BUCKET=baz \
# -e AWS_ACCESS_KEY=foo \
# -e AWS_SECRET_KEY=bar \
# -e GPG_PASSPHRASE=gloubiboulga \
# docker hack/release.sh
#
# Note: AppArmor used to mess with privileged mode, but this is no longer
# the case. Therefore, you don't have to disable it anymore.
#
FROM debian:jessie
# allow replacing httpredir or deb mirror
ARG APT_MIRROR=deb.debian.org
RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list
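# For example (hypothetical mirror hostname), the default can be overridden at
# build time:
#   docker build --build-arg APT_MIRROR=mirrors.example.com -t docker .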
# Add zfs ppa
RUN apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys E871F18B51E0147C77796AC81196BA81F6B0FC61 \
|| apt-key adv --keyserver hkp://pgp.mit.edu:80 --recv-keys E871F18B51E0147C77796AC81196BA81F6B0FC61
RUN echo deb http://ppa.launchpad.net/zfs-native/stable/ubuntu trusty main > /etc/apt/sources.list.d/zfs.list
# Packaged dependencies
RUN apt-get update && apt-get install -y \
apparmor \
apt-utils \
aufs-tools \
automake \
bash-completion \
binutils-mingw-w64 \
bsdmainutils \
btrfs-tools \
build-essential \
clang \
cmake \
createrepo \
curl \
dpkg-sig \
gcc-mingw-w64 \
git \
iptables \
jq \
libapparmor-dev \
libcap-dev \
libltdl-dev \
libnl-3-dev \
libprotobuf-c0-dev \
libprotobuf-dev \
libsqlite3-dev \
libsystemd-journal-dev \
libtool \
mercurial \
net-tools \
pkg-config \
protobuf-compiler \
protobuf-c-compiler \
python-dev \
python-mock \
python-pip \
python-websocket \
ubuntu-zfs \
xfsprogs \
vim-common \
libzfs-dev \
tar \
zip \
--no-install-recommends \
&& pip install awscli==1.10.15
# Get lvm2 source for compiling statically
ENV LVM2_VERSION 2.02.103
RUN mkdir -p /usr/local/lvm2 \
&& curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \
| tar -xzC /usr/local/lvm2 --strip-components=1
# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags
# Compile and install lvm2
RUN cd /usr/local/lvm2 \
&& ./configure \
--build="$(gcc -print-multiarch)" \
--enable-static_link \
&& make device-mapper \
&& make install_device-mapper
# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL
# Configure the container for OSX cross compilation
ENV OSX_SDK MacOSX10.11.sdk
ENV OSX_CROSS_COMMIT a9317c18a3a457ca0a657f08cc4d0d43c6cf8953
RUN set -x \
&& export OSXCROSS_PATH="/osxcross" \
&& git clone https://github.com/tpoechtrager/osxcross.git $OSXCROSS_PATH \
&& ( cd $OSXCROSS_PATH && git checkout -q $OSX_CROSS_COMMIT) \
&& curl -sSL https://s3.dockerproject.org/darwin/v2/${OSX_SDK}.tar.xz -o "${OSXCROSS_PATH}/tarballs/${OSX_SDK}.tar.xz" \
&& UNATTENDED=yes OSX_VERSION_MIN=10.6 ${OSXCROSS_PATH}/build.sh
ENV PATH /osxcross/target/bin:$PATH
# Install seccomp: the version shipped in trusty is too old
ENV SECCOMP_VERSION 2.3.1
RUN set -x \
&& export SECCOMP_PATH="$(mktemp -d)" \
&& curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \
| tar -xzC "$SECCOMP_PATH" --strip-components=1 \
&& ( \
cd "$SECCOMP_PATH" \
&& ./configure --prefix=/usr/local \
&& make \
&& make install \
&& ldconfig \
) \
&& rm -rf "$SECCOMP_PATH"
# Install Go
# IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines
# will need updating, to avoid errors. Ping #docker-maintainers on IRC
# with a heads-up.
ENV GO_VERSION 1.7.5
RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" \
| tar -xzC /usr/local
ENV PATH /go/bin:/usr/local/go/bin:$PATH
ENV GOPATH /go
# Compile Go for cross compilation
ENV DOCKER_CROSSPLATFORMS \
linux/386 linux/arm \
darwin/amd64 \
freebsd/amd64 freebsd/386 freebsd/arm \
windows/amd64 windows/386 \
solaris/amd64
# Dependency for golint
ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3
RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \
&& (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT)
# Grab Go's lint tool
ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456
RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \
&& (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \
&& go install -v github.com/golang/lint/golint
# Install CRIU for checkpoint/restore support
ENV CRIU_VERSION 2.2
RUN mkdir -p /usr/src/criu \
&& curl -sSL https://github.com/xemul/criu/archive/v${CRIU_VERSION}.tar.gz | tar -v -C /usr/src/criu/ -xz --strip-components=1 \
&& cd /usr/src/criu \
&& make \
&& make install-criu
# Install two versions of the registry. The first is an older version that
# only supports schema1 manifests. The second is a newer version that supports
# both. This allows integration-cli tests to cover push/pull with both schema1
# and schema2 manifests.
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd
ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \
&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \
&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \
&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \
&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \
&& rm -rf "$GOPATH"
# Install notary and notary-server
ENV NOTARY_VERSION v0.4.2
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \
&& (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \
&& GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \
go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \
&& GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \
go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \
&& rm -rf "$GOPATH"
# Get the "docker-py" source so we can run their integration tests
ENV DOCKER_PY_COMMIT e2655f658408f9ad1f62abdef3eb6ed43c0cf324
RUN git clone https://github.com/docker/docker-py.git /docker-py \
&& cd /docker-py \
&& git checkout -q $DOCKER_PY_COMMIT \
&& pip install -r test-requirements.txt
# Install yamllint for validating swagger.yaml
RUN pip install yamllint==1.5.0
# Install go-swagger for validating swagger.yaml
ENV GO_SWAGGER_COMMIT c28258affb0b6251755d92489ef685af8d4ff3eb
RUN git clone https://github.com/go-swagger/go-swagger.git /go/src/github.com/go-swagger/go-swagger \
&& (cd /go/src/github.com/go-swagger/go-swagger && git checkout -q $GO_SWAGGER_COMMIT) \
&& go install -v github.com/go-swagger/go-swagger/cmd/swagger
# Set user.email so crosbymichael's in-container merge commits go smoothly
RUN git config --global user.email 'docker-dummy@example.com'
# Add an unprivileged user to be used for tests which need it
RUN groupadd -r docker
RUN useradd --create-home --gid docker unprivilegeduser
VOLUME /var/lib/docker
WORKDIR /go/src/github.com/docker/docker
ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux
# Let us use a .bashrc file
RUN ln -sfv $PWD/.bashrc ~/.bashrc
# Add integration helpers to bashrc
RUN echo "source $PWD/hack/make/.integration-test-helpers" >> /etc/bash.bashrc
# Register Docker's bash completion.
RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker
# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/
RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \
buildpack-deps:jessie@sha256:25785f89240fbcdd8a74bdaf30dd5599a9523882c6dfc567f2e9ef7cf6f79db6 \
busybox:latest@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0 \
debian:jessie@sha256:f968f10b4b523737e253a97eac59b0d1420b5c19b69928d35801a6373ffe330e \
hello-world:latest@sha256:8be990ef2aeb16dbcb9271ddfe2610fa6658d13f6dfb8bc72074cc1ca36966a7
# See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is)
# Install tomlv, vndr, runc, containerd, tini, docker-proxy
# Please edit hack/dockerfile/install-binaries.sh to update them.
COPY hack/dockerfile/binaries-commits /tmp/binaries-commits
COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh
RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy bindata
# Wrap all commands in the "docker-in-docker" script to allow nested containers
ENTRYPOINT ["hack/dind"]
# Upload docker source
COPY . /go/src/github.com/docker/docker

Dockerfile.aarch64

@@ -1,175 +0,0 @@
# This file describes the standard way to build Docker on aarch64, using docker
#
# Usage:
#
# # Assemble the full dev environment. This is slow the first time.
# docker build -t docker -f Dockerfile.aarch64 .
#
# # Mount your source in an interactive container for quick testing:
# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash
#
# # Run the test suite:
# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py
#
# Note: AppArmor used to mess with privileged mode, but this is no longer
# the case. Therefore, you don't have to disable it anymore.
#
FROM aarch64/ubuntu:wily
# Packaged dependencies
RUN apt-get update && apt-get install -y \
apparmor \
aufs-tools \
automake \
bash-completion \
btrfs-tools \
build-essential \
cmake \
createrepo \
curl \
dpkg-sig \
g++ \
gcc \
git \
iptables \
jq \
libapparmor-dev \
libc6-dev \
libcap-dev \
libltdl-dev \
libsqlite3-dev \
libsystemd-dev \
mercurial \
net-tools \
parallel \
pkg-config \
python-dev \
python-mock \
python-pip \
python-websocket \
gccgo \
iproute2 \
iputils-ping \
vim-common \
--no-install-recommends
# Get lvm2 source for compiling statically
ENV LVM2_VERSION 2.02.103
RUN mkdir -p /usr/local/lvm2 \
&& curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \
| tar -xzC /usr/local/lvm2 --strip-components=1
# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags
# Fix platform enablement in lvm2 to support aarch64 properly
RUN set -e \
&& for f in config.guess config.sub; do \
curl -fsSL -o "/usr/local/lvm2/autoconf/$f" "http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=$f;hb=HEAD"; \
done
# "arch.c:78:2: error: #error the arch code needs to know about your machine type"
# Compile and install lvm2
RUN cd /usr/local/lvm2 \
&& ./configure \
--build="$(gcc -print-multiarch)" \
--enable-static_link \
&& make device-mapper \
&& make install_device-mapper
# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL
# Install seccomp: the version shipped in trusty is too old
ENV SECCOMP_VERSION 2.3.1
RUN set -x \
&& export SECCOMP_PATH="$(mktemp -d)" \
&& curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \
| tar -xzC "$SECCOMP_PATH" --strip-components=1 \
&& ( \
cd "$SECCOMP_PATH" \
&& ./configure --prefix=/usr/local \
&& make \
&& make install \
&& ldconfig \
) \
&& rm -rf "$SECCOMP_PATH"
# Install Go
# We don't have official binary tarballs for ARM64, either for Go or bootstrap,
# so we use gccgo as bootstrap to build Go from source code.
# We don't use the official ARMv6 released binaries as a GOROOT_BOOTSTRAP, because
# not all ARM64 platforms support 32-bit mode. 32-bit mode is optional for ARMv8.
ENV GO_VERSION 1.7.5
RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \
&& cd /usr/src/go/src \
&& GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash
ENV PATH /usr/src/go/bin:$PATH
ENV GOPATH /go
# Only install one version of the registry: the old version, which supports
# schema1 manifests, does not work on ARM64, so the integration-cli tests for
# schema1 manifests should be skipped on ARM64.
ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \
&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \
&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \
&& rm -rf "$GOPATH"
# Install notary and notary-server
ENV NOTARY_VERSION v0.4.2
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \
&& (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \
&& GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \
go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \
&& GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \
go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \
&& rm -rf "$GOPATH"
# Get the "docker-py" source so we can run their integration tests
ENV DOCKER_PY_COMMIT e2655f658408f9ad1f62abdef3eb6ed43c0cf324
RUN git clone https://github.com/docker/docker-py.git /docker-py \
&& cd /docker-py \
&& git checkout -q $DOCKER_PY_COMMIT \
&& pip install -r test-requirements.txt
# Set user.email so crosbymichael's in-container merge commits go smoothly
RUN git config --global user.email 'docker-dummy@example.com'
# Add an unprivileged user to be used for tests which need it
RUN groupadd -r docker
RUN useradd --create-home --gid docker unprivilegeduser
VOLUME /var/lib/docker
WORKDIR /go/src/github.com/docker/docker
ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux
# Let us use a .bashrc file
RUN ln -sfv $PWD/.bashrc ~/.bashrc
# Register Docker's bash completion.
RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker
# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/
RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \
aarch64/buildpack-deps:jessie@sha256:6aa1d6910791b7ac78265fd0798e5abd6cb3f27ae992f6f960f6c303ec9535f2 \
aarch64/busybox:latest@sha256:b23a6a37cf269dff6e46d2473b6e227afa42b037e6d23435f1d2bc40fc8c2828 \
aarch64/debian:jessie@sha256:4be74a41a7c70ebe887b634b11ffe516cf4fcd56864a54941e56bb49883c3170 \
aarch64/hello-world:latest@sha256:65a4a158587b307bb02db4de41b836addb0c35175bdc801367b1ac1ddeb9afda
# See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is)
# Install tomlv, vndr, runc, containerd, tini, docker-proxy
# Please edit hack/dockerfile/install-binaries.sh to update them.
COPY hack/dockerfile/binaries-commits /tmp/binaries-commits
COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh
RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy
# Wrap all commands in the "docker-in-docker" script to allow nested containers
ENTRYPOINT ["hack/dind"]
# Upload docker source
COPY . /go/src/github.com/docker/docker

Dockerfile.armhf

@@ -1,182 +0,0 @@
# This file describes the standard way to build Docker on ARMv7, using docker
#
# Usage:
#
# # Assemble the full dev environment. This is slow the first time.
# docker build -t docker -f Dockerfile.armhf .
#
# # Mount your source in an interactive container for quick testing:
# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash
#
# # Run the test suite:
# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py
#
# Note: AppArmor used to mess with privileged mode, but this is no longer
# the case. Therefore, you don't have to disable it anymore.
#
FROM armhf/debian:jessie
# allow replacing httpredir or deb mirror
ARG APT_MIRROR=deb.debian.org
RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list
# Packaged dependencies
RUN apt-get update && apt-get install -y \
apparmor \
aufs-tools \
automake \
bash-completion \
btrfs-tools \
build-essential \
createrepo \
curl \
cmake \
dpkg-sig \
git \
iptables \
jq \
net-tools \
libapparmor-dev \
libcap-dev \
libltdl-dev \
libsqlite3-dev \
libsystemd-journal-dev \
libtool \
mercurial \
pkg-config \
python-dev \
python-mock \
python-pip \
python-websocket \
xfsprogs \
tar \
vim-common \
--no-install-recommends \
&& pip install awscli==1.10.15
# Get lvm2 source for compiling statically
ENV LVM2_VERSION 2.02.103
RUN mkdir -p /usr/local/lvm2 \
&& curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \
| tar -xzC /usr/local/lvm2 --strip-components=1
# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags
# Compile and install lvm2
RUN cd /usr/local/lvm2 \
&& ./configure \
--build="$(gcc -print-multiarch)" \
--enable-static_link \
&& make device-mapper \
&& make install_device-mapper
# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL
# Install Go
ENV GO_VERSION 1.7.5
RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" \
| tar -xzC /usr/local
ENV PATH /go/bin:/usr/local/go/bin:$PATH
ENV GOPATH /go
# We're building for armhf, which is ARMv7, so let's be explicit about that
ENV GOARCH arm
ENV GOARM 7
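# With these set, Go builds inside this image target 32-bit ARMv7; a quick
# check (a sketch):
#   go env GOARCH GOARM   # -> arm, 7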
# Dependency for golint
ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3
RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \
&& (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT)
# Grab Go's lint tool
ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456
RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \
&& (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \
&& go install -v github.com/golang/lint/golint
# Install seccomp: the version shipped in jessie is too old
ENV SECCOMP_VERSION 2.3.1
RUN set -x \
&& export SECCOMP_PATH="$(mktemp -d)" \
&& curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \
| tar -xzC "$SECCOMP_PATH" --strip-components=1 \
&& ( \
cd "$SECCOMP_PATH" \
&& ./configure --prefix=/usr/local \
&& make \
&& make install \
&& ldconfig \
) \
&& rm -rf "$SECCOMP_PATH"
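# A quick way to confirm the freshly built libseccomp is visible to the
# dynamic linker (a sketch, not part of the build):
#   ldconfig -p | grep libseccomp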
# Install two versions of the registry. The first is an older version that
# only supports schema1 manifests. The second is a newer version that supports
# both. This allows integration-cli tests to cover push/pull with both schema1
# and schema2 manifests.
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd
ENV REGISTRY_COMMIT cb08de17d74bef86ce6c5abe8b240e282f5750be
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \
&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \
&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \
&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \
&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \
&& rm -rf "$GOPATH"
# Install notary and notary-server
ENV NOTARY_VERSION v0.4.2
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \
&& (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \
&& GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \
go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \
&& GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \
go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \
&& rm -rf "$GOPATH"
# Get the "docker-py" source so we can run their integration tests
ENV DOCKER_PY_COMMIT e2655f658408f9ad1f62abdef3eb6ed43c0cf324
RUN git clone https://github.com/docker/docker-py.git /docker-py \
&& cd /docker-py \
&& git checkout -q $DOCKER_PY_COMMIT \
&& pip install -r test-requirements.txt
# Set user.email so crosbymichael's in-container merge commits go smoothly
RUN git config --global user.email 'docker-dummy@example.com'
# Add an unprivileged user to be used for tests which need it
RUN groupadd -r docker
RUN useradd --create-home --gid docker unprivilegeduser
VOLUME /var/lib/docker
WORKDIR /go/src/github.com/docker/docker
ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux
# Let us use a .bashrc file
RUN ln -sfv $PWD/.bashrc ~/.bashrc
# Register Docker's bash completion.
RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker
# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/
RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \
armhf/buildpack-deps:jessie@sha256:ca6cce8e5bf5c952129889b5cc15cd6aa8d995d77e55e3749bbaadae50e476cb \
armhf/busybox:latest@sha256:d98a7343ac750ffe387e3d514f8521ba69846c216778919b01414b8617cfb3d4 \
armhf/debian:jessie@sha256:4a2187483f04a84f9830910fe3581d69b3c985cc045d9f01d8e2f3795b28107b \
armhf/hello-world:latest@sha256:161dcecea0225975b2ad5f768058212c1e0d39e8211098666ffa1ac74cfb7791
# See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is)
# Install tomlv, vndr, runc, containerd, tini, docker-proxy
# Please edit hack/dockerfile/install-binaries.sh to update them.
COPY hack/dockerfile/binaries-commits /tmp/binaries-commits
COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh
RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy
ENTRYPOINT ["hack/dind"]
# Upload docker source
COPY . /go/src/github.com/docker/docker

Dockerfile.ppc64le

@@ -1,188 +0,0 @@
# This file describes the standard way to build Docker on ppc64le, using docker
#
# Usage:
#
# # Assemble the full dev environment. This is slow the first time.
# docker build -t docker -f Dockerfile.ppc64le .
#
# # Mount your source in an interactive container for quick testing:
# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash
#
# # Run the test suite:
# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py
#
# Note: AppArmor used to mess with privileged mode, but this is no longer
# the case. Therefore, you don't have to disable it anymore.
#
FROM ppc64le/debian:jessie
# allow replacing httpredir or deb mirror
ARG APT_MIRROR=deb.debian.org
RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list
# Packaged dependencies
RUN apt-get update && apt-get install -y \
apparmor \
aufs-tools \
automake \
bash-completion \
btrfs-tools \
build-essential \
cmake \
createrepo \
curl \
dpkg-sig \
git \
iptables \
jq \
net-tools \
libapparmor-dev \
libcap-dev \
libltdl-dev \
libsqlite3-dev \
libsystemd-journal-dev \
libtool \
mercurial \
pkg-config \
python-dev \
python-mock \
python-pip \
python-websocket \
xfsprogs \
tar \
vim-common \
--no-install-recommends
# Get lvm2 source for compiling statically
ENV LVM2_VERSION 2.02.103
RUN mkdir -p /usr/local/lvm2 \
&& curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \
| tar -xzC /usr/local/lvm2 --strip-components=1
# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags
# Fix platform enablement in lvm2 to support ppc64le properly
RUN set -e \
&& for f in config.guess config.sub; do \
curl -fsSL -o "/usr/local/lvm2/autoconf/$f" "http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=$f;hb=HEAD"; \
done
# "arch.c:78:2: error: #error the arch code needs to know about your machine type"
# Compile and install lvm2
RUN cd /usr/local/lvm2 \
&& ./configure \
--build="$(gcc -print-multiarch)" \
--enable-static_link \
&& make device-mapper \
&& make install_device-mapper
# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL
# Install seccomp: the version shipped in jessie is too old
ENV SECCOMP_VERSION 2.3.1
RUN set -x \
&& export SECCOMP_PATH="$(mktemp -d)" \
&& curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \
| tar -xzC "$SECCOMP_PATH" --strip-components=1 \
&& ( \
cd "$SECCOMP_PATH" \
&& ./configure --prefix=/usr/local \
&& make \
&& make install \
&& ldconfig \
) \
&& rm -rf "$SECCOMP_PATH"
# Install Go
# NOTE: official ppc64le go binaries weren't available until go 1.6.4 and 1.7.4
ENV GO_VERSION 1.7.5
RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" \
| tar -xzC /usr/local
ENV PATH /go/bin:/usr/local/go/bin:$PATH
ENV GOPATH /go
# Dependency for golint
ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3
RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \
&& (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT)
# Grab Go's lint tool
ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456
RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \
&& (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \
&& go install -v github.com/golang/lint/golint
# Install two versions of the registry. The first is an older version that
# only supports schema1 manifests. The second is a newer version that supports
# both. This allows integration-cli tests to cover push/pull with both schema1
# and schema2 manifests.
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd
ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \
&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \
&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \
&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \
&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \
&& rm -rf "$GOPATH"
# Install notary and notary-server
ENV NOTARY_VERSION v0.4.2
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \
&& (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \
&& GOPATH="$GOPATH/src/github.com/docker/notary/Godeps/_workspace:$GOPATH" \
go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \
&& GOPATH="$GOPATH/src/github.com/docker/notary/Godeps/_workspace:$GOPATH" \
go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \
&& rm -rf "$GOPATH"
# Get the "docker-py" source so we can run their integration tests
ENV DOCKER_PY_COMMIT e2655f658408f9ad1f62abdef3eb6ed43c0cf324
RUN git clone https://github.com/docker/docker-py.git /docker-py \
&& cd /docker-py \
&& git checkout -q $DOCKER_PY_COMMIT \
&& pip install -r test-requirements.txt
# Set user.email so crosbymichael's in-container merge commits go smoothly
RUN git config --global user.email 'docker-dummy@example.com'
# Add an unprivileged user to be used for tests which need it
RUN groupadd -r docker
RUN useradd --create-home --gid docker unprivilegeduser
VOLUME /var/lib/docker
WORKDIR /go/src/github.com/docker/docker
ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux
# Let us use a .bashrc file
RUN ln -sfv $PWD/.bashrc ~/.bashrc
# Register Docker's bash completion.
RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker
# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/
RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \
ppc64le/buildpack-deps:jessie@sha256:902bfe4ef1389f94d143d64516dd50a2de75bca2e66d4a44b1d73f63ddf05dda \
ppc64le/busybox:latest@sha256:38bb82085248d5a3c24bd7a5dc146f2f2c191e189da0441f1c2ca560e3fc6f1b \
ppc64le/debian:jessie@sha256:412845f51b6ab662afba71bc7a716e20fdb9b84f185d180d4c7504f8a75c4f91 \
ppc64le/hello-world:latest@sha256:186a40a9a02ca26df0b6c8acdfb8ac2f3ae6678996a838f977e57fac9d963974
# See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is)
# Install tomlv, vndr, runc, containerd, tini, docker-proxy
# Please edit hack/dockerfile/install-binaries.sh to update them.
COPY hack/dockerfile/binaries-commits /tmp/binaries-commits
COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh
RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy
# Wrap all commands in the "docker-in-docker" script to allow nested containers
ENTRYPOINT ["hack/dind"]
# Upload docker source
COPY . /go/src/github.com/docker/docker

Dockerfile.s390x

@@ -1,190 +0,0 @@
# This file describes the standard way to build Docker on s390x, using docker
#
# Usage:
#
# # Assemble the full dev environment. This is slow the first time.
# docker build -t docker -f Dockerfile.s390x .
#
# # Mount your source in an interactive container for quick testing:
# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash
#
# # Run the test suite:
# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py
#
# Note: AppArmor used to mess with privileged mode, but this is no longer
# the case. Therefore, you don't have to disable it anymore.
#
FROM s390x/gcc:6.1
# Packaged dependencies
RUN apt-get update && apt-get install -y \
apparmor \
aufs-tools \
automake \
bash-completion \
btrfs-tools \
build-essential \
cmake \
createrepo \
curl \
dpkg-sig \
git \
iptables \
jq \
net-tools \
libapparmor-dev \
libcap-dev \
libltdl-dev \
libsqlite3-dev \
libsystemd-journal-dev \
libtool \
mercurial \
pkg-config \
python-dev \
python-mock \
python-pip \
python-websocket \
xfsprogs \
tar \
vim-common \
--no-install-recommends
# glibc in Debian has a bug specific to s390x that won't be fixed until Debian 8.6 is released
# - https://github.com/docker/docker/issues/24748
# - https://sourceware.org/git/?p=glibc.git;a=commit;h=890b7a4b33d482b5c768ab47d70758b80227e9bc
# - https://sourceware.org/git/?p=glibc.git;a=commit;h=2e807f29595eb5b1e5d0decc6e356a3562ecc58e
RUN echo 'deb http://httpredir.debian.org/debian jessie-proposed-updates main' >> /etc/apt/sources.list.d/pu.list \
&& apt-get update \
&& apt-get install -y libc6 \
&& rm -rf /var/lib/apt/lists/*
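# A quick check (a sketch) that the proposed-updates libc6 actually landed:
#   dpkg -s libc6 | grep '^Version'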
# Install seccomp: the version shipped in jessie is too old
ENV SECCOMP_VERSION 2.3.1
RUN set -x \
&& export SECCOMP_PATH="$(mktemp -d)" \
&& curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \
| tar -xzC "$SECCOMP_PATH" --strip-components=1 \
&& ( \
cd "$SECCOMP_PATH" \
&& ./configure --prefix=/usr/local \
&& make \
&& make install \
&& ldconfig \
) \
&& rm -rf "$SECCOMP_PATH"
# Get lvm2 source for compiling statically
ENV LVM2_VERSION 2.02.103
RUN mkdir -p /usr/local/lvm2 \
&& curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \
| tar -xzC /usr/local/lvm2 --strip-components=1
# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags
# Fix platform enablement in lvm2 to support s390x properly
RUN set -e \
&& for f in config.guess config.sub; do \
curl -fsSL -o "/usr/local/lvm2/autoconf/$f" "http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=$f;hb=HEAD"; \
done
# "arch.c:78:2: error: #error the arch code needs to know about your machine type"
# Compile and install lvm2
RUN cd /usr/local/lvm2 \
&& ./configure \
--build="$(gcc -print-multiarch)" \
--enable-static_link \
&& make device-mapper \
&& make install_device-mapper
# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL
ENV GO_VERSION 1.7.5
RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" \
| tar -xzC /usr/local
ENV PATH /go/bin:/usr/local/go/bin:$PATH
ENV GOPATH /go
# Dependency for golint
ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3
RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \
&& (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT)
# Grab Go's lint tool
ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456
RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \
&& (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \
&& go install -v github.com/golang/lint/golint
# Install two versions of the registry. The first is an older version that
# only supports schema1 manifests. The second is a newer version that supports
# both. This allows integration-cli tests to cover push/pull with both schema1
# and schema2 manifests.
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd
ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \
&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \
&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \
&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \
&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \
&& rm -rf "$GOPATH"
# Install notary and notary-server
ENV NOTARY_VERSION v0.4.2
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \
&& (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \
&& GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \
go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \
&& GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \
go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \
&& rm -rf "$GOPATH"
# Get the "docker-py" source so we can run their integration tests
ENV DOCKER_PY_COMMIT e2655f658408f9ad1f62abdef3eb6ed43c0cf324
RUN git clone https://github.com/docker/docker-py.git /docker-py \
&& cd /docker-py \
&& git checkout -q $DOCKER_PY_COMMIT \
&& pip install -r test-requirements.txt
# Set user.email so crosbymichael's in-container merge commits go smoothly
RUN git config --global user.email 'docker-dummy@example.com'
# Add an unprivileged user to be used for tests which need it
RUN groupadd -r docker
RUN useradd --create-home --gid docker unprivilegeduser
VOLUME /var/lib/docker
WORKDIR /go/src/github.com/docker/docker
ENV DOCKER_BUILDTAGS apparmor selinux seccomp
# Let us use a .bashrc file
RUN ln -sfv $PWD/.bashrc ~/.bashrc
# Register Docker's bash completion.
RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker
# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/
RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \
s390x/buildpack-deps:jessie@sha256:4d1381224acaca6c4bfe3604de3af6972083a8558a99672cb6989c7541780099 \
s390x/busybox:latest@sha256:dd61522c983884a66ed72d60301925889028c6d2d5e0220a8fe1d9b4c6a4f01b \
s390x/debian:jessie@sha256:b74c863400909eff3c5e196cac9bfd1f6333ce47aae6a38398d87d5875da170a \
s390x/hello-world:latest@sha256:780d80b3a7677c3788c0d5cd9168281320c8d4a6d9183892d8ee5cdd610f5699
# See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is)
# Install tomlv, vndr, runc, containerd, tini, docker-proxy
# Please edit hack/dockerfile/install-binaries.sh to update them.
COPY hack/dockerfile/binaries-commits /tmp/binaries-commits
COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh
RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy
# Wrap all commands in the "docker-in-docker" script to allow nested containers
ENTRYPOINT ["hack/dind"]
# Upload docker source
COPY . /go/src/github.com/docker/docker

Dockerfile.simple

@@ -1,73 +0,0 @@
# docker build -t docker:simple -f Dockerfile.simple .
# docker run --rm docker:simple hack/make.sh dynbinary
# docker run --rm --privileged docker:simple hack/dind hack/make.sh test-unit
# docker run --rm --privileged -v /var/lib/docker docker:simple hack/dind hack/make.sh dynbinary test-integration-cli
# This represents the bare minimum required to build and test Docker.
FROM debian:jessie
# allow replacing httpredir or deb mirror
ARG APT_MIRROR=deb.debian.org
RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list
# Compile and runtime deps
# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#build-dependencies
# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#runtime-dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
btrfs-tools \
build-essential \
curl \
cmake \
gcc \
git \
libapparmor-dev \
libdevmapper-dev \
libsqlite3-dev \
\
ca-certificates \
e2fsprogs \
iptables \
procps \
xfsprogs \
xz-utils \
\
aufs-tools \
vim-common \
&& rm -rf /var/lib/apt/lists/*
# Install seccomp: the version shipped in jessie is too old
ENV SECCOMP_VERSION 2.3.1
RUN set -x \
&& export SECCOMP_PATH="$(mktemp -d)" \
&& curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \
| tar -xzC "$SECCOMP_PATH" --strip-components=1 \
&& ( \
cd "$SECCOMP_PATH" \
&& ./configure --prefix=/usr/local \
&& make \
&& make install \
&& ldconfig \
) \
&& rm -rf "$SECCOMP_PATH"
# Install Go
# IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines
# will need updating, to avoid errors. Ping #docker-maintainers on IRC
# with a heads-up.
ENV GO_VERSION 1.7.5
RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" \
| tar -xzC /usr/local
ENV PATH /go/bin:/usr/local/go/bin:$PATH
ENV GOPATH /go
ENV CGO_LDFLAGS -L/lib
# Install runc, containerd, tini and docker-proxy
# Please edit hack/dockerfile/install-binaries.sh to update them.
COPY hack/dockerfile/binaries-commits /tmp/binaries-commits
COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh
RUN /tmp/install-binaries.sh runc containerd tini proxy
ENV AUTO_GOPATH 1
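# AUTO_GOPATH tells hack/make.sh to fabricate a GOPATH for this checkout,
# roughly equivalent to (a sketch of what the script does, not a verbatim copy):
#   mkdir -p .gopath/src/github.com/docker
#   ln -sf /usr/src/docker .gopath/src/github.com/docker/docker
#   export GOPATH=/usr/src/docker/.gopath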
WORKDIR /usr/src/docker
COPY . /usr/src/docker

Dockerfile.solaris

@@ -1,20 +0,0 @@
# Defines an image that hosts a native Docker build environment for Solaris
# TODO: Improve stub
FROM solaris:latest
# compile and runtime deps
RUN pkg install --accept \
git \
gnu-coreutils \
gnu-make \
gnu-tar \
diagnostic/top \
golang \
library/golang/* \
developer/gcc-*
ENV GOPATH /go/:/usr/lib/gocode/1.5/
ENV DOCKER_CROSSPLATFORMS solaris/amd64
WORKDIR /go/src/github.com/docker/docker
COPY . /go/src/github.com/docker/docker

Dockerfile.windows

@@ -1,267 +0,0 @@
# escape=`
# -----------------------------------------------------------------------------------------
# This file describes the standard way to build Docker in a container on Windows
# Server 2016 or Windows 10.
#
# Maintainer: @jhowardmsft
# -----------------------------------------------------------------------------------------
# Prerequisites:
# --------------
#
# 1. Windows Server 2016 or Windows 10 with all Windows updates applied. The major
# build number must be at least 14393. This can be confirmed, for example, by
# running the following from an elevated PowerShell prompt - this sample output
# is from a fully up to date machine as at mid-November 2016:
#
# >> PS C:\> $(gin).WindowsBuildLabEx
# >> 14393.447.amd64fre.rs1_release_inmarket.161102-0100
#
# 2. Git for Windows (or another git client) must be installed. https://git-scm.com/download/win.
#
# 3. The machine must be configured to run containers. For example, by following
# the quick start guidance at https://msdn.microsoft.com/en-us/virtualization/windowscontainers/quick_start/quick_start or
# https://github.com/docker/labs/blob/master/windows/windows-containers/Setup.md
#
# 4. If building in a Hyper-V VM: For Windows Server 2016 using Windows Server
# containers as the default option, it is recommended you have at least 1GB
# of memory assigned; For Windows 10 where Hyper-V Containers are employed, you
# should have at least 4GB of memory assigned. Note also, to run Hyper-V
# containers in a VM, it is necessary to configure the VM for nested virtualization.
# -----------------------------------------------------------------------------------------
# Usage:
# -----
#
# The following steps should be run from an (elevated*) Windows PowerShell prompt.
#
# (*In a default installation of containers on Windows following the quick-start guidance at
# https://msdn.microsoft.com/en-us/virtualization/windowscontainers/quick_start/quick_start,
# the docker.exe client must run elevated to be able to connect to the daemon).
#
# 1. Clone the sources from github.com:
#
# >> git clone https://github.com/docker/docker.git C:\go\src\github.com\docker\docker
# >> Cloning into 'C:\go\src\github.com\docker\docker'...
# >> remote: Counting objects: 186216, done.
# >> remote: Compressing objects: 100% (21/21), done.
# >> remote: Total 186216 (delta 5), reused 0 (delta 0), pack-reused 186195
# >> Receiving objects: 100% (186216/186216), 104.32 MiB | 8.18 MiB/s, done.
# >> Resolving deltas: 100% (123139/123139), done.
# >> Checking connectivity... done.
# >> Checking out files: 100% (3912/3912), done.
# >> PS C:\>
#
#
# 2. Change directory to the cloned docker sources:
#
# >> cd C:\go\src\github.com\docker\docker
#
#
# 3. Build a docker image with the components required to build the docker binaries from source
# by running one of the following:
#
# >> docker build -t nativebuildimage -f Dockerfile.windows .
# >> docker build -t nativebuildimage -f Dockerfile.windows -m 2GB . (if using Hyper-V containers)
#
#
# 4. Build the docker executable binaries by running one of the following:
#
# >> docker run --name binaries nativebuildimage hack\make.ps1 -Binary
# >> docker run --name binaries -m 2GB nativebuildimage hack\make.ps1 -Binary (if using Hyper-V containers)
#
#
# 5. Copy the binaries out of the container, replacing HostPath with an appropriate destination
# folder on the host system where you want the binaries to be located.
#
# >> docker cp binaries:C:\go\src\github.com\docker\docker\bundles\docker.exe C:\HostPath\docker.exe
# >> docker cp binaries:C:\go\src\github.com\docker\docker\bundles\dockerd.exe C:\HostPath\dockerd.exe
#
#
# 6. (Optional) Remove the interim container holding the built executable binaries:
#
# >> docker rm binaries
#
#
# 7. (Optional) Remove the image used for the container in which the executable
# binaries are built. Tip - it may be useful to keep this image around if you need to
# build multiple times. Then you can take advantage of the builder cache to have an
# image which has all the components required to build the binaries already installed.
#
# >> docker rmi nativebuildimage
#
# -----------------------------------------------------------------------------------------
# The validation tests can either run in a container, or directly on the host. To run in a
# container, ensure you have created the nativebuildimage above. Then run one of the
# following from an (elevated) Windows PowerShell prompt:
#
# >> docker run --rm nativebuildimage hack\make.ps1 -DCO -PkgImports -GoFormat
# >> docker run --rm -m 2GB nativebuildimage hack\make.ps1 -DCO -PkgImports -GoFormat (if using Hyper-V containers)
# To run the validation tests on the host, from the root of the repository, run the
# following from a Windows PowerShell prompt (elevation is not required; note that
# Go must be installed to run these tests):
#
# >> hack\make.ps1 -DCO -PkgImports -GoFormat
# -----------------------------------------------------------------------------------------
# To run unit tests, ensure you have created the nativebuildimage above. Then run one of
# the following from an (elevated) Windows PowerShell prompt:
#
# >> docker run --rm nativebuildimage hack\make.ps1 -TestUnit
# >> docker run --rm -m 2GB nativebuildimage hack\make.ps1 -TestUnit (if using Hyper-V containers)
# -----------------------------------------------------------------------------------------
# To run all tests and binary build, ensure you have created the nativebuildimage above. Then
# run one of the following from an (elevated) Windows PowerShell prompt:
#
# >> docker run nativebuildimage hack\make.ps1 -All
# >> docker run -m 2GB nativebuildimage hack\make.ps1 -All (if using Hyper-V containers)
# -----------------------------------------------------------------------------------------
# Important notes:
# ---------------
#
# Don't attempt to use a bind-mount to pass a local directory as the bundles target
# directory. It does not work (golang attempts to follow a mapped folder incorrectly).
# Instead, use docker cp as per the example.
#
# go.zip is not removed from the image as it is used by the Windows CI servers
# to ensure the host and image are running consistent versions of go.
#
# Nanoserver support is a work in progress. Although the image will build if the
# FROM statement is updated, it will not work when running autogen through hack\make.ps1.
# It is suspected that the required GCC utilities (e.g. gcc, windres, windmc) silently
# quit due to the use of console hooks which are not available.
#
# The docker integration tests do not currently run in a container on Windows, predominantly
# due to Windows not supporting privileged mode, so anything using a volume would fail.
# They (along with the rest of the docker CI suite) can be run using
# https://github.com/jhowardmsft/docker-w2wCIScripts/blob/master/runCI/Invoke-DockerCI.ps1.
#
# -----------------------------------------------------------------------------------------
# The number of build steps below are explicitly minimised to improve performance.
FROM microsoft/windowsservercore
# Use PowerShell as the default shell
SHELL ["powershell", "-command"]
# Environment variable notes:
# - GO_VERSION must be consistent with 'Dockerfile' used by Linux.
# - FROM_DOCKERFILE is used for detection of building within a container.
ENV GO_VERSION=1.7.5 `
GIT_VERSION=2.11.0 `
GOPATH=C:\go `
FROM_DOCKERFILE=1
RUN `
$ErrorActionPreference = 'Stop'; `
$ProgressPreference = 'SilentlyContinue'; `
`
Function Test-Nano() { `
$EditionId = (Get-ItemProperty -Path 'HKLM:\SOFTWARE\Microsoft\Windows NT\CurrentVersion' -Name 'EditionID').EditionId; `
return (($EditionId -eq 'ServerStandardNano') -or ($EditionId -eq 'ServerDataCenterNano') -or ($EditionId -eq 'NanoServer')); `
}`
`
Function Download-File([string] $source, [string] $target) { `
if (Test-Nano) { `
$handler = New-Object System.Net.Http.HttpClientHandler; `
$client = New-Object System.Net.Http.HttpClient($handler); `
$client.Timeout = New-Object System.TimeSpan(0, 30, 0); `
$cancelTokenSource = [System.Threading.CancellationTokenSource]::new(); `
$responseMsg = $client.GetAsync([System.Uri]::new($source), $cancelTokenSource.Token); `
$responseMsg.Wait(); `
if (!$responseMsg.IsCanceled) { `
$response = $responseMsg.Result; `
if ($response.IsSuccessStatusCode) { `
$downloadedFileStream = [System.IO.FileStream]::new($target, [System.IO.FileMode]::Create, [System.IO.FileAccess]::Write); `
$copyStreamOp = $response.Content.CopyToAsync($downloadedFileStream); `
$copyStreamOp.Wait(); `
$downloadedFileStream.Close(); `
if ($copyStreamOp.Exception -ne $null) { throw $copyStreamOp.Exception } `
} `
} else { `
Throw ("Failed to download " + $source) `
}`
} else { `
$webClient = New-Object System.Net.WebClient; `
$webClient.DownloadFile($source, $target); `
} `
} `
`
setx /M PATH $('C:\git\bin;C:\git\usr\bin;'+$Env:PATH+';C:\gcc\bin;C:\go\bin'); `
`
Write-Host INFO: Downloading git...; `
$location='https://github.com/git-for-windows/git/releases/download/v'+$env:GIT_VERSION+'.windows.1/PortableGit-'+$env:GIT_VERSION+'-64-bit.7z.exe'; `
Download-File $location C:\gitsetup.7z.exe; `
`
Write-Host INFO: Downloading go...; `
Download-File $('https://golang.org/dl/go'+$Env:GO_VERSION+'.windows-amd64.zip') C:\go.zip; `
`
Write-Host INFO: Downloading compiler 1 of 3...; `
Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/gcc.zip C:\gcc.zip; `
`
Write-Host INFO: Downloading compiler 2 of 3...; `
Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/runtime.zip C:\runtime.zip; `
`
Write-Host INFO: Downloading compiler 3 of 3...; `
Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/binutils.zip C:\binutils.zip; `
`
Write-Host INFO: Installing PS7Zip package...; `
Install-Package PS7Zip -Force | Out-Null; `
Write-Host INFO: Importing PS7Zip...; `
Import-Module PS7Zip -Force; `
New-Item C:\git -ItemType Directory | Out-Null ; `
cd C:\git; `
Write-Host INFO: Extracting git...; `
Expand-7Zip C:\gitsetup.7z.exe | Out-Null; `
cd C:\; `
`
Write-Host INFO: Expanding go...; `
Expand-Archive C:\go.zip -DestinationPath C:\; `
`
Write-Host INFO: Expanding compiler 1 of 3...; `
Expand-Archive C:\gcc.zip -DestinationPath C:\gcc -Force; `
Write-Host INFO: Expanding compiler 2 of 3...; `
Expand-Archive C:\runtime.zip -DestinationPath C:\gcc -Force; `
Write-Host INFO: Expanding compiler 3 of 3...; `
Expand-Archive C:\binutils.zip -DestinationPath C:\gcc -Force; `
`
Write-Host INFO: Removing downloaded files...; `
Remove-Item C:\gcc.zip; `
Remove-Item C:\runtime.zip; `
Remove-Item C:\binutils.zip; `
Remove-Item C:\gitsetup.7z.exe; `
`
Write-Host INFO: Creating source directory...; `
New-Item -ItemType Directory -Path C:\go\src\github.com\docker\docker | Out-Null; `
`
Write-Host INFO: Configuring git core.autocrlf...; `
C:\git\bin\git config --global core.autocrlf true; `
`
Write-Host INFO: Completed
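# Note on the RUN above: Download-File branches on Test-Nano because Nano
# Server ships without the full .NET Framework, so the System.Net.WebClient
# path is reserved for non-Nano images while HttpClient covers Nano (this
# reading is inferred from the function itself).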
# Make PowerShell the default entrypoint
ENTRYPOINT ["powershell.exe"]
# Set the working directory to the location of the sources
WORKDIR C:\go\src\github.com\docker\docker
# Copy the sources into the container
COPY . .

17
LICENSE

@@ -1,7 +1,7 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
@@ -176,13 +176,24 @@
END OF TERMS AND CONDITIONS
Copyright 2013-2016 Docker, Inc.
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,

MAINTAINERS

@@ -1,376 +0,0 @@
# Docker maintainers file
#
# This file describes who runs the docker/docker project and how.
# This is a living document - if you see something out of date or missing, speak up!
#
# It is structured to be consumable by both humans and programs.
# To extract its contents programmatically, use any TOML-compliant
# parser.
#
# This file is compiled into the MAINTAINERS file in docker/opensource.
#
[Org]
[Org."Core maintainers"]
# The Core maintainers are the ghostbusters of the project: when there's a problem others
# can't solve, they show up and fix it with bizarre devices and weaponry.
# They have final say on technical implementation and coding style.
# They are ultimately responsible for quality in all its forms: usability polish,
# bugfixes, performance, stability, etc. When ownership can cleanly be passed to
# a subsystem, they are responsible for doing so and holding the
# subsystem maintainers accountable. If ownership is unclear, they are the de facto owners.
# For each release (including minor releases), a "release captain" is assigned from the
# pool of core maintainers. Rotation is encouraged across all maintainers, to ensure
# the release process is clear and up-to-date.
people = [
"aaronlehmann",
"akihirosuda",
"aluzzardi",
"anusha",
"coolljt0725",
"cpuguy83",
"crosbymichael",
"dnephin",
"duglin",
"estesp",
"icecrime",
"jhowardmsft",
"justincormack",
"lk4d4",
"mavenugo",
"mhbauer",
"mlaventure",
"mrjana",
"runcom",
"stevvooe",
"tianon",
"tibor",
"tonistiigi",
"unclejack",
"vdemeester",
"vieux"
]
[Org."Docs maintainers"]
# TODO Describe the docs maintainers role.
people = [
"jamtur01",
"misty",
"sven",
"thajeztah"
]
[Org.Curators]
# The curators help ensure that incoming issues and pull requests are properly triaged and
# that our various contribution and reviewing processes are respected. With their knowledge of
# the repository activity, they can also guide contributors to relevant material or
# discussions.
#
# They are neither code nor docs reviewers, so they are never expected to merge. They can
# however:
# - close an issue or pull request when it's an exact duplicate
# - close an issue or pull request when it's inappropriate or off-topic
people = [
"aboch",
"andrewhsu",
"ehazlett",
"mgoelzer",
"programmerq",
"thajeztah"
]
[Org.Alumni]
# This list contains maintainers that are no longer active on the project.
# It is thanks to these people that the project has become what it is today.
# Thank you!
people = [
# David Calavera contributed many features to Docker, such as an improved
# event system, dynamic configuration reloading, volume plugins, fancy
# new templating options, and an external client credential store. As a
# maintainer, David was release captain for Docker 1.8, and competing
# with Jess Frazelle to be "top dream killer".
# David is now doing amazing stuff as CTO for https://www.netlify.com,
# and tweets as @calavera.
"calavera",
# As a maintainer, Erik was responsible for the "builder", and
# started the first designs for the new networking model in
# Docker. Erik is now working on all kinds of plugins for Docker
# (https://github.com/contiv) and various open source projects
# in his own repository https://github.com/erikh. You may
# still stumble into him in our issue tracker, or on IRC.
"erikh",
# Jessica Frazelle, also known as the "Keyser Söze of containers",
# runs *everything* in containers. She started contributing to
# Docker with a (fun fun) change involving both iptables and regular
# expressions (coz, YOLO!) on July 10, 2014
# https://github.com/docker/docker/pull/6950/commits/f3a68ffa390fb851115c77783fa4031f1d3b2995.
# Jess was Release Captain for Docker 1.4, 1.6 and 1.7, and contributed
# many features and improvement, among which "seccomp profiles" (making
# containers a lot more secure). Besides being a maintainer, she
# set up the CI infrastructure for the project, giving everyone
# something to shout at if a PR failed ("noooo Janky!").
# Jess is currently working on the DCOS security team at Mesosphere,
# and contributing to various open source projects.
# Be sure you don't miss her talks at a conference near you (a must-see),
# read her blog at https://blog.jessfraz.com (a must-read), and
# check out her open source projects on GitHub https://github.com/jessfraz (a must-try).
"jessfraz",
# As a docs maintainer, Mary Anthony contributed greatly to the Docker
# docs. She wrote the Docker Contributor Guide and Getting Started
# Guides. She helped create a doc build system independent of
# docker/docker project, and implemented a new docs.docker.com theme and
# nav for 2015 Dockercon. Fun fact: the most inherited layer in DockerHub
# public repositories was originally referenced in
# maryatdocker/docker-whale back in May 2015.
"moxiegirl",
# Vincent "vbatts!" Batts made his first contribution to the project
# in November 2013, to become a maintainer a few months later, on
# May 10, 2014 (https://github.com/docker/docker/commit/d6e666a87a01a5634c250358a94c814bf26cb778).
# As a maintainer, Vincent made important contributions to core elements
# of Docker, such as "distribution" (tarsum) and graphdrivers (btrfs, devicemapper).
# He also contributed the "tar-split" library, an important element
# for the content-addressable store.
# Vincent is currently a member of the Open Containers Initiative
# Technical Oversight Board (TOB), besides his work at Red Hat and
# Project Atomic. You can still find him regularly hanging out in
# our repository and the #docker-dev and #docker-maintainers IRC channels
# for a chat, as he's always a lot of fun.
"vbatts",
# Vishnu became a maintainer to help out on the daemon codebase and
# libcontainer integration. He's currently involved in the
# Open Containers Initiative, working on the specifications,
# besides his work on cAdvisor and Kubernetes for Google.
"vishh"
]
[people]
# A reference list of all people associated with the project.
# All other sections should refer to people by their canonical key
# in the people section.
# ADD YOURSELF HERE IN ALPHABETICAL ORDER
[people.aaronlehmann]
Name = "Aaron Lehmann"
Email = "aaron.lehmann@docker.com"
GitHub = "aaronlehmann"
[people.aboch]
Name = "Alessandro Boch"
Email = "aboch@docker.com"
GitHub = "aboch"
[people.akihirosuda]
Name = "Akihiro Suda"
Email = "suda.akihiro@lab.ntt.co.jp"
GitHub = "AkihiroSuda"
[people.aluzzardi]
Name = "Andrea Luzzardi"
Email = "al@docker.com"
GitHub = "aluzzardi"
[people.andrewhsu]
Name = "Andrew Hsu"
Email = "andrewhsu@docker.com"
GitHub = "andrewhsu"
[people.anusha]
Name = "Anusha Ragunathan"
Email = "anusha@docker.com"
GitHub = "anusha-ragunathan"
[people.calavera]
Name = "David Calavera"
Email = "david.calavera@gmail.com"
GitHub = "calavera"
[people.coolljt0725]
Name = "Lei Jitang"
Email = "leijitang@huawei.com"
GitHub = "coolljt0725"
[people.cpuguy83]
Name = "Brian Goff"
Email = "cpuguy83@gmail.com"
GitHub = "cpuguy83"
[people.crosbymichael]
Name = "Michael Crosby"
Email = "crosbymichael@gmail.com"
GitHub = "crosbymichael"
[people.dnephin]
Name = "Daniel Nephin"
Email = "dnephin@gmail.com"
GitHub = "dnephin"
[people.duglin]
Name = "Doug Davis"
Email = "dug@us.ibm.com"
GitHub = "duglin"
[people.ehazlett]
Name = "Evan Hazlett"
Email = "ejhazlett@gmail.com"
GitHub = "ehazlett"
[people.erikh]
Name = "Erik Hollensbe"
Email = "erik@docker.com"
GitHub = "erikh"
[people.estesp]
Name = "Phil Estes"
Email = "estesp@linux.vnet.ibm.com"
GitHub = "estesp"
[people.icecrime]
Name = "Arnaud Porterie"
Email = "arnaud@docker.com"
GitHub = "icecrime"
[people.jamtur01]
Name = "James Turnbull"
Email = "james@lovedthanlost.net"
GitHub = "jamtur01"
[people.jhowardmsft]
Name = "John Howard"
Email = "jhoward@microsoft.com"
GitHub = "jhowardmsft"
[people.jessfraz]
Name = "Jessie Frazelle"
Email = "jess@linux.com"
GitHub = "jessfraz"
[people.justincormack]
Name = "Justin Cormack"
Email = "justin.cormack@docker.com"
GitHub = "justincormack"
[people.lk4d4]
Name = "Alexander Morozov"
Email = "lk4d4@docker.com"
GitHub = "lk4d4"
[people.mavenugo]
Name = "Madhu Venugopal"
Email = "madhu@docker.com"
GitHub = "mavenugo"
[people.mgoelzer]
Name = "Mike Goelzer"
Email = "mike.goelzer@docker.com"
GitHub = "mgoelzer"
[people.mhbauer]
Name = "Morgan Bauer"
Email = "mbauer@us.ibm.com"
GitHub = "mhbauer"
[people.misty]
Name = "Misty Stanley-Jones"
Email = "misty@docker.com"
GitHub = "mstanleyjones"
[people.mlaventure]
Name = "Kenfe-Mickaël Laventure"
Email = "mickael.laventure@docker.com"
GitHub = "mlaventure"
[people.moxiegirl]
Name = "Mary Anthony"
Email = "mary.anthony@docker.com"
GitHub = "moxiegirl"
[people.mrjana]
Name = "Jana Radhakrishnan"
Email = "mrjana@docker.com"
GitHub = "mrjana"
[people.programmerq]
Name = "Jeff Anderson"
Email = "jeff@docker.com"
GitHub = "programmerq"
[people.runcom]
Name = "Antonio Murdaca"
Email = "runcom@redhat.com"
GitHub = "runcom"
[people.shykes]
Name = "Solomon Hykes"
Email = "solomon@docker.com"
GitHub = "shykes"
[people.stevvooe]
Name = "Stephen Day"
Email = "stephen.day@docker.com"
GitHub = "stevvooe"
[people.sven]
Name = "Sven Dowideit"
Email = "SvenDowideit@home.org.au"
GitHub = "SvenDowideit"
[people.thajeztah]
Name = "Sebastiaan van Stijn"
Email = "github@gone.nl"
GitHub = "thaJeztah"
[people.tianon]
Name = "Tianon Gravi"
Email = "admwiggin@gmail.com"
GitHub = "tianon"
[people.tibor]
Name = "Tibor Vass"
Email = "tibor@docker.com"
GitHub = "tiborvass"
[people.tonistiigi]
Name = "Tõnis Tiigi"
Email = "tonis@docker.com"
GitHub = "tonistiigi"
[people.unclejack]
Name = "Cristian Staretu"
Email = "cristian.staretu@gmail.com"
GitHub = "unclejack"
[people.vbatts]
Name = "Vincent Batts"
Email = "vbatts@redhat.com"
GitHub = "vbatts"
[people.vdemeester]
Name = "Vincent Demeester"
Email = "vincent@sbr.pm"
GitHub = "vdemeester"
[people.vieux]
Name = "Victor Vieux"
Email = "vieux@docker.com"
GitHub = "vieux"
[people.vishh]
Name = "Vishnu Kannan"
Email = "vishnuk@google.com"
GitHub = "vishh"

183
Makefile

@@ -1,147 +1,78 @@
.PHONY: all binary build cross deb help init-go-pkg-cache install manpages rpm run shell test test-docker-py test-integration-cli test-unit tgz validate win
DOCKER_PACKAGE := github.com/dotcloud/docker
RELEASE_VERSION := $(shell git tag | grep -E "v[0-9\.]+$$" | sort -nr | head -n 1)
SRCRELEASE := docker-$(RELEASE_VERSION)
BINRELEASE := docker-$(RELEASE_VERSION).tgz
# set the graph driver as the current graphdriver if not set
DOCKER_GRAPHDRIVER := $(if $(DOCKER_GRAPHDRIVER),$(DOCKER_GRAPHDRIVER),$(shell docker info 2>&1 | grep "Storage Driver" | sed 's/.*: //'))
GIT_ROOT := $(shell git rev-parse --show-toplevel)
BUILD_DIR := $(CURDIR)/.gopath
# get OS/Arch of docker engine
DOCKER_OSARCH := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKER_ENGINE_OSARCH:-$$DOCKER_CLIENT_OSARCH}')
DOCKERFILE := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKERFILE}')
GOPATH ?= $(BUILD_DIR)
export GOPATH
# env vars passed through directly to Docker's build scripts
# to allow things like `make KEEPBUNDLE=1 binary` easily
# `project/PACKAGERS.md` have some limited documentation of some of these
DOCKER_ENVS := \
-e BUILD_APT_MIRROR \
-e BUILDFLAGS \
-e KEEPBUNDLE \
-e DOCKER_BUILD_ARGS \
-e DOCKER_BUILD_GOGC \
-e DOCKER_BUILD_PKGS \
-e DOCKER_DEBUG \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT \
-e DOCKER_GRAPHDRIVER=$(DOCKER_GRAPHDRIVER) \
-e DOCKER_INCREMENTAL_BINARY \
-e DOCKER_PORT \
-e DOCKER_REMAP_ROOT \
-e DOCKER_STORAGE_OPTS \
-e DOCKER_USERLANDPROXY \
-e TESTDIRS \
-e TESTFLAGS \
-e TIMEOUT \
-e HTTP_PROXY \
-e HTTPS_PROXY \
-e NO_PROXY \
-e http_proxy \
-e https_proxy \
-e no_proxy
# note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds
# to allow `make BIND_DIR=. shell` or `make BIND_DIR= test`
# (default to no bind mount if DOCKER_HOST is set)
# note: BINDDIR is supported for backwards-compatibility here
BIND_DIR := $(if $(BINDDIR),$(BINDDIR),$(if $(DOCKER_HOST),,bundles))
DOCKER_MOUNT := $(if $(BIND_DIR),-v "$(CURDIR)/$(BIND_DIR):/go/src/github.com/docker/docker/$(BIND_DIR)")
# This allows the test suite to be able to run without worrying about the underlying fs used by the container running the daemon (e.g. aufs-on-aufs), so long as the host running the container is running a supported fs.
# The volume will be cleaned up when the container is removed due to `--rm`.
# Note that `BIND_DIR` will already be set to `bundles` if `DOCKER_HOST` is not set (see above BIND_DIR line), in such case this will do nothing since `DOCKER_MOUNT` will already be set.
DOCKER_MOUNT := $(if $(DOCKER_MOUNT),$(DOCKER_MOUNT),-v /go/src/github.com/docker/docker/bundles)
# enable .go-pkg-cache if DOCKER_INCREMENTAL_BINARY and DOCKER_MOUNT (i.e. DOCKER_HOST) are set
PKGCACHE_DIR := $(if $(PKGCACHE_DIR),$(PKGCACHE_DIR),.go-pkg-cache)
PKGCACHE_MAP := gopath:/go/pkg goroot-linux_amd64_netgo:/usr/local/go/pkg/linux_amd64_netgo
DOCKER_MOUNT := $(if $(DOCKER_INCREMENTAL_BINARY),$(DOCKER_MOUNT) $(shell echo $(PKGCACHE_MAP) | sed -E 's@([^ ]*)@-v "$(CURDIR)/$(PKGCACHE_DIR)/\1"@g'),$(DOCKER_MOUNT))
GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g")
DOCKER_IMAGE := docker-dev$(if $(GIT_BRANCH_CLEAN),:$(GIT_BRANCH_CLEAN))
DOCKER_PORT_FORWARD := $(if $(DOCKER_PORT),-p "$(DOCKER_PORT)",)
DOCKER_FLAGS := docker run --rm -i --privileged $(DOCKER_ENVS) $(DOCKER_MOUNT) $(DOCKER_PORT_FORWARD)
BUILD_APT_MIRROR := $(if $(DOCKER_BUILD_APT_MIRROR),--build-arg APT_MIRROR=$(DOCKER_BUILD_APT_MIRROR))
export BUILD_APT_MIRROR
# if this session isn't interactive, then we don't want to allocate a
# TTY, which would fail, but if it is interactive, we do want to attach
# so that the user can send e.g. ^C through.
INTERACTIVE := $(shell [ -t 0 ] && echo 1 || echo 0)
ifeq ($(INTERACTIVE), 1)
DOCKER_FLAGS += -t
GO_OPTIONS ?=
ifeq ($(VERBOSE), 1)
GO_OPTIONS += -v
endif
DOCKER_RUN_DOCKER := $(DOCKER_FLAGS) "$(DOCKER_IMAGE)"
GIT_COMMIT = $(shell git rev-parse --short HEAD)
GIT_STATUS = $(shell test -n "`git status --porcelain`" && echo "+CHANGES")
default: binary
BUILD_OPTIONS = -ldflags "-X main.GIT_COMMIT $(GIT_COMMIT)$(GIT_STATUS)"
all: build ## validate all checks, build linux binaries, run all tests\ncross build non-linux binaries and generate archives
$(DOCKER_RUN_DOCKER) bash -c 'hack/validate/default && hack/make.sh'
SRC_DIR := $(GOPATH)/src
binary: build ## build the linux binaries
$(DOCKER_RUN_DOCKER) hack/make.sh binary
DOCKER_DIR := $(SRC_DIR)/$(DOCKER_PACKAGE)
DOCKER_MAIN := $(DOCKER_DIR)/docker
build: bundles init-go-pkg-cache
docker build ${BUILD_APT_MIRROR} ${DOCKER_BUILD_ARGS} -t "$(DOCKER_IMAGE)" -f "$(DOCKERFILE)" .
DOCKER_BIN_RELATIVE := bin/docker
DOCKER_BIN := $(CURDIR)/$(DOCKER_BIN_RELATIVE)
bundles:
mkdir bundles
.PHONY: all clean test hack release srcrelease $(BINRELEASE) $(SRCRELEASE) $(DOCKER_BIN) $(DOCKER_DIR)
cross: build ## cross build the binaries for darwin, freebsd and\nwindows
$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary binary cross
all: $(DOCKER_BIN)
deb: build ## build the deb packages
$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary build-deb
$(DOCKER_BIN): $(DOCKER_DIR)
@mkdir -p $(dir $@)
@(cd $(DOCKER_MAIN); go build $(GO_OPTIONS) $(BUILD_OPTIONS) -o $@)
@echo $(DOCKER_BIN_RELATIVE) is created.
$(DOCKER_DIR):
@mkdir -p $(dir $@)
@rm -f $@
@ln -sf $(CURDIR)/ $@
@(cd $(DOCKER_MAIN); go get $(GO_OPTIONS))
help: ## this help
@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
whichrelease:
echo $(RELEASE_VERSION)
init-go-pkg-cache:
mkdir -p $(shell echo $(PKGCACHE_MAP) | sed -E 's@([^: ]*):[^ ]*@$(PKGCACHE_DIR)/\1@g')
release: $(BINRELEASE)
srcrelease: $(SRCRELEASE)
deps: $(DOCKER_DIR)
install: ## install the linux binaries
KEEPBUNDLE=1 hack/make.sh install-binary
# A clean checkout of $RELEASE_VERSION, with vendored dependencies
$(SRCRELEASE):
rm -fr $(SRCRELEASE)
git clone $(GIT_ROOT) $(SRCRELEASE)
cd $(SRCRELEASE); git checkout -q $(RELEASE_VERSION)
manpages: ## Generate man pages from go source and markdown
docker build -t docker-manpage-dev -f "man/$(DOCKERFILE)" ./man
docker run --rm \
-v $(PWD):/go/src/github.com/docker/docker/ \
docker-manpage-dev
# A binary release ready to be uploaded to a mirror
$(BINRELEASE): $(SRCRELEASE)
rm -f $(BINRELEASE)
cd $(SRCRELEASE); make; cp -R bin docker-$(RELEASE_VERSION); tar -f ../$(BINRELEASE) -zv -c docker-$(RELEASE_VERSION)
rpm: build ## build the rpm packages
$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary build-rpm
clean:
@rm -rf $(dir $(DOCKER_BIN))
ifeq ($(GOPATH), $(BUILD_DIR))
@rm -rf $(BUILD_DIR)
else ifneq ($(DOCKER_DIR), $(realpath $(DOCKER_DIR)))
@rm -f $(DOCKER_DIR)
endif
run: build ## run the docker daemon in a container
$(DOCKER_RUN_DOCKER) sh -c "KEEPBUNDLE=1 hack/make.sh install-binary run"
test: all
@(cd $(DOCKER_DIR); sudo -E go test $(GO_OPTIONS))
shell: build ## start a shell inside the build env
$(DOCKER_RUN_DOCKER) bash
fmt:
@gofmt -s -l -w .
test: build ## run the unit, integration and docker-py tests
$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary cross test-unit test-integration-cli test-docker-py
test-docker-py: build ## run the docker-py tests
$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-docker-py
test-integration-cli: build ## run the integration tests
$(DOCKER_RUN_DOCKER) hack/make.sh build-integration-test-binary dynbinary test-integration-cli
test-unit: build ## run the unit tests
$(DOCKER_RUN_DOCKER) hack/make.sh test-unit
tgz: build ## build the archives (.zip on windows and .tgz\notherwise) containing the binaries
$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary binary cross tgz
validate: build ## validate DCO, Seccomp profile generation, gofmt,\n./pkg/ isolation, golint, tests, tomls, go vet and vendor
$(DOCKER_RUN_DOCKER) hack/validate/all
win: build ## cross build the binary for windows
$(DOCKER_RUN_DOCKER) hack/make.sh win
.PHONY: swagger-gen
swagger-gen:
docker run --rm -v $(PWD):/go/src/github.com/docker/docker \
-w /go/src/github.com/docker/docker \
--entrypoint hack/generate-swagger-api.sh \
-e GOPATH=/go \
quay.io/goswagger/swagger:0.7.4
hack:
cd $(CURDIR)/buildbot && vagrant up

19
NOTICE

@@ -1,19 +1,6 @@
Docker
Copyright 2012-2016 Docker, Inc.
Copyright 2012-2013 dotCloud, inc.
This product includes software developed at Docker, Inc. (https://www.docker.com).
This product includes software developed at dotCloud, inc. (http://www.dotcloud.com).
This product contains software (https://github.com/kr/pty) developed
by Keith Rarick, licensed under the MIT License.
The following is courtesy of our legal counsel:
Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.
For more information, please see https://www.bis.doc.gov
See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
This product contains software (https://github.com/kr/pty) developed by Keith Rarick, licensed under the MIT License.

541
README.md

@@ -1,304 +1,317 @@
Docker: the container engine [![Release](https://img.shields.io/github/release/docker/docker.svg)](https://github.com/docker/docker/releases/latest)
============================
Docker: the Linux container runtime
===================================
Docker is an open source project to pack, ship and run any application
as a lightweight container.
Docker complements LXC with a high-level API which operates at the process level. It runs unix processes with strong guarantees of isolation and repeatability across servers.
Docker containers are both *hardware-agnostic* and *platform-agnostic*.
This means they can run anywhere, from your laptop to the largest
cloud compute instance and everything in between - and they don't require
you to use a particular language, framework or packaging system. That
makes them great building blocks for deploying and scaling web apps,
databases, and backend services without depending on a particular stack
or provider.
Docker is a great building block for automating distributed systems: large-scale web deployments, database clusters, continuous deployment systems, private PaaS, service-oriented architectures, etc.
Docker began as an open-source implementation of the deployment engine which
powered [dotCloud](http://web.archive.org/web/20130530031104/https://www.dotcloud.com/),
a popular Platform-as-a-Service. It benefits directly from the experience
accumulated over several years of large-scale operation and support of hundreds
of thousands of applications and databases.
![Docker L](docs/sources/static_files/lego_docker.jpg "Docker")
![Docker logo](docs/static_files/docker-logo-compressed.png "Docker")
* *Heterogeneous payloads*: any combination of binaries, libraries, configuration files, scripts, virtualenvs, jars, gems, tarballs, you name it. No more juggling between domain-specific tools. Docker can deploy and run them all.
## Security Disclosure
* *Any server*: docker can run on any x64 machine with a modern linux kernel - whether it's a laptop, a bare metal server or a VM. This makes it perfect for multi-cloud deployments.
Security is very important to us. If you have any issue regarding security,
please disclose the information responsibly by sending an email to
security@docker.com and not by creating a GitHub issue.
* *Isolation*: docker isolates processes from each other and from the underlying host, using lightweight containers.
## Better than VMs
A common method for distributing applications and sandboxing their
execution is to use virtual machines, or VMs. Typical VM formats are
VMware's vmdk, Oracle VirtualBox's vdi, and Amazon EC2's ami. In theory
these formats should allow every developer to automatically package
their application into a "machine" for easy distribution and deployment.
In practice, that almost never happens, for a few reasons:
* *Size*: VMs are very large which makes them impractical to store
and transfer.
* *Performance*: running VMs consumes significant CPU and memory,
which makes them impractical in many scenarios, for example local
development of multi-tier applications, and large-scale deployment
of cpu and memory-intensive applications on large numbers of
machines.
* *Portability*: competing VM environments don't play well with each
other. Although conversion tools do exist, they are limited and
add even more overhead.
* *Hardware-centric*: VMs were designed with machine operators in
mind, not software developers. As a result, they offer very
limited tooling for what developers need most: building, testing
and running their software. For example, VMs offer no facilities
for application versioning, monitoring, configuration, logging or
service discovery.
By contrast, Docker relies on a different sandboxing method known as
*containerization*. Unlike traditional virtualization, containerization
takes place at the kernel level. Most modern operating system kernels
now support the primitives necessary for containerization, including
Linux with [openvz](https://openvz.org),
[vserver](http://linux-vserver.org) and more recently
[lxc](https://linuxcontainers.org/), Solaris with
[zones](https://docs.oracle.com/cd/E26502_01/html/E29024/preface-1.html#scrolltoc),
and FreeBSD with
[Jails](https://www.freebsd.org/doc/handbook/jails.html).
Docker builds on top of these low-level primitives to offer developers a
portable format and runtime environment that solves all four problems.
Docker containers are small (and their transfer can be optimized with
layers), they have basically zero memory and cpu overhead, they are
completely portable, and are designed from the ground up with an
application-centric design.
Perhaps best of all, because Docker operates at the OS level, it can still be
run inside a VM!
## Plays well with others
Docker does not require you to buy into a particular programming
language, framework, packaging system, or configuration language.
Is your application a Unix process? Does it use files, tcp connections,
environment variables, standard Unix streams and command-line arguments
as inputs and outputs? Then Docker can run it.
Can your application's build be expressed as a sequence of such
commands? Then Docker can build it.
## Escape dependency hell
A common problem for developers is the difficulty of managing all
their application's dependencies in a simple and automated way.
This is usually difficult for several reasons:
* *Cross-platform dependencies*. Modern applications often depend on
a combination of system libraries and binaries, language-specific
packages, framework-specific modules, internal components
developed for another project, etc. These dependencies live in
different "worlds" and require different tools - these tools
typically don't work well with each other, requiring awkward
custom integrations.
* *Conflicting dependencies*. Different applications may depend on
different versions of the same dependency. Packaging tools handle
these situations with various degrees of ease - but they all
handle them in different and incompatible ways, which again forces
the developer to do extra work.
* *Custom dependencies*. A developer may need to prepare a custom
version of their application's dependency. Some packaging systems
can handle custom versions of a dependency, others can't - and all
of them handle it differently.
* *Repeatability*: because containers are isolated in their own filesystem, they behave the same regardless of where, when, and alongside what they run.
Docker solves the problem of dependency hell by giving the developer a simple
way to express *all* their application's dependencies in one place, while
streamlining the process of assembling them. If this makes you think of
[XKCD 927](https://xkcd.com/927/), don't worry. Docker doesn't
*replace* your favorite packaging systems. It simply orchestrates
their use in a simple and repeatable way. How does it do that? With
layers.
Notable features
-----------------
Docker defines a build as running a sequence of Unix commands, one
after the other, in the same container. Build commands modify the
contents of the container (usually by installing new files on the
filesystem), the next command modifies it some more, etc. Since each
build command inherits the result of the previous commands, the
*order* in which the commands are executed expresses *dependencies*.
* Filesystem isolation: each process container runs in a completely separate root filesystem.
Here's a typical Docker build process:
* Resource isolation: system resources like cpu and memory can be allocated differently to each process container, using cgroups.
* Network isolation: each process container runs in its own network namespace, with a virtual interface and IP address of its own.
* Copy-on-write: root filesystems are created using copy-on-write, which makes deployment extremely fast, memory-cheap and disk-cheap.
* Logging: the standard streams (stdout/stderr/stdin) of each process container are collected and logged for real-time or batch retrieval.
* Change management: changes to a container's filesystem can be committed into a new image and re-used to create more containers. No templating or manual configuration required.
* Interactive shell: docker can allocate a pseudo-tty and attach to the standard input of any container, for example to run a throwaway interactive shell.
Install instructions
==================
Quick install on Ubuntu 12.04 and 12.10
---------------------------------------
```bash
curl get.docker.io | sh -x
```

And here is the build process mentioned above, expressed as a sequence of commands:

```
FROM ubuntu:12.04
RUN apt-get update && apt-get install -y python python-pip curl
RUN curl -sSL https://github.com/shykes/helloflask/archive/master.tar.gz | tar -xzv
RUN cd helloflask-master && pip install -r requirements.txt
```
Note that Docker doesn't care *how* dependencies are built - as long
as they can be built by running a Unix command in a container.
Binary installs
----------------
Docker supports the following binary installation methods.
Note that some methods are community contributions and not yet officially supported.
Getting started
===============
* [Ubuntu 12.04 and 12.10 (officially supported)](http://docs.docker.io/en/latest/installation/ubuntulinux/)
* [Arch Linux](http://docs.docker.io/en/latest/installation/archlinux/)
* [MacOS X (with Vagrant)](http://docs.docker.io/en/latest/installation/macos/)
* [Windows (with Vagrant)](http://docs.docker.io/en/latest/installation/windows/)
* [Amazon EC2 (with Vagrant)](http://docs.docker.io/en/latest/installation/amazon/)
Docker can be installed either on your computer for building applications or
on servers for running them. To get started, [check out the installation
instructions in the
documentation](https://docs.docker.com/engine/installation/).
Installing from source
----------------------
1. Make sure you have a [Go language](http://golang.org/doc/install) compiler and [git](http://git-scm.com) installed.
2. Checkout the source code
```bash
git clone http://github.com/dotcloud/docker
```
3. Build the docker binary
```bash
cd docker
make VERBOSE=1
sudo cp ./bin/docker /usr/local/bin/docker
```
Usage examples
==============
Docker can be used to run short-lived commands, long-running daemons
(app servers, databases, etc.), interactive shell sessions, etc.
First run the docker daemon
---------------------------
You can find a [list of real-world
examples](https://docs.docker.com/engine/examples/) in the
documentation.
All the examples assume your machine is running the docker daemon. To run the docker daemon in the background, simply type:
```bash
# On a production system you want this running in an init script
sudo docker -d &
```
Now you can run docker in client mode: all commands will be forwarded to the docker daemon, so the client can run from any account.
```bash
# Now you can run docker commands from any account.
docker help
```
Throwaway shell in a base ubuntu image
--------------------------------------
```bash
docker pull ubuntu:12.10
# Run an interactive shell, allocate a tty, attach stdin and stdout
# To detach the tty without exiting the shell, use the escape sequence Ctrl-p + Ctrl-q
docker run -i -t ubuntu:12.10 /bin/bash
```
Starting a long-running worker process
--------------------------------------
```bash
# Start a very useful long-running process
JOB=$(docker run -d ubuntu /bin/sh -c "while true; do echo Hello world; sleep 1; done")
# Collect the output of the job so far
docker logs $JOB
# Kill the job
docker kill $JOB
```
Running an irc bouncer
----------------------
```bash
BOUNCER_ID=$(docker run -d -p 6667 -u irc shykes/znc $USER $PASSWORD)
echo "Configure your irc client to connect to port $(docker port $BOUNCER_ID 6667) of this machine"
```
Running Redis
-------------
```bash
REDIS_ID=$(docker run -d -p 6379 shykes/redis redis-server)
echo "Configure your redis client to connect to port $(docker port $REDIS_ID 6379) of this machine"
```
Share your own image!
---------------------
```bash
CONTAINER=$(docker run -d ubuntu:12.10 apt-get install -y curl)
docker commit -m "Installed curl" $CONTAINER $USER/betterbase
docker push $USER/betterbase
```
A list of publicly available images is [available here](https://github.com/dotcloud/docker/wiki/Public-docker-images).
Expose a service on a TCP port
------------------------------
```bash
# Expose port 4444 of this container, and tell netcat to listen on it
JOB=$(docker run -d -p 4444 base /bin/nc -l -p 4444)
# Which public port is NATed to my container?
PORT=$(docker port $JOB 4444)
# Connect to the public port via the host's public address
# Please note that, because of how routing works, connecting to localhost or 127.0.0.1:$PORT will not work.
IP=$(ifconfig eth0 | perl -n -e 'if (m/inet addr:([\d\.]+)/g) { print $1 }')
echo hello world | nc $IP $PORT
# Verify that the network connection worked
echo "Daemon received: $(docker logs $JOB)"
```
Under the hood
--------------
Under the hood, Docker is built on the following components:
* The
[cgroups](https://www.kernel.org/doc/Documentation/cgroup-v1/cgroups.txt)
and
[namespaces](http://man7.org/linux/man-pages/man7/namespaces.7.html)
capabilities of the Linux kernel
* The [Go](https://golang.org) programming language
* The [Docker Image Specification](https://github.com/docker/docker/blob/master/image/spec/v1.md)
* The [Libcontainer Specification](https://github.com/opencontainers/runc/blob/master/libcontainer/SPEC.md)
Contributing to Docker [![GoDoc](https://godoc.org/github.com/docker/docker?status.svg)](https://godoc.org/github.com/docker/docker)
* The [cgroup](http://blog.dotcloud.com/kernel-secrets-from-the-paas-garage-part-24-c) and [namespacing](http://blog.dotcloud.com/under-the-hood-linux-kernels-on-dotcloud-part) capabilities of the Linux kernel;
* [AUFS](http://aufs.sourceforge.net/aufs.html), a powerful union filesystem with copy-on-write capabilities;
* The [Go](http://golang.org) programming language;
* [lxc](http://lxc.sourceforge.net/), a set of convenience scripts to simplify the creation of linux containers.
Contributing to Docker
======================
| **Master** (Linux) | **Experimental** (Linux) | **Windows** | **FreeBSD** |
|------------------|----------------------|---------|---------|
| [![Jenkins Build Status](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master/badge/icon)](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master/) | [![Jenkins Build Status](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master%20%28experimental%29/badge/icon)](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master%20%28experimental%29/) | [![Build Status](http://jenkins.dockerproject.org/job/Docker%20Master%20(windows)/badge/icon)](http://jenkins.dockerproject.org/job/Docker%20Master%20(windows)/) | [![Build Status](http://jenkins.dockerproject.org/job/Docker%20Master%20(freebsd)/badge/icon)](http://jenkins.dockerproject.org/job/Docker%20Master%20(freebsd)/) |
Want to hack on Docker? Awesome! There are instructions to get you started on the website: http://docs.docker.io/en/latest/contributing/contributing/
Want to hack on Docker? Awesome! We have [instructions to help you get
started contributing code or documentation](https://docs.docker.com/opensource/project/who-written-for/).
These instructions are probably not perfect, please let us know if anything
feels wrong or incomplete. Better yet, submit a PR and improve them yourself.
Getting the development builds
==============================
Want to run Docker from a master build? You can download
master builds at [master.dockerproject.org](https://master.dockerproject.org).
They are updated with each commit merged into the master branch.
Don't know how to use that super cool new feature in the master build? Check
out the master docs at
[docs.master.dockerproject.org](http://docs.master.dockerproject.org).
How the project is run
======================
Docker is a very, very active project. If you want to learn more about how it is run,
or want to get more involved, the best place to start is [the project directory](https://github.com/docker/docker/tree/master/project).
We are always open to suggestions on process improvements, and are always looking for more maintainers.
### Talking to other Docker users and contributors
<table class="tg">
<col width="45%">
<col width="65%">
<tr>
<td>Internet&nbsp;Relay&nbsp;Chat&nbsp;(IRC)</td>
<td>
<p>
IRC is a direct line to our most knowledgeable Docker users; we have
both the <code>#docker</code> and <code>#docker-dev</code> group on
<strong>irc.freenode.net</strong>.
IRC is a rich chat protocol but it can overwhelm new users. You can search
<a href="https://botbot.me/freenode/docker/#" target="_blank">our chat archives</a>.
</p>
Read our <a href="https://docs.docker.com/opensource/get-help/#/irc-quickstart" target="_blank">IRC quickstart guide</a> for an easy way to get started.
</td>
</tr>
<tr>
<td>Docker Community Forums</td>
<td>
The <a href="https://forums.docker.com/c/open-source-projects/de" target="_blank">Docker Engine</a>
group is for users of the Docker Engine project.
</td>
</tr>
<tr>
<td>Google Groups</td>
<td>
The <a href="https://groups.google.com/forum/#!forum/docker-dev"
target="_blank">docker-dev</a> group is for contributors and other people
contributing to the Docker project. You can join this group without a
Google account by sending an email to <a
href="mailto:docker-dev+subscribe@googlegroups.com">docker-dev+subscribe@googlegroups.com</a>.
You'll receive a join-request message; simply reply to the message to
confirm your subscription.
</td>
</tr>
<tr>
<td>Twitter</td>
<td>
You can follow <a href="https://twitter.com/docker/" target="_blank">Docker's Twitter feed</a>
to get updates on our products. You can also tweet us questions or just
share blogs or stories.
</td>
</tr>
<tr>
<td>Stack Overflow</td>
<td>
Stack Overflow has over 7000 Docker questions listed. We regularly
monitor <a href="https://stackoverflow.com/search?tab=newest&q=docker" target="_blank">Docker questions</a>
and so do many other knowledgeable Docker users.
</td>
</tr>
</table>
### Legal
*Brought to you courtesy of our legal counsel. For more context,
please see the [NOTICE](https://github.com/docker/docker/blob/master/NOTICE) document in this repo.*
Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.
For more information, please see https://www.bis.doc.gov
They are probably not perfect; please let us know if anything feels wrong or incomplete.
Licensing
=========
Docker is licensed under the Apache License, Version 2.0. See
[LICENSE](https://github.com/docker/docker/blob/master/LICENSE) for the full
license text.
Note
----
Other Docker Related Projects
We also keep the documentation in this repository. The website documentation is generated with Sphinx from these sources.
Please find it under docs/sources/ and read more about it at https://github.com/dotcloud/docker/master/docs/README.md
Please feel free to fix / update the documentation and send us pull requests. More tutorials are also welcome.
Setting up a dev environment
----------------------------
Instructions that have been verified to work on Ubuntu 12.10:
```bash
sudo apt-get -y install lxc wget bsdtar curl golang git
export GOPATH=~/go/
export PATH=$GOPATH/bin:$PATH
mkdir -p $GOPATH/src/github.com/dotcloud
cd $GOPATH/src/github.com/dotcloud
git clone git@github.com:dotcloud/docker.git
cd docker
go get -v github.com/dotcloud/docker/...
go install -v github.com/dotcloud/docker/...
```
Then run the docker daemon:
```bash
sudo $GOPATH/bin/docker -d
```
Run the `go install` command (above) to recompile docker.
What is a Standard Container?
=============================
There are a number of projects under development that are based on Docker's
core technology. These projects expand the tooling built around the
Docker platform to broaden its application and utility.
* [Docker Registry](https://github.com/docker/distribution): Registry
server for Docker (hosting/delivery of repositories and images)
* [Docker Machine](https://github.com/docker/machine): Machine management
for a container-centric world
* [Docker Swarm](https://github.com/docker/swarm): A Docker-native clustering
system
* [Docker Compose](https://github.com/docker/compose) (formerly Fig):
Define and run multi-container apps
* [Kitematic](https://github.com/docker/kitematic): The easiest way to use
Docker on Mac and Windows
Docker defines a unit of software delivery called a Standard Container. The goal of a Standard Container is to encapsulate a software component and all its dependencies in
a format that is self-describing and portable, so that any compliant runtime can run it without extra dependencies, regardless of the underlying machine and the contents of the container.
The spec for Standard Containers is currently a work in progress, but it is very straightforward. It mostly defines 1) an image format, 2) a set of standard operations, and 3) an execution environment.
A great analogy for this is the shipping container. Just like Standard Containers are a fundamental unit of software delivery, shipping containers (http://bricks.argz.com/ins/7823-1/12) are a fundamental unit of physical delivery.
### 1. STANDARD OPERATIONS
Just like shipping containers, Standard Containers define a set of STANDARD OPERATIONS. Shipping containers can be lifted, stacked, locked, loaded, unloaded and labelled. Similarly, standard containers can be started, stopped, copied, snapshotted, downloaded, uploaded and tagged.
### 2. CONTENT-AGNOSTIC
Just like shipping containers, Standard Containers are CONTENT-AGNOSTIC: all standard operations have the same effect regardless of the contents. A shipping container will be stacked in exactly the same way whether it contains Vietnamese powder coffee or spare Maserati parts. Similarly, Standard Containers are started or uploaded in the same way whether they contain a postgres database, a php application with its dependencies and application server, or Java build artifacts.
### 3. INFRASTRUCTURE-AGNOSTIC
Both types of containers are INFRASTRUCTURE-AGNOSTIC: they can be transported to thousands of facilities around the world, and manipulated by a wide variety of equipment. A shipping container can be packed in a factory in Ukraine, transported by truck to the nearest routing center, stacked onto a train, loaded into a German boat by an Australian-built crane, stored in a warehouse at a US facility, etc. Similarly, a standard container can be bundled on my laptop, uploaded to S3, downloaded, run and snapshotted by a build server at Equinix in Virginia, uploaded to 10 staging servers in a home-made Openstack cluster, then sent to 30 production instances across 3 EC2 regions.
### 4. DESIGNED FOR AUTOMATION
Because they offer the same standard operations regardless of content and infrastructure, Standard Containers, just like their physical counterpart, are extremely well-suited for automation. In fact, you could say automation is their secret weapon.
Many things that once required time-consuming and error-prone human effort can now be programmed. Before shipping containers, a bag of powder coffee was hauled, dragged, dropped, rolled and stacked by 10 different people in 10 different locations by the time it reached its destination. 1 out of 50 disappeared. 1 out of 20 was damaged. The process was slow, inefficient and cost a fortune - and was entirely different depending on the facility and the type of goods.
Similarly, before Standard Containers, by the time a software component ran in production, it had been individually built, configured, bundled, documented, patched, vendored, templated, tweaked and instrumented by 10 different people on 10 different computers. Builds failed, libraries conflicted, mirrors crashed, post-it notes were lost, logs were misplaced, cluster updates were half-broken. The process was slow, inefficient and cost a fortune - and was entirely different depending on the language and infrastructure provider.
### 5. INDUSTRIAL-GRADE DELIVERY
There are 17 million shipping containers in existence, packed with every physical good imaginable. Every single one of them can be loaded on the same boats, by the same cranes, in the same facilities, and sent anywhere in the World with incredible efficiency. It is embarrassing to think that a 30 ton shipment of coffee can safely travel half-way across the World in *less time* than it takes a software team to deliver its code from one datacenter to another sitting 10 miles away.
With Standard Containers we can put an end to that embarrassment, by making INDUSTRIAL-GRADE DELIVERY of software a reality.
Standard Container Specification
--------------------------------
(TODO)
### Image format
### Standard operations
* Copy
* Run
* Stop
* Wait
* Commit
* Attach standard streams
* List filesystem changes
* ...
### Execution environment
#### Root filesystem
#### Environment variables
#### Process arguments
#### Networking
#### Process namespacing
#### Resource limits
#### Process monitoring
#### Logging
#### Signals
#### Pseudo-terminal allocation
#### Security
If you know of another project underway that should be listed here, please help
us keep this list up-to-date by submitting a PR.
Awesome-Docker
==============
You can find more projects, tools and articles related to Docker on the [awesome-docker list](https://github.com/veggiemonk/awesome-docker). Add your project there.


@@ -1,118 +0,0 @@
Docker Engine Roadmap
=====================
### How should I use this document?
This document provides a description of the items that the project has decided to prioritize. This should
serve as a reference point for Docker contributors to understand where the project is going, and
help determine if a contribution could conflict with longer-term plans.
The fact that a feature isn't listed here doesn't mean that a patch for it will automatically be
refused (except for those mentioned as "frozen features" below)! We are always happy to receive
patches for new cool features we haven't thought about, or didn't judge priority. Please however
understand that such patches might take longer for us to review.
### How can I help?
Short term objectives are listed in the [wiki](https://github.com/docker/docker/wiki) and described
in [Issues](https://github.com/docker/docker/issues?q=is%3Aopen+is%3Aissue+label%3Aroadmap). Our
goal is to break down the workload in such a way that anybody can jump in and help. Please comment on
issues if you want to take one, to avoid duplicating effort! Similarly, if a maintainer is already
assigned to an issue you'd like to participate in, pinging them on IRC or GitHub to offer your help is
the best way to go.
### How can I add something to the roadmap?
The roadmap process is new to the Docker Engine: we are only beginning to structure and document the
project objectives. Our immediate goal is to be more transparent, and work with our community to
focus our efforts on fewer prioritized topics.
We hope to offer in the near future a process allowing anyone to propose a topic to the roadmap, but
we are not quite there yet. For the time being, the BDFL remains the keeper of the roadmap, and we
won't be accepting pull requests adding or removing items from this file.
# 1. Features and refactoring
## 1.1 Runtime improvements
We recently introduced [`runC`](https://runc.io) as a standalone low-level tool for container
execution. The initial goal was to integrate runC as a replacement in the Engine for the traditional
default libcontainer `execdriver`, but the Engine internals were not ready for this.
As runC continued evolving, and the OCI specification along with it, we created
[`containerd`](https://containerd.tools/), a daemon to control and monitor multiple `runC` instances. This is
the new target for Engine integration, as it can entirely replace the whole `execdriver`
architecture, and container monitoring along with it.
Docker Engine will rely on a long-running `containerd` companion daemon for all container execution
related operations. This could open the door in the future for Engine restarts without interrupting
running containers.
## 1.2 Plugins improvements
Docker Engine 1.7.0 introduced plugin support, initially for the use cases of volumes and networks
extensions. The plugin infrastructure was kept minimal as we were collecting use cases and real
world feedback before optimizing for any particular workflow.
In the future, we'd like plugins to become first class citizens, and encourage an ecosystem of
plugins. This implies in particular making it trivially easy to distribute plugins as containers
through any Registry instance, as well as solving the commonly heard pain points of plugins needing
to be treated as somewhat special (being active at all time, started before any other user
containers, and not as easily dismissed).
## 1.3 Internal decoupling
A lot of work has been done in trying to decouple the Docker Engine's internals. In particular, the
API implementation has been refactored, and the Builder side of the daemon is now
[fully independent](https://github.com/docker/docker/tree/master/builder) while still residing in
the same repository.
We are exploring ways to go further with that decoupling, capitalizing on the work introduced by the
runtime renovation and plugins improvement efforts. Indeed, the combination of `containerd` support
with the concept of "special" containers opens the door for bootstrapping more Engine internals
using the same facilities.
## 1.4 Cluster capable Engine
The community has been pushing for a more cluster capable Docker Engine, and a huge effort was spent
adding features such as multihost networking, and node discovery down at the Engine level. Yet, the
Engine is currently incapable of taking scheduling decisions alone, and continues relying on Swarm
for that.
We plan to complete this effort and make the Engine fully cluster capable. Multiple Docker Engine
instances are already capable of discovering each other and establishing overlay networking so that
their containers can communicate; the next step is for a given Engine to gain the ability to dispatch
work to another node in the cluster. This will be introduced in a backward compatible way, such that a
`docker run` invocation on a particular node remains fully deterministic.
# 2 Frozen features
## 2.1 Docker exec
We won't accept patches expanding the surface of `docker exec`, which we intend to keep as a
*debugging* feature, as well as being strongly dependent on the Runtime ingredient effort.
## 2.2 Remote Registry Operations
A large amount of work is ongoing in the area of image distribution and provenance. This includes
moving to the V2 Registry API and heavily refactoring the code that powers these features. The
desired result is more secure, reliable and easier to use image distribution.
Part of the problem with this part of the code base is the lack of a stable and flexible interface.
If new features are added that access the registry without solidifying these interfaces, achieving
feature parity will continue to be elusive. While we get a handle on this situation, we are imposing
a moratorium on new code that accesses the Registry API in commands that don't already make remote
calls.
Currently, only the following commands cause interaction with a remote registry:
- push
- pull
- run
- build
- search
- login
In the interest of stabilizing the registry access model during this ongoing work, we are not
accepting additions to other commands that will cause remote interaction with the Registry API. This
moratorium will lift when the goals of the distribution project have been met.

SPECS/data-volumes.md
@@ -0,0 +1,71 @@
## Spec for data volumes
Spec owner: Solomon Hykes <solomon@dotcloud.com>
Data volumes (issue #111) are a much-requested feature which trigger much discussion and debate. Below is the current authoritative spec for implementing data volumes.
This spec will be deprecated once the feature is fully implemented.
Discussion, requests, trolls, demands, offerings, threats and other forms of supplications concerning this spec should be addressed to Solomon here: https://github.com/dotcloud/docker/issues/111
### 1. Creating data volumes
At container creation, parts of a container's filesystem can be mounted as separate data volumes. Volumes are defined with the -v flag.
For example:
```bash
$ docker run -v /var/lib/postgres -v /var/log postgres /usr/bin/postgres
```
In this example, a new container is created from the 'postgres' image. At the same time, docker creates 2 new data volumes: one will be mapped to the container at /var/lib/postgres, the other at /var/log.
2 important notes:
1) Volumes don't have top-level names. At no point does the user provide a name, nor is a name given to them. Volumes are identified by the path at which they are mounted inside their container.
2) The user doesn't choose the source of the volume. Docker only mounts volumes it created itself, in the same way that it only runs containers that it created itself. That is by design.
### 2. Sharing data volumes
Instead of creating its own volumes, a container can share another container's volumes. For example:
```bash
$ docker run --volumes-from $OTHER_CONTAINER_ID postgres /usr/local/bin/postgres-backup
```
In this example, a new container is created from the 'postgres' example. At the same time, docker will *re-use* the 2 data volumes created in the previous example. One volume will be mounted on the /var/lib/postgres of *both* containers, and the other will be mounted on the /var/log of both containers.
### 3. Under the hood
Docker stores volumes in /var/lib/docker/volumes. Each volume receives a globally unique ID at creation, and is stored at /var/lib/docker/volumes/ID.
At creation, volumes are attached to a single container - the source of truth for this mapping will be the container's configuration.
Mounting a volume consists of calling "mount --bind" from the volume's directory to the appropriate sub-directory of the container mountpoint. This may be done by Docker itself, or farmed out to lxc (which supports mount-binding) if possible.
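As a rough illustration, here is a minimal Go sketch of such a bind mount, assuming Linux; the volume ID and container mountpoint below are hypothetical, not paths the spec mandates:

```go
package main

import (
	"log"
	"syscall"
)

func main() {
	// Hypothetical paths: a volume directory, and the sub-directory of a
	// container mountpoint where it should appear.
	volume := "/var/lib/docker/volumes/0123456789ab"
	target := "/var/lib/docker/containers/cid/rootfs/var/lib/postgres"

	// MS_BIND makes this equivalent to `mount --bind volume target`.
	if err := syscall.Mount(volume, target, "", syscall.MS_BIND, ""); err != nil {
		log.Fatalf("bind mount failed: %v", err)
	}
}
```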
### 4. Backups, transfers and other volume operations
Volumes sometimes need to be backed up, transferred between hosts, synchronized, etc. These operations typically are application-specific or site-specific, e.g. rsync vs. S3 upload vs. replication vs...
Rather than attempting to implement all these scenarios directly, Docker will allow for custom implementations using an extension mechanism.
### 5. Custom volume handlers
Docker allows for arbitrary code to be executed against a container's volumes, to implement any custom action: backup, transfer, synchronization across hosts, etc.
Here's an example:
```bash
$ DB=$(docker run -d -v /var/lib/postgres -v /var/log postgres /usr/bin/postgres)
$ BACKUP_JOB=$(docker run -d --volumes-from $DB shykes/backuper /usr/local/bin/backup-postgres --s3creds=$S3CREDS)
$ docker wait $BACKUP_JOB
```
Congratulations, you just implemented a custom volume handler, using Docker's built-in ability to 1) execute arbitrary code and 2) share volumes between containers.


@@ -1,45 +0,0 @@
# Vendoring policies
This document outlines recommended Vendoring policies for Docker repositories.
(Example, libnetwork is a Docker repo and logrus is not.)
## Vendoring using tags
Commit ID based vendoring provides little/no information about the updates
vendored. To fix this, vendors will now require that repositories use annotated
tags along with commit ids to snapshot commits. Annotated tags by themselves
are not sufficient, since the same tag can be force updated to reference
different commits.
Each tag should:
- Follow Semantic Versioning rules (refer to section on "Semantic Versioning")
- Have a corresponding entry in the change tracking document.
Each repo should:
- Have a change tracking document between tags/releases. Ex: CHANGELOG.md,
github releases file.
The goal here is for consuming repos to be able to use the tag version and
changelog updates to determine whether the vendoring will cause any breaking or
backward incompatible changes. This also means that repos can specify having
dependency on a package of a specific version or greater up to the next major
release, without encountering breaking changes.
## Semantic Versioning
Annotated version tags should follow Semantic Versioning policies.
According to http://semver.org:
"Given a version number MAJOR.MINOR.PATCH, increment the:
MAJOR version when you make incompatible API changes,
MINOR version when you add functionality in a backwards-compatible manner, and
PATCH version when you make backwards-compatible bug fixes.
Additional labels for pre-release and build metadata are available as extensions
to the MAJOR.MINOR.PATCH format."
## Vendoring cadence
In order to avoid huge vendoring changes, it is recommended to have a regular
cadence for vendoring updates, e.g. monthly.
## Pre-merge vendoring tests
All related repos will be vendored into docker/docker.
CI on docker/docker should catch any breaking changes involving multiple repos.


@@ -1 +0,0 @@
1.13.1

Vagrantfile

@@ -0,0 +1,82 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
def v10(config)
config.vm.box = 'precise64'
config.vm.box_url = 'http://files.vagrantup.com/precise64.box'
# Install ubuntu packaging dependencies and create ubuntu packages
config.vm.provision :shell, :inline => "echo 'deb http://ppa.launchpad.net/dotcloud/lxc-docker/ubuntu precise main' >>/etc/apt/sources.list"
config.vm.provision :shell, :inline => 'export DEBIAN_FRONTEND=noninteractive; apt-get -qq update; apt-get install -qq -y --force-yes lxc-docker'
end
Vagrant::VERSION < "1.1.0" and Vagrant::Config.run do |config|
v10(config)
end
Vagrant::VERSION >= "1.1.0" and Vagrant.configure("1") do |config|
v10(config)
end
Vagrant::VERSION >= "1.1.0" and Vagrant.configure("2") do |config|
config.vm.provider :aws do |aws|
config.vm.box = "dummy"
config.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
aws.access_key_id = ENV["AWS_ACCESS_KEY_ID"]
aws.secret_access_key = ENV["AWS_SECRET_ACCESS_KEY"]
aws.keypair_name = ENV["AWS_KEYPAIR_NAME"]
aws.ssh_private_key_path = ENV["AWS_SSH_PRIVKEY"]
aws.region = "us-east-1"
aws.ami = "ami-d0f89fb9"
aws.ssh_username = "ubuntu"
aws.instance_type = "t1.micro"
end
config.vm.provider :rackspace do |rs|
config.vm.box = "dummy"
config.vm.box_url = "https://github.com/mitchellh/vagrant-rackspace/raw/master/dummy.box"
config.ssh.private_key_path = ENV["RS_PRIVATE_KEY"]
rs.username = ENV["RS_USERNAME"]
rs.api_key = ENV["RS_API_KEY"]
rs.public_key_path = ENV["RS_PUBLIC_KEY"]
rs.flavor = /512MB/
rs.image = /Ubuntu/
end
config.vm.provider :virtualbox do |vb|
config.vm.box = 'precise64'
config.vm.box_url = 'http://files.vagrantup.com/precise64.box'
end
end
Vagrant::VERSION >= "1.2.0" and Vagrant.configure("2") do |config|
config.vm.provider :aws do |aws, override|
config.vm.box = "dummy"
config.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
aws.access_key_id = ENV["AWS_ACCESS_KEY_ID"]
aws.secret_access_key = ENV["AWS_SECRET_ACCESS_KEY"]
aws.keypair_name = ENV["AWS_KEYPAIR_NAME"]
override.ssh.private_key_path = ENV["AWS_SSH_PRIVKEY"]
override.ssh.username = "ubuntu"
aws.region = "us-east-1"
aws.ami = "ami-d0f89fb9"
aws.instance_type = "t1.micro"
end
config.vm.provider :rackspace do |rs|
config.vm.box = "dummy"
config.vm.box_url = "https://github.com/mitchellh/vagrant-rackspace/raw/master/dummy.box"
config.ssh.private_key_path = ENV["RS_PRIVATE_KEY"]
rs.username = ENV["RS_USERNAME"]
rs.api_key = ENV["RS_API_KEY"]
rs.public_key_path = ENV["RS_PUBLIC_KEY"]
rs.flavor = /512MB/
rs.image = /Ubuntu/
end
config.vm.provider :virtualbox do |vb|
config.vm.box = 'precise64'
config.vm.box_url = 'http://files.vagrantup.com/precise64.box'
end
end


@@ -1,42 +0,0 @@
# Working on the Engine API
The Engine API is an HTTP API used by the command-line client to communicate with the daemon. It can also be used by third-party software to control the daemon.
It consists of various components in this repository:
- `api/swagger.yaml` A Swagger definition of the API.
- `api/types/` Types shared by both the client and server, representing various objects, options, responses, etc. Most are written manually, but some are automatically generated from the Swagger definition. See [#27919](https://github.com/docker/docker/issues/27919) for progress on this.
- `cli/` The command-line client.
- `client/` The Go client used by the command-line client. It can also be used by third-party Go programs.
- `daemon/` The daemon, which serves the API.
## Swagger definition
The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to:
1. Automatically generate documentation.
2. Automatically generate the Go server and client. (A work-in-progress.)
3. Provide a machine-readable version of the API for introspecting what it can do, automatically generating clients for other languages, etc.
## Updating the API documentation
The API documentation is generated entirely from `api/swagger.yaml`. If you make updates to the API, you'll need to edit this file to represent the change in the documentation.
The file is split into two main sections:
- `definitions`, which defines re-usable objects used in requests and responses
- `paths`, which defines the API endpoints (and some inline objects which don't need to be reusable)
To make an edit, first look for the endpoint you want to edit under `paths`, then make the required edits. Endpoints may reference reusable objects with `$ref`, which can be found in the `definitions` section.
There is hopefully enough example material in the file for you to copy a similar pattern from elsewhere in the file (e.g. adding new fields or endpoints), but for the full reference, see the [Swagger specification](http://swagger.io/specification/).
`swagger.yaml` is validated by `hack/validate/swagger` to ensure it is a valid Swagger definition. This is useful when you are making edits, to ensure you are doing the right thing.
## Viewing the API documentation
When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly.
All the documentation generation is done in the documentation repository, [docker/docker.github.io](https://github.com/docker/docker.github.io). The Swagger definition is vendored periodically into this repository, but you can manually copy over the Swagger definition to test changes.
Copy `api/swagger.yaml` in this repository to `engine/api/[VERSION_NUMBER]/swagger.yaml` in the documentation repository, overwriting what is already there. Then, run `docker-compose up` in the documentation repository and browse to [http://localhost:4000/engine/api/](http://localhost:4000/engine/api/) when it finishes rendering.


@@ -1,166 +0,0 @@
package api
import (
"encoding/json"
"encoding/pem"
"fmt"
"mime"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/system"
"github.com/docker/libtrust"
)
// Common constants for daemon and client.
const (
// DefaultVersion of Current REST API
DefaultVersion string = "1.26"
// NoBaseImageSpecifier is the symbol used by the FROM
// command to specify that no base image is to be used.
NoBaseImageSpecifier string = "scratch"
)
// byPortInfo is a temporary type used to sort types.Port by its fields
type byPortInfo []types.Port
func (r byPortInfo) Len() int { return len(r) }
func (r byPortInfo) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
func (r byPortInfo) Less(i, j int) bool {
if r[i].PrivatePort != r[j].PrivatePort {
return r[i].PrivatePort < r[j].PrivatePort
}
if r[i].IP != r[j].IP {
return r[i].IP < r[j].IP
}
if r[i].PublicPort != r[j].PublicPort {
return r[i].PublicPort < r[j].PublicPort
}
return r[i].Type < r[j].Type
}
// DisplayablePorts returns formatted string representing open ports of container
// e.g. "0.0.0.0:80->9090/tcp, 9988/tcp"
// it's used by command 'docker ps'
func DisplayablePorts(ports []types.Port) string {
type portGroup struct {
first uint16
last uint16
}
groupMap := make(map[string]*portGroup)
var result []string
var hostMappings []string
var groupMapKeys []string
sort.Sort(byPortInfo(ports))
for _, port := range ports {
current := port.PrivatePort
portKey := port.Type
if port.IP != "" {
if port.PublicPort != current {
hostMappings = append(hostMappings, fmt.Sprintf("%s:%d->%d/%s", port.IP, port.PublicPort, port.PrivatePort, port.Type))
continue
}
portKey = fmt.Sprintf("%s/%s", port.IP, port.Type)
}
group := groupMap[portKey]
if group == nil {
groupMap[portKey] = &portGroup{first: current, last: current}
// record order that groupMap keys are created
groupMapKeys = append(groupMapKeys, portKey)
continue
}
if current == (group.last + 1) {
group.last = current
continue
}
result = append(result, formGroup(portKey, group.first, group.last))
groupMap[portKey] = &portGroup{first: current, last: current}
}
for _, portKey := range groupMapKeys {
g := groupMap[portKey]
result = append(result, formGroup(portKey, g.first, g.last))
}
result = append(result, hostMappings...)
return strings.Join(result, ", ")
}
func formGroup(key string, start, last uint16) string {
parts := strings.Split(key, "/")
groupType := parts[0]
var ip string
if len(parts) > 1 {
ip = parts[0]
groupType = parts[1]
}
group := strconv.Itoa(int(start))
if start != last {
group = fmt.Sprintf("%s-%d", group, last)
}
if ip != "" {
group = fmt.Sprintf("%s:%s->%s", ip, group, group)
}
return fmt.Sprintf("%s/%s", group, groupType)
}
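// Example (hypothetical; not part of the original file): a minimal use of
// DisplayablePorts, illustrating the range grouping and host-mapping formats
// produced above. The ports chosen here are arbitrary.
func ExampleDisplayablePorts() {
	ports := []types.Port{
		{PrivatePort: 9998, Type: "udp"},
		{PrivatePort: 9999, Type: "udp"},
		{IP: "1.2.3.4", PrivatePort: 80, PublicPort: 8080, Type: "tcp"},
	}
	fmt.Println(DisplayablePorts(ports))
	// Output: 9998-9999/udp, 1.2.3.4:8080->80/tcp
}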
// MatchesContentType validates the content type against the expected one
func MatchesContentType(contentType, expectedType string) bool {
mimetype, _, err := mime.ParseMediaType(contentType)
if err != nil {
logrus.Errorf("Error parsing media type: %s error: %v", contentType, err)
}
return err == nil && mimetype == expectedType
}
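// A quick illustration (hypothetical; not part of the original file): media
// type parameters such as charset are ignored when matching.
func exampleMatchesContentType() bool {
	return MatchesContentType("application/json; charset=utf-8", "application/json") // true
}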
// LoadOrCreateTrustKey attempts to load the libtrust key at the given path,
// otherwise generates a new one
func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) {
err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700)
if err != nil {
return nil, err
}
trustKey, err := libtrust.LoadKeyFile(trustKeyPath)
if err == libtrust.ErrKeyFileDoesNotExist {
trustKey, err = libtrust.GenerateECP256PrivateKey()
if err != nil {
return nil, fmt.Errorf("Error generating key: %s", err)
}
encodedKey, err := serializePrivateKey(trustKey, filepath.Ext(trustKeyPath))
if err != nil {
return nil, fmt.Errorf("Error serializing key: %s", err)
}
if err := ioutils.AtomicWriteFile(trustKeyPath, encodedKey, os.FileMode(0600)); err != nil {
return nil, fmt.Errorf("Error saving key file: %s", err)
}
} else if err != nil {
return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err)
}
return trustKey, nil
}
func serializePrivateKey(key libtrust.PrivateKey, ext string) (encoded []byte, err error) {
if ext == ".json" || ext == ".jwk" {
encoded, err = json.Marshal(key)
if err != nil {
return nil, fmt.Errorf("unable to encode private key JWK: %s", err)
}
} else {
pemBlock, err := key.PEMBlock()
if err != nil {
return nil, fmt.Errorf("unable to encode private key PEM: %s", err)
}
encoded = pem.EncodeToMemory(pemBlock)
}
return
}
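// Example (hypothetical; not part of the original file): loading or creating
// the daemon trust key. The path below is an assumption for illustration,
// not a fixed default.
func exampleTrustKey() {
	key, err := LoadOrCreateTrustKey("/etc/docker/key.json")
	if err != nil {
		logrus.Fatalf("could not load or create trust key: %v", err)
	}
	logrus.Infof("loaded trust key: %s", key.KeyID())
}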


@@ -1,341 +0,0 @@
package api
import (
"io/ioutil"
"path/filepath"
"testing"
"os"
"github.com/docker/docker/api/types"
)
type ports struct {
ports []types.Port
expected string
}
// DisplayablePorts
func TestDisplayablePorts(t *testing.T) {
cases := []ports{
{
[]types.Port{
{
PrivatePort: 9988,
Type: "tcp",
},
},
"9988/tcp"},
{
[]types.Port{
{
PrivatePort: 9988,
Type: "udp",
},
},
"9988/udp",
},
{
[]types.Port{
{
IP: "0.0.0.0",
PrivatePort: 9988,
Type: "tcp",
},
},
"0.0.0.0:0->9988/tcp",
},
{
[]types.Port{
{
PrivatePort: 9988,
PublicPort: 8899,
Type: "tcp",
},
},
"9988/tcp",
},
{
[]types.Port{
{
IP: "4.3.2.1",
PrivatePort: 9988,
PublicPort: 8899,
Type: "tcp",
},
},
"4.3.2.1:8899->9988/tcp",
},
{
[]types.Port{
{
IP: "4.3.2.1",
PrivatePort: 9988,
PublicPort: 9988,
Type: "tcp",
},
},
"4.3.2.1:9988->9988/tcp",
},
{
[]types.Port{
{
PrivatePort: 9988,
Type: "udp",
}, {
PrivatePort: 9988,
Type: "udp",
},
},
"9988/udp, 9988/udp",
},
{
[]types.Port{
{
IP: "1.2.3.4",
PublicPort: 9998,
PrivatePort: 9998,
Type: "udp",
}, {
IP: "1.2.3.4",
PublicPort: 9999,
PrivatePort: 9999,
Type: "udp",
},
},
"1.2.3.4:9998-9999->9998-9999/udp",
},
{
[]types.Port{
{
IP: "1.2.3.4",
PublicPort: 8887,
PrivatePort: 9998,
Type: "udp",
}, {
IP: "1.2.3.4",
PublicPort: 8888,
PrivatePort: 9999,
Type: "udp",
},
},
"1.2.3.4:8887->9998/udp, 1.2.3.4:8888->9999/udp",
},
{
[]types.Port{
{
PrivatePort: 9998,
Type: "udp",
}, {
PrivatePort: 9999,
Type: "udp",
},
},
"9998-9999/udp",
},
{
[]types.Port{
{
IP: "1.2.3.4",
PrivatePort: 6677,
PublicPort: 7766,
Type: "tcp",
}, {
PrivatePort: 9988,
PublicPort: 8899,
Type: "udp",
},
},
"9988/udp, 1.2.3.4:7766->6677/tcp",
},
{
[]types.Port{
{
IP: "1.2.3.4",
PrivatePort: 9988,
PublicPort: 8899,
Type: "udp",
}, {
IP: "1.2.3.4",
PrivatePort: 9988,
PublicPort: 8899,
Type: "tcp",
}, {
IP: "4.3.2.1",
PrivatePort: 2233,
PublicPort: 3322,
Type: "tcp",
},
},
"4.3.2.1:3322->2233/tcp, 1.2.3.4:8899->9988/tcp, 1.2.3.4:8899->9988/udp",
},
{
[]types.Port{
{
PrivatePort: 9988,
PublicPort: 8899,
Type: "udp",
}, {
IP: "1.2.3.4",
PrivatePort: 6677,
PublicPort: 7766,
Type: "tcp",
}, {
IP: "4.3.2.1",
PrivatePort: 2233,
PublicPort: 3322,
Type: "tcp",
},
},
"9988/udp, 4.3.2.1:3322->2233/tcp, 1.2.3.4:7766->6677/tcp",
},
{
[]types.Port{
{
PrivatePort: 80,
Type: "tcp",
}, {
PrivatePort: 1024,
Type: "tcp",
}, {
PrivatePort: 80,
Type: "udp",
}, {
PrivatePort: 1024,
Type: "udp",
}, {
IP: "1.1.1.1",
PublicPort: 80,
PrivatePort: 1024,
Type: "tcp",
}, {
IP: "1.1.1.1",
PublicPort: 80,
PrivatePort: 1024,
Type: "udp",
}, {
IP: "1.1.1.1",
PublicPort: 1024,
PrivatePort: 80,
Type: "tcp",
}, {
IP: "1.1.1.1",
PublicPort: 1024,
PrivatePort: 80,
Type: "udp",
}, {
IP: "2.1.1.1",
PublicPort: 80,
PrivatePort: 1024,
Type: "tcp",
}, {
IP: "2.1.1.1",
PublicPort: 80,
PrivatePort: 1024,
Type: "udp",
}, {
IP: "2.1.1.1",
PublicPort: 1024,
PrivatePort: 80,
Type: "tcp",
}, {
IP: "2.1.1.1",
PublicPort: 1024,
PrivatePort: 80,
Type: "udp",
},
},
"80/tcp, 80/udp, 1024/tcp, 1024/udp, 1.1.1.1:1024->80/tcp, 1.1.1.1:1024->80/udp, 2.1.1.1:1024->80/tcp, 2.1.1.1:1024->80/udp, 1.1.1.1:80->1024/tcp, 1.1.1.1:80->1024/udp, 2.1.1.1:80->1024/tcp, 2.1.1.1:80->1024/udp",
},
}
for _, port := range cases {
actual := DisplayablePorts(port.ports)
if port.expected != actual {
t.Fatalf("Expected %s, got %s.", port.expected, actual)
}
}
}
// MatchesContentType
func TestJsonContentType(t *testing.T) {
if !MatchesContentType("application/json", "application/json") {
t.Fail()
}
if !MatchesContentType("application/json; charset=utf-8", "application/json") {
t.Fail()
}
if MatchesContentType("dockerapplication/json", "application/json") {
t.Fail()
}
}
// LoadOrCreateTrustKey
func TestLoadOrCreateTrustKeyInvalidKeyFile(t *testing.T) {
tmpKeyFolderPath, err := ioutil.TempDir("", "api-trustkey-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpKeyFolderPath)
tmpKeyFile, err := ioutil.TempFile(tmpKeyFolderPath, "keyfile")
if err != nil {
t.Fatal(err)
}
if _, err := LoadOrCreateTrustKey(tmpKeyFile.Name()); err == nil {
t.Fatalf("expected an error, got nothing.")
}
}
func TestLoadOrCreateTrustKeyCreateKey(t *testing.T) {
tmpKeyFolderPath, err := ioutil.TempDir("", "api-trustkey-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpKeyFolderPath)
// Without the need to create the folder hierarchy
tmpKeyFile := filepath.Join(tmpKeyFolderPath, "keyfile")
if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil {
t.Fatalf("expected a new key file, got : %v and %v", err, key)
}
if _, err := os.Stat(tmpKeyFile); err != nil {
t.Fatalf("Expected to find a file %s, got %v", tmpKeyFile, err)
}
// With the need to create the folder hierarchy as tmpKeyFie is in a path
// where some folders do not exist.
tmpKeyFile = filepath.Join(tmpKeyFolderPath, "folder/hierarchy/keyfile")
if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil {
t.Fatalf("expected a new key file, got : %v and %v", err, key)
}
if _, err := os.Stat(tmpKeyFile); err != nil {
t.Fatalf("Expected to find a file %s, got %v", tmpKeyFile, err)
}
// With no path at all
defer os.Remove("keyfile")
if key, err := LoadOrCreateTrustKey("keyfile"); err != nil || key == nil {
t.Fatalf("expected a new key file, got : %v and %v", err, key)
}
if _, err := os.Stat("keyfile"); err != nil {
t.Fatalf("Expected to find a file keyfile, got %v", err)
}
}
func TestLoadOrCreateTrustKeyLoadValidKey(t *testing.T) {
tmpKeyFile := filepath.Join("fixtures", "keyfile")
if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil {
t.Fatalf("expected a key file, got : %v and %v", err, key)
}
}


@@ -1,6 +0,0 @@
// +build !windows
package api
// MinVersion represents Minimum REST API version supported
const MinVersion string = "1.12"


@@ -1,8 +0,0 @@
package api
// MinVersion represents Minimum REST API version supported
// Technically the first daemon API version released on Windows is v1.25 in
// engine version 1.13. However, some clients are explicitly using downlevel
// APIs (eg docker-compose v2.1 file format) and that is just too restrictive.
// Hence also allowing 1.24 on Windows.
const MinVersion string = "1.24"


@@ -1,47 +0,0 @@
package errors
import "net/http"
// apiError is an error wrapper that also
// holds information about response status codes.
type apiError struct {
error
statusCode int
}
// HTTPErrorStatusCode returns a status code.
func (e apiError) HTTPErrorStatusCode() int {
return e.statusCode
}
// NewErrorWithStatusCode allows you to associate
// a specific HTTP Status Code to an error.
// The Server will take that code and set
// it as the response status.
func NewErrorWithStatusCode(err error, code int) error {
return apiError{err, code}
}
// NewBadRequestError creates a new API error
// that has the 400 HTTP status code associated to it.
func NewBadRequestError(err error) error {
return NewErrorWithStatusCode(err, http.StatusBadRequest)
}
// NewRequestForbiddenError creates a new API error
// that has the 403 HTTP status code associated to it.
func NewRequestForbiddenError(err error) error {
return NewErrorWithStatusCode(err, http.StatusForbidden)
}
// NewRequestNotFoundError creates a new API error
// that has the 404 HTTP status code associated to it.
func NewRequestNotFoundError(err error) error {
return NewErrorWithStatusCode(err, http.StatusNotFound)
}
// NewRequestConflictError creates a new API error
// that has the 409 HTTP status code associated to it.
func NewRequestConflictError(err error) error {
return NewErrorWithStatusCode(err, http.StatusConflict)
}
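// Example (hypothetical; not part of the original file; assumes "fmt" is
// imported): wrapping a lookup failure so the API layer replies with 404.
func exampleNotFound(id string) error {
	return NewRequestNotFoundError(fmt.Errorf("no such container: %s", id))
}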


@@ -1,7 +0,0 @@
-----BEGIN EC PRIVATE KEY-----
keyID: AWX2:I27X:WQFX:IOMK:CNAK:O7PW:VYNB:ZLKC:CVAE:YJP2:SI4A:XXAY
MHcCAQEEILHTRWdcpKWsnORxSFyBnndJ4ROU41hMtr/GCiLVvwBQoAoGCCqGSM49
AwEHoUQDQgAElpVFbQ2V2UQKajqdE3fVxJ+/pE/YuEFOxWbOxF2be19BY209/iky
NzeFFK7SLpQ4CBJ7zDVXOHsMzrkY/GquGA==
-----END EC PRIVATE KEY-----


@@ -1,16 +0,0 @@
package httputils
import (
"io"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/network"
)
// ContainerDecoder specifies how
// to translate an io.Reader into
// container configuration.
type ContainerDecoder interface {
DecodeConfig(src io.Reader) (*container.Config, *container.HostConfig, *network.NetworkingConfig, error)
DecodeHostConfig(src io.Reader) (*container.HostConfig, error)
}
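// A minimal sketch (hypothetical; not part of the original file; assumes
// "encoding/json" is imported): a JSON-backed ContainerDecoder. The wrapper
// struct layout is an assumption for illustration only.
type jsonContainerDecoder struct{}

func (jsonContainerDecoder) DecodeConfig(src io.Reader) (*container.Config, *container.HostConfig, *network.NetworkingConfig, error) {
	var w struct {
		*container.Config
		HostConfig       *container.HostConfig
		NetworkingConfig *network.NetworkingConfig
	}
	if err := json.NewDecoder(src).Decode(&w); err != nil {
		return nil, nil, nil, err
	}
	return w.Config, w.HostConfig, w.NetworkingConfig, nil
}

func (jsonContainerDecoder) DecodeHostConfig(src io.Reader) (*container.HostConfig, error) {
	var hc container.HostConfig
	if err := json.NewDecoder(src).Decode(&hc); err != nil {
		return nil, err
	}
	return &hc, nil
}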


@@ -1,101 +0,0 @@
package httputils
import (
"net/http"
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/versions"
"github.com/gorilla/mux"
"google.golang.org/grpc"
)
// httpStatusError is an interface
// that errors with custom status codes
// implement to tell the api layer
// which response status to set.
type httpStatusError interface {
HTTPErrorStatusCode() int
}
// inputValidationError is an interface
// that errors generated by invalid
// inputs can implement to tell the
// api layer to set a 400 status code
// in the response.
type inputValidationError interface {
IsValidationError() bool
}
// GetHTTPErrorStatusCode retrieves status code from error message
func GetHTTPErrorStatusCode(err error) int {
if err == nil {
logrus.WithFields(logrus.Fields{"error": err}).Error("unexpected HTTP error handling")
return http.StatusInternalServerError
}
var statusCode int
errMsg := err.Error()
switch e := err.(type) {
case httpStatusError:
statusCode = e.HTTPErrorStatusCode()
case inputValidationError:
statusCode = http.StatusBadRequest
default:
// FIXME: this is brittle and should not be necessary, but we still need to identify if
// there are errors falling back into this logic.
// If we need to differentiate between different possible error types,
// we should create appropriate error types that implement the httpStatusError interface.
errStr := strings.ToLower(errMsg)
for _, status := range []struct {
keyword string
code int
}{
{"not found", http.StatusNotFound},
{"no such", http.StatusNotFound},
{"bad parameter", http.StatusBadRequest},
{"no command", http.StatusBadRequest},
{"conflict", http.StatusConflict},
{"impossible", http.StatusNotAcceptable},
{"wrong login/password", http.StatusUnauthorized},
{"unauthorized", http.StatusUnauthorized},
{"hasn't been activated", http.StatusForbidden},
{"this node", http.StatusServiceUnavailable},
} {
if strings.Contains(errStr, status.keyword) {
statusCode = status.code
break
}
}
}
if statusCode == 0 {
statusCode = http.StatusInternalServerError
}
return statusCode
}
func apiVersionSupportsJSONErrors(version string) bool {
const firstAPIVersionWithJSONErrors = "1.23"
return version == "" || versions.GreaterThan(version, firstAPIVersionWithJSONErrors)
}
// MakeErrorHandler makes an HTTP handler that decodes a Docker error and
// returns it in the response.
func MakeErrorHandler(err error) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
statusCode := GetHTTPErrorStatusCode(err)
vars := mux.Vars(r)
if apiVersionSupportsJSONErrors(vars["version"]) {
response := &types.ErrorResponse{
Message: err.Error(),
}
WriteJSON(w, statusCode, response)
} else {
http.Error(w, grpc.ErrorDesc(err), statusCode)
}
}
}
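
A small sketch of the resolution order, assuming the packages above: an explicit status code on the error wins, keyword matching is the fallback, and anything else becomes a 500:

package main

import (
	"errors"
	"fmt"

	apierrors "github.com/docker/docker/api/errors"
	"github.com/docker/docker/api/server/httputils"
)

func main() {
	// An explicit status code via the httpStatusError interface wins.
	e1 := apierrors.NewRequestConflictError(errors.New("name already in use"))
	fmt.Println(httputils.GetHTTPErrorStatusCode(e1)) // 409

	// No interface implemented: falls back to keyword matching.
	e2 := errors.New("No such image: busybox")
	fmt.Println(httputils.GetHTTPErrorStatusCode(e2)) // 404

	// Nothing matches: internal server error.
	e3 := errors.New("boom")
	fmt.Println(httputils.GetHTTPErrorStatusCode(e3)) // 500
}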

View File

@@ -1,73 +0,0 @@
package httputils
import (
"fmt"
"net/http"
"path/filepath"
"strconv"
"strings"
)
// BoolValue transforms a form value in different formats into a boolean type.
func BoolValue(r *http.Request, k string) bool {
s := strings.ToLower(strings.TrimSpace(r.FormValue(k)))
return !(s == "" || s == "0" || s == "no" || s == "false" || s == "none")
}
// BoolValueOrDefault returns the default bool passed if the query param is
// missing, otherwise it's just a proxy to BoolValue above.
func BoolValueOrDefault(r *http.Request, k string, d bool) bool {
if _, ok := r.Form[k]; !ok {
return d
}
return BoolValue(r, k)
}
// Int64ValueOrZero parses a form value into an int64 type.
// It returns 0 if the parsing fails.
func Int64ValueOrZero(r *http.Request, k string) int64 {
val, err := Int64ValueOrDefault(r, k, 0)
if err != nil {
return 0
}
return val
}
// Int64ValueOrDefault parses a form value into an int64 type. If there is an
// error, returns the error. If there is no value returns the default value.
func Int64ValueOrDefault(r *http.Request, field string, def int64) (int64, error) {
if r.Form.Get(field) != "" {
value, err := strconv.ParseInt(r.Form.Get(field), 10, 64)
if err != nil {
return value, err
}
return value, nil
}
return def, nil
}
// ArchiveOptions stores archive information for different operations.
type ArchiveOptions struct {
Name string
Path string
}
// ArchiveFormValues parses form values and turns them into ArchiveOptions.
// It fails if either the archive name or the path is missing from the request.
func ArchiveFormValues(r *http.Request, vars map[string]string) (ArchiveOptions, error) {
if err := ParseForm(r); err != nil {
return ArchiveOptions{}, err
}
name := vars["name"]
path := filepath.FromSlash(r.Form.Get("path"))
switch {
case name == "":
return ArchiveOptions{}, fmt.Errorf("bad parameter: 'name' cannot be empty")
case path == "":
return ArchiveOptions{}, fmt.Errorf("bad parameter: 'path' cannot be empty")
}
return ArchiveOptions{name, path}, nil
}
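
Typical usage, sketched with a hand-built request; the form values here are illustrative:

package main

import (
	"fmt"
	"net/http"
	"net/url"

	"github.com/docker/docker/api/server/httputils"
)

func main() {
	r, _ := http.NewRequest("POST", "/containers/json", nil)
	r.Form = url.Values{"all": {"1"}, "limit": {"5"}}

	fmt.Println(httputils.BoolValue(r, "all"))                 // true
	fmt.Println(httputils.Int64ValueOrZero(r, "limit"))        // 5
	fmt.Println(httputils.BoolValueOrDefault(r, "size", true)) // true: param missing
}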

View File

@@ -1,105 +0,0 @@
package httputils
import (
"net/http"
"net/url"
"testing"
)
func TestBoolValue(t *testing.T) {
cases := map[string]bool{
"": false,
"0": false,
"no": false,
"false": false,
"none": false,
"1": true,
"yes": true,
"true": true,
"one": true,
"100": true,
}
for c, e := range cases {
v := url.Values{}
v.Set("test", c)
r, _ := http.NewRequest("POST", "", nil)
r.Form = v
a := BoolValue(r, "test")
if a != e {
t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a)
}
}
}
func TestBoolValueOrDefault(t *testing.T) {
r, _ := http.NewRequest("GET", "", nil)
if !BoolValueOrDefault(r, "queryparam", true) {
t.Fatal("Expected to get true default value, got false")
}
v := url.Values{}
v.Set("param", "")
r, _ = http.NewRequest("GET", "", nil)
r.Form = v
if BoolValueOrDefault(r, "param", true) {
t.Fatal("Expected not to get true")
}
}
func TestInt64ValueOrZero(t *testing.T) {
cases := map[string]int64{
"": 0,
"asdf": 0,
"0": 0,
"1": 1,
}
for c, e := range cases {
v := url.Values{}
v.Set("test", c)
r, _ := http.NewRequest("POST", "", nil)
r.Form = v
a := Int64ValueOrZero(r, "test")
if a != e {
t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a)
}
}
}
func TestInt64ValueOrDefault(t *testing.T) {
cases := map[string]int64{
"": -1,
"-1": -1,
"42": 42,
}
for c, e := range cases {
v := url.Values{}
v.Set("test", c)
r, _ := http.NewRequest("POST", "", nil)
r.Form = v
a, err := Int64ValueOrDefault(r, "test", -1)
if a != e {
t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a)
}
if err != nil {
t.Fatalf("Error should be nil, but received: %s", err)
}
}
}
func TestInt64ValueOrDefaultWithError(t *testing.T) {
v := url.Values{}
v.Set("test", "invalid")
r, _ := http.NewRequest("POST", "", nil)
r.Form = v
_, err := Int64ValueOrDefault(r, "test", -1)
if err == nil {
t.Fatalf("Expected an error.")
}
}

View File

@@ -1,90 +0,0 @@
package httputils
import (
"fmt"
"io"
"net/http"
"strings"
"golang.org/x/net/context"
"github.com/docker/docker/api"
)
// APIVersionKey is the client's requested API version.
const APIVersionKey = "api-version"
// UAStringKey is used as the key for the user-agent string in a net/context.
const UAStringKey = "upstream-user-agent"
// APIFunc is an adapter to allow the use of ordinary functions as Docker API endpoints.
// Any function that has the appropriate signature can be registered as an API endpoint (e.g. getVersion).
type APIFunc func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error
// HijackConnection interrupts the HTTP response writer to get the
// underlying connection and operate on it.
func HijackConnection(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) {
conn, _, err := w.(http.Hijacker).Hijack()
if err != nil {
return nil, nil, err
}
// Flush the options to make sure the client sets the raw mode
conn.Write([]byte{})
return conn, conn, nil
}
// CloseStreams ensures that a list of HTTP streams is properly closed.
func CloseStreams(streams ...interface{}) {
for _, stream := range streams {
if tcpc, ok := stream.(interface {
CloseWrite() error
}); ok {
tcpc.CloseWrite()
} else if closer, ok := stream.(io.Closer); ok {
closer.Close()
}
}
}
// CheckForJSON makes sure that the request's Content-Type is application/json.
func CheckForJSON(r *http.Request) error {
ct := r.Header.Get("Content-Type")
// No Content-Type header is ok as long as there's no Body
if ct == "" {
if r.Body == nil || r.ContentLength == 0 {
return nil
}
}
// Otherwise it better be json
if api.MatchesContentType(ct, "application/json") {
return nil
}
return fmt.Errorf("Content-Type specified (%s) must be 'application/json'", ct)
}
// ParseForm ensures the request form is parsed even with invalid content types.
// If we don't do this, a POST without a Content-Type (even with an empty body) will fail.
func ParseForm(r *http.Request) error {
if r == nil {
return nil
}
if err := r.ParseForm(); err != nil && !strings.HasPrefix(err.Error(), "mime:") {
return err
}
return nil
}
// VersionFromContext returns an API version from the context using APIVersionKey.
// It panics if the context value is present but is not a string.
func VersionFromContext(ctx context.Context) (ver string) {
if ctx == nil {
return
}
val := ctx.Value(APIVersionKey)
if val == nil {
return
}
return val.(string)
}
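
A hypothetical handler showing the preamble most routes in this package share; postExample is a stand-in name:

package routes

import (
	"net/http"

	"github.com/docker/docker/api/server/httputils"
	"golang.org/x/net/context"
)

// postExample parses the form, validates the content type, and reads
// the negotiated API version before doing any work.
func postExample(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if err := httputils.ParseForm(r); err != nil {
		return err
	}
	if err := httputils.CheckForJSON(r); err != nil {
		return err
	}
	_ = httputils.VersionFromContext(ctx) // negotiated API version, e.g. "1.25"
	w.WriteHeader(http.StatusNoContent)
	return nil
}

// Compile-time check against the server's adapter type.
var _ httputils.APIFunc = postExample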

View File

@@ -1,17 +0,0 @@
// +build go1.7
package httputils
import (
"encoding/json"
"net/http"
)
// WriteJSON writes the value v to the http response stream as json with standard json encoding.
func WriteJSON(w http.ResponseWriter, code int, v interface{}) error {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(code)
enc := json.NewEncoder(w)
enc.SetEscapeHTML(false)
return enc.Encode(v)
}

View File

@@ -1,16 +0,0 @@
// +build go1.6,!go1.7
package httputils
import (
"encoding/json"
"net/http"
)
// WriteJSON writes the value v to the http response stream as json with standard json encoding.
func WriteJSON(w http.ResponseWriter, code int, v interface{}) error {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(code)
enc := json.NewEncoder(w)
return enc.Encode(v)
}
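
The two build-tagged variants differ only in HTML escaping; a sketch of the observable difference (the comment describes the go1.7 path):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/docker/docker/api/server/httputils"
)

func main() {
	rec := httptest.NewRecorder()
	// On go1.7+, SetEscapeHTML(false) leaves characters such as '<' and '&'
	// verbatim in the JSON body; the go1.6 fallback would emit \u003c and
	// \u0026 instead.
	httputils.WriteJSON(rec, http.StatusOK, map[string]string{"cmd": "a && b <c"})
	fmt.Print(rec.Body.String())
}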

View File

@@ -1,24 +0,0 @@
package server
import (
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/server/middleware"
)
// handlerWithGlobalMiddlewares wraps the handler function for a request with
// the server's global middlewares. The order of the middlewares is backwards,
// meaning that the first in the list will be evaluated last.
func (s *Server) handlerWithGlobalMiddlewares(handler httputils.APIFunc) httputils.APIFunc {
next := handler
for _, m := range s.middlewares {
next = m.WrapHandler(next)
}
if s.cfg.Logging && logrus.GetLevel() == logrus.DebugLevel {
next = middleware.DebugRequestMiddleware(next)
}
return next
}
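
A standalone sketch of that inversion, using the same loop shape; the apiFunc type and tag helper are stand-ins:

package main

import (
	"fmt"
	"net/http"

	"golang.org/x/net/context"
)

type apiFunc func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error

// tag wraps a handler and announces itself, mimicking WrapHandler.
func tag(name string, next apiFunc) apiFunc {
	return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
		fmt.Println("enter", name)
		return next(ctx, w, r, vars)
	}
}

func main() {
	var handler apiFunc = func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
		fmt.Println("handler")
		return nil
	}
	// Same loop shape as handlerWithGlobalMiddlewares above.
	for _, name := range []string{"first", "second"} {
		handler = tag(name, handler)
	}
	handler(context.Background(), nil, nil, nil) // enter second, enter first, handler
}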

View File

@@ -1,37 +0,0 @@
package middleware
import (
"net/http"
"github.com/Sirupsen/logrus"
"golang.org/x/net/context"
)
// CORSMiddleware injects CORS headers into each response
// when it's configured.
type CORSMiddleware struct {
defaultHeaders string
}
// NewCORSMiddleware creates a new CORSMiddleware with default headers.
func NewCORSMiddleware(d string) CORSMiddleware {
return CORSMiddleware{defaultHeaders: d}
}
// WrapHandler returns a new handler function wrapping the previous one in the request chain.
func (c CORSMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
// If "api-cors-header" is not given, but "api-enable-cors" is true, we set cors to "*"
// otherwise, all head values will be passed to HTTP handler
corsHeaders := c.defaultHeaders
if corsHeaders == "" {
corsHeaders = "*"
}
logrus.Debugf("CORS header is enabled and set to: %s", corsHeaders)
w.Header().Add("Access-Control-Allow-Origin", corsHeaders)
w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth")
w.Header().Add("Access-Control-Allow-Methods", "HEAD, GET, POST, DELETE, PUT, OPTIONS")
return handler(ctx, w, r, vars)
}
}

View File

@@ -1,76 +0,0 @@
package middleware
import (
"bufio"
"encoding/json"
"io"
"net/http"
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/pkg/ioutils"
"golang.org/x/net/context"
)
// DebugRequestMiddleware dumps the request to the logger.
func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
logrus.Debugf("Calling %s %s", r.Method, r.RequestURI)
if r.Method != "POST" {
return handler(ctx, w, r, vars)
}
if err := httputils.CheckForJSON(r); err != nil {
return handler(ctx, w, r, vars)
}
maxBodySize := 4096 // 4KB
if r.ContentLength > int64(maxBodySize) {
return handler(ctx, w, r, vars)
}
body := r.Body
bufReader := bufio.NewReaderSize(body, maxBodySize)
r.Body = ioutils.NewReadCloserWrapper(bufReader, func() error { return body.Close() })
b, err := bufReader.Peek(maxBodySize)
if err != io.EOF {
// either there was an error reading, or the buffer is full (in which case the request is too large)
return handler(ctx, w, r, vars)
}
var postForm map[string]interface{}
if err := json.Unmarshal(b, &postForm); err == nil {
maskSecretKeys(postForm)
formStr, errMarshal := json.Marshal(postForm)
if errMarshal == nil {
logrus.Debugf("form data: %s", string(formStr))
} else {
logrus.Debugf("form data: %q", postForm)
}
}
return handler(ctx, w, r, vars)
}
}
func maskSecretKeys(inp interface{}) {
if arr, ok := inp.([]interface{}); ok {
for _, f := range arr {
maskSecretKeys(f)
}
return
}
if form, ok := inp.(map[string]interface{}); ok {
loop0:
for k, v := range form {
for _, m := range []string{"password", "secret", "jointoken", "unlockkey"} {
if strings.EqualFold(m, k) {
form[k] = "*****"
continue loop0
}
}
maskSecretKeys(v)
}
}
}
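
A doc-test-style sketch of the masking behavior; it sits in the same package because the helper is unexported, and the payload is invented:

package middleware

import (
	"encoding/json"
	"fmt"
)

// Example_maskSecretKeys shows sensitive keys being masked recursively,
// including inside nested arrays, with case-insensitive matching.
func Example_maskSecretKeys() {
	var form map[string]interface{}
	json.Unmarshal([]byte(`{"username":"joe","Password":"hunter2","opts":[{"JoinToken":"abc"}]}`), &form)
	maskSecretKeys(form)
	out, _ := json.Marshal(form)
	fmt.Println(string(out))
	// Output: {"Password":"*****","opts":[{"JoinToken":"*****"}],"username":"joe"}
}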

View File

@@ -1,29 +0,0 @@
package middleware
import (
"net/http"
"golang.org/x/net/context"
)
// ExperimentalMiddleware is the middleware in charge of adding the
// 'Docker-Experimental' header to every outgoing response.
type ExperimentalMiddleware struct {
experimental string
}
// NewExperimentalMiddleware creates a new ExperimentalMiddleware
func NewExperimentalMiddleware(experimentalEnabled bool) ExperimentalMiddleware {
if experimentalEnabled {
return ExperimentalMiddleware{"true"}
}
return ExperimentalMiddleware{"false"}
}
// WrapHandler returns a new handler function wrapping the previous one in the request chain.
func (e ExperimentalMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
w.Header().Set("Docker-Experimental", e.experimental)
return handler(ctx, w, r, vars)
}
}

View File

@@ -1,13 +0,0 @@
package middleware
import (
"net/http"
"golang.org/x/net/context"
)
// Middleware is an interface that allows arbitrary types to act as Docker API filters.
// Any type whose WrapHandler method has the appropriate signature can be registered as a middleware.
type Middleware interface {
WrapHandler(func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error
}
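
A hypothetical middleware satisfying the interface; the header name and value are invented:

package middleware

import (
	"net/http"

	"golang.org/x/net/context"
)

// requestIDMiddleware is an implementation sketch: anything with a
// matching WrapHandler method satisfies the interface.
type requestIDMiddleware struct{}

func (requestIDMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
		w.Header().Set("X-Request-Id", "hypothetical-id")
		return handler(ctx, w, r, vars)
	}
}

// Compile-time check.
var _ Middleware = requestIDMiddleware{}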

View File

@@ -1,50 +0,0 @@
package middleware
import (
"fmt"
"net/http"
"runtime"
"github.com/docker/docker/api/errors"
"github.com/docker/docker/api/types/versions"
"golang.org/x/net/context"
)
// VersionMiddleware is a middleware that
// validates the client and server versions.
type VersionMiddleware struct {
serverVersion string
defaultVersion string
minVersion string
}
// NewVersionMiddleware creates a new VersionMiddleware
// with the default versions.
func NewVersionMiddleware(s, d, m string) VersionMiddleware {
return VersionMiddleware{
serverVersion: s,
defaultVersion: d,
minVersion: m,
}
}
// WrapHandler returns a new handler function wrapping the previous one in the request chain.
func (v VersionMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
apiVersion := vars["version"]
if apiVersion == "" {
apiVersion = v.defaultVersion
}
if versions.LessThan(apiVersion, v.minVersion) {
return errors.NewBadRequestError(fmt.Errorf("client version %s is too old. Minimum supported API version is %s, please upgrade your client to a newer version", apiVersion, v.minVersion))
}
header := fmt.Sprintf("Docker/%s (%s)", v.serverVersion, runtime.GOOS)
w.Header().Set("Server", header)
w.Header().Set("API-Version", v.defaultVersion)
ctx = context.WithValue(ctx, "api-version", apiVersion)
return handler(ctx, w, r, vars)
}
}

View File

@@ -1,57 +0,0 @@
package middleware
import (
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/docker/docker/api/server/httputils"
"golang.org/x/net/context"
)
func TestVersionMiddleware(t *testing.T) {
handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if httputils.VersionFromContext(ctx) == "" {
t.Fatalf("Expected version, got empty string")
}
return nil
}
defaultVersion := "1.10.0"
minVersion := "1.2.0"
m := NewVersionMiddleware(defaultVersion, defaultVersion, minVersion)
h := m.WrapHandler(handler)
req, _ := http.NewRequest("GET", "/containers/json", nil)
resp := httptest.NewRecorder()
ctx := context.Background()
if err := h(ctx, resp, req, map[string]string{}); err != nil {
t.Fatal(err)
}
}
func TestVersionMiddlewareWithErrors(t *testing.T) {
handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if httputils.VersionFromContext(ctx) == "" {
t.Fatalf("Expected version, got empty string")
}
return nil
}
defaultVersion := "1.10.0"
minVersion := "1.2.0"
m := NewVersionMiddleware(defaultVersion, defaultVersion, minVersion)
h := m.WrapHandler(handler)
req, _ := http.NewRequest("GET", "/containers/json", nil)
resp := httptest.NewRecorder()
ctx := context.Background()
vars := map[string]string{"version": "0.1"}
err := h(ctx, resp, req, vars)
if !strings.Contains(err.Error(), "client version 0.1 is too old. Minimum supported API version is 1.2.0") {
t.Fatalf("Expected too old client error, got %v", err)
}
}

View File

@@ -1,41 +0,0 @@
package server
import (
"expvar"
"fmt"
"net/http"
"net/http/pprof"
"github.com/gorilla/mux"
)
const debugPathPrefix = "/debug/"
func profilerSetup(mainRouter *mux.Router) {
var r = mainRouter.PathPrefix(debugPathPrefix).Subrouter()
r.HandleFunc("/vars", expVars)
r.HandleFunc("/pprof/", pprof.Index)
r.HandleFunc("/pprof/cmdline", pprof.Cmdline)
r.HandleFunc("/pprof/profile", pprof.Profile)
r.HandleFunc("/pprof/symbol", pprof.Symbol)
r.HandleFunc("/pprof/trace", pprof.Trace)
r.HandleFunc("/pprof/block", pprof.Handler("block").ServeHTTP)
r.HandleFunc("/pprof/heap", pprof.Handler("heap").ServeHTTP)
r.HandleFunc("/pprof/goroutine", pprof.Handler("goroutine").ServeHTTP)
r.HandleFunc("/pprof/threadcreate", pprof.Handler("threadcreate").ServeHTTP)
}
// Replicated from expvar.go, since the original is not exported.
func expVars(w http.ResponseWriter, r *http.Request) {
first := true
w.Header().Set("Content-Type", "application/json; charset=utf-8")
fmt.Fprintf(w, "{\n")
expvar.Do(func(kv expvar.KeyValue) {
if !first {
fmt.Fprintf(w, ",\n")
}
first = false
fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
})
fmt.Fprintf(w, "\n}\n")
}
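
A sketch of how these routes might be mounted and served; serveDebug and the address are hypothetical, while in-tree profilerSetup is called by the API server when the daemon runs in debug mode:

package server

import (
	"net/http"

	"github.com/gorilla/mux"
)

// serveDebug mounts the profiler routes on a fresh router and serves them.
func serveDebug() error {
	r := mux.NewRouter()
	profilerSetup(r)
	// After this, `go tool pprof http://localhost:6060/debug/pprof/heap`
	// and `curl http://localhost:6060/debug/vars` work as usual.
	return http.ListenAndServe("localhost:6060", r)
}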

View File

@@ -1,20 +0,0 @@
package build
import (
"io"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"golang.org/x/net/context"
)
// Backend abstracts an image builder whose only purpose is to build an image referenced by an imageID.
type Backend interface {
// Build builds a Docker image referenced by an imageID string.
//
// Note: Tagging an image should not be done by a Builder; it should instead be done
// by the caller.
//
// TODO: make this return a reference instead of string
BuildFromContext(ctx context.Context, src io.ReadCloser, remote string, buildOptions *types.ImageBuildOptions, pg backend.ProgressWriter) (string, error)
}

View File

@@ -1,29 +0,0 @@
package build
import "github.com/docker/docker/api/server/router"
// buildRouter is a router to talk with the build controller
type buildRouter struct {
backend Backend
routes []router.Route
}
// NewRouter initializes a new build router
func NewRouter(b Backend) router.Router {
r := &buildRouter{
backend: b,
}
r.initRoutes()
return r
}
// Routes returns the available routes to the build controller
func (r *buildRouter) Routes() []router.Route {
return r.routes
}
func (r *buildRouter) initRoutes() {
r.routes = []router.Route{
router.Cancellable(router.NewPostRoute("/build", r.postBuild)),
}
}

View File

@@ -1,225 +0,0 @@
package build
import (
"bytes"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net/http"
"runtime"
"strconv"
"strings"
"sync"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/progress"
"github.com/docker/docker/pkg/streamformatter"
"github.com/docker/go-units"
"golang.org/x/net/context"
)
func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBuildOptions, error) {
version := httputils.VersionFromContext(ctx)
options := &types.ImageBuildOptions{}
if httputils.BoolValue(r, "forcerm") && versions.GreaterThanOrEqualTo(version, "1.12") {
options.Remove = true
} else if r.FormValue("rm") == "" && versions.GreaterThanOrEqualTo(version, "1.12") {
options.Remove = true
} else {
options.Remove = httputils.BoolValue(r, "rm")
}
if httputils.BoolValue(r, "pull") && versions.GreaterThanOrEqualTo(version, "1.16") {
options.PullParent = true
}
options.Dockerfile = r.FormValue("dockerfile")
options.SuppressOutput = httputils.BoolValue(r, "q")
options.NoCache = httputils.BoolValue(r, "nocache")
options.ForceRemove = httputils.BoolValue(r, "forcerm")
options.MemorySwap = httputils.Int64ValueOrZero(r, "memswap")
options.Memory = httputils.Int64ValueOrZero(r, "memory")
options.CPUShares = httputils.Int64ValueOrZero(r, "cpushares")
options.CPUPeriod = httputils.Int64ValueOrZero(r, "cpuperiod")
options.CPUQuota = httputils.Int64ValueOrZero(r, "cpuquota")
options.CPUSetCPUs = r.FormValue("cpusetcpus")
options.CPUSetMems = r.FormValue("cpusetmems")
options.CgroupParent = r.FormValue("cgroupparent")
options.NetworkMode = r.FormValue("networkmode")
options.Tags = r.Form["t"]
options.SecurityOpt = r.Form["securityopt"]
options.Squash = httputils.BoolValue(r, "squash")
if r.Form.Get("shmsize") != "" {
shmSize, err := strconv.ParseInt(r.Form.Get("shmsize"), 10, 64)
if err != nil {
return nil, err
}
options.ShmSize = shmSize
}
if i := container.Isolation(r.FormValue("isolation")); i != "" {
if !container.Isolation.IsValid(i) {
return nil, fmt.Errorf("Unsupported isolation: %q", i)
}
options.Isolation = i
}
if runtime.GOOS != "windows" && options.SecurityOpt != nil {
return nil, fmt.Errorf("the daemon on this platform does not support --security-opt to build")
}
var buildUlimits = []*units.Ulimit{}
ulimitsJSON := r.FormValue("ulimits")
if ulimitsJSON != "" {
if err := json.Unmarshal([]byte(ulimitsJSON), &buildUlimits); err != nil {
return nil, err
}
options.Ulimits = buildUlimits
}
var buildArgs = map[string]*string{}
buildArgsJSON := r.FormValue("buildargs")
// Note that there are two ways a --build-arg might appear in the
// json of the query param:
// "foo":"bar"
// and "foo":nil
// The first is the normal case, i.e. --build-arg foo=bar
// or --build-arg foo
// where foo's value was picked up from an env var.
// The second ("foo":nil) is where they put --build-arg foo
// but "foo" isn't set as an env var. In that case we can't just drop
// the fact they mentioned it, we need to pass that along to the builder
// so that it can print a warning about "foo" being unused if there is
// no "ARG foo" in the Dockerfile.
if buildArgsJSON != "" {
if err := json.Unmarshal([]byte(buildArgsJSON), &buildArgs); err != nil {
return nil, err
}
options.BuildArgs = buildArgs
}
var labels = map[string]string{}
labelsJSON := r.FormValue("labels")
if labelsJSON != "" {
if err := json.Unmarshal([]byte(labelsJSON), &labels); err != nil {
return nil, err
}
options.Labels = labels
}
var cacheFrom = []string{}
cacheFromJSON := r.FormValue("cachefrom")
if cacheFromJSON != "" {
if err := json.Unmarshal([]byte(cacheFromJSON), &cacheFrom); err != nil {
return nil, err
}
options.CacheFrom = cacheFrom
}
return options, nil
}
type syncWriter struct {
w io.Writer
mu sync.Mutex
}
func (s *syncWriter) Write(b []byte) (count int, err error) {
s.mu.Lock()
count, err = s.w.Write(b)
s.mu.Unlock()
return
}
func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var (
authConfigs = map[string]types.AuthConfig{}
authConfigsEncoded = r.Header.Get("X-Registry-Config")
notVerboseBuffer = bytes.NewBuffer(nil)
)
if authConfigsEncoded != "" {
authConfigsJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authConfigsEncoded))
if err := json.NewDecoder(authConfigsJSON).Decode(&authConfigs); err != nil {
// For a pull it is not an error if no auth was given;
// to increase compatibility with the existing API the
// auth configs simply default to being empty.
}
}
w.Header().Set("Content-Type", "application/json")
output := ioutils.NewWriteFlusher(w)
defer output.Close()
sf := streamformatter.NewJSONStreamFormatter()
errf := func(err error) error {
if httputils.BoolValue(r, "q") && notVerboseBuffer.Len() > 0 {
output.Write(notVerboseBuffer.Bytes())
}
// Do not write the error in the http output if it's still empty.
// This prevents from writing a 200(OK) when there is an internal error.
if !output.Flushed() {
return err
}
_, err = w.Write(sf.FormatError(err))
if err != nil {
logrus.Warnf("could not write error response: %v", err)
}
return nil
}
buildOptions, err := newImageBuildOptions(ctx, r)
if err != nil {
return errf(err)
}
buildOptions.AuthConfigs = authConfigs
remoteURL := r.FormValue("remote")
// Currently only used if the context comes from a remote URL.
// Look at code in DetectContextFromRemoteURL for more information.
createProgressReader := func(in io.ReadCloser) io.ReadCloser {
progressOutput := sf.NewProgressOutput(output, true)
if buildOptions.SuppressOutput {
progressOutput = sf.NewProgressOutput(notVerboseBuffer, true)
}
return progress.NewProgressReader(in, progressOutput, r.ContentLength, "Downloading context", remoteURL)
}
out := io.Writer(output)
if buildOptions.SuppressOutput {
out = notVerboseBuffer
}
out = &syncWriter{w: out}
stdout := &streamformatter.StdoutFormatter{Writer: out, StreamFormatter: sf}
stderr := &streamformatter.StderrFormatter{Writer: out, StreamFormatter: sf}
pg := backend.ProgressWriter{
Output: out,
StdoutFormatter: stdout,
StderrFormatter: stderr,
ProgressReaderFunc: createProgressReader,
}
imgID, err := br.backend.BuildFromContext(ctx, r.Body, remoteURL, buildOptions, pg)
if err != nil {
return errf(err)
}
// Everything worked, so if -q was provided the output from the daemon
// should be just the image ID, and we print that to stdout.
if buildOptions.SuppressOutput {
stdout := &streamformatter.StdoutFormatter{Writer: output, StreamFormatter: sf}
fmt.Fprintf(stdout, "%s\n", string(imgID))
}
return nil
}

View File

@@ -1,10 +0,0 @@
package checkpoint
import "github.com/docker/docker/api/types"
// Backend abstracts the checkpoint operations required by this router.
type Backend interface {
CheckpointCreate(container string, config types.CheckpointCreateOptions) error
CheckpointDelete(container string, config types.CheckpointDeleteOptions) error
CheckpointList(container string, config types.CheckpointListOptions) ([]types.Checkpoint, error)
}

View File

@@ -1,36 +0,0 @@
package checkpoint
import (
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/server/router"
)
// checkpointRouter is a router to talk with the checkpoint controller
type checkpointRouter struct {
backend Backend
decoder httputils.ContainerDecoder
routes []router.Route
}
// NewRouter initializes a new checkpoint router
func NewRouter(b Backend, decoder httputils.ContainerDecoder) router.Router {
r := &checkpointRouter{
backend: b,
decoder: decoder,
}
r.initRoutes()
return r
}
// Routes returns the available routes to the checkpoint controller
func (r *checkpointRouter) Routes() []router.Route {
return r.routes
}
func (r *checkpointRouter) initRoutes() {
r.routes = []router.Route{
router.Experimental(router.NewGetRoute("/containers/{name:.*}/checkpoints", r.getContainerCheckpoints)),
router.Experimental(router.NewPostRoute("/containers/{name:.*}/checkpoints", r.postContainerCheckpoint)),
router.Experimental(router.NewDeleteRoute("/containers/{name}/checkpoints/{checkpoint}", r.deleteContainerCheckpoint)),
}
}

View File

@@ -1,65 +0,0 @@
package checkpoint
import (
"encoding/json"
"net/http"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types"
"golang.org/x/net/context"
)
func (s *checkpointRouter) postContainerCheckpoint(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
var options types.CheckpointCreateOptions
decoder := json.NewDecoder(r.Body)
if err := decoder.Decode(&options); err != nil {
return err
}
err := s.backend.CheckpointCreate(vars["name"], options)
if err != nil {
return err
}
w.WriteHeader(http.StatusCreated)
return nil
}
func (s *checkpointRouter) getContainerCheckpoints(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
checkpoints, err := s.backend.CheckpointList(vars["name"], types.CheckpointListOptions{
CheckpointDir: r.Form.Get("dir"),
})
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusOK, checkpoints)
}
func (s *checkpointRouter) deleteContainerCheckpoint(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
err := s.backend.CheckpointDelete(vars["name"], types.CheckpointDeleteOptions{
CheckpointDir: r.Form.Get("dir"),
CheckpointID: vars["checkpoint"],
})
if err != nil {
return err
}
w.WriteHeader(http.StatusNoContent)
return nil
}

View File

@@ -1,79 +0,0 @@
package container
import (
"io"
"time"
"golang.org/x/net/context"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/pkg/archive"
)
// execBackend includes the functions a backend must implement to provide exec functionality.
type execBackend interface {
ContainerExecCreate(name string, config *types.ExecConfig) (string, error)
ContainerExecInspect(id string) (*backend.ExecInspect, error)
ContainerExecResize(name string, height, width int) error
ContainerExecStart(ctx context.Context, name string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) error
ExecExists(name string) (bool, error)
}
// copyBackend includes the functions a backend must implement to provide container copy functionality.
type copyBackend interface {
ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error)
ContainerCopy(name string, res string) (io.ReadCloser, error)
ContainerExport(name string, out io.Writer) error
ContainerExtractToDir(name, path string, noOverwriteDirNonDir bool, content io.Reader) error
ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error)
}
// stateBackend includes the functions a backend must implement to provide container state lifecycle functionality.
type stateBackend interface {
ContainerCreate(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error)
ContainerKill(name string, sig uint64) error
ContainerPause(name string) error
ContainerRename(oldName, newName string) error
ContainerResize(name string, height, width int) error
ContainerRestart(name string, seconds *int) error
ContainerRm(name string, config *types.ContainerRmConfig) error
ContainerStart(name string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error
ContainerStop(name string, seconds *int) error
ContainerUnpause(name string) error
ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error)
ContainerWait(name string, timeout time.Duration) (int, error)
}
// monitorBackend includes the functions a backend must implement to provide container monitoring functionality.
type monitorBackend interface {
ContainerChanges(name string) ([]archive.Change, error)
ContainerInspect(name string, size bool, version string) (interface{}, error)
ContainerLogs(ctx context.Context, name string, config *backend.ContainerLogsConfig, started chan struct{}) error
ContainerStats(ctx context.Context, name string, config *backend.ContainerStatsConfig) error
ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error)
Containers(config *types.ContainerListOptions) ([]*types.Container, error)
}
// attachBackend includes the functions a backend must implement to provide container attach functionality.
type attachBackend interface {
ContainerAttach(name string, c *backend.ContainerAttachConfig) error
}
// systemBackend includes the functions a backend must implement to provide system-wide container functionality.
type systemBackend interface {
ContainersPrune(pruneFilters filters.Args) (*types.ContainersPruneReport, error)
}
// Backend is all the methods that need to be implemented to provide container-specific functionality.
type Backend interface {
execBackend
copyBackend
stateBackend
monitorBackend
attachBackend
systemBackend
}

View File

@@ -1,77 +0,0 @@
package container
import (
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/server/router"
)
type validationError struct {
error
}
func (validationError) IsValidationError() bool {
return true
}
// containerRouter is a router to talk with the container controller
type containerRouter struct {
backend Backend
decoder httputils.ContainerDecoder
routes []router.Route
}
// NewRouter initializes a new container router
func NewRouter(b Backend, decoder httputils.ContainerDecoder) router.Router {
r := &containerRouter{
backend: b,
decoder: decoder,
}
r.initRoutes()
return r
}
// Routes returns the available routes to the container controller
func (r *containerRouter) Routes() []router.Route {
return r.routes
}
// initRoutes initializes the routes in container router
func (r *containerRouter) initRoutes() {
r.routes = []router.Route{
// HEAD
router.NewHeadRoute("/containers/{name:.*}/archive", r.headContainersArchive),
// GET
router.NewGetRoute("/containers/json", r.getContainersJSON),
router.NewGetRoute("/containers/{name:.*}/export", r.getContainersExport),
router.NewGetRoute("/containers/{name:.*}/changes", r.getContainersChanges),
router.NewGetRoute("/containers/{name:.*}/json", r.getContainersByName),
router.NewGetRoute("/containers/{name:.*}/top", r.getContainersTop),
router.Cancellable(router.NewGetRoute("/containers/{name:.*}/logs", r.getContainersLogs)),
router.Cancellable(router.NewGetRoute("/containers/{name:.*}/stats", r.getContainersStats)),
router.NewGetRoute("/containers/{name:.*}/attach/ws", r.wsContainersAttach),
router.NewGetRoute("/exec/{id:.*}/json", r.getExecByID),
router.NewGetRoute("/containers/{name:.*}/archive", r.getContainersArchive),
// POST
router.NewPostRoute("/containers/create", r.postContainersCreate),
router.NewPostRoute("/containers/{name:.*}/kill", r.postContainersKill),
router.NewPostRoute("/containers/{name:.*}/pause", r.postContainersPause),
router.NewPostRoute("/containers/{name:.*}/unpause", r.postContainersUnpause),
router.NewPostRoute("/containers/{name:.*}/restart", r.postContainersRestart),
router.NewPostRoute("/containers/{name:.*}/start", r.postContainersStart),
router.NewPostRoute("/containers/{name:.*}/stop", r.postContainersStop),
router.NewPostRoute("/containers/{name:.*}/wait", r.postContainersWait),
router.NewPostRoute("/containers/{name:.*}/resize", r.postContainersResize),
router.NewPostRoute("/containers/{name:.*}/attach", r.postContainersAttach),
router.NewPostRoute("/containers/{name:.*}/copy", r.postContainersCopy), // Deprecated since 1.8, Errors out since 1.12
router.NewPostRoute("/containers/{name:.*}/exec", r.postContainerExecCreate),
router.NewPostRoute("/exec/{name:.*}/start", r.postContainerExecStart),
router.NewPostRoute("/exec/{name:.*}/resize", r.postContainerExecResize),
router.NewPostRoute("/containers/{name:.*}/rename", r.postContainerRename),
router.NewPostRoute("/containers/{name:.*}/update", r.postContainerUpdate),
router.NewPostRoute("/containers/prune", r.postContainersPrune),
// PUT
router.NewPutRoute("/containers/{name:.*}/archive", r.putContainersArchive),
// DELETE
router.NewDeleteRoute("/containers/{name:.*}", r.deleteContainers),
}
}

View File

@@ -1,554 +0,0 @@
package container
import (
"encoding/json"
"fmt"
"io"
"net/http"
"strconv"
"syscall"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/signal"
"golang.org/x/net/context"
"golang.org/x/net/websocket"
)
func (s *containerRouter) getContainersJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
filter, err := filters.FromParam(r.Form.Get("filters"))
if err != nil {
return err
}
config := &types.ContainerListOptions{
All: httputils.BoolValue(r, "all"),
Size: httputils.BoolValue(r, "size"),
Since: r.Form.Get("since"),
Before: r.Form.Get("before"),
Filters: filter,
}
if tmpLimit := r.Form.Get("limit"); tmpLimit != "" {
limit, err := strconv.Atoi(tmpLimit)
if err != nil {
return err
}
config.Limit = limit
}
containers, err := s.backend.Containers(config)
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusOK, containers)
}
func (s *containerRouter) getContainersStats(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
stream := httputils.BoolValueOrDefault(r, "stream", true)
if !stream {
w.Header().Set("Content-Type", "application/json")
}
config := &backend.ContainerStatsConfig{
Stream: stream,
OutStream: w,
Version: string(httputils.VersionFromContext(ctx)),
}
return s.backend.ContainerStats(ctx, vars["name"], config)
}
func (s *containerRouter) getContainersLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
// Args are validated before the stream starts because when it starts we're
// sending HTTP 200 by writing an empty chunk of data to tell the client that
// the daemon is going to stream. Once this initial HTTP 200 is sent, we can't
// report any error that occurs after the stream starts (e.g. container not
// found, wrong parameters) with the appropriate status code.
stdout, stderr := httputils.BoolValue(r, "stdout"), httputils.BoolValue(r, "stderr")
if !(stdout || stderr) {
return fmt.Errorf("Bad parameters: you must choose at least one stream")
}
containerName := vars["name"]
logsConfig := &backend.ContainerLogsConfig{
ContainerLogsOptions: types.ContainerLogsOptions{
Follow: httputils.BoolValue(r, "follow"),
Timestamps: httputils.BoolValue(r, "timestamps"),
Since: r.Form.Get("since"),
Tail: r.Form.Get("tail"),
ShowStdout: stdout,
ShowStderr: stderr,
Details: httputils.BoolValue(r, "details"),
},
OutStream: w,
}
chStarted := make(chan struct{})
if err := s.backend.ContainerLogs(ctx, containerName, logsConfig, chStarted); err != nil {
select {
case <-chStarted:
// The client may be expecting all of the data we're sending to
// be multiplexed, so send it through OutStream, which will
// have been set up to handle that if needed.
fmt.Fprintf(logsConfig.OutStream, "Error running logs job: %v\n", err)
default:
return err
}
}
return nil
}
func (s *containerRouter) getContainersExport(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
return s.backend.ContainerExport(vars["name"], w)
}
func (s *containerRouter) postContainersStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
// If contentLength is -1, we can assume chunked encoding,
// or more technically that the length is unknown
// https://golang.org/src/pkg/net/http/request.go#L139
// net/http otherwise seems to swallow any headers related to chunked encoding
// including r.TransferEncoding
// allow a nil body for backwards compatibility
version := httputils.VersionFromContext(ctx)
var hostConfig *container.HostConfig
// A non-nil json object is at least 7 characters.
if r.ContentLength > 7 || r.ContentLength == -1 {
if versions.GreaterThanOrEqualTo(version, "1.24") {
return validationError{fmt.Errorf("starting container with non-empty request body was deprecated since v1.10 and removed in v1.12")}
}
if err := httputils.CheckForJSON(r); err != nil {
return err
}
c, err := s.decoder.DecodeHostConfig(r.Body)
if err != nil {
return err
}
hostConfig = c
}
if err := httputils.ParseForm(r); err != nil {
return err
}
checkpoint := r.Form.Get("checkpoint")
checkpointDir := r.Form.Get("checkpoint-dir")
if err := s.backend.ContainerStart(vars["name"], hostConfig, checkpoint, checkpointDir); err != nil {
return err
}
w.WriteHeader(http.StatusNoContent)
return nil
}
func (s *containerRouter) postContainersStop(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
var seconds *int
if tmpSeconds := r.Form.Get("t"); tmpSeconds != "" {
valSeconds, err := strconv.Atoi(tmpSeconds)
if err != nil {
return err
}
seconds = &valSeconds
}
if err := s.backend.ContainerStop(vars["name"], seconds); err != nil {
return err
}
w.WriteHeader(http.StatusNoContent)
return nil
}
type errContainerIsRunning interface {
ContainerIsRunning() bool
}
func (s *containerRouter) postContainersKill(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
var sig syscall.Signal
name := vars["name"]
// If we have a signal, look at it. Otherwise, do nothing
if sigStr := r.Form.Get("signal"); sigStr != "" {
var err error
if sig, err = signal.ParseSignal(sigStr); err != nil {
return err
}
}
if err := s.backend.ContainerKill(name, uint64(sig)); err != nil {
var isStopped bool
if e, ok := err.(errContainerIsRunning); ok {
isStopped = !e.ContainerIsRunning()
}
// Return the error unless it was caused by the container already being stopped.
// On API >= 1.20 the error is returned even in that case,
// to keep backwards compatibility.
version := httputils.VersionFromContext(ctx)
if versions.GreaterThanOrEqualTo(version, "1.20") || !isStopped {
return fmt.Errorf("Cannot kill container %s: %v", name, err)
}
}
w.WriteHeader(http.StatusNoContent)
return nil
}
func (s *containerRouter) postContainersRestart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
var seconds *int
if tmpSeconds := r.Form.Get("t"); tmpSeconds != "" {
valSeconds, err := strconv.Atoi(tmpSeconds)
if err != nil {
return err
}
seconds = &valSeconds
}
if err := s.backend.ContainerRestart(vars["name"], seconds); err != nil {
return err
}
w.WriteHeader(http.StatusNoContent)
return nil
}
func (s *containerRouter) postContainersPause(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
if err := s.backend.ContainerPause(vars["name"]); err != nil {
return err
}
w.WriteHeader(http.StatusNoContent)
return nil
}
func (s *containerRouter) postContainersUnpause(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
if err := s.backend.ContainerUnpause(vars["name"]); err != nil {
return err
}
w.WriteHeader(http.StatusNoContent)
return nil
}
func (s *containerRouter) postContainersWait(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
status, err := s.backend.ContainerWait(vars["name"], -1*time.Second)
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusOK, &container.ContainerWaitOKBody{
StatusCode: int64(status),
})
}
func (s *containerRouter) getContainersChanges(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
changes, err := s.backend.ContainerChanges(vars["name"])
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusOK, changes)
}
func (s *containerRouter) getContainersTop(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
procList, err := s.backend.ContainerTop(vars["name"], r.Form.Get("ps_args"))
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusOK, procList)
}
func (s *containerRouter) postContainerRename(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
name := vars["name"]
newName := r.Form.Get("name")
if err := s.backend.ContainerRename(name, newName); err != nil {
return err
}
w.WriteHeader(http.StatusNoContent)
return nil
}
func (s *containerRouter) postContainerUpdate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
if err := httputils.CheckForJSON(r); err != nil {
return err
}
var updateConfig container.UpdateConfig
decoder := json.NewDecoder(r.Body)
if err := decoder.Decode(&updateConfig); err != nil {
return err
}
hostConfig := &container.HostConfig{
Resources: updateConfig.Resources,
RestartPolicy: updateConfig.RestartPolicy,
}
name := vars["name"]
resp, err := s.backend.ContainerUpdate(name, hostConfig)
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusOK, resp)
}
func (s *containerRouter) postContainersCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
if err := httputils.CheckForJSON(r); err != nil {
return err
}
name := r.Form.Get("name")
config, hostConfig, networkingConfig, err := s.decoder.DecodeConfig(r.Body)
if err != nil {
return err
}
version := httputils.VersionFromContext(ctx)
adjustCPUShares := versions.LessThan(version, "1.19")
ccr, err := s.backend.ContainerCreate(types.ContainerCreateConfig{
Name: name,
Config: config,
HostConfig: hostConfig,
NetworkingConfig: networkingConfig,
AdjustCPUShares: adjustCPUShares,
})
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusCreated, ccr)
}
func (s *containerRouter) deleteContainers(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
name := vars["name"]
config := &types.ContainerRmConfig{
ForceRemove: httputils.BoolValue(r, "force"),
RemoveVolume: httputils.BoolValue(r, "v"),
RemoveLink: httputils.BoolValue(r, "link"),
}
if err := s.backend.ContainerRm(name, config); err != nil {
return err
}
w.WriteHeader(http.StatusNoContent)
return nil
}
func (s *containerRouter) postContainersResize(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
height, err := strconv.Atoi(r.Form.Get("h"))
if err != nil {
return err
}
width, err := strconv.Atoi(r.Form.Get("w"))
if err != nil {
return err
}
return s.backend.ContainerResize(vars["name"], height, width)
}
func (s *containerRouter) postContainersAttach(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
err := httputils.ParseForm(r)
if err != nil {
return err
}
containerName := vars["name"]
_, upgrade := r.Header["Upgrade"]
detachKeys := r.FormValue("detachKeys")
hijacker, ok := w.(http.Hijacker)
if !ok {
return fmt.Errorf("error attaching to container %s, hijack connection missing", containerName)
}
setupStreams := func() (io.ReadCloser, io.Writer, io.Writer, error) {
conn, _, err := hijacker.Hijack()
if err != nil {
return nil, nil, nil, err
}
// set raw mode
conn.Write([]byte{})
if upgrade {
fmt.Fprintf(conn, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n")
} else {
fmt.Fprintf(conn, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
}
closer := func() error {
httputils.CloseStreams(conn)
return nil
}
return ioutils.NewReadCloserWrapper(conn, closer), conn, conn, nil
}
attachConfig := &backend.ContainerAttachConfig{
GetStreams: setupStreams,
UseStdin: httputils.BoolValue(r, "stdin"),
UseStdout: httputils.BoolValue(r, "stdout"),
UseStderr: httputils.BoolValue(r, "stderr"),
Logs: httputils.BoolValue(r, "logs"),
Stream: httputils.BoolValue(r, "stream"),
DetachKeys: detachKeys,
MuxStreams: true,
}
if err = s.backend.ContainerAttach(containerName, attachConfig); err != nil {
logrus.Errorf("Handler for %s %s returned error: %v", r.Method, r.URL.Path, err)
// Remember to close stream if error happens
conn, _, errHijack := hijacker.Hijack()
if errHijack == nil {
statusCode := httputils.GetHTTPErrorStatusCode(err)
statusText := http.StatusText(statusCode)
fmt.Fprintf(conn, "HTTP/1.1 %d %s\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n%s\r\n", statusCode, statusText, err.Error())
httputils.CloseStreams(conn)
} else {
logrus.Errorf("Error Hijacking: %v", err)
}
}
return nil
}
func (s *containerRouter) wsContainersAttach(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
containerName := vars["name"]
var err error
detachKeys := r.FormValue("detachKeys")
done := make(chan struct{})
started := make(chan struct{})
setupStreams := func() (io.ReadCloser, io.Writer, io.Writer, error) {
wsChan := make(chan *websocket.Conn)
h := func(conn *websocket.Conn) {
wsChan <- conn
<-done
}
srv := websocket.Server{Handler: h, Handshake: nil}
go func() {
close(started)
srv.ServeHTTP(w, r)
}()
conn := <-wsChan
return conn, conn, conn, nil
}
attachConfig := &backend.ContainerAttachConfig{
GetStreams: setupStreams,
Logs: httputils.BoolValue(r, "logs"),
Stream: httputils.BoolValue(r, "stream"),
DetachKeys: detachKeys,
UseStdin: true,
UseStdout: true,
UseStderr: true,
MuxStreams: false, // TODO: this should be true since it's a single stream for both stdout and stderr
}
err = s.backend.ContainerAttach(containerName, attachConfig)
close(done)
select {
case <-started:
logrus.Errorf("Error attaching websocket: %s", err)
return nil
default:
}
return err
}
func (s *containerRouter) postContainersPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
pruneFilters, err := filters.FromParam(r.Form.Get("filters"))
if err != nil {
return err
}
pruneReport, err := s.backend.ContainersPrune(pruneFilters)
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusOK, pruneReport)
}

View File

@@ -1,119 +0,0 @@
package container
import (
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"strings"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/versions"
"golang.org/x/net/context"
)
// postContainersCopy is deprecated in favor of getContainersArchive.
func (s *containerRouter) postContainersCopy(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
// Deprecated since 1.8, Errors out since 1.12
version := httputils.VersionFromContext(ctx)
if versions.GreaterThanOrEqualTo(version, "1.24") {
w.WriteHeader(http.StatusNotFound)
return nil
}
if err := httputils.CheckForJSON(r); err != nil {
return err
}
cfg := types.CopyConfig{}
if err := json.NewDecoder(r.Body).Decode(&cfg); err != nil {
return err
}
if cfg.Resource == "" {
return fmt.Errorf("Path cannot be empty")
}
data, err := s.backend.ContainerCopy(vars["name"], cfg.Resource)
if err != nil {
if strings.Contains(strings.ToLower(err.Error()), "no such container") {
w.WriteHeader(http.StatusNotFound)
return nil
}
if os.IsNotExist(err) {
return fmt.Errorf("Could not find the file %s in container %s", cfg.Resource, vars["name"])
}
return err
}
defer data.Close()
w.Header().Set("Content-Type", "application/x-tar")
if _, err := io.Copy(w, data); err != nil {
return err
}
return nil
}
// Encode the stat to JSON, base64 encode it, and place it in a header.
func setContainerPathStatHeader(stat *types.ContainerPathStat, header http.Header) error {
statJSON, err := json.Marshal(stat)
if err != nil {
return err
}
header.Set(
"X-Docker-Container-Path-Stat",
base64.StdEncoding.EncodeToString(statJSON),
)
return nil
}
func (s *containerRouter) headContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
v, err := httputils.ArchiveFormValues(r, vars)
if err != nil {
return err
}
stat, err := s.backend.ContainerStatPath(v.Name, v.Path)
if err != nil {
return err
}
return setContainerPathStatHeader(stat, w.Header())
}
func (s *containerRouter) getContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
v, err := httputils.ArchiveFormValues(r, vars)
if err != nil {
return err
}
tarArchive, stat, err := s.backend.ContainerArchivePath(v.Name, v.Path)
if err != nil {
return err
}
defer tarArchive.Close()
if err := setContainerPathStatHeader(stat, w.Header()); err != nil {
return err
}
w.Header().Set("Content-Type", "application/x-tar")
_, err = io.Copy(w, tarArchive)
return err
}
func (s *containerRouter) putContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
v, err := httputils.ArchiveFormValues(r, vars)
if err != nil {
return err
}
noOverwriteDirNonDir := httputils.BoolValue(r, "noOverwriteDirNonDir")
return s.backend.ContainerExtractToDir(v.Name, v.Path, noOverwriteDirNonDir, r.Body)
}
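
A hypothetical client-side counterpart that reverses the encoding above; statFromResponse is an invented name:

package client

import (
	"encoding/base64"
	"encoding/json"
	"net/http"

	"github.com/docker/docker/api/types"
)

// statFromResponse reverses the base64+JSON encoding performed by
// setContainerPathStatHeader on the daemon side.
func statFromResponse(resp *http.Response) (*types.ContainerPathStat, error) {
	raw, err := base64.StdEncoding.DecodeString(resp.Header.Get("X-Docker-Container-Path-Stat"))
	if err != nil {
		return nil, err
	}
	var stat types.ContainerPathStat
	if err := json.Unmarshal(raw, &stat); err != nil {
		return nil, err
	}
	return &stat, nil
}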

View File

@@ -1,140 +0,0 @@
package container
import (
"encoding/json"
"fmt"
"io"
"net/http"
"strconv"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/pkg/stdcopy"
"golang.org/x/net/context"
)
func (s *containerRouter) getExecByID(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
eConfig, err := s.backend.ContainerExecInspect(vars["id"])
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusOK, eConfig)
}
func (s *containerRouter) postContainerExecCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
if err := httputils.CheckForJSON(r); err != nil {
return err
}
name := vars["name"]
execConfig := &types.ExecConfig{}
if err := json.NewDecoder(r.Body).Decode(execConfig); err != nil {
return err
}
if len(execConfig.Cmd) == 0 {
return fmt.Errorf("No exec command specified")
}
// Register an instance of Exec in container.
id, err := s.backend.ContainerExecCreate(name, execConfig)
if err != nil {
logrus.Errorf("Error setting up exec command in container %s: %v", name, err)
return err
}
return httputils.WriteJSON(w, http.StatusCreated, &types.IDResponse{
ID: id,
})
}
// TODO(vishh): Refactor the code to avoid having to specify stream config as part of both create and start.
func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
version := httputils.VersionFromContext(ctx)
if versions.GreaterThan(version, "1.21") {
if err := httputils.CheckForJSON(r); err != nil {
return err
}
}
var (
execName = vars["name"]
stdin, inStream io.ReadCloser
stdout, stderr, outStream io.Writer
)
execStartCheck := &types.ExecStartCheck{}
if err := json.NewDecoder(r.Body).Decode(execStartCheck); err != nil {
return err
}
if exists, err := s.backend.ExecExists(execName); !exists {
return err
}
if !execStartCheck.Detach {
var err error
// Setting up the streaming http interface.
inStream, outStream, err = httputils.HijackConnection(w)
if err != nil {
return err
}
defer httputils.CloseStreams(inStream, outStream)
if _, ok := r.Header["Upgrade"]; ok {
fmt.Fprint(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n")
} else {
fmt.Fprint(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n")
}
// copy headers that were removed as part of hijack
if err := w.Header().WriteSubset(outStream, nil); err != nil {
return err
}
fmt.Fprint(outStream, "\r\n")
stdin = inStream
stdout = outStream
if !execStartCheck.Tty {
stderr = stdcopy.NewStdWriter(outStream, stdcopy.Stderr)
stdout = stdcopy.NewStdWriter(outStream, stdcopy.Stdout)
}
}
// Now run the user process in container.
// Maybe we should pass ctx here if we're not detaching?
if err := s.backend.ContainerExecStart(context.Background(), execName, stdin, stdout, stderr); err != nil {
if execStartCheck.Detach {
return err
}
stdout.Write([]byte(err.Error() + "\r\n"))
logrus.Errorf("Error running exec in container: %v", err)
}
return nil
}
func (s *containerRouter) postContainerExecResize(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
height, err := strconv.Atoi(r.Form.Get("h"))
if err != nil {
return err
}
width, err := strconv.Atoi(r.Form.Get("w"))
if err != nil {
return err
}
return s.backend.ContainerExecResize(vars["name"], height, width)
}
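
When Tty is false, postContainerExecStart above multiplexes stdout and stderr onto the single hijacked stream via stdcopy.NewStdWriter; the receiving side separates them again with stdcopy.StdCopy from the same package. A minimal sketch, assuming conn already carries the raw exec output:

package main

import (
	"io"
	"os"

	"github.com/docker/docker/pkg/stdcopy"
)

// demux copies a multiplexed exec stream to the local stdout and stderr.
// Each frame starts with an 8-byte header (stream id, padding, big-endian
// payload length) that StdCopy decodes before routing the payload.
func demux(conn io.Reader) error {
	_, err := stdcopy.StdCopy(os.Stdout, os.Stderr, conn)
	return err
}

func main() {
	// conn would come from the hijacked HTTP connection; omitted here.
}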


@@ -1,21 +0,0 @@
package container
import (
"net/http"
"github.com/docker/docker/api/server/httputils"
"golang.org/x/net/context"
)
// getContainersByName inspects a container's configuration and serializes it as JSON.
func (s *containerRouter) getContainersByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
displaySize := httputils.BoolValue(r, "size")
version := httputils.VersionFromContext(ctx)
json, err := s.backend.ContainerInspect(vars["name"], displaySize, version)
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusOK, json)
}


@@ -1,67 +0,0 @@
package router
import (
"errors"
"net/http"
"golang.org/x/net/context"
apierrors "github.com/docker/docker/api/errors"
"github.com/docker/docker/api/server/httputils"
)
var (
errExperimentalFeature = errors.New("This experimental feature is disabled by default. Start the Docker daemon with --experimental in order to enable it.")
)
// ExperimentalRoute defines an experimental API route that can be enabled or disabled.
type ExperimentalRoute interface {
Route
Enable()
Disable()
}
// experimentalRoute defines an experimental API route that can be enabled or disabled.
// It implements ExperimentalRoute.
type experimentalRoute struct {
local Route
handler httputils.APIFunc
}
// Enable enables this experimental route
func (r *experimentalRoute) Enable() {
r.handler = r.local.Handler()
}
// Disable disables the experimental route
func (r *experimentalRoute) Disable() {
r.handler = experimentalHandler
}
func experimentalHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
return apierrors.NewErrorWithStatusCode(errExperimentalFeature, http.StatusNotImplemented)
}
// Handler returns the APIFunc to let the server wrap it in middlewares.
func (r *experimentalRoute) Handler() httputils.APIFunc {
return r.handler
}
// Method returns the http method that the route responds to.
func (r *experimentalRoute) Method() string {
return r.local.Method()
}
// Path returns the subpath that the route responds to.
func (r *experimentalRoute) Path() string {
return r.local.Path()
}
// Experimental will mark a route as experimental.
func Experimental(r Route) Route {
return &experimentalRoute{
local: r,
handler: experimentalHandler,
}
}
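
A sketch of gating a route behind this wrapper; the path and handler are hypothetical. Until Enable is called (which the server does when started with --experimental), the wrapped handler answers 501 Not Implemented:

package main

import (
	"net/http"

	"github.com/docker/docker/api/server/router"
	"golang.org/x/net/context"
)

// pingExperimental is a placeholder handler used only for illustration.
func pingExperimental(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	_, err := w.Write([]byte("experimental pong"))
	return err
}

func main() {
	rt := router.Experimental(router.NewGetRoute("/ping-x", pingExperimental))
	// Disabled by default: rt.Handler() is experimentalHandler at this point.
	if er, ok := rt.(router.ExperimentalRoute); ok {
		er.Enable() // swap in the real handler
	}
	_ = rt.Handler()
}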


@@ -1,45 +0,0 @@
package image
import (
"io"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/registry"
"golang.org/x/net/context"
)
// Backend is all the methods that need to be implemented
// to provide image specific functionality.
type Backend interface {
containerBackend
imageBackend
importExportBackend
registryBackend
}
type containerBackend interface {
Commit(name string, config *backend.ContainerCommitConfig) (imageID string, err error)
}
type imageBackend interface {
ImageDelete(imageRef string, force, prune bool) ([]types.ImageDelete, error)
ImageHistory(imageName string) ([]*types.ImageHistory, error)
Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error)
LookupImage(name string) (*types.ImageInspect, error)
TagImage(imageName, repository, tag string) error
ImagesPrune(pruneFilters filters.Args) (*types.ImagesPruneReport, error)
}
type importExportBackend interface {
LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error
ImportImage(src string, repository, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error
ExportImage(names []string, outStream io.Writer) error
}
type registryBackend interface {
PullImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error
PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error
SearchRegistryForImages(ctx context.Context, filtersArgs string, term string, limit int, authConfig *types.AuthConfig, metaHeaders map[string][]string) (*registry.SearchResults, error)
}


@@ -1,50 +0,0 @@
package image
import (
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/server/router"
)
// imageRouter is a router to talk with the image controller
type imageRouter struct {
backend Backend
decoder httputils.ContainerDecoder
routes []router.Route
}
// NewRouter initializes a new image router
func NewRouter(backend Backend, decoder httputils.ContainerDecoder) router.Router {
r := &imageRouter{
backend: backend,
decoder: decoder,
}
r.initRoutes()
return r
}
// Routes returns the available routes to the image controller
func (r *imageRouter) Routes() []router.Route {
return r.routes
}
// initRoutes initializes the routes in the image router
func (r *imageRouter) initRoutes() {
r.routes = []router.Route{
// GET
router.NewGetRoute("/images/json", r.getImagesJSON),
router.NewGetRoute("/images/search", r.getImagesSearch),
router.NewGetRoute("/images/get", r.getImagesGet),
router.NewGetRoute("/images/{name:.*}/get", r.getImagesGet),
router.NewGetRoute("/images/{name:.*}/history", r.getImagesHistory),
router.NewGetRoute("/images/{name:.*}/json", r.getImagesByName),
// POST
router.NewPostRoute("/commit", r.postCommit),
router.NewPostRoute("/images/load", r.postImagesLoad),
router.Cancellable(router.NewPostRoute("/images/create", r.postImagesCreate)),
router.Cancellable(router.NewPostRoute("/images/{name:.*}/push", r.postImagesPush)),
router.NewPostRoute("/images/{name:.*}/tag", r.postImagesTag),
router.NewPostRoute("/images/prune", r.postImagesPrune),
// DELETE
router.NewDeleteRoute("/images/{name:.*}", r.deleteImages),
}
}


@@ -1,344 +0,0 @@
package image
import (
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/streamformatter"
"github.com/docker/docker/registry"
"golang.org/x/net/context"
)
func (s *imageRouter) postCommit(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
if err := httputils.CheckForJSON(r); err != nil {
return err
}
cname := r.Form.Get("container")
pause := httputils.BoolValue(r, "pause")
version := httputils.VersionFromContext(ctx)
if r.FormValue("pause") == "" && versions.GreaterThanOrEqualTo(version, "1.13") {
pause = true
}
c, _, _, err := s.decoder.DecodeConfig(r.Body)
if err != nil && err != io.EOF { // Do not fail if body is empty.
return err
}
if c == nil {
c = &container.Config{}
}
commitCfg := &backend.ContainerCommitConfig{
ContainerCommitConfig: types.ContainerCommitConfig{
Pause: pause,
Repo: r.Form.Get("repo"),
Tag: r.Form.Get("tag"),
Author: r.Form.Get("author"),
Comment: r.Form.Get("comment"),
Config: c,
MergeConfigs: true,
},
Changes: r.Form["changes"],
}
imgID, err := s.backend.Commit(cname, commitCfg)
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusCreated, &types.IDResponse{
ID: string(imgID),
})
}
// postImagesCreate creates an image, either by pulling it from a registry (fromImage) or by importing an archive (fromSrc).
func (s *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
var (
image = r.Form.Get("fromImage")
repo = r.Form.Get("repo")
tag = r.Form.Get("tag")
message = r.Form.Get("message")
err error
output = ioutils.NewWriteFlusher(w)
)
defer output.Close()
w.Header().Set("Content-Type", "application/json")
if image != "" { //pull
metaHeaders := map[string][]string{}
for k, v := range r.Header {
if strings.HasPrefix(k, "X-Meta-") {
metaHeaders[k] = v
}
}
authEncoded := r.Header.Get("X-Registry-Auth")
authConfig := &types.AuthConfig{}
if authEncoded != "" {
authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil {
// for a pull it is not an error if no auth was given;
// to remain compatible with the existing API it defaults to an empty AuthConfig
authConfig = &types.AuthConfig{}
}
}
err = s.backend.PullImage(ctx, image, tag, metaHeaders, authConfig, output)
} else { // import
src := r.Form.Get("fromSrc")
// 'err' MUST NOT be defined within this block; we need any error
// generated from the download to be available to the output
// stream processing below
err = s.backend.ImportImage(src, repo, tag, message, r.Body, output, r.Form["changes"])
}
if err != nil {
if !output.Flushed() {
return err
}
sf := streamformatter.NewJSONStreamFormatter()
output.Write(sf.FormatError(err))
}
return nil
}
func (s *imageRouter) postImagesPush(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
metaHeaders := map[string][]string{}
for k, v := range r.Header {
if strings.HasPrefix(k, "X-Meta-") {
metaHeaders[k] = v
}
}
if err := httputils.ParseForm(r); err != nil {
return err
}
authConfig := &types.AuthConfig{}
authEncoded := r.Header.Get("X-Registry-Auth")
if authEncoded != "" {
// the new format is to handle the authConfig as a header
authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil {
// to remain compatible with the existing API it defaults to an empty AuthConfig
authConfig = &types.AuthConfig{}
}
} else {
// the old format is supported for compatibility if there was no authConfig header
if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil {
return fmt.Errorf("Bad parameters and missing X-Registry-Auth: %v", err)
}
}
image := vars["name"]
tag := r.Form.Get("tag")
output := ioutils.NewWriteFlusher(w)
defer output.Close()
w.Header().Set("Content-Type", "application/json")
if err := s.backend.PushImage(ctx, image, tag, metaHeaders, authConfig, output); err != nil {
if !output.Flushed() {
return err
}
sf := streamformatter.NewJSONStreamFormatter()
output.Write(sf.FormatError(err))
}
return nil
}
func (s *imageRouter) getImagesGet(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
w.Header().Set("Content-Type", "application/x-tar")
output := ioutils.NewWriteFlusher(w)
defer output.Close()
var names []string
if name, ok := vars["name"]; ok {
names = []string{name}
} else {
names = r.Form["names"]
}
if err := s.backend.ExportImage(names, output); err != nil {
if !output.Flushed() {
return err
}
sf := streamformatter.NewJSONStreamFormatter()
output.Write(sf.FormatError(err))
}
return nil
}
func (s *imageRouter) postImagesLoad(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
quiet := httputils.BoolValueOrDefault(r, "quiet", true)
w.Header().Set("Content-Type", "application/json")
output := ioutils.NewWriteFlusher(w)
defer output.Close()
if err := s.backend.LoadImage(r.Body, output, quiet); err != nil {
output.Write(streamformatter.NewJSONStreamFormatter().FormatError(err))
}
return nil
}
func (s *imageRouter) deleteImages(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
name := vars["name"]
if strings.TrimSpace(name) == "" {
return fmt.Errorf("image name cannot be blank")
}
force := httputils.BoolValue(r, "force")
prune := !httputils.BoolValue(r, "noprune")
list, err := s.backend.ImageDelete(name, force, prune)
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusOK, list)
}
func (s *imageRouter) getImagesByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
imageInspect, err := s.backend.LookupImage(vars["name"])
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusOK, imageInspect)
}
func (s *imageRouter) getImagesJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
imageFilters, err := filters.FromParam(r.Form.Get("filters"))
if err != nil {
return err
}
version := httputils.VersionFromContext(ctx)
filterParam := r.Form.Get("filter")
if versions.LessThan(version, "1.28") && filterParam != "" {
imageFilters.Add("reference", filterParam)
}
images, err := s.backend.Images(imageFilters, httputils.BoolValue(r, "all"), false)
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusOK, images)
}
func (s *imageRouter) getImagesHistory(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
name := vars["name"]
history, err := s.backend.ImageHistory(name)
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusOK, history)
}
func (s *imageRouter) postImagesTag(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
if err := s.backend.TagImage(vars["name"], r.Form.Get("repo"), r.Form.Get("tag")); err != nil {
return err
}
w.WriteHeader(http.StatusCreated)
return nil
}
func (s *imageRouter) getImagesSearch(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
var (
config *types.AuthConfig
authEncoded = r.Header.Get("X-Registry-Auth")
headers = map[string][]string{}
)
if authEncoded != "" {
authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
if err := json.NewDecoder(authJSON).Decode(&config); err != nil {
// for a search it is not an error if no auth was given;
// to remain compatible with the existing API it defaults to an empty AuthConfig
config = &types.AuthConfig{}
}
}
for k, v := range r.Header {
if strings.HasPrefix(k, "X-Meta-") {
headers[k] = v
}
}
limit := registry.DefaultSearchLimit
if r.Form.Get("limit") != "" {
limitValue, err := strconv.Atoi(r.Form.Get("limit"))
if err != nil {
return err
}
limit = limitValue
}
query, err := s.backend.SearchRegistryForImages(ctx, r.Form.Get("filters"), r.Form.Get("term"), limit, config, headers)
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusOK, query.Results)
}
func (s *imageRouter) postImagesPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
pruneFilters, err := filters.FromParam(r.Form.Get("filters"))
if err != nil {
return err
}
pruneReport, err := s.backend.ImagesPrune(pruneFilters)
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusOK, pruneReport)
}
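
The pull, push, and search handlers above all read registry credentials from the X-Registry-Auth header as URL-safe base64 over JSON. A minimal client-side sketch of producing that header with the standard library; the credentials and daemon address are made up, and the field names mirror types.AuthConfig:

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	auth := map[string]string{
		"username":      "jdoe",
		"password":      "secret",
		"serveraddress": "https://index.docker.io/v1/",
	}
	buf, _ := json.Marshal(auth)
	// The handlers decode with base64.URLEncoding, so encode the same way.
	encoded := base64.URLEncoding.EncodeToString(buf)

	req, _ := http.NewRequest("POST",
		"http://localhost:2375/images/create?fromImage=busybox&tag=latest", nil)
	req.Header.Set("X-Registry-Auth", encoded)
	fmt.Println(req.Header.Get("X-Registry-Auth"))
}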


@@ -1,96 +0,0 @@
package router
import (
"net/http"
"github.com/docker/docker/api/server/httputils"
"golang.org/x/net/context"
)
// localRoute defines an individual API route to connect
// with the docker daemon. It implements Route.
type localRoute struct {
method string
path string
handler httputils.APIFunc
}
// Handler returns the APIFunc to let the server wrap it in middlewares.
func (l localRoute) Handler() httputils.APIFunc {
return l.handler
}
// Method returns the http method that the route responds to.
func (l localRoute) Method() string {
return l.method
}
// Path returns the subpath that the route responds to.
func (l localRoute) Path() string {
return l.path
}
// NewRoute initializes a new local route for the router.
func NewRoute(method, path string, handler httputils.APIFunc) Route {
return localRoute{method, path, handler}
}
// NewGetRoute initializes a new route with the http method GET.
func NewGetRoute(path string, handler httputils.APIFunc) Route {
return NewRoute("GET", path, handler)
}
// NewPostRoute initializes a new route with the http method POST.
func NewPostRoute(path string, handler httputils.APIFunc) Route {
return NewRoute("POST", path, handler)
}
// NewPutRoute initializes a new route with the http method PUT.
func NewPutRoute(path string, handler httputils.APIFunc) Route {
return NewRoute("PUT", path, handler)
}
// NewDeleteRoute initializes a new route with the http method DELETE.
func NewDeleteRoute(path string, handler httputils.APIFunc) Route {
return NewRoute("DELETE", path, handler)
}
// NewOptionsRoute initializes a new route with the http method OPTIONS.
func NewOptionsRoute(path string, handler httputils.APIFunc) Route {
return NewRoute("OPTIONS", path, handler)
}
// NewHeadRoute initializes a new route with the http method HEAD.
func NewHeadRoute(path string, handler httputils.APIFunc) Route {
return NewRoute("HEAD", path, handler)
}
func cancellableHandler(h httputils.APIFunc) httputils.APIFunc {
return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if notifier, ok := w.(http.CloseNotifier); ok {
notify := notifier.CloseNotify()
notifyCtx, cancel := context.WithCancel(ctx)
finished := make(chan struct{})
defer close(finished)
ctx = notifyCtx
go func() {
select {
case <-notify:
cancel()
case <-finished:
}
}()
}
return h(ctx, w, r, vars)
}
}
// Cancellable makes new route which embeds http.CloseNotifier feature to
// context.Context of handler.
func Cancellable(r Route) Route {
return localRoute{
method: r.Method(),
path: r.Path(),
handler: cancellableHandler(r.Handler()),
}
}
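
cancellableHandler above ties the request context to the client connection: when the ResponseWriter supports http.CloseNotifier and the client disconnects, the handler's context is canceled. A sketch of a long-running handler that cooperates with that cancellation (route and body are hypothetical):

package main

import (
	"fmt"
	"net/http"
	"time"

	"golang.org/x/net/context"
)

// streamTicks writes a line every second until the wrapped context is
// canceled, which Cancellable arranges to happen on client disconnect.
func streamTicks(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case t := <-ticker.C:
			fmt.Fprintf(w, "tick %s\n", t)
			if f, ok := w.(http.Flusher); ok {
				f.Flush()
			}
		}
	}
}

func main() {
	// Registered as: router.Cancellable(router.NewGetRoute("/ticks", streamTicks))
}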


@@ -1,22 +0,0 @@
package network
import (
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/network"
"github.com/docker/libnetwork"
)
// Backend is all the methods that need to be implemented
// to provide network specific functionality.
type Backend interface {
FindNetwork(idName string) (libnetwork.Network, error)
GetNetworkByName(idName string) (libnetwork.Network, error)
GetNetworksByID(partialID string) []libnetwork.Network
GetNetworks() []libnetwork.Network
CreateNetwork(nc types.NetworkCreateRequest) (*types.NetworkCreateResponse, error)
ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error
DisconnectContainerFromNetwork(containerName string, networkName string, force bool) error
DeleteNetwork(name string) error
NetworksPrune(pruneFilters filters.Args) (*types.NetworksPruneReport, error)
}


@@ -1,96 +0,0 @@
package network
import (
"fmt"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/runconfig"
)
var (
// AcceptedFilters is the set of filter keys accepted for validation
AcceptedFilters = map[string]bool{
"driver": true,
"type": true,
"name": true,
"id": true,
"label": true,
}
)
func filterNetworkByType(nws []types.NetworkResource, netType string) (retNws []types.NetworkResource, err error) {
switch netType {
case "builtin":
for _, nw := range nws {
if runconfig.IsPreDefinedNetwork(nw.Name) {
retNws = append(retNws, nw)
}
}
case "custom":
for _, nw := range nws {
if !runconfig.IsPreDefinedNetwork(nw.Name) {
retNws = append(retNws, nw)
}
}
default:
return nil, fmt.Errorf("Invalid filter: 'type'='%s'", netType)
}
return retNws, nil
}
// filterNetworks filters network list according to user specified filter
// and returns user chosen networks
func filterNetworks(nws []types.NetworkResource, filter filters.Args) ([]types.NetworkResource, error) {
// if filter is empty, return original network list
if filter.Len() == 0 {
return nws, nil
}
if err := filter.Validate(AcceptedFilters); err != nil {
return nil, err
}
displayNet := []types.NetworkResource{}
for _, nw := range nws {
if filter.Include("driver") {
if !filter.ExactMatch("driver", nw.Driver) {
continue
}
}
if filter.Include("name") {
if !filter.Match("name", nw.Name) {
continue
}
}
if filter.Include("id") {
if !filter.Match("id", nw.ID) {
continue
}
}
if filter.Include("label") {
if !filter.MatchKVList("label", nw.Labels) {
continue
}
}
displayNet = append(displayNet, nw)
}
if filter.Include("type") {
var typeNet []types.NetworkResource
errFilter := filter.WalkValues("type", func(fval string) error {
passList, err := filterNetworkByType(displayNet, fval)
if err != nil {
return err
}
typeNet = append(typeNet, passList...)
return nil
})
if errFilter != nil {
return nil, errFilter
}
displayNet = typeNet
}
return displayNet, nil
}
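
A hypothetical in-package test sketch for the helper above, exercising the driver filter; the network fixtures are invented:

package network

import (
	"testing"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
)

func TestFilterNetworksByDriver(t *testing.T) {
	nws := []types.NetworkResource{
		{Name: "bridge", Driver: "bridge"},
		{Name: "appnet", Driver: "overlay"},
	}
	args := filters.NewArgs()
	args.Add("driver", "overlay") // "driver" is one of AcceptedFilters
	out, err := filterNetworks(nws, args)
	if err != nil {
		t.Fatal(err)
	}
	if len(out) != 1 || out[0].Name != "appnet" {
		t.Fatalf("unexpected result: %+v", out)
	}
}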


@@ -1,44 +0,0 @@
package network
import (
"github.com/docker/docker/api/server/router"
"github.com/docker/docker/daemon/cluster"
)
// networkRouter is a router to talk with the network controller
type networkRouter struct {
backend Backend
clusterProvider *cluster.Cluster
routes []router.Route
}
// NewRouter initializes a new network router
func NewRouter(b Backend, c *cluster.Cluster) router.Router {
r := &networkRouter{
backend: b,
clusterProvider: c,
}
r.initRoutes()
return r
}
// Routes returns the available routes to the network controller
func (r *networkRouter) Routes() []router.Route {
return r.routes
}
func (r *networkRouter) initRoutes() {
r.routes = []router.Route{
// GET
router.NewGetRoute("/networks", r.getNetworksList),
router.NewGetRoute("/networks/", r.getNetworksList),
router.NewGetRoute("/networks/{id:.+}", r.getNetwork),
// POST
router.NewPostRoute("/networks/create", r.postNetworkCreate),
router.NewPostRoute("/networks/{id:.*}/connect", r.postNetworkConnect),
router.NewPostRoute("/networks/{id:.*}/disconnect", r.postNetworkDisconnect),
router.NewPostRoute("/networks/prune", r.postNetworksPrune),
// DELETE
router.NewDeleteRoute("/networks/{id:.*}", r.deleteNetwork),
}
}


@@ -1,308 +0,0 @@
package network
import (
"encoding/json"
"net/http"
"golang.org/x/net/context"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/network"
"github.com/docker/libnetwork"
"github.com/docker/libnetwork/networkdb"
)
func (n *networkRouter) getNetworksList(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
filter := r.Form.Get("filters")
netFilters, err := filters.FromParam(filter)
if err != nil {
return err
}
list := []types.NetworkResource{}
if nr, err := n.clusterProvider.GetNetworks(); err == nil {
list = append(list, nr...)
}
// Add networks returned by the Docker daemon that were not already
// returned by the cluster manager
SKIP:
for _, nw := range n.backend.GetNetworks() {
for _, nl := range list {
if nl.ID == nw.ID() {
continue SKIP
}
}
list = append(list, *n.buildNetworkResource(nw))
}
list, err = filterNetworks(list, netFilters)
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusOK, list)
}
func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
nw, err := n.backend.FindNetwork(vars["id"])
if err != nil {
if nr, err := n.clusterProvider.GetNetwork(vars["id"]); err == nil {
return httputils.WriteJSON(w, http.StatusOK, nr)
}
return err
}
return httputils.WriteJSON(w, http.StatusOK, n.buildNetworkResource(nw))
}
func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var create types.NetworkCreateRequest
if err := httputils.ParseForm(r); err != nil {
return err
}
if err := httputils.CheckForJSON(r); err != nil {
return err
}
if err := json.NewDecoder(r.Body).Decode(&create); err != nil {
return err
}
if nws, err := n.clusterProvider.GetNetworksByName(create.Name); err == nil && len(nws) > 0 {
return libnetwork.NetworkNameError(create.Name)
}
nw, err := n.backend.CreateNetwork(create)
if err != nil {
if _, ok := err.(libnetwork.ManagerRedirectError); !ok {
return err
}
id, err := n.clusterProvider.CreateNetwork(create)
if err != nil {
return err
}
nw = &types.NetworkCreateResponse{ID: id}
}
return httputils.WriteJSON(w, http.StatusCreated, nw)
}
func (n *networkRouter) postNetworkConnect(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var connect types.NetworkConnect
if err := httputils.ParseForm(r); err != nil {
return err
}
if err := httputils.CheckForJSON(r); err != nil {
return err
}
if err := json.NewDecoder(r.Body).Decode(&connect); err != nil {
return err
}
return n.backend.ConnectContainerToNetwork(connect.Container, vars["id"], connect.EndpointConfig)
}
func (n *networkRouter) postNetworkDisconnect(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var disconnect types.NetworkDisconnect
if err := httputils.ParseForm(r); err != nil {
return err
}
if err := httputils.CheckForJSON(r); err != nil {
return err
}
if err := json.NewDecoder(r.Body).Decode(&disconnect); err != nil {
return err
}
return n.backend.DisconnectContainerFromNetwork(disconnect.Container, vars["id"], disconnect.Force)
}
func (n *networkRouter) deleteNetwork(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
if _, err := n.clusterProvider.GetNetwork(vars["id"]); err == nil {
if err = n.clusterProvider.RemoveNetwork(vars["id"]); err != nil {
return err
}
w.WriteHeader(http.StatusNoContent)
return nil
}
if err := n.backend.DeleteNetwork(vars["id"]); err != nil {
return err
}
w.WriteHeader(http.StatusNoContent)
return nil
}
func (n *networkRouter) buildNetworkResource(nw libnetwork.Network) *types.NetworkResource {
r := &types.NetworkResource{}
if nw == nil {
return r
}
info := nw.Info()
r.Name = nw.Name()
r.ID = nw.ID()
r.Created = info.Created()
r.Scope = info.Scope()
if n.clusterProvider.IsManager() {
if _, err := n.clusterProvider.GetNetwork(nw.ID()); err == nil {
r.Scope = "swarm"
}
} else if info.Dynamic() {
r.Scope = "swarm"
}
r.Driver = nw.Type()
r.EnableIPv6 = info.IPv6Enabled()
r.Internal = info.Internal()
r.Attachable = info.Attachable()
r.Options = info.DriverOptions()
r.Containers = make(map[string]types.EndpointResource)
buildIpamResources(r, info)
r.Labels = info.Labels()
peers := info.Peers()
if len(peers) != 0 {
r.Peers = buildPeerInfoResources(peers)
}
epl := nw.Endpoints()
for _, e := range epl {
ei := e.Info()
if ei == nil {
continue
}
sb := ei.Sandbox()
tmpID := e.ID()
key := "ep-" + tmpID
if sb != nil {
key = sb.ContainerID()
}
r.Containers[key] = buildEndpointResource(tmpID, e.Name(), ei)
}
return r
}
func buildPeerInfoResources(peers []networkdb.PeerInfo) []network.PeerInfo {
peerInfo := make([]network.PeerInfo, 0, len(peers))
for _, peer := range peers {
peerInfo = append(peerInfo, network.PeerInfo{
Name: peer.Name,
IP: peer.IP,
})
}
return peerInfo
}
func buildIpamResources(r *types.NetworkResource, nwInfo libnetwork.NetworkInfo) {
id, opts, ipv4conf, ipv6conf := nwInfo.IpamConfig()
ipv4Info, ipv6Info := nwInfo.IpamInfo()
r.IPAM.Driver = id
r.IPAM.Options = opts
r.IPAM.Config = []network.IPAMConfig{}
for _, ip4 := range ipv4conf {
if ip4.PreferredPool == "" {
continue
}
iData := network.IPAMConfig{}
iData.Subnet = ip4.PreferredPool
iData.IPRange = ip4.SubPool
iData.Gateway = ip4.Gateway
iData.AuxAddress = ip4.AuxAddresses
r.IPAM.Config = append(r.IPAM.Config, iData)
}
if len(r.IPAM.Config) == 0 {
for _, ip4Info := range ipv4Info {
iData := network.IPAMConfig{}
iData.Subnet = ip4Info.IPAMData.Pool.String()
iData.Gateway = ip4Info.IPAMData.Gateway.IP.String()
r.IPAM.Config = append(r.IPAM.Config, iData)
}
}
hasIpv6Conf := false
for _, ip6 := range ipv6conf {
if ip6.PreferredPool == "" {
continue
}
hasIpv6Conf = true
iData := network.IPAMConfig{}
iData.Subnet = ip6.PreferredPool
iData.IPRange = ip6.SubPool
iData.Gateway = ip6.Gateway
iData.AuxAddress = ip6.AuxAddresses
r.IPAM.Config = append(r.IPAM.Config, iData)
}
if !hasIpv6Conf {
for _, ip6Info := range ipv6Info {
if ip6Info.IPAMData.Pool == nil {
continue
}
iData := network.IPAMConfig{}
iData.Subnet = ip6Info.IPAMData.Pool.String()
iData.Gateway = ip6Info.IPAMData.Gateway.String()
r.IPAM.Config = append(r.IPAM.Config, iData)
}
}
}
func buildEndpointResource(id string, name string, info libnetwork.EndpointInfo) types.EndpointResource {
er := types.EndpointResource{}
er.EndpointID = id
er.Name = name
ei := info
if ei == nil {
return er
}
if iface := ei.Iface(); iface != nil {
if mac := iface.MacAddress(); mac != nil {
er.MacAddress = mac.String()
}
if ip := iface.Address(); ip != nil && len(ip.IP) > 0 {
er.IPv4Address = ip.String()
}
if ipv6 := iface.AddressIPv6(); ipv6 != nil && len(ipv6.IP) > 0 {
er.IPv6Address = ipv6.String()
}
}
return er
}
func (n *networkRouter) postNetworksPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
pruneReport, err := n.backend.NetworksPrune(filters.Args{})
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusOK, pruneReport)
}
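
A stdlib-only sketch of the request that postNetworkCreate above decodes; the daemon address and network name are hypothetical, and the body fields mirror types.NetworkCreateRequest:

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	body := []byte(`{"Name":"appnet","Driver":"bridge","CheckDuplicate":true}`)
	resp, err := http.Post("http://localhost:2375/networks/create",
		"application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	// The handler replies 201 Created with the new network's ID on success.
	fmt.Println(resp.Status)
}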


@@ -1,25 +0,0 @@
package plugin
import (
"io"
"net/http"
enginetypes "github.com/docker/docker/api/types"
"github.com/docker/docker/reference"
"golang.org/x/net/context"
)
// Backend for Plugin
type Backend interface {
Disable(name string, config *enginetypes.PluginDisableConfig) error
Enable(name string, config *enginetypes.PluginEnableConfig) error
List() ([]enginetypes.Plugin, error)
Inspect(name string) (*enginetypes.Plugin, error)
Remove(name string, config *enginetypes.PluginRmConfig) error
Set(name string, args []string) error
Privileges(ctx context.Context, ref reference.Named, metaHeaders http.Header, authConfig *enginetypes.AuthConfig) (enginetypes.PluginPrivileges, error)
Pull(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer) error
Push(ctx context.Context, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, outStream io.Writer) error
Upgrade(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer) error
CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *enginetypes.PluginCreateOptions) error
}


@@ -1,39 +0,0 @@
package plugin
import "github.com/docker/docker/api/server/router"
// pluginRouter is a router to talk with the plugin controller
type pluginRouter struct {
backend Backend
routes []router.Route
}
// NewRouter initializes a new plugin router
func NewRouter(b Backend) router.Router {
r := &pluginRouter{
backend: b,
}
r.initRoutes()
return r
}
// Routes returns the available routes to the plugin controller
func (r *pluginRouter) Routes() []router.Route {
return r.routes
}
func (r *pluginRouter) initRoutes() {
r.routes = []router.Route{
router.NewGetRoute("/plugins", r.listPlugins),
router.NewGetRoute("/plugins/{name:.*}/json", r.inspectPlugin),
router.NewGetRoute("/plugins/privileges", r.getPrivileges),
router.NewDeleteRoute("/plugins/{name:.*}", r.removePlugin),
router.NewPostRoute("/plugins/{name:.*}/enable", r.enablePlugin), // PATCH?
router.NewPostRoute("/plugins/{name:.*}/disable", r.disablePlugin),
router.Cancellable(router.NewPostRoute("/plugins/pull", r.pullPlugin)),
router.Cancellable(router.NewPostRoute("/plugins/{name:.*}/push", r.pushPlugin)),
router.Cancellable(router.NewPostRoute("/plugins/{name:.*}/upgrade", r.upgradePlugin)),
router.NewPostRoute("/plugins/{name:.*}/set", r.setPlugin),
router.NewPostRoute("/plugins/create", r.createPlugin),
}
}


@@ -1,314 +0,0 @@
package plugin
import (
"encoding/base64"
"encoding/json"
"net/http"
"strconv"
"strings"
distreference "github.com/docker/distribution/reference"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/streamformatter"
"github.com/docker/docker/reference"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
func parseHeaders(headers http.Header) (map[string][]string, *types.AuthConfig) {
metaHeaders := map[string][]string{}
for k, v := range headers {
if strings.HasPrefix(k, "X-Meta-") {
metaHeaders[k] = v
}
}
// Get X-Registry-Auth
authEncoded := headers.Get("X-Registry-Auth")
authConfig := &types.AuthConfig{}
if authEncoded != "" {
authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil {
authConfig = &types.AuthConfig{}
}
}
return metaHeaders, authConfig
}
// parseRemoteRef parses the remote reference into a reference.Named
// returning the tag associated with the reference. When the given
// reference string includes both a digest and a tag, the returned
// reference keeps the digest and drops the tag, but the tag is still
// returned separately.
func parseRemoteRef(remote string) (reference.Named, string, error) {
// Parse remote reference, supporting remotes with name and tag
// NOTE: Using distribution reference to handle references
// containing both a name and digest
remoteRef, err := distreference.ParseNamed(remote)
if err != nil {
return nil, "", err
}
var tag string
if t, ok := remoteRef.(distreference.Tagged); ok {
tag = t.Tag()
}
// Convert distribution reference to docker reference
// TODO: remove when docker reference changes reconciled upstream
ref, err := reference.WithName(remoteRef.Name())
if err != nil {
return nil, "", err
}
if d, ok := remoteRef.(distreference.Digested); ok {
ref, err = reference.WithDigest(ref, d.Digest())
if err != nil {
return nil, "", err
}
} else if tag != "" {
ref, err = reference.WithTag(ref, tag)
if err != nil {
return nil, "", err
}
} else {
ref = reference.WithDefaultTag(ref)
}
return ref, tag, nil
}
func (pr *pluginRouter) getPrivileges(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
metaHeaders, authConfig := parseHeaders(r.Header)
ref, _, err := parseRemoteRef(r.FormValue("remote"))
if err != nil {
return err
}
privileges, err := pr.backend.Privileges(ctx, ref, metaHeaders, authConfig)
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusOK, privileges)
}
func (pr *pluginRouter) upgradePlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return errors.Wrap(err, "failed to parse form")
}
var privileges types.PluginPrivileges
dec := json.NewDecoder(r.Body)
if err := dec.Decode(&privileges); err != nil {
return errors.Wrap(err, "failed to parse privileges")
}
if dec.More() {
return errors.New("invalid privileges")
}
metaHeaders, authConfig := parseHeaders(r.Header)
ref, tag, err := parseRemoteRef(r.FormValue("remote"))
if err != nil {
return err
}
name, err := getName(ref, tag, vars["name"])
if err != nil {
return err
}
w.Header().Set("Docker-Plugin-Name", name)
w.Header().Set("Content-Type", "application/json")
output := ioutils.NewWriteFlusher(w)
if err := pr.backend.Upgrade(ctx, ref, name, metaHeaders, authConfig, privileges, output); err != nil {
if !output.Flushed() {
return err
}
output.Write(streamformatter.NewJSONStreamFormatter().FormatError(err))
}
return nil
}
func (pr *pluginRouter) pullPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return errors.Wrap(err, "failed to parse form")
}
var privileges types.PluginPrivileges
dec := json.NewDecoder(r.Body)
if err := dec.Decode(&privileges); err != nil {
return errors.Wrap(err, "failed to parse privileges")
}
if dec.More() {
return errors.New("invalid privileges")
}
metaHeaders, authConfig := parseHeaders(r.Header)
ref, tag, err := parseRemoteRef(r.FormValue("remote"))
if err != nil {
return err
}
name, err := getName(ref, tag, r.FormValue("name"))
if err != nil {
return err
}
w.Header().Set("Docker-Plugin-Name", name)
w.Header().Set("Content-Type", "application/json")
output := ioutils.NewWriteFlusher(w)
if err := pr.backend.Pull(ctx, ref, name, metaHeaders, authConfig, privileges, output); err != nil {
if !output.Flushed() {
return err
}
output.Write(streamformatter.NewJSONStreamFormatter().FormatError(err))
}
return nil
}
func getName(ref reference.Named, tag, name string) (string, error) {
if name == "" {
if _, ok := ref.(reference.Canonical); ok {
trimmed := reference.TrimNamed(ref)
if tag != "" {
nt, err := reference.WithTag(trimmed, tag)
if err != nil {
return "", err
}
name = nt.String()
} else {
name = reference.WithDefaultTag(trimmed).String()
}
} else {
name = ref.String()
}
} else {
localRef, err := reference.ParseNamed(name)
if err != nil {
return "", err
}
if _, ok := localRef.(reference.Canonical); ok {
return "", errors.New("cannot use digest in plugin tag")
}
if distreference.IsNameOnly(localRef) {
// TODO: log change in name to out stream
name = reference.WithDefaultTag(localRef).String()
}
}
return name, nil
}
func (pr *pluginRouter) createPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
options := &types.PluginCreateOptions{
RepoName: r.FormValue("name")}
if err := pr.backend.CreateFromContext(ctx, r.Body, options); err != nil {
return err
}
// TODO: send progress bar
w.WriteHeader(http.StatusNoContent)
return nil
}
func (pr *pluginRouter) enablePlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
name := vars["name"]
timeout, err := strconv.Atoi(r.Form.Get("timeout"))
if err != nil {
return err
}
config := &types.PluginEnableConfig{Timeout: timeout}
return pr.backend.Enable(name, config)
}
func (pr *pluginRouter) disablePlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
name := vars["name"]
config := &types.PluginDisableConfig{
ForceDisable: httputils.BoolValue(r, "force"),
}
return pr.backend.Disable(name, config)
}
func (pr *pluginRouter) removePlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
name := vars["name"]
config := &types.PluginRmConfig{
ForceRemove: httputils.BoolValue(r, "force"),
}
return pr.backend.Remove(name, config)
}
func (pr *pluginRouter) pushPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return errors.Wrap(err, "failed to parse form")
}
metaHeaders, authConfig := parseHeaders(r.Header)
w.Header().Set("Content-Type", "application/json")
output := ioutils.NewWriteFlusher(w)
if err := pr.backend.Push(ctx, vars["name"], metaHeaders, authConfig, output); err != nil {
if !output.Flushed() {
return err
}
output.Write(streamformatter.NewJSONStreamFormatter().FormatError(err))
}
return nil
}
func (pr *pluginRouter) setPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var args []string
if err := json.NewDecoder(r.Body).Decode(&args); err != nil {
return err
}
if err := pr.backend.Set(vars["name"], args); err != nil {
return err
}
w.WriteHeader(http.StatusNoContent)
return nil
}
func (pr *pluginRouter) listPlugins(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
l, err := pr.backend.List()
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusOK, l)
}
func (pr *pluginRouter) inspectPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
result, err := pr.backend.Inspect(vars["name"])
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusOK, result)
}
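
One detail worth noting in enablePlugin above: the timeout query parameter is parsed with strconv.Atoi before any defaulting, so callers must always send a numeric value. A hedged stdlib sketch (plugin name and daemon address are made up):

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// POST /plugins/{name}/enable?timeout=<seconds>; omitting timeout would
	// fail the Atoi parse in the handler.
	resp, err := http.Post(
		"http://localhost:2375/plugins/sample:latest/enable?timeout=30",
		"application/json", nil)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}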


@@ -1,19 +0,0 @@
package router
import "github.com/docker/docker/api/server/httputils"
// Router defines an interface to specify a group of routes to add to the docker server.
type Router interface {
// Routes returns the list of routes to add to the docker server.
Routes() []Route
}
// Route defines an individual API route in the docker server.
type Route interface {
// Handler returns the raw function to create the http handler.
Handler() httputils.APIFunc
// Method returns the http method that the route responds to.
Method() string
// Path returns the subpath that the route responds to.
Path() string
}
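
These two interfaces are all a subsystem needs to implement to plug into the API server. A minimal hypothetical implementation, reusing the localRoute helpers shown earlier:

package main

import (
	"net/http"

	"github.com/docker/docker/api/server/router"
	"golang.org/x/net/context"
)

// helloRouter is a made-up minimal implementation of router.Router.
type helloRouter struct {
	routes []router.Route
}

func newHelloRouter() router.Router {
	h := &helloRouter{}
	h.routes = []router.Route{
		router.NewGetRoute("/hello", h.hello),
	}
	return h
}

// Routes returns the list of routes this router contributes.
func (h *helloRouter) Routes() []router.Route { return h.routes }

func (h *helloRouter) hello(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	_, err := w.Write([]byte("hello"))
	return err
}

func main() { _ = newHelloRouter() }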


@@ -1,36 +0,0 @@
package swarm
import (
basictypes "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
types "github.com/docker/docker/api/types/swarm"
"golang.org/x/net/context"
)
// Backend abstracts a swarm command manager.
type Backend interface {
Init(req types.InitRequest) (string, error)
Join(req types.JoinRequest) error
Leave(force bool) error
Inspect() (types.Swarm, error)
Update(uint64, types.Spec, types.UpdateFlags) error
GetUnlockKey() (string, error)
UnlockSwarm(req types.UnlockRequest) error
GetServices(basictypes.ServiceListOptions) ([]types.Service, error)
GetService(string) (types.Service, error)
CreateService(types.ServiceSpec, string) (*basictypes.ServiceCreateResponse, error)
UpdateService(string, uint64, types.ServiceSpec, string, string) (*basictypes.ServiceUpdateResponse, error)
RemoveService(string) error
ServiceLogs(context.Context, string, *backend.ContainerLogsConfig, chan struct{}) error
GetNodes(basictypes.NodeListOptions) ([]types.Node, error)
GetNode(string) (types.Node, error)
UpdateNode(string, uint64, types.NodeSpec) error
RemoveNode(string, bool) error
GetTasks(basictypes.TaskListOptions) ([]types.Task, error)
GetTask(string) (types.Task, error)
GetSecrets(opts basictypes.SecretListOptions) ([]types.Secret, error)
CreateSecret(s types.SecretSpec) (string, error)
RemoveSecret(id string) error
GetSecret(id string) (types.Secret, error)
UpdateSecret(id string, version uint64, spec types.SecretSpec) error
}


@@ -1,52 +0,0 @@
package swarm
import "github.com/docker/docker/api/server/router"
// swarmRouter is a router to talk with the swarm controller
type swarmRouter struct {
backend Backend
routes []router.Route
}
// NewRouter initializes a new swarm router
func NewRouter(b Backend) router.Router {
r := &swarmRouter{
backend: b,
}
r.initRoutes()
return r
}
// Routes returns the available routes to the swarm controller
func (sr *swarmRouter) Routes() []router.Route {
return sr.routes
}
func (sr *swarmRouter) initRoutes() {
sr.routes = []router.Route{
router.NewPostRoute("/swarm/init", sr.initCluster),
router.NewPostRoute("/swarm/join", sr.joinCluster),
router.NewPostRoute("/swarm/leave", sr.leaveCluster),
router.NewGetRoute("/swarm", sr.inspectCluster),
router.NewGetRoute("/swarm/unlockkey", sr.getUnlockKey),
router.NewPostRoute("/swarm/update", sr.updateCluster),
router.NewPostRoute("/swarm/unlock", sr.unlockCluster),
router.NewGetRoute("/services", sr.getServices),
router.NewGetRoute("/services/{id}", sr.getService),
router.NewPostRoute("/services/create", sr.createService),
router.NewPostRoute("/services/{id}/update", sr.updateService),
router.NewDeleteRoute("/services/{id}", sr.removeService),
router.Experimental(router.Cancellable(router.NewGetRoute("/services/{id}/logs", sr.getServiceLogs))),
router.NewGetRoute("/nodes", sr.getNodes),
router.NewGetRoute("/nodes/{id}", sr.getNode),
router.NewDeleteRoute("/nodes/{id}", sr.removeNode),
router.NewPostRoute("/nodes/{id}/update", sr.updateNode),
router.NewGetRoute("/tasks", sr.getTasks),
router.NewGetRoute("/tasks/{id}", sr.getTask),
router.NewGetRoute("/secrets", sr.getSecrets),
router.NewPostRoute("/secrets/create", sr.createSecret),
router.NewDeleteRoute("/secrets/{id}", sr.removeSecret),
router.NewGetRoute("/secrets/{id}", sr.getSecret),
router.NewPostRoute("/secrets/{id}/update", sr.updateSecret),
}
}


@@ -1,418 +0,0 @@
package swarm
import (
"encoding/json"
"fmt"
"net/http"
"strconv"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api/errors"
"github.com/docker/docker/api/server/httputils"
basictypes "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/filters"
types "github.com/docker/docker/api/types/swarm"
"golang.org/x/net/context"
)
func (sr *swarmRouter) initCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var req types.InitRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
return err
}
nodeID, err := sr.backend.Init(req)
if err != nil {
logrus.Errorf("Error initializing swarm: %v", err)
return err
}
return httputils.WriteJSON(w, http.StatusOK, nodeID)
}
func (sr *swarmRouter) joinCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var req types.JoinRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
return err
}
return sr.backend.Join(req)
}
func (sr *swarmRouter) leaveCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
force := httputils.BoolValue(r, "force")
return sr.backend.Leave(force)
}
func (sr *swarmRouter) inspectCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
swarm, err := sr.backend.Inspect()
if err != nil {
logrus.Errorf("Error getting swarm: %v", err)
return err
}
return httputils.WriteJSON(w, http.StatusOK, swarm)
}
func (sr *swarmRouter) updateCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var swarm types.Spec
if err := json.NewDecoder(r.Body).Decode(&swarm); err != nil {
return err
}
rawVersion := r.URL.Query().Get("version")
version, err := strconv.ParseUint(rawVersion, 10, 64)
if err != nil {
return fmt.Errorf("Invalid swarm version '%s': %s", rawVersion, err.Error())
}
var flags types.UpdateFlags
if value := r.URL.Query().Get("rotateWorkerToken"); value != "" {
rot, err := strconv.ParseBool(value)
if err != nil {
return fmt.Errorf("invalid value for rotateWorkerToken: %s", value)
}
flags.RotateWorkerToken = rot
}
if value := r.URL.Query().Get("rotateManagerToken"); value != "" {
rot, err := strconv.ParseBool(value)
if err != nil {
return fmt.Errorf("invalid value for rotateManagerToken: %s", value)
}
flags.RotateManagerToken = rot
}
if value := r.URL.Query().Get("rotateManagerUnlockKey"); value != "" {
rot, err := strconv.ParseBool(value)
if err != nil {
return fmt.Errorf("invalid value for rotateManagerUnlockKey: %s", value)
}
flags.RotateManagerUnlockKey = rot
}
if err := sr.backend.Update(version, swarm, flags); err != nil {
logrus.Errorf("Error configuring swarm: %v", err)
return err
}
return nil
}
func (sr *swarmRouter) unlockCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var req types.UnlockRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
return err
}
if err := sr.backend.UnlockSwarm(req); err != nil {
logrus.Errorf("Error unlocking swarm: %v", err)
return err
}
return nil
}
func (sr *swarmRouter) getUnlockKey(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
unlockKey, err := sr.backend.GetUnlockKey()
if err != nil {
logrus.WithError(err).Errorf("Error retrieving swarm unlock key")
return err
}
return httputils.WriteJSON(w, http.StatusOK, &basictypes.SwarmUnlockKeyResponse{
UnlockKey: unlockKey,
})
}
func (sr *swarmRouter) getServices(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
filter, err := filters.FromParam(r.Form.Get("filters"))
if err != nil {
return err
}
services, err := sr.backend.GetServices(basictypes.ServiceListOptions{Filters: filter})
if err != nil {
logrus.Errorf("Error getting services: %v", err)
return err
}
return httputils.WriteJSON(w, http.StatusOK, services)
}
func (sr *swarmRouter) getService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
service, err := sr.backend.GetService(vars["id"])
if err != nil {
logrus.Errorf("Error getting service %s: %v", vars["id"], err)
return err
}
return httputils.WriteJSON(w, http.StatusOK, service)
}
func (sr *swarmRouter) createService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var service types.ServiceSpec
if err := json.NewDecoder(r.Body).Decode(&service); err != nil {
return err
}
// Get returns "" if the header does not exist
encodedAuth := r.Header.Get("X-Registry-Auth")
resp, err := sr.backend.CreateService(service, encodedAuth)
if err != nil {
logrus.Errorf("Error creating service %s: %v", service.Name, err)
return err
}
return httputils.WriteJSON(w, http.StatusCreated, resp)
}
func (sr *swarmRouter) updateService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var service types.ServiceSpec
if err := json.NewDecoder(r.Body).Decode(&service); err != nil {
return err
}
rawVersion := r.URL.Query().Get("version")
version, err := strconv.ParseUint(rawVersion, 10, 64)
if err != nil {
return fmt.Errorf("Invalid service version '%s': %s", rawVersion, err.Error())
}
// Get returns "" if the header does not exist
encodedAuth := r.Header.Get("X-Registry-Auth")
registryAuthFrom := r.URL.Query().Get("registryAuthFrom")
resp, err := sr.backend.UpdateService(vars["id"], version, service, encodedAuth, registryAuthFrom)
if err != nil {
logrus.Errorf("Error updating service %s: %v", vars["id"], err)
return err
}
return httputils.WriteJSON(w, http.StatusOK, resp)
}
func (sr *swarmRouter) removeService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := sr.backend.RemoveService(vars["id"]); err != nil {
logrus.Errorf("Error removing service %s: %v", vars["id"], err)
return err
}
return nil
}
func (sr *swarmRouter) getServiceLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
// Args are validated before the stream starts because once it starts we have
// already sent HTTP 200 by writing an empty chunk of data to tell the client
// that the daemon is going to stream. After that initial HTTP 200 we can no
// longer report errors (e.g. container not found, wrong parameters) with the
// appropriate status code.
stdout, stderr := httputils.BoolValue(r, "stdout"), httputils.BoolValue(r, "stderr")
if !(stdout || stderr) {
return fmt.Errorf("Bad parameters: you must choose at least one stream")
}
serviceName := vars["id"]
logsConfig := &backend.ContainerLogsConfig{
ContainerLogsOptions: basictypes.ContainerLogsOptions{
Follow: httputils.BoolValue(r, "follow"),
Timestamps: httputils.BoolValue(r, "timestamps"),
Since: r.Form.Get("since"),
Tail: r.Form.Get("tail"),
ShowStdout: stdout,
ShowStderr: stderr,
Details: httputils.BoolValue(r, "details"),
},
OutStream: w,
}
if logsConfig.Details {
return fmt.Errorf("Bad parameters: details is not currently supported")
}
chStarted := make(chan struct{})
if err := sr.backend.ServiceLogs(ctx, serviceName, logsConfig, chStarted); err != nil {
select {
case <-chStarted:
// The client may be expecting all of the data we're sending to
// be multiplexed, so send it through OutStream, which will
// have been set up to handle that if needed.
fmt.Fprintf(logsConfig.OutStream, "Error grabbing service logs: %v\n", err)
default:
return err
}
}
return nil
}
func (sr *swarmRouter) getNodes(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
filter, err := filters.FromParam(r.Form.Get("filters"))
if err != nil {
return err
}
nodes, err := sr.backend.GetNodes(basictypes.NodeListOptions{Filters: filter})
if err != nil {
logrus.Errorf("Error getting nodes: %v", err)
return err
}
return httputils.WriteJSON(w, http.StatusOK, nodes)
}
func (sr *swarmRouter) getNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
node, err := sr.backend.GetNode(vars["id"])
if err != nil {
logrus.Errorf("Error getting node %s: %v", vars["id"], err)
return err
}
return httputils.WriteJSON(w, http.StatusOK, node)
}
func (sr *swarmRouter) updateNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var node types.NodeSpec
if err := json.NewDecoder(r.Body).Decode(&node); err != nil {
return err
}
rawVersion := r.URL.Query().Get("version")
version, err := strconv.ParseUint(rawVersion, 10, 64)
if err != nil {
return fmt.Errorf("Invalid node version '%s': %s", rawVersion, err.Error())
}
if err := sr.backend.UpdateNode(vars["id"], version, node); err != nil {
logrus.Errorf("Error updating node %s: %v", vars["id"], err)
return err
}
return nil
}
func (sr *swarmRouter) removeNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
force := httputils.BoolValue(r, "force")
if err := sr.backend.RemoveNode(vars["id"], force); err != nil {
logrus.Errorf("Error removing node %s: %v", vars["id"], err)
return err
}
return nil
}
func (sr *swarmRouter) getTasks(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
filter, err := filters.FromParam(r.Form.Get("filters"))
if err != nil {
return err
}
tasks, err := sr.backend.GetTasks(basictypes.TaskListOptions{Filters: filter})
if err != nil {
logrus.Errorf("Error getting tasks: %v", err)
return err
}
return httputils.WriteJSON(w, http.StatusOK, tasks)
}
func (sr *swarmRouter) getTask(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
task, err := sr.backend.GetTask(vars["id"])
if err != nil {
logrus.Errorf("Error getting task %s: %v", vars["id"], err)
return err
}
return httputils.WriteJSON(w, http.StatusOK, task)
}
func (sr *swarmRouter) getSecrets(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
filter, err := filters.FromParam(r.Form.Get("filters"))
if err != nil {
return err
}
secrets, err := sr.backend.GetSecrets(basictypes.SecretListOptions{Filters: filter})
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusOK, secrets)
}
func (sr *swarmRouter) createSecret(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var secret types.SecretSpec
if err := json.NewDecoder(r.Body).Decode(&secret); err != nil {
return err
}
id, err := sr.backend.CreateSecret(secret)
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusCreated, &basictypes.SecretCreateResponse{
ID: id,
})
}
func (sr *swarmRouter) removeSecret(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := sr.backend.RemoveSecret(vars["id"]); err != nil {
return err
}
w.WriteHeader(http.StatusNoContent)
return nil
}
func (sr *swarmRouter) getSecret(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
secret, err := sr.backend.GetSecret(vars["id"])
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusOK, secret)
}
func (sr *swarmRouter) updateSecret(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var secret types.SecretSpec
if err := json.NewDecoder(r.Body).Decode(&secret); err != nil {
return errors.NewBadRequestError(err)
}
rawVersion := r.URL.Query().Get("version")
version, err := strconv.ParseUint(rawVersion, 10, 64)
if err != nil {
return errors.NewBadRequestError(fmt.Errorf("invalid secret version"))
}
id := vars["id"]
if err := sr.backend.UpdateSecret(id, version, secret); err != nil {
return err
}
return nil
}

View File

@@ -1,21 +0,0 @@
package system
import (
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/events"
"github.com/docker/docker/api/types/filters"
"golang.org/x/net/context"
)
// Backend is the methods that need to be implemented to provide
// system specific functionality.
type Backend interface {
SystemInfo() (*types.Info, error)
SystemVersion() types.Version
SystemDiskUsage() (*types.DiskUsage, error)
SubscribeToEvents(since, until time.Time, ef filters.Args) ([]events.Message, chan interface{})
UnsubscribeFromEvents(chan interface{})
AuthenticateToRegistry(ctx context.Context, authConfig *types.AuthConfig) (string, string, error)
}

View File

@@ -1,39 +0,0 @@
package system
import (
"github.com/docker/docker/api/server/router"
"github.com/docker/docker/daemon/cluster"
)
// systemRouter provides information about the Docker system overall.
// It gathers information about the host, the daemon and container events.
type systemRouter struct {
backend Backend
clusterProvider *cluster.Cluster
routes []router.Route
}
// NewRouter initializes a new system router
func NewRouter(b Backend, c *cluster.Cluster) router.Router {
r := &systemRouter{
backend: b,
clusterProvider: c,
}
r.routes = []router.Route{
router.NewOptionsRoute("/{anyroute:.*}", optionsHandler),
router.NewGetRoute("/_ping", pingHandler),
router.Cancellable(router.NewGetRoute("/events", r.getEvents)),
router.NewGetRoute("/info", r.getInfo),
router.NewGetRoute("/version", r.getVersion),
router.NewGetRoute("/system/df", r.getDiskUsage),
router.NewPostRoute("/auth", r.postAuth),
}
return r
}
// Routes returns all the API routes dedicated to the docker system
func (s *systemRouter) Routes() []router.Route {
return s.routes
}

View File

@@ -1,186 +0,0 @@
package system
import (
"encoding/json"
"fmt"
"net/http"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api"
"github.com/docker/docker/api/errors"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/events"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/registry"
timetypes "github.com/docker/docker/api/types/time"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/pkg/ioutils"
"golang.org/x/net/context"
)
func optionsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
w.WriteHeader(http.StatusOK)
return nil
}
func pingHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
_, err := w.Write([]byte{'O', 'K'})
return err
}
func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
info, err := s.backend.SystemInfo()
if err != nil {
return err
}
if s.clusterProvider != nil {
info.Swarm = s.clusterProvider.Info()
}
if versions.LessThan(httputils.VersionFromContext(ctx), "1.25") {
// TODO: handle this conversion in engine-api
type oldInfo struct {
*types.Info
ExecutionDriver string
}
old := &oldInfo{
Info: info,
ExecutionDriver: "<not supported>",
}
nameOnlySecurityOptions := []string{}
kvSecOpts, err := types.DecodeSecurityOptions(old.SecurityOptions)
if err != nil {
return err
}
for _, s := range kvSecOpts {
nameOnlySecurityOptions = append(nameOnlySecurityOptions, s.Name)
}
old.SecurityOptions = nameOnlySecurityOptions
return httputils.WriteJSON(w, http.StatusOK, old)
}
return httputils.WriteJSON(w, http.StatusOK, info)
}
func (s *systemRouter) getVersion(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
info := s.backend.SystemVersion()
info.APIVersion = api.DefaultVersion
return httputils.WriteJSON(w, http.StatusOK, info)
}
func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
du, err := s.backend.SystemDiskUsage()
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusOK, du)
}
func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
since, err := eventTime(r.Form.Get("since"))
if err != nil {
return err
}
until, err := eventTime(r.Form.Get("until"))
if err != nil {
return err
}
var (
timeout <-chan time.Time
onlyPastEvents bool
)
if !until.IsZero() {
if until.Before(since) {
return errors.NewBadRequestError(fmt.Errorf("`since` time (%s) cannot be after `until` time (%s)", r.Form.Get("since"), r.Form.Get("until")))
}
now := time.Now()
onlyPastEvents = until.Before(now)
if !onlyPastEvents {
dur := until.Sub(now)
timeout = time.NewTimer(dur).C
}
}
ef, err := filters.FromParam(r.Form.Get("filters"))
if err != nil {
return err
}
w.Header().Set("Content-Type", "application/json")
output := ioutils.NewWriteFlusher(w)
defer output.Close()
output.Flush()
enc := json.NewEncoder(output)
buffered, l := s.backend.SubscribeToEvents(since, until, ef)
defer s.backend.UnsubscribeFromEvents(l)
for _, ev := range buffered {
if err := enc.Encode(ev); err != nil {
return err
}
}
if onlyPastEvents {
return nil
}
for {
select {
case ev := <-l:
jev, ok := ev.(events.Message)
if !ok {
logrus.Warnf("unexpected event message: %q", ev)
continue
}
if err := enc.Encode(jev); err != nil {
return err
}
case <-timeout:
return nil
case <-ctx.Done():
logrus.Debug("Client context cancelled, stop sending events")
return nil
}
}
}
func (s *systemRouter) postAuth(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var config *types.AuthConfig
err := json.NewDecoder(r.Body).Decode(&config)
r.Body.Close()
if err != nil {
return err
}
status, token, err := s.backend.AuthenticateToRegistry(ctx, config)
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusOK, &registry.AuthenticateOKBody{
Status: status,
IdentityToken: token,
})
}
func eventTime(formTime string) (time.Time, error) {
t, tNano, err := timetypes.ParseTimestamps(formTime, -1)
if err != nil {
return time.Time{}, err
}
if t == -1 {
return time.Time{}, nil
}
return time.Unix(t, tNano), nil
}
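The getEvents handler above streams each event as a separate JSON document, flushing after the buffered backlog, so a client can consume the stream with a plain json.Decoder until the connection closes. A minimal sketch, assuming the standard /events endpoint URL is passed in:

package example

import (
	"encoding/json"
	"io"
	"net/http"
)

// followEvents reads the event stream one JSON document at a time and
// hands each raw event to the supplied callback.
func followEvents(url string, handle func(json.RawMessage)) error {
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	dec := json.NewDecoder(resp.Body)
	for {
		var ev json.RawMessage
		if err := dec.Decode(&ev); err != nil {
			if err == io.EOF {
				return nil // server closed the stream (e.g. `until` reached)
			}
			return err
		}
		handle(ev)
	}
}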

View File

@@ -1,17 +0,0 @@
package volume
import (
// TODO return types need to be refactored into pkg
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
)
// Backend is the methods that need to be implemented to provide
// volume specific functionality
type Backend interface {
Volumes(filter string) ([]*types.Volume, []string, error)
VolumeInspect(name string) (*types.Volume, error)
VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error)
VolumeRm(name string, force bool) error
VolumesPrune(pruneFilters filters.Args) (*types.VolumesPruneReport, error)
}

View File

@@ -1,36 +0,0 @@
package volume
import "github.com/docker/docker/api/server/router"
// volumeRouter is a router to talk with the volumes controller
type volumeRouter struct {
backend Backend
routes []router.Route
}
// NewRouter initializes a new volume router
func NewRouter(b Backend) router.Router {
r := &volumeRouter{
backend: b,
}
r.initRoutes()
return r
}
// Routes returns the available routes to the volumes controller
func (r *volumeRouter) Routes() []router.Route {
return r.routes
}
func (r *volumeRouter) initRoutes() {
r.routes = []router.Route{
// GET
router.NewGetRoute("/volumes", r.getVolumesList),
router.NewGetRoute("/volumes/{name:.*}", r.getVolumeByName),
// POST
router.NewPostRoute("/volumes/create", r.postVolumesCreate),
router.NewPostRoute("/volumes/prune", r.postVolumesPrune),
// DELETE
router.NewDeleteRoute("/volumes/{name:.*}", r.deleteVolumes),
}
}

View File

@@ -1,80 +0,0 @@
package volume
import (
"encoding/json"
"net/http"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types/filters"
volumetypes "github.com/docker/docker/api/types/volume"
"golang.org/x/net/context"
)
func (v *volumeRouter) getVolumesList(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
volumes, warnings, err := v.backend.Volumes(r.Form.Get("filters"))
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusOK, &volumetypes.VolumesListOKBody{Volumes: volumes, Warnings: warnings})
}
func (v *volumeRouter) getVolumeByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
volume, err := v.backend.VolumeInspect(vars["name"])
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusOK, volume)
}
func (v *volumeRouter) postVolumesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
if err := httputils.CheckForJSON(r); err != nil {
return err
}
var req volumetypes.VolumesCreateBody
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
return err
}
volume, err := v.backend.VolumeCreate(req.Name, req.Driver, req.DriverOpts, req.Labels)
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusCreated, volume)
}
func (v *volumeRouter) deleteVolumes(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
force := httputils.BoolValue(r, "force")
if err := v.backend.VolumeRm(vars["name"], force); err != nil {
return err
}
w.WriteHeader(http.StatusNoContent)
return nil
}
func (v *volumeRouter) postVolumesPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
pruneReport, err := v.backend.VolumesPrune(filters.Args{})
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusOK, pruneReport)
}

View File

@@ -1,30 +0,0 @@
package server
import (
"net/http"
"sync"
"github.com/gorilla/mux"
)
// routerSwapper is an http.Handler that allows you to swap
// mux routers.
type routerSwapper struct {
mu sync.Mutex
router *mux.Router
}
// Swap changes the old router with the new one.
func (rs *routerSwapper) Swap(newRouter *mux.Router) {
rs.mu.Lock()
rs.router = newRouter
rs.mu.Unlock()
}
// ServeHTTP makes routerSwapper implement the http.Handler interface.
func (rs *routerSwapper) ServeHTTP(w http.ResponseWriter, r *http.Request) {
rs.mu.Lock()
router := rs.router
rs.mu.Unlock()
router.ServeHTTP(w, r)
}
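A plain Mutex (rather than an RWMutex) is enough here because the critical section is only a pointer read or write. The payoff of this type is that the mux can be replaced while the server keeps serving, which is what EnableProfiler/DisableProfiler rely on later in this diff. A minimal sketch of the pattern, assuming it lives in this same package (demoSwap is a hypothetical name, not in the original file):

package server

import (
	"net/http"

	"github.com/gorilla/mux"
)

// demoSwap starts a server on the swapper, then replaces the router
// without restarting the server; in-flight requests keep using whichever
// router they grabbed under the lock.
func demoSwap() {
	rs := &routerSwapper{router: mux.NewRouter()}
	go http.ListenAndServe("127.0.0.1:8080", rs) // error ignored in this sketch

	next := mux.NewRouter()
	next.HandleFunc("/ping", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("pong"))
	})
	rs.Swap(next)
}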

View File

@@ -1,210 +0,0 @@
package server
import (
"crypto/tls"
"fmt"
"net"
"net/http"
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api/errors"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/server/middleware"
"github.com/docker/docker/api/server/router"
"github.com/gorilla/mux"
"golang.org/x/net/context"
)
// versionMatcher defines a variable matcher to be parsed by the router
// when a request is about to be served.
const versionMatcher = "/v{version:[0-9.]+}"
// Config provides the configuration for the API server
type Config struct {
Logging bool
EnableCors bool
CorsHeaders string
Version string
SocketGroup string
TLSConfig *tls.Config
}
// Server contains instance details for the server
type Server struct {
cfg *Config
servers []*HTTPServer
routers []router.Router
routerSwapper *routerSwapper
middlewares []middleware.Middleware
}
// New returns a new instance of the server based on the specified configuration.
// It allocates resources which will be needed for ServeAPI(ports, unix-sockets).
func New(cfg *Config) *Server {
return &Server{
cfg: cfg,
}
}
// UseMiddleware appends a new middleware to the request chain.
// This needs to be called before the API routes are configured.
func (s *Server) UseMiddleware(m middleware.Middleware) {
s.middlewares = append(s.middlewares, m)
}
// Accept sets a listener that the server accepts connections on.
func (s *Server) Accept(addr string, listeners ...net.Listener) {
for _, listener := range listeners {
httpServer := &HTTPServer{
srv: &http.Server{
Addr: addr,
},
l: listener,
}
s.servers = append(s.servers, httpServer)
}
}
// Close closes the servers and thus stops receiving requests
func (s *Server) Close() {
for _, srv := range s.servers {
if err := srv.Close(); err != nil {
logrus.Error(err)
}
}
}
// serveAPI loops through all initialized servers and spawns a goroutine
// running Serve for each. It also installs the routerSwapper as each server's Handler.
func (s *Server) serveAPI() error {
var chErrors = make(chan error, len(s.servers))
for _, srv := range s.servers {
srv.srv.Handler = s.routerSwapper
go func(srv *HTTPServer) {
var err error
logrus.Infof("API listen on %s", srv.l.Addr())
if err = srv.Serve(); err != nil && strings.Contains(err.Error(), "use of closed network connection") {
err = nil
}
chErrors <- err
}(srv)
}
for i := 0; i < len(s.servers); i++ {
err := <-chErrors
if err != nil {
return err
}
}
return nil
}
// HTTPServer contains an instance of http server and the listener.
// srv *http.Server, contains configuration to create an http server and a mux router with all API endpoints.
// l net.Listener, is a TCP or Socket listener that dispatches incoming requests to the router.
type HTTPServer struct {
srv *http.Server
l net.Listener
}
// Serve starts listening for inbound requests.
func (s *HTTPServer) Serve() error {
return s.srv.Serve(s.l)
}
// Close closes the HTTPServer from listening for the inbound requests.
func (s *HTTPServer) Close() error {
return s.l.Close()
}
func (s *Server) makeHTTPHandler(handler httputils.APIFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
// Define the context that we'll pass around to share info
// like the docker-request-id.
//
// The 'context' will be used for global data that should
// apply to all requests. Data that is specific to the
// immediate function being called should still be passed
// as 'args' on the function call.
ctx := context.WithValue(context.Background(), httputils.UAStringKey, r.Header.Get("User-Agent"))
handlerFunc := s.handlerWithGlobalMiddlewares(handler)
vars := mux.Vars(r)
if vars == nil {
vars = make(map[string]string)
}
if err := handlerFunc(ctx, w, r, vars); err != nil {
statusCode := httputils.GetHTTPErrorStatusCode(err)
errFormat := "%v"
if statusCode == http.StatusInternalServerError {
errFormat = "%+v"
}
logrus.Errorf("Handler for %s %s returned error: "+errFormat, r.Method, r.URL.Path, err)
httputils.MakeErrorHandler(err)(w, r)
}
}
}
// InitRouter initializes the list of routers for the server.
// This method also enables the Go profiler if enableProfiler is true.
func (s *Server) InitRouter(enableProfiler bool, routers ...router.Router) {
s.routers = append(s.routers, routers...)
m := s.createMux()
if enableProfiler {
profilerSetup(m)
}
s.routerSwapper = &routerSwapper{
router: m,
}
}
// createMux initializes the main router the server uses.
func (s *Server) createMux() *mux.Router {
m := mux.NewRouter()
logrus.Debug("Registering routers")
for _, apiRouter := range s.routers {
for _, r := range apiRouter.Routes() {
f := s.makeHTTPHandler(r.Handler())
logrus.Debugf("Registering %s, %s", r.Method(), r.Path())
m.Path(versionMatcher + r.Path()).Methods(r.Method()).Handler(f)
m.Path(r.Path()).Methods(r.Method()).Handler(f)
}
}
err := errors.NewRequestNotFoundError(fmt.Errorf("page not found"))
notFoundHandler := httputils.MakeErrorHandler(err)
m.HandleFunc(versionMatcher+"/{path:.*}", notFoundHandler)
m.NotFoundHandler = notFoundHandler
return m
}
// Wait blocks the server goroutine until it exits.
// It sends an error message if there is any error during
// the API execution.
func (s *Server) Wait(waitChan chan error) {
if err := s.serveAPI(); err != nil {
logrus.Errorf("ServeAPI error: %v", err)
waitChan <- err
return
}
waitChan <- nil
}
// DisableProfiler reloads the server mux without adding the profiler routes.
func (s *Server) DisableProfiler() {
s.routerSwapper.Swap(s.createMux())
}
// EnableProfiler reloads the server mux adding the profiler routes.
func (s *Server) EnableProfiler() {
m := s.createMux()
profilerSetup(m)
s.routerSwapper.Swap(m)
}
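Putting the pieces above together, the intended lifecycle is New, Accept, InitRouter, then Wait. A hedged sketch follows; the import paths and the `example` package are assumptions, and the routers would be the ones defined elsewhere in this diff (system, volume, swarm, and so on).

package example

import (
	"net"

	"github.com/docker/docker/api/server"
	"github.com/docker/docker/api/server/router"
)

// runAPI wires up a Server and blocks until the serve loop exits.
func runAPI(addr string, routers ...router.Router) error {
	srv := server.New(&server.Config{Version: "1.25"})

	l, err := net.Listen("tcp", addr)
	if err != nil {
		return err
	}
	srv.Accept(addr, l)

	// false: do not register the pprof routes.
	srv.InitRouter(false, routers...)

	waitChan := make(chan error)
	go srv.Wait(waitChan)
	return <-waitChan
}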

View File

@@ -1,46 +0,0 @@
package server
import (
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/docker/docker/api"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/server/middleware"
"golang.org/x/net/context"
)
func TestMiddlewares(t *testing.T) {
cfg := &Config{
Version: "0.1omega2",
}
srv := &Server{
cfg: cfg,
}
srv.UseMiddleware(middleware.NewVersionMiddleware("0.1omega2", api.DefaultVersion, api.MinVersion))
req, _ := http.NewRequest("GET", "/containers/json", nil)
resp := httptest.NewRecorder()
ctx := context.Background()
localHandler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if httputils.VersionFromContext(ctx) == "" {
t.Fatalf("Expected version, got empty string")
}
if sv := w.Header().Get("Server"); !strings.Contains(sv, "Docker/0.1omega2") {
t.Fatalf("Expected server version in the header `Docker/0.1omega2`, got %s", sv)
}
return nil
}
handlerFunc := srv.handlerWithGlobalMiddlewares(localHandler)
if err := handlerFunc(ctx, resp, req, map[string]string{}); err != nil {
t.Fatal(err)
}
}

View File

@@ -1,12 +0,0 @@
layout:
models:
- name: definition
source: asset:model
target: "{{ joinFilePath .Target .ModelPackage }}"
file_name: "{{ (snakize (pascalize .Name)) }}.go"
operations:
- name: handler
source: asset:serverOperation
target: "{{ joinFilePath .Target .APIPackage .Package }}"
file_name: "{{ (snakize (pascalize .Name)) }}.go"

File diff suppressed because it is too large

View File

@@ -1,26 +0,0 @@
package {{ .Package }}
// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/swagger-gen.sh
// ----------------------------------------------------------------------------
import (
"net/http"
context "golang.org/x/net/context"
{{ range .DefaultImports }}{{ printf "%q" . }}
{{ end }}
{{ range $key, $value := .Imports }}{{ $key }} {{ printf "%q" $value }}
{{ end }}
)
{{ range .ExtraSchemas }}
// {{ .Name }} {{ template "docstring" . }}
// swagger:model {{ .Name }}
{{ template "schema" . }}
{{ end }}

View File

@@ -1,22 +0,0 @@
package types
// AuthConfig contains authorization information for connecting to a Registry
type AuthConfig struct {
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
Auth string `json:"auth,omitempty"`
// Email is an optional value associated with the username.
// This field is deprecated and will be removed in a later
// version of docker.
Email string `json:"email,omitempty"`
ServerAddress string `json:"serveraddress,omitempty"`
// IdentityToken is used to authenticate the user and get
// an access token for the registry.
IdentityToken string `json:"identitytoken,omitempty"`
// RegistryToken is a bearer token to be sent to a registry
RegistryToken string `json:"registrytoken,omitempty"`
}
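Several option structs later in this diff describe their RegistryAuth fields as "base64 encoded credentials", which conventionally means a base64 encoding of this AuthConfig's JSON form. A sketch of producing that blob; the function name, its placement in this package, and the URL-safe base64 alphabet are assumptions:

package types

import (
	"encoding/base64"
	"encoding/json"
)

// EncodeAuthHeader JSON-encodes an AuthConfig and base64-encodes the
// result, producing the value expected by fields like ImagePullOptions.RegistryAuth.
func EncodeAuthHeader(a AuthConfig) (string, error) {
	buf, err := json.Marshal(a)
	if err != nil {
		return "", err
	}
	return base64.URLEncoding.EncodeToString(buf), nil
}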

View File

@@ -1,84 +0,0 @@
// Package backend includes types to send information to server backends.
package backend
import (
"io"
"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/streamformatter"
)
// ContainerAttachConfig holds the streams to use when connecting to a container to view logs.
type ContainerAttachConfig struct {
GetStreams func() (io.ReadCloser, io.Writer, io.Writer, error)
UseStdin bool
UseStdout bool
UseStderr bool
Logs bool
Stream bool
DetachKeys string
// Used to signify that streams are multiplexed and therefore need a StdWriter to encode stdout/stderr messages accordingly.
// TODO @cpuguy83: This shouldn't be needed. It was only added so that http and websocket endpoints can use the same function, and the websocket function was not using a stdwriter prior to this change...
// HOWEVER, the websocket endpoint is using a single stream and SHOULD be encoded with stdout/stderr as is done for HTTP since it is still just a single stream.
// Since such a change is an API change unrelated to the current changeset we'll keep it as is here and change separately.
MuxStreams bool
}
// ContainerLogsConfig holds configs for logging operations. Exists
// for users of the backend to pass it a logging configuration.
type ContainerLogsConfig struct {
types.ContainerLogsOptions
OutStream io.Writer
}
// ContainerStatsConfig holds information for configuring the runtime
// behavior of a backend.ContainerStats() call.
type ContainerStatsConfig struct {
Stream bool
OutStream io.Writer
Version string
}
// ExecInspect holds information about a running process started
// with docker exec.
type ExecInspect struct {
ID string
Running bool
ExitCode *int
ProcessConfig *ExecProcessConfig
OpenStdin bool
OpenStderr bool
OpenStdout bool
CanRemove bool
ContainerID string
DetachKeys []byte
Pid int
}
// ExecProcessConfig holds information about the exec process
// running on the host.
type ExecProcessConfig struct {
Tty bool `json:"tty"`
Entrypoint string `json:"entrypoint"`
Arguments []string `json:"arguments"`
Privileged *bool `json:"privileged,omitempty"`
User string `json:"user,omitempty"`
}
// ContainerCommitConfig is a wrapper around
// types.ContainerCommitConfig that also
// transports configuration changes for a container.
type ContainerCommitConfig struct {
types.ContainerCommitConfig
Changes []string
}
// ProgressWriter is an interface
// to transport progress streams.
type ProgressWriter struct {
Output io.Writer
StdoutFormatter *streamformatter.StdoutFormatter
StderrFormatter *streamformatter.StderrFormatter
ProgressReaderFunc func(io.ReadCloser) io.ReadCloser
}

View File

@@ -1,23 +0,0 @@
package blkiodev
import "fmt"
// WeightDevice is a structure that holds device:weight pair
type WeightDevice struct {
Path string
Weight uint16
}
func (w *WeightDevice) String() string {
return fmt.Sprintf("%s:%d", w.Path, w.Weight)
}
// ThrottleDevice is a structure that holds device:rate_per_second pair
type ThrottleDevice struct {
Path string
Rate uint64
}
func (t *ThrottleDevice) String() string {
return fmt.Sprintf("%s:%d", t.Path, t.Rate)
}
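// Illustrative (non-original) examples of the String methods above:
//
//	(&WeightDevice{Path: "/dev/sda", Weight: 500}).String()     // "/dev/sda:500"
//	(&ThrottleDevice{Path: "/dev/sda", Rate: 1048576}).String() // "/dev/sda:1048576"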

View File

@@ -1,378 +0,0 @@
package types
import (
"bufio"
"io"
"net"
"os"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/go-units"
)
// CheckpointCreateOptions holds parameters to create a checkpoint from a container
type CheckpointCreateOptions struct {
CheckpointID string
CheckpointDir string
Exit bool
}
// CheckpointListOptions holds parameters to list checkpoints for a container
type CheckpointListOptions struct {
CheckpointDir string
}
// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container
type CheckpointDeleteOptions struct {
CheckpointID string
CheckpointDir string
}
// ContainerAttachOptions holds parameters to attach to a container.
type ContainerAttachOptions struct {
Stream bool
Stdin bool
Stdout bool
Stderr bool
DetachKeys string
Logs bool
}
// ContainerCommitOptions holds parameters to commit changes into a container.
type ContainerCommitOptions struct {
Reference string
Comment string
Author string
Changes []string
Pause bool
Config *container.Config
}
// ContainerExecInspect holds information returned by exec inspect.
type ContainerExecInspect struct {
ExecID string
ContainerID string
Running bool
ExitCode int
Pid int
}
// ContainerListOptions holds parameters to list containers with.
type ContainerListOptions struct {
Quiet bool
Size bool
All bool
Latest bool
Since string
Before string
Limit int
Filters filters.Args
}
// ContainerLogsOptions holds parameters to filter logs with.
type ContainerLogsOptions struct {
ShowStdout bool
ShowStderr bool
Since string
Timestamps bool
Follow bool
Tail string
Details bool
}
// ContainerRemoveOptions holds parameters to remove containers.
type ContainerRemoveOptions struct {
RemoveVolumes bool
RemoveLinks bool
Force bool
}
// ContainerStartOptions holds parameters to start containers.
type ContainerStartOptions struct {
CheckpointID string
CheckpointDir string
}
// CopyToContainerOptions holds information
// about files to copy into a container
type CopyToContainerOptions struct {
AllowOverwriteDirWithFile bool
}
// EventsOptions holds parameters to filter events with.
type EventsOptions struct {
Since string
Until string
Filters filters.Args
}
// NetworkListOptions holds parameters to filter the list of networks with.
type NetworkListOptions struct {
Filters filters.Args
}
// HijackedResponse holds connection information for a hijacked request.
type HijackedResponse struct {
Conn net.Conn
Reader *bufio.Reader
}
// Close closes the hijacked connection and reader.
func (h *HijackedResponse) Close() {
h.Conn.Close()
}
// CloseWriter is an interface implemented by structs
// that can close input streams to prevent further writing.
type CloseWriter interface {
CloseWrite() error
}
// CloseWrite closes a readWriter for writing.
func (h *HijackedResponse) CloseWrite() error {
if conn, ok := h.Conn.(CloseWriter); ok {
return conn.CloseWrite()
}
return nil
}
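// Illustrative (non-original) sketch of using a HijackedResponse from an
// attach: write stdin, half-close the write side so the daemon sees EOF,
// then keep reading output from the buffered reader.
//
//	resp.Conn.Write(stdin)          // send input
//	resp.CloseWrite()               // signal EOF on the write side
//	io.Copy(os.Stdout, resp.Reader) // drain remaining output
//	resp.Close()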
// ImageBuildOptions holds the information
// necessary to build images.
type ImageBuildOptions struct {
Tags []string
SuppressOutput bool
RemoteContext string
NoCache bool
Remove bool
ForceRemove bool
PullParent bool
Isolation container.Isolation
CPUSetCPUs string
CPUSetMems string
CPUShares int64
CPUQuota int64
CPUPeriod int64
Memory int64
MemorySwap int64
CgroupParent string
NetworkMode string
ShmSize int64
Dockerfile string
Ulimits []*units.Ulimit
// See the parsing of buildArgs in api/server/router/build/build_routes.go
// for an explanation of why BuildArgs needs to use *string instead of
// just a string
BuildArgs map[string]*string
AuthConfigs map[string]AuthConfig
Context io.Reader
Labels map[string]string
// squash the resulting image's layers to the parent
// preserves the original image and creates a new one from the parent with all
// the changes applied to a single layer
Squash bool
// CacheFrom specifies images that are used for matching cache. Images
// specified here do not need to have a valid parent chain to match cache.
CacheFrom []string
SecurityOpt []string
}
// ImageBuildResponse holds information
// returned by a server after building
// an image.
type ImageBuildResponse struct {
Body io.ReadCloser
OSType string
}
// ImageCreateOptions holds information to create images.
type ImageCreateOptions struct {
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
}
// ImageImportSource holds source information for ImageImport
type ImageImportSource struct {
Source io.Reader // Source is the data to send to the server to create this image from (mutually exclusive with SourceName)
SourceName string // SourceName is the name of the image to pull (mutually exclusive with Source)
}
// ImageImportOptions holds information to import images from the client host.
type ImageImportOptions struct {
Tag string // Tag is the name to tag this image with. This attribute is deprecated.
Message string // Message is the message to tag the image with
Changes []string // Changes are the raw changes to apply to this image
}
// ImageListOptions holds parameters to filter the list of images with.
type ImageListOptions struct {
All bool
Filters filters.Args
}
// ImageLoadResponse returns information to the client about a load process.
type ImageLoadResponse struct {
// Body must be closed to avoid a resource leak
Body io.ReadCloser
JSON bool
}
// ImagePullOptions holds information to pull images.
type ImagePullOptions struct {
All bool
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
PrivilegeFunc RequestPrivilegeFunc
}
// RequestPrivilegeFunc is a function interface that
// clients can supply to retry operations after
// getting an authorization error.
// This function returns the registry authentication
// header value in base 64 format, or an error
// if the privilege request fails.
type RequestPrivilegeFunc func() (string, error)
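// A hypothetical (non-original) PrivilegeFunc: after an authorization
// error, the client retries the pull with whatever this returns as the
// base64-encoded registry auth header. promptForAuth is an assumed helper.
//
//	opts := ImagePullOptions{
//		PrivilegeFunc: func() (string, error) {
//			return promptForAuth()
//		},
//	}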
// ImagePushOptions holds information to push images.
type ImagePushOptions ImagePullOptions
// ImageRemoveOptions holds parameters to remove images.
type ImageRemoveOptions struct {
Force bool
PruneChildren bool
}
// ImageSearchOptions holds parameters to search images with.
type ImageSearchOptions struct {
RegistryAuth string
PrivilegeFunc RequestPrivilegeFunc
Filters filters.Args
Limit int
}
// ResizeOptions holds parameters to resize a tty.
// It can be used to resize container ttys and
// exec process ttys too.
type ResizeOptions struct {
Height uint
Width uint
}
// VersionResponse holds version information for the client and the server
type VersionResponse struct {
Client *Version
Server *Version
}
// ServerOK returns true when the client could connect to the docker server
// and parse the information received. It returns false otherwise.
func (v VersionResponse) ServerOK() bool {
return v.Server != nil
}
// NodeListOptions holds parameters to list nodes with.
type NodeListOptions struct {
Filters filters.Args
}
// NodeRemoveOptions holds parameters to remove nodes with.
type NodeRemoveOptions struct {
Force bool
}
// ServiceCreateOptions contains the options to use when creating a service.
type ServiceCreateOptions struct {
// EncodedRegistryAuth is the encoded registry authorization credentials to
// use when updating the service.
//
// This field follows the format of the X-Registry-Auth header.
EncodedRegistryAuth string
}
// ServiceCreateResponse contains the information returned to a client
// on the creation of a new service.
type ServiceCreateResponse struct {
// ID is the ID of the created service.
ID string
// Warnings is a set of non-fatal warning messages to pass on to the user.
Warnings []string `json:",omitempty"`
}
// Values for RegistryAuthFrom in ServiceUpdateOptions
const (
RegistryAuthFromSpec = "spec"
RegistryAuthFromPreviousSpec = "previous-spec"
)
// ServiceUpdateOptions contains the options to be used for updating services.
type ServiceUpdateOptions struct {
// EncodedRegistryAuth is the encoded registry authorization credentials to
// use when updating the service.
//
// This field follows the format of the X-Registry-Auth header.
EncodedRegistryAuth string
// TODO(stevvooe): Consider moving the version parameter of ServiceUpdate
// into this field. While it does open API users up to racy writes, most
// users may not need that level of consistency in practice.
// RegistryAuthFrom specifies where to find the registry authorization
// credentials if they are not given in EncodedRegistryAuth. Valid
// values are "spec" and "previous-spec".
RegistryAuthFrom string
}
// ServiceListOptions holds parameters to list services with.
type ServiceListOptions struct {
Filters filters.Args
}
// TaskListOptions holds parameters to list tasks with.
type TaskListOptions struct {
Filters filters.Args
}
// PluginRemoveOptions holds parameters to remove plugins.
type PluginRemoveOptions struct {
Force bool
}
// PluginEnableOptions holds parameters to enable plugins.
type PluginEnableOptions struct {
Timeout int
}
// PluginDisableOptions holds parameters to disable plugins.
type PluginDisableOptions struct {
Force bool
}
// PluginInstallOptions holds parameters to install a plugin.
type PluginInstallOptions struct {
Disabled bool
AcceptAllPermissions bool
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
RemoteRef string // RemoteRef is the plugin name on the registry
PrivilegeFunc RequestPrivilegeFunc
AcceptPermissionsFunc func(PluginPrivileges) (bool, error)
Args []string
}
// SecretRequestOption is a type for requesting secrets
type SecretRequestOption struct {
Source string
Target string
UID string
GID string
Mode os.FileMode
}
// SwarmUnlockKeyResponse contains the response for Engine API:
// GET /swarm/unlockkey
type SwarmUnlockKeyResponse struct {
// UnlockKey is the unlock key in ASCII-armored format.
UnlockKey string
}
// PluginCreateOptions hold all options to plugin create.
type PluginCreateOptions struct {
RepoName string
}

View File

@@ -1,69 +0,0 @@
package types
import (
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/network"
)
// configs holds structs used for internal communication between the
// frontend (such as an http server) and the backend (such as the
// docker daemon).
// ContainerCreateConfig is the parameter set to ContainerCreate()
type ContainerCreateConfig struct {
Name string
Config *container.Config
HostConfig *container.HostConfig
NetworkingConfig *network.NetworkingConfig
AdjustCPUShares bool
}
// ContainerRmConfig holds arguments for the container remove
// operation. This struct is used to tell the backend what operations
// to perform.
type ContainerRmConfig struct {
ForceRemove, RemoveVolume, RemoveLink bool
}
// ContainerCommitConfig contains build configs for commit operation,
// and is used when making a commit with the current state of the container.
type ContainerCommitConfig struct {
Pause bool
Repo string
Tag string
Author string
Comment string
// merge container config into commit config before commit
MergeConfigs bool
Config *container.Config
}
// ExecConfig is a small subset of the Config struct that holds the configuration
// for the exec feature of docker.
type ExecConfig struct {
User string // User that will run the command
Privileged bool // Is the container in privileged mode
Tty bool // Attach standard streams to a tty.
AttachStdin bool // Attach the standard input, makes possible user interaction
AttachStderr bool // Attach the standard error
AttachStdout bool // Attach the standard output
Detach bool // Execute in detach mode
DetachKeys string // Escape keys for detach
Env []string // Environment variables
Cmd []string // Execution commands and args
}
// PluginRmConfig holds arguments for plugin remove.
type PluginRmConfig struct {
ForceRemove bool
}
// PluginEnableConfig holds arguments for plugin enable
type PluginEnableConfig struct {
Timeout int
}
// PluginDisableConfig holds arguments for plugin disable.
type PluginDisableConfig struct {
ForceDisable bool
}

View File

@@ -1,62 +0,0 @@
package container
import (
"time"
"github.com/docker/docker/api/types/strslice"
"github.com/docker/go-connections/nat"
)
// HealthConfig holds configuration settings for the HEALTHCHECK feature.
type HealthConfig struct {
// Test is the test to perform to check that the container is healthy.
// An empty slice means to inherit the default.
// The options are:
// {} : inherit healthcheck
// {"NONE"} : disable healthcheck
// {"CMD", args...} : exec arguments directly
// {"CMD-SHELL", command} : run command with system's default shell
Test []string `json:",omitempty"`
// Zero means to inherit. Durations are expressed as integer nanoseconds.
Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
// Retries is the number of consecutive failures needed to consider a container as unhealthy.
// Zero means inherit.
Retries int `json:",omitempty"`
}
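// An illustrative (non-original) HealthConfig using the CMD-SHELL form
// documented above; zero-valued fields would be inherited from the image.
//
//	hc := &HealthConfig{
//		Test:     []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"},
//		Interval: 30 * time.Second,
//		Timeout:  5 * time.Second,
//		Retries:  3,
//	}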
// Config contains the configuration data about a container.
// It should hold only portable information about the container.
// Here, "portable" means "independent from the host we are running on".
// Non-portable information *should* appear in HostConfig.
// All fields added to this struct must be marked `omitempty` to keep getting
// predictable hashes from the old `v1Compatibility` configuration.
type Config struct {
Hostname string // Hostname
Domainname string // Domainname
User string // User that will run the command(s) inside the container, also support user:group
AttachStdin bool // Attach the standard input, makes possible user interaction
AttachStdout bool // Attach the standard output
AttachStderr bool // Attach the standard error
ExposedPorts nat.PortSet `json:",omitempty"` // List of exposed ports
Tty bool // Attach standard streams to a tty, including stdin if it is not closed.
OpenStdin bool // Open stdin
StdinOnce bool // If true, close stdin after the first attached client disconnects.
Env []string // List of environment variable to set in the container
Cmd strslice.StrSlice // Command to run when starting the container
Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific)
Image string // Name of the image as it was passed by the operator (e.g. could be symbolic)
Volumes map[string]struct{} // List of volumes (mounts) used for the container
WorkingDir string // Current directory (PWD) in which the command will be launched
Entrypoint strslice.StrSlice // Entrypoint to run when starting the container
NetworkDisabled bool `json:",omitempty"` // Is network disabled
MacAddress string `json:",omitempty"` // Mac Address of the container
OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile
Labels map[string]string // List of labels set to this container
StopSignal string `json:",omitempty"` // Signal to stop a container
StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container
Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
}

View File

@@ -1,21 +0,0 @@
package container
// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/swagger-gen.sh
// ----------------------------------------------------------------------------
// ContainerCreateCreatedBody container create created body
// swagger:model ContainerCreateCreatedBody
type ContainerCreateCreatedBody struct {
// The ID of the created container
// Required: true
ID string `json:"Id"`
// Warnings encountered when creating the container
// Required: true
Warnings []string `json:"Warnings"`
}

View File

@@ -1,17 +0,0 @@
package container
// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/swagger-gen.sh
// ----------------------------------------------------------------------------
// ContainerUpdateOKBody container update o k body
// swagger:model ContainerUpdateOKBody
type ContainerUpdateOKBody struct {
// warnings
// Required: true
Warnings []string `json:"Warnings"`
}

View File

@@ -1,17 +0,0 @@
package container
// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/swagger-gen.sh
// ----------------------------------------------------------------------------
// ContainerWaitOKBody container wait o k body
// swagger:model ContainerWaitOKBody
type ContainerWaitOKBody struct {
// Exit code of the container
// Required: true
StatusCode int64 `json:"StatusCode"`
}

View File

@@ -1,333 +0,0 @@
package container
import (
"strings"
"github.com/docker/docker/api/types/blkiodev"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/strslice"
"github.com/docker/go-connections/nat"
"github.com/docker/go-units"
)
// NetworkMode represents the container network stack.
type NetworkMode string
// Isolation represents the isolation technology of a container. The supported
// values are platform specific
type Isolation string
// IsDefault indicates whether a container uses the default isolation technology. On Linux this
// is the native driver. On Windows, this is a Windows Server Container.
func (i Isolation) IsDefault() bool {
return strings.ToLower(string(i)) == "default" || string(i) == ""
}
// IpcMode represents the container ipc stack.
type IpcMode string
// IsPrivate indicates whether the container uses its private ipc stack.
func (n IpcMode) IsPrivate() bool {
return !(n.IsHost() || n.IsContainer())
}
// IsHost indicates whether the container uses the host's ipc stack.
func (n IpcMode) IsHost() bool {
return n == "host"
}
// IsContainer indicates whether the container uses a container's ipc stack.
func (n IpcMode) IsContainer() bool {
parts := strings.SplitN(string(n), ":", 2)
return len(parts) > 1 && parts[0] == "container"
}
// Valid indicates whether the ipc stack is valid.
func (n IpcMode) Valid() bool {
parts := strings.Split(string(n), ":")
switch mode := parts[0]; mode {
case "", "host":
case "container":
if len(parts) != 2 || parts[1] == "" {
return false
}
default:
return false
}
return true
}
// Container returns the name of the container whose ipc stack is going to be used.
func (n IpcMode) Container() string {
parts := strings.SplitN(string(n), ":", 2)
if len(parts) > 1 {
return parts[1]
}
return ""
}
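// Illustrative (non-original) examples of the "container:<name>" format
// parsed above:
//
//	IpcMode("container:web").IsContainer() // true
//	IpcMode("container:web").Container()   // "web"
//	IpcMode("host").IsHost()               // true
//	IpcMode("").IsPrivate()                // true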
// UsernsMode represents userns mode in the container.
type UsernsMode string
// IsHost indicates whether the container uses the host's userns.
func (n UsernsMode) IsHost() bool {
return n == "host"
}
// IsPrivate indicates whether the container uses a private userns.
func (n UsernsMode) IsPrivate() bool {
return !(n.IsHost())
}
// Valid indicates whether the userns is valid.
func (n UsernsMode) Valid() bool {
parts := strings.Split(string(n), ":")
switch mode := parts[0]; mode {
case "", "host":
default:
return false
}
return true
}
// CgroupSpec represents the cgroup to use for the container.
type CgroupSpec string
// IsContainer indicates whether the container is using another container's cgroup
func (c CgroupSpec) IsContainer() bool {
parts := strings.SplitN(string(c), ":", 2)
return len(parts) > 1 && parts[0] == "container"
}
// Valid indicates whether the cgroup spec is valid.
func (c CgroupSpec) Valid() bool {
return c.IsContainer() || c == ""
}
// Container returns the name of the container whose cgroup will be used.
func (c CgroupSpec) Container() string {
parts := strings.SplitN(string(c), ":", 2)
if len(parts) > 1 {
return parts[1]
}
return ""
}
// UTSMode represents the UTS namespace of the container.
type UTSMode string
// IsPrivate indicates whether the container uses its private UTS namespace.
func (n UTSMode) IsPrivate() bool {
return !(n.IsHost())
}
// IsHost indicates whether the container uses the host's UTS namespace.
func (n UTSMode) IsHost() bool {
return n == "host"
}
// Valid indicates whether the UTS namespace is valid.
func (n UTSMode) Valid() bool {
parts := strings.Split(string(n), ":")
switch mode := parts[0]; mode {
case "", "host":
default:
return false
}
return true
}
// PidMode represents the pid namespace of the container.
type PidMode string
// IsPrivate indicates whether the container uses its own new pid namespace.
func (n PidMode) IsPrivate() bool {
return !(n.IsHost() || n.IsContainer())
}
// IsHost indicates whether the container uses the host's pid namespace.
func (n PidMode) IsHost() bool {
return n == "host"
}
// IsContainer indicates whether the container uses a container's pid namespace.
func (n PidMode) IsContainer() bool {
parts := strings.SplitN(string(n), ":", 2)
return len(parts) > 1 && parts[0] == "container"
}
// Valid indicates whether the pid namespace is valid.
func (n PidMode) Valid() bool {
parts := strings.Split(string(n), ":")
switch mode := parts[0]; mode {
case "", "host":
case "container":
if len(parts) != 2 || parts[1] == "" {
return false
}
default:
return false
}
return true
}
// Container returns the name of the container whose pid namespace is going to be used.
func (n PidMode) Container() string {
parts := strings.SplitN(string(n), ":", 2)
if len(parts) > 1 {
return parts[1]
}
return ""
}
// DeviceMapping represents the device mapping between the host and the container.
type DeviceMapping struct {
PathOnHost string
PathInContainer string
CgroupPermissions string
}
// RestartPolicy represents the restart policies of the container.
type RestartPolicy struct {
Name string
MaximumRetryCount int
}
// IsNone indicates whether the container has the "no" restart policy.
// This means the container will not automatically restart when exiting.
func (rp *RestartPolicy) IsNone() bool {
return rp.Name == "no" || rp.Name == ""
}
// IsAlways indicates whether the container has the "always" restart policy.
// This means the container will automatically restart regardless of the exit status.
func (rp *RestartPolicy) IsAlways() bool {
return rp.Name == "always"
}
// IsOnFailure indicates whether the container has the "on-failure" restart policy.
// This means the container will automatically restart when exiting with a non-zero exit status.
func (rp *RestartPolicy) IsOnFailure() bool {
return rp.Name == "on-failure"
}
// IsUnlessStopped indicates whether the container has the
// "unless-stopped" restart policy. This means the container will
// automatically restart unless the user has put it into a stopped state.
func (rp *RestartPolicy) IsUnlessStopped() bool {
return rp.Name == "unless-stopped"
}
// IsSame compares two RestartPolicy to see if they are the same
func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool {
return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount
}
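// Illustrative (non-original) examples of the policies above:
//
//	(&RestartPolicy{}).IsNone()                                               // true: empty name means "no"
//	(&RestartPolicy{Name: "on-failure", MaximumRetryCount: 3}).IsOnFailure() // true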
// LogConfig represents the logging configuration of the container.
type LogConfig struct {
Type string
Config map[string]string
}
// Resources contains container's resources (cgroups config, ulimits...)
type Resources struct {
// Applicable to all platforms
CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers)
Memory int64 // Memory limit (in bytes)
NanoCPUs int64 `json:"NanoCpus"` // CPU quota in units of 10<sup>-9</sup> CPUs.
// Applicable to UNIX platforms
CgroupParent string // Parent cgroup.
BlkioWeight uint16 // Block IO weight (relative weight vs. other containers)
BlkioWeightDevice []*blkiodev.WeightDevice
BlkioDeviceReadBps []*blkiodev.ThrottleDevice
BlkioDeviceWriteBps []*blkiodev.ThrottleDevice
BlkioDeviceReadIOps []*blkiodev.ThrottleDevice
BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice
CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period
CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota
CPURealtimePeriod int64 `json:"CpuRealtimePeriod"` // CPU real-time period
CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"` // CPU real-time runtime
CpusetCpus string // CpusetCpus 0-2, 0,1
CpusetMems string // CpusetMems 0-2, 0,1
Devices []DeviceMapping // List of devices to map inside the container
DiskQuota int64 // Disk limit (in bytes)
KernelMemory int64 // Kernel memory limit (in bytes)
MemoryReservation int64 // Memory soft limit (in bytes)
MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap
MemorySwappiness *int64 // Tuning container memory swappiness behaviour
OomKillDisable *bool // Whether to disable OOM Killer or not
PidsLimit int64 // Setting pids limit for a container
Ulimits []*units.Ulimit // List of ulimits to be set in the container
// Applicable to Windows
CPUCount int64 `json:"CpuCount"` // CPU count
CPUPercent int64 `json:"CpuPercent"` // CPU percent
IOMaximumIOps uint64 // Maximum IOps for the container system drive
IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive
}
// UpdateConfig holds the mutable attributes of a Container.
// Those attributes can be updated at runtime.
type UpdateConfig struct {
// Contains container's resources (cgroups, ulimits)
Resources
RestartPolicy RestartPolicy
}
// HostConfig the non-portable Config structure of a container.
// Here, "non-portable" means "dependent of the host we are running on".
// Portable information *should* appear in Config.
type HostConfig struct {
// Applicable to all platforms
Binds []string // List of volume bindings for this container
ContainerIDFile string // File (path) where the containerId is written
LogConfig LogConfig // Configuration of the logs for this container
NetworkMode NetworkMode // Network mode to use for the container
PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host
RestartPolicy RestartPolicy // Restart policy to be used for the container
AutoRemove bool // Automatically remove container when it exits
VolumeDriver string // Name of the volume driver used to mount volumes
VolumesFrom []string // List of volumes to take from other container
// Applicable to UNIX platforms
CapAdd strslice.StrSlice // List of kernel capabilities to add to the container
CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container
DNS []string `json:"Dns"` // List of DNS server to lookup
DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for
DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for
ExtraHosts []string // List of extra hosts
GroupAdd []string // List of additional groups that the container process will run as
IpcMode IpcMode // IPC namespace to use for the container
Cgroup CgroupSpec // Cgroup to use for the container
Links []string // List of links (in the name:alias form)
OomScoreAdj int // Container preference for OOM-killing
PidMode PidMode // PID namespace to use for the container
Privileged bool // Is the container in privileged mode
PublishAllPorts bool // Should docker publish all exposed port for the container
ReadonlyRootfs bool // Is the container root filesystem read-only
SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux.
StorageOpt map[string]string `json:",omitempty"` // Storage driver options per container.
Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container
UTSMode UTSMode // UTS namespace to use for the container
UsernsMode UsernsMode // The user namespace to use for the container
ShmSize int64 // Total shm memory usage
Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container
Runtime string `json:",omitempty"` // Runtime to use with this container
// Applicable to Windows
ConsoleSize [2]uint // Initial console size (height,width)
Isolation Isolation // Isolation technology of the container (eg default, hyperv)
// Contains container's resources (cgroups, ulimits)
Resources
// Mounts specs used by the container
Mounts []mount.Mount `json:",omitempty"`
// Run a custom init inside the container, if null, use the daemon's configured settings
Init *bool `json:",omitempty"`
// Custom init path
InitPath string `json:",omitempty"`
}

View File

@@ -1,81 +0,0 @@
// +build !windows
package container
import "strings"
// IsValid indicates if an isolation technology is valid
func (i Isolation) IsValid() bool {
return i.IsDefault()
}
// IsPrivate indicates whether container uses its private network stack.
func (n NetworkMode) IsPrivate() bool {
return !(n.IsHost() || n.IsContainer())
}
// IsDefault indicates whether container uses the default network stack.
func (n NetworkMode) IsDefault() bool {
return n == "default"
}
// NetworkName returns the name of the network stack.
func (n NetworkMode) NetworkName() string {
if n.IsBridge() {
return "bridge"
} else if n.IsHost() {
return "host"
} else if n.IsContainer() {
return "container"
} else if n.IsNone() {
return "none"
} else if n.IsDefault() {
return "default"
} else if n.IsUserDefined() {
return n.UserDefined()
}
return ""
}
// IsBridge indicates whether container uses the bridge network stack
func (n NetworkMode) IsBridge() bool {
return n == "bridge"
}
// IsHost indicates whether container uses the host network stack.
func (n NetworkMode) IsHost() bool {
return n == "host"
}
// IsContainer indicates whether container uses a container network stack.
func (n NetworkMode) IsContainer() bool {
parts := strings.SplitN(string(n), ":", 2)
return len(parts) > 1 && parts[0] == "container"
}
// IsNone indicates whether container isn't using a network stack.
func (n NetworkMode) IsNone() bool {
return n == "none"
}
// ConnectedContainer is the id of the container whose network this container is connected to.
func (n NetworkMode) ConnectedContainer() string {
parts := strings.SplitN(string(n), ":", 2)
if len(parts) > 1 {
return parts[1]
}
return ""
}
// IsUserDefined indicates whether the container uses a user-created network
func (n NetworkMode) IsUserDefined() bool {
return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer()
}
// UserDefined returns the name of the user-created network.
func (n NetworkMode) UserDefined() string {
if n.IsUserDefined() {
return string(n)
}
return ""
}
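// Illustrative (non-original) examples of the mode strings above on Linux:
//
//	NetworkMode("bridge").NetworkName()       // "bridge"
//	NetworkMode("container:db").IsContainer() // true
//	NetworkMode("my-net").NetworkName()       // "my-net", via UserDefined()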

View File

@@ -1,87 +0,0 @@
package container
import (
"strings"
)
// IsDefault indicates whether container uses the default network stack.
func (n NetworkMode) IsDefault() bool {
return n == "default"
}
// IsNone indicates whether container isn't using a network stack.
func (n NetworkMode) IsNone() bool {
return n == "none"
}
// IsContainer indicates whether container uses a container network stack.
// Returns false as Windows doesn't support this mode
func (n NetworkMode) IsContainer() bool {
return false
}
// IsBridge indicates whether container uses the bridge network stack
// on Windows it is given the name NAT
func (n NetworkMode) IsBridge() bool {
return n == "nat"
}
// IsHost indicates whether container uses the host network stack.
// Returns false as this is not supported by Windows
func (n NetworkMode) IsHost() bool {
return false
}
// IsPrivate indicates whether container uses its private network stack.
func (n NetworkMode) IsPrivate() bool {
return !(n.IsHost() || n.IsContainer())
}
// ConnectedContainer is the id of the container whose network this container is connected to.
// Returns an empty string on Windows
func (n NetworkMode) ConnectedContainer() string {
return ""
}
// IsUserDefined indicates whether the container uses a user-created network
func (n NetworkMode) IsUserDefined() bool {
return !n.IsDefault() && !n.IsNone() && !n.IsBridge()
}
// IsHyperV indicates the use of a Hyper-V partition for isolation
func (i Isolation) IsHyperV() bool {
return strings.ToLower(string(i)) == "hyperv"
}
// IsProcess indicates the use of process isolation
func (i Isolation) IsProcess() bool {
return strings.ToLower(string(i)) == "process"
}
// IsValid indicates if an isolation technology is valid
func (i Isolation) IsValid() bool {
return i.IsDefault() || i.IsHyperV() || i.IsProcess()
}
// NetworkName returns the name of the network stack.
func (n NetworkMode) NetworkName() string {
if n.IsDefault() {
return "default"
} else if n.IsBridge() {
return "nat"
} else if n.IsNone() {
return "none"
} else if n.IsUserDefined() {
return n.UserDefined()
}
return ""
}
// UserDefined returns the name of the user-created network.
func (n NetworkMode) UserDefined() string {
if n.IsUserDefined() {
return string(n)
}
return ""
}

Some files were not shown because too many files have changed in this diff